From b071195deba14b37ce896c26f20349b46e5f9fd2 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 28 Oct 2008 16:05:40 -0700 Subject: net: replace all current users of NIP6_SEQFMT with %#p6 The define in kernel.h can be done away with at a later time. Signed-off-by: Harvey Harrison Signed-off-by: David S. Miller --- fs/cifs/cifs_spnego.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index fcee9298b62..ae4be823e49 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -124,8 +124,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) sprintf(dp, "ip4=" NIPQUAD_FMT, NIPQUAD(server->addr.sockAddr.sin_addr)); else if (server->addr.sockAddr.sin_family == AF_INET6) - sprintf(dp, "ip6=" NIP6_SEQFMT, - NIP6(server->addr.sockAddr6.sin6_addr)); + sprintf(dp, "ip6=%#p6", &server->addr.sockAddr6.sin6_addr); else goto out; -- cgit v1.2.3-70-g09d2 From 1afa67f5e70b4733d5b237df61e6d639af6283bb Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Tue, 28 Oct 2008 16:06:44 -0700 Subject: misc: replace NIP6_FMT with %p6 format specifier The iscsi_ibft.c changes are almost certainly a bugfix as the pointer 'ip' is a u8 *, so they never print the last 8 bytes of the IPv6 address, and the eight bytes they do print have a zero byte with them in each 16-bit word. Other than that, this should cause no difference in functionality. Signed-off-by: Harvey Harrison Signed-off-by: David S. Miller --- drivers/firmware/iscsi_ibft.c | 4 +--- drivers/scsi/iscsi_tcp.c | 2 +- fs/lockd/host.c | 2 +- fs/nfs/super.c | 3 +-- security/selinux/avc.c | 2 +- 5 files changed, 5 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 4353414a0b7..0a6472097a8 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -290,9 +290,7 @@ static ssize_t sprintf_ipaddr(char *buf, u8 *ip) /* * IPv6 */ - str += sprintf(str, NIP6_FMT, ntohs(ip[0]), ntohs(ip[1]), - ntohs(ip[2]), ntohs(ip[3]), ntohs(ip[4]), - ntohs(ip[5]), ntohs(ip[6]), ntohs(ip[7])); + str += sprintf(str, "%p6", ip); } str += sprintf(str, "\n"); return str - buf; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index ed6c54cae7b..ef929aef7c1 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -1608,7 +1608,7 @@ static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, case AF_INET6: sin6 = (struct sockaddr_in6 *)addr; spin_lock_bh(&conn->session->lock); - sprintf(buf, NIP6_FMT, NIP6(sin6->sin6_addr)); + sprintf(buf, "%p6", &sin6->sin6_addr); *port = be16_to_cpu(sin6->sin6_port); spin_unlock_bh(&conn->session->lock); break; diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 9fd8889097b..344e6b475e0 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -122,7 +122,7 @@ static void nlm_display_address(const struct sockaddr *sap, snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin6->sin6_addr.s6_addr32[3])); else - snprintf(buf, len, NIP6_FMT, NIP6(sin6->sin6_addr)); + snprintf(buf, len, "%p6", &sin6->sin6_addr); break; default: snprintf(buf, len, "unsupported address family"); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f48db679a1c..5fe77219df7 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -468,8 +468,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, } case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; - seq_printf(m, ",mountaddr=" NIP6_FMT, - NIP6(sin6->sin6_addr)); + 
seq_printf(m, ",mountaddr=%p6", &sin6->sin6_addr); break; } default: diff --git a/security/selinux/avc.c b/security/selinux/avc.c index cb30c7e350b..c91008f438a 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -495,7 +495,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab, char *name1, char *name2) { if (!ipv6_addr_any(addr)) - audit_log_format(ab, " %s=" NIP6_FMT, name1, NIP6(*addr)); + audit_log_format(ab, " %s=%p6", name1, addr); if (port) audit_log_format(ab, " %s=%d", name2, ntohs(port)); } -- cgit v1.2.3-70-g09d2 From 4b7a4274ca63dadd9c4f17fc953f3a5d19855c4c Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 29 Oct 2008 12:50:24 -0700 Subject: net: replace %#p6 format specifier with %pi6 gcc warns when using the # modifier with the %p format specifier, so we can't use this to omit the colons when needed, introduces %pi6 instead. Signed-off-by: Harvey Harrison Signed-off-by: David S. Miller --- fs/cifs/cifs_spnego.c | 2 +- net/ipv6/addrconf.c | 2 +- net/ipv6/anycast.c | 2 +- net/ipv6/ip6_flowlabel.c | 2 +- net/ipv6/mcast.c | 4 ++-- net/ipv6/route.c | 6 +++--- net/sunrpc/xprtsock.c | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index ae4be823e49..0e186cd20ad 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -124,7 +124,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) sprintf(dp, "ip4=" NIPQUAD_FMT, NIPQUAD(server->addr.sockAddr.sin_addr)); else if (server->addr.sockAddr.sin_family == AF_INET6) - sprintf(dp, "ip6=%#p6", &server->addr.sockAddr6.sin6_addr); + sprintf(dp, "ip6=%pi6", &server->addr.sockAddr6.sin6_addr); else goto out; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 113c4d93153..ff7ae05f72e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2986,7 +2986,7 @@ static void if6_seq_stop(struct seq_file *seq, void *v) static int if6_seq_show(struct seq_file *seq, void *v) { struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v; - seq_printf(seq, "%#p6 %02x %02x %02x %02x %8s\n", + seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n", &ifp->addr, ifp->idev->dev->ifindex, ifp->prefix_len, diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index ae494e30e55..1ae58bec1de 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c @@ -512,7 +512,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v) struct ifacaddr6 *im = (struct ifacaddr6 *)v; struct ac6_iter_state *state = ac6_seq_private(seq); - seq_printf(seq, "%-4d %-15s %#p6 %5d\n", + seq_printf(seq, "%-4d %-15s %pi6 %5d\n", state->dev->ifindex, state->dev->name, &im->aca_addr, im->aca_users); return 0; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index d5710235350..7927a8498d1 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -696,7 +696,7 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v) else { struct ip6_flowlabel *fl = v; seq_printf(seq, - "%05X %-1d %-6d %-6d %-6ld %-8ld %#p6 %-4d\n", + "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", (unsigned)ntohl(fl->label), fl->share, (unsigned)fl->owner, diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 4416cdc3e62..a76199ecad2 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -2430,7 +2430,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v) struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq); seq_printf(seq, - "%-4d %-15s %#p6 %5d %08X %ld\n", + "%-4d %-15s %pi6 %5d %08X %ld\n", state->dev->ifindex, state->dev->name, 
&im->mca_addr, im->mca_users, im->mca_flags, @@ -2591,7 +2591,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) "Source Address", "INC", "EXC"); } else { seq_printf(seq, - "%3d %6.6s %#p6 %#p6 %6lu %6lu\n", + "%3d %6.6s %pi6 %pi6 %6lu %6lu\n", state->dev->ifindex, state->dev->name, &state->im->mca_addr, &psf->sf_addr, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 720bfedd318..d69fa462d3f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2408,16 +2408,16 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg) { struct seq_file *m = p_arg; - seq_printf(m, "%#p6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); + seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen); #ifdef CONFIG_IPV6_SUBTREES - seq_printf(m, "%#p6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen); + seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen); #else seq_puts(m, "00000000000000000000000000000000 00 "); #endif if (rt->rt6i_nexthop) { - seq_printf(m, "%#p6", rt->rt6i_nexthop->primary_key); + seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key); } else { seq_puts(m, "00000000000000000000000000000000"); } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 34985223895..3c9aff58457 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -365,7 +365,7 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt, buf = kzalloc(36, GFP_KERNEL); if (buf) - snprintf(buf, 36, "%#p6", &addr->sin6_addr); + snprintf(buf, 36, "%pi6", &addr->sin6_addr); xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = buf; -- cgit v1.2.3-70-g09d2 From 5b095d98928fdb9e3b75be20a54b7a6cbf6ca9ad Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Wed, 29 Oct 2008 12:52:50 -0700 Subject: net: replace %p6 with %pI6 Signed-off-by: Harvey Harrison Signed-off-by: David S. 
Miller --- drivers/firmware/iscsi_ibft.c | 2 +- drivers/infiniband/core/sysfs.c | 2 +- drivers/infiniband/hw/mthca/mthca_mcg.c | 4 ++-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 4 ++-- drivers/infiniband/ulp/ipoib/ipoib_main.c | 12 +++++------ drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 30 +++++++++++++------------- drivers/infiniband/ulp/srp/ib_srp.c | 6 +++--- drivers/net/mlx4/mcg.c | 4 ++-- drivers/scsi/iscsi_tcp.c | 2 +- fs/lockd/host.c | 2 +- fs/nfs/super.c | 2 +- include/linux/sunrpc/svc_xprt.h | 2 +- include/net/ip_vs.h | 2 +- include/net/netfilter/nf_conntrack_tuple.h | 2 +- include/net/sctp/sctp.h | 2 +- net/bridge/netfilter/ebt_log.c | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_timer.c | 2 +- net/ipv6/addrlabel.c | 10 ++++----- net/ipv6/ah6.c | 2 +- net/ipv6/esp6.c | 2 +- net/ipv6/exthdrs.c | 2 +- net/ipv6/icmp.c | 2 +- net/ipv6/ip6mr.c | 2 +- net/ipv6/ipcomp6.c | 2 +- net/ipv6/ndisc.c | 2 +- net/ipv6/netfilter/ip6t_LOG.c | 2 +- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/netfilter/ipvs/ip_vs_conn.c | 4 ++-- net/netfilter/ipvs/ip_vs_core.c | 4 ++-- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++-- net/netfilter/ipvs/ip_vs_proto.c | 6 +++--- net/netfilter/ipvs/ip_vs_proto_ah_esp.c | 2 +- net/netfilter/ipvs/ip_vs_xmit.c | 8 +++---- net/netfilter/nf_conntrack_ftp.c | 2 +- net/netfilter/nf_conntrack_h323_main.c | 4 ++-- net/netfilter/xt_hashlimit.c | 2 +- net/netfilter/xt_recent.c | 2 +- net/netlabel/netlabel_addrlist.c | 2 +- net/sctp/ipv6.c | 18 ++++++++-------- net/sctp/sm_statefuns.c | 2 +- net/sunrpc/clnt.c | 2 +- net/sunrpc/rpcb_clnt.c | 2 +- net/sunrpc/svcauth_unix.c | 4 ++-- net/sunrpc/xprtsock.c | 8 +++---- net/xfrm/xfrm_policy.c | 4 ++-- net/xfrm/xfrm_state.c | 4 ++-- security/selinux/avc.c | 2 +- 49 files changed, 100 insertions(+), 100 deletions(-) (limited to 'fs') diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 0a6472097a8..acb82aff880 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -290,7 +290,7 @@ static ssize_t sprintf_ipaddr(char *buf, u8 *ip) /* * IPv6 */ - str += sprintf(str, "%p6", ip); + str += sprintf(str, "%pI6", ip); } str += sprintf(str, "\n"); return str - buf; diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index e985193d631..4f4d1bb9f06 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -262,7 +262,7 @@ static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, if (ret) return ret; - return sprintf(buf, "%p6\n", gid.raw); + return sprintf(buf, "%pI6\n", gid.raw); } static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c index 693bed0b2d1..d4c81053e43 100644 --- a/drivers/infiniband/hw/mthca/mthca_mcg.c +++ b/drivers/infiniband/hw/mthca/mthca_mcg.c @@ -87,7 +87,7 @@ static int find_mgm(struct mthca_dev *dev, } if (0) - mthca_dbg(dev, "Hash for %p6 is %04x\n", gid, *hash); + mthca_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; @@ -254,7 +254,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) goto out; if (index == -1) { - mthca_err(dev, "MGID %p6 not found\n", gid->raw); + mthca_err(dev, "MGID %pI6 not found\n", gid->raw); err = -EINVAL; goto out; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index d98d87bfe36..47d588ba2a7 100644 --- 
a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1128,7 +1128,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, goto err_send_cm; } - ipoib_dbg(priv, "Request connection 0x%x for gid %p6 qpn 0x%x\n", + ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n", p->qp->qp_num, pathrec->dgid.raw, qpn); return 0; @@ -1276,7 +1276,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); - ipoib_dbg(priv, "Reap connection for gid %p6\n", + ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->dgid.raw); tx->neigh = NULL; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index e7f4f94c3e9..b3a671895bd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -359,7 +359,7 @@ void ipoib_mark_paths_invalid(struct net_device *dev) spin_lock_irq(&priv->lock); list_for_each_entry_safe(path, tp, &priv->path_list, list) { - ipoib_dbg(priv, "mark path LID 0x%04x GID %p6 invalid\n", + ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n", be16_to_cpu(path->pathrec.dlid), path->pathrec.dgid.raw); path->valid = 0; @@ -413,10 +413,10 @@ static void path_rec_completion(int status, unsigned long flags; if (!status) - ipoib_dbg(priv, "PathRec LID 0x%04x for GID %p6\n", + ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n", be16_to_cpu(pathrec->dlid), pathrec->dgid.raw); else - ipoib_dbg(priv, "PathRec status %d for GID %p6\n", + ipoib_dbg(priv, "PathRec status %d for GID %pI6\n", status, path->pathrec.dgid.raw); skb_queue_head_init(&skqueue); @@ -527,7 +527,7 @@ static int path_rec_start(struct net_device *dev, { struct ipoib_dev_priv *priv = netdev_priv(dev); - ipoib_dbg(priv, "Start path record lookup for %p6\n", + ipoib_dbg(priv, "Start path record lookup for %pI6\n", path->pathrec.dgid.raw); init_completion(&path->done); @@ -764,7 +764,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) && (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) { - ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %p6\n", + ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n", skb->dst ? 
"neigh" : "dst", be16_to_cpup((__be16 *) skb->data), IPOIB_QPN(phdr->hwaddr), @@ -844,7 +844,7 @@ static void ipoib_neigh_cleanup(struct neighbour *n) else return; ipoib_dbg(priv, - "neigh_cleanup for %06x %p6\n", + "neigh_cleanup for %06x %pI6\n", IPOIB_QPN(n->ha), n->ha + 4); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 0de79cf4c07..a2eb3b9789e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -71,7 +71,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) struct ipoib_neigh *neigh, *tmp; int tx_dropped = 0; - ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %p6\n", + ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n", mcast->mcmember.mgid.raw); spin_lock_irq(&priv->lock); @@ -204,7 +204,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { - ipoib_warn(priv, "multicast group %p6 already attached\n", + ipoib_warn(priv, "multicast group %pI6 already attached\n", mcast->mcmember.mgid.raw); return 0; @@ -213,7 +213,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid), &mcast->mcmember.mgid, set_qkey); if (ret < 0) { - ipoib_warn(priv, "couldn't attach QP to multicast group %p6\n", + ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n", mcast->mcmember.mgid.raw); clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags); @@ -245,7 +245,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, mcast->ah = ah; spin_unlock_irq(&priv->lock); - ipoib_dbg_mcast(priv, "MGID %p6 AV %p, LID 0x%04x, SL %d\n", + ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n", mcast->mcmember.mgid.raw, mcast->ah->ah, be16_to_cpu(mcast->mcmember.mlid), @@ -291,7 +291,7 @@ ipoib_mcast_sendonly_join_complete(int status, if (status) { if (mcast->logcount++ < 20) - ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %p6, status %d\n", + ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); /* Flush out any queued packets */ @@ -351,7 +351,7 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast) ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n", ret); } else { - ipoib_dbg_mcast(priv, "no multicast record for %p6, starting join\n", + ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n", mcast->mcmember.mgid.raw); } @@ -380,7 +380,7 @@ static int ipoib_mcast_join_complete(int status, struct net_device *dev = mcast->dev; struct ipoib_dev_priv *priv = netdev_priv(dev); - ipoib_dbg_mcast(priv, "join completion for %p6 (status %d)\n", + ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n", mcast->mcmember.mgid.raw, status); /* We trap for port events ourselves. 
*/ @@ -410,10 +410,10 @@ static int ipoib_mcast_join_complete(int status, if (mcast->logcount++ < 20) { if (status == -ETIMEDOUT) { - ipoib_dbg_mcast(priv, "multicast join failed for %p6, status %d\n", + ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); } else { - ipoib_warn(priv, "multicast join failed for %p6, status %d\n", + ipoib_warn(priv, "multicast join failed for %pI6, status %d\n", mcast->mcmember.mgid.raw, status); } } @@ -446,7 +446,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, ib_sa_comp_mask comp_mask; int ret = 0; - ipoib_dbg_mcast(priv, "joining MGID %p6\n", mcast->mcmember.mgid.raw); + ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); rec.mgid = mcast->mcmember.mgid; rec.port_gid = priv->local_gid; @@ -631,7 +631,7 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) ib_sa_free_multicast(mcast->mc); if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) { - ipoib_dbg_mcast(priv, "leaving MGID %p6\n", + ipoib_dbg_mcast(priv, "leaving MGID %pI6\n", mcast->mcmember.mgid.raw); /* Remove ourselves from the multicast group */ @@ -663,7 +663,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) mcast = __ipoib_mcast_find(dev, mgid); if (!mcast) { /* Let's create a new send only group now */ - ipoib_dbg_mcast(priv, "setting up send only multicast group for %p6\n", + ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n", mgid); mcast = ipoib_mcast_alloc(dev, 0); @@ -797,13 +797,13 @@ void ipoib_mcast_restart_task(struct work_struct *work) /* ignore group which is directly joined by userspace */ if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) && !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) { - ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %p6\n", + ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n", mgid.raw); continue; } /* Not found or send-only group, let's add a new entry */ - ipoib_dbg_mcast(priv, "adding multicast entry for mgid %p6\n", + ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n", mgid.raw); nmcast = ipoib_mcast_alloc(dev, 0); @@ -837,7 +837,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) { if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) && !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { - ipoib_dbg_mcast(priv, "deleting multicast group %p6\n", + ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n", mcast->mcmember.mgid.raw); rb_erase(&mcast->rb_node, &priv->multicast_tree); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index bc825310c6d..7c13db885bf 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1514,7 +1514,7 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr, target->state == SRP_TARGET_REMOVED) return -ENODEV; - return sprintf(buf, "%p6\n", target->path.dgid.raw); + return sprintf(buf, "%pI6\n", target->path.dgid.raw); } static ssize_t show_orig_dgid(struct device *dev, @@ -1526,7 +1526,7 @@ static ssize_t show_orig_dgid(struct device *dev, target->state == SRP_TARGET_REMOVED) return -ENODEV; - return sprintf(buf, "%p6\n", target->orig_dgid); + return sprintf(buf, "%pI6\n", target->orig_dgid); } static ssize_t show_zero_req_lim(struct device *dev, @@ -1867,7 +1867,7 @@ static ssize_t 
srp_create_target(struct device *dev, shost_printk(KERN_DEBUG, target->scsi_host, PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x " - "service_id %016llx dgid %p6\n", + "service_id %016llx dgid %pI6\n", (unsigned long long) be64_to_cpu(target->id_ext), (unsigned long long) be64_to_cpu(target->ioc_guid), be16_to_cpu(target->path.pkey), diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c index 6f79e84a5c9..b1622062b12 100644 --- a/drivers/net/mlx4/mcg.c +++ b/drivers/net/mlx4/mcg.c @@ -118,7 +118,7 @@ static int find_mgm(struct mlx4_dev *dev, return err; if (0) - mlx4_dbg(dev, "Hash for %p6 is %04x\n", gid, *hash); + mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash); *index = *hash; *prev = -1; @@ -267,7 +267,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]) goto out; if (index == -1) { - mlx4_err(dev, "MGID %p6 not found\n", gid); + mlx4_err(dev, "MGID %pI6 not found\n", gid); err = -EINVAL; goto out; } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index ef929aef7c1..24d09028a27 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -1608,7 +1608,7 @@ static int iscsi_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, case AF_INET6: sin6 = (struct sockaddr_in6 *)addr; spin_lock_bh(&conn->session->lock); - sprintf(buf, "%p6", &sin6->sin6_addr); + sprintf(buf, "%pI6", &sin6->sin6_addr); *port = be16_to_cpu(sin6->sin6_port); spin_unlock_bh(&conn->session->lock); break; diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 344e6b475e0..c8ab7d70390 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -122,7 +122,7 @@ static void nlm_display_address(const struct sockaddr *sap, snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin6->sin6_addr.s6_addr32[3])); else - snprintf(buf, len, "%p6", &sin6->sin6_addr); + snprintf(buf, len, "%pI6", &sin6->sin6_addr); break; default: snprintf(buf, len, "unsupported address family"); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 5fe77219df7..eb391d8d70b 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -468,7 +468,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, } case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; - seq_printf(m, ",mountaddr=%p6", &sin6->sin6_addr); + seq_printf(m, ",mountaddr=%pI6", &sin6->sin6_addr); break; } default: diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 42e01c93c7e..51cb75ea42d 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -145,7 +145,7 @@ static inline char *__svc_print_addr(struct sockaddr *addr, break; case AF_INET6: - snprintf(buf, len, "%p6, port=%u", + snprintf(buf, len, "%pI6, port=%u", &((struct sockaddr_in6 *)addr)->sin6_addr, ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); break; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 6a669206709..af48cada561 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -87,7 +87,7 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, int len; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) - len = snprintf(&buf[*idx], buf_len - *idx, "[%p6]", + len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6]", &addr->in6) + 1; else #endif diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h index 303efaf68d0..42f1fc96f3e 100644 --- a/include/net/netfilter/nf_conntrack_tuple.h +++ b/include/net/netfilter/nf_conntrack_tuple.h @@ -122,7 +122,7 @@ static inline void 
nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t) static inline void nf_ct_dump_tuple_ipv6(const struct nf_conntrack_tuple *t) { #ifdef DEBUG - printk("tuple %p: %u %p6 %hu -> %p6 %hu\n", + printk("tuple %p: %u %pI6 %hu -> %pI6 %hu\n", t, t->dst.protonum, t->src.u3.all, ntohs(t->src.u.all), t->dst.u3.all, ntohs(t->dst.u.all)); diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index a84c3976e1b..e71b0f7ce88 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -285,7 +285,7 @@ extern int sctp_debug_flag; if (sctp_debug_flag) { \ if (saddr->sa.sa_family == AF_INET6) { \ printk(KERN_DEBUG \ - lead "%p6" trail, \ + lead "%pI6" trail, \ leadparm, \ &saddr->v6.sin6_addr, \ otherparms); \ diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index 3654f3ebdb8..5e7cff3542f 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c @@ -133,7 +133,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, printk(" INCOMPLETE IPv6 header"); goto out; } - printk(" IPv6 SRC=%p6 IPv6 DST=%p6, IPv6 priority=0x%01X, Next Header=%d", + printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d", &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr); nexthdr = ih->nexthdr; offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 191c06bb0f6..04909e4b3c4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2346,7 +2346,7 @@ static void DBGUNDO(struct sock *sk, const char *msg) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - printk(KERN_DEBUG "Undo %s %p6/%u c%u l%u ss%u/%u p%u\n", + printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &np->daddr, ntohs(inet->dport), tp->snd_cwnd, tcp_left_out(tp), diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 4e6ee520523..979c9d604eb 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -306,7 +306,7 @@ static void tcp_retransmit_timer(struct sock *sk) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %p6:%u/%u shrinks window %u:%u. Repaired.\n", + LIMIT_NETDEBUG(KERN_DEBUG "TCP: Treason uncloaked! Peer %pI6:%u/%u shrinks window %u:%u. Repaired.\n", &np->daddr, ntohs(inet->dport), inet->num, tp->snd_una, tp->snd_nxt); } diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index d28036659d2..6ff73c4c126 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -186,7 +186,7 @@ u32 ipv6_addr_label(struct net *net, label = p ? 
p->label : IPV6_ADDR_LABEL_DEFAULT; rcu_read_unlock(); - ADDRLABEL(KERN_DEBUG "%s(addr=%p6, type=%d, ifindex=%d) => %08x\n", + ADDRLABEL(KERN_DEBUG "%s(addr=%pI6, type=%d, ifindex=%d) => %08x\n", __func__, addr, type, ifindex, label); return label; @@ -201,7 +201,7 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net, struct ip6addrlbl_entry *newp; int addrtype; - ADDRLABEL(KERN_DEBUG "%s(prefix=%p6, prefixlen=%d, ifindex=%d, label=%u)\n", + ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label); addrtype = ipv6_addr_type(prefix) & (IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK); @@ -289,7 +289,7 @@ static int ip6addrlbl_add(struct net *net, struct ip6addrlbl_entry *newp; int ret = 0; - ADDRLABEL(KERN_DEBUG "%s(prefix=%p6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", + ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label, replace); @@ -313,7 +313,7 @@ static int __ip6addrlbl_del(struct net *net, struct hlist_node *pos, *n; int ret = -ESRCH; - ADDRLABEL(KERN_DEBUG "%s(prefix=%p6, prefixlen=%d, ifindex=%d)\n", + ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, ifindex); hlist_for_each_entry_safe(p, pos, n, &ip6addrlbl_table.head, list) { @@ -337,7 +337,7 @@ static int ip6addrlbl_del(struct net *net, struct in6_addr prefix_buf; int ret; - ADDRLABEL(KERN_DEBUG "%s(prefix=%p6, prefixlen=%d, ifindex=%d)\n", + ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, ifindex); ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 9bc43f2527c..7a8a01369e5 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -419,7 +419,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!x) return; - NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%p6\n", + NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n", ntohl(ah->spi), &iph->daddr); xfrm_state_put(x); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index ec4be188c34..c02a6308def 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -367,7 +367,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%p6\n", + printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", ntohl(esph->spi), &iph->daddr); xfrm_state_put(x); } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index a8905146835..1c7f400a3cf 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -219,7 +219,7 @@ static int ipv6_dest_hao(struct sk_buff *skb, int optoff) if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { LIMIT_NETDEBUG( - KERN_DEBUG "hao is not an unicast addr: %p6\n", &hao->addr); + KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr); goto discard; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b3fa38e40dc..3c2821f9b52 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -681,7 +681,7 @@ static int icmpv6_rcv(struct sk_buff *skb) skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6, 0)); if (__skb_checksum_complete(skb)) { - LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%p6 > %p6]\n", + LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n", saddr, daddr); goto 
discard_it; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 798e6a29dd8..c491fb98a5e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -297,7 +297,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) const struct mfc6_cache *mfc = v; const struct ipmr_mfc_iter *it = seq->private; - seq_printf(seq, "%p6 %p6 %-3d %8ld %8ld %8ld", + seq_printf(seq, "%pI6 %pI6 %-3d %8ld %8ld %8ld", &mfc->mf6c_mcastgrp, &mfc->mf6c_origin, mfc->mf6c_parent, mfc->mfc_un.res.pkt, diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 9566d8f7314..d4576a9c154 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -67,7 +67,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, if (!x) return; - printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%p6\n", + printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI6\n", spi, &iph->daddr); xfrm_state_put(x); } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 191bb0722a7..2a6752dae09 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -647,7 +647,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) if ((probes -= neigh->parms->ucast_probes) < 0) { if (!(neigh->nud_state & NUD_VALID)) { - ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %p6\n", + ND_PRINTK1(KERN_DEBUG "%s(): trying to ucast probe in NUD_INVALID: %pI6\n", __func__, target); } ndisc_send_ns(dev, neigh, target, target, saddr); diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index a61ce301000..02885e8bb69 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -61,7 +61,7 @@ static void dump_packet(const struct nf_loginfo *info, } /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */ - printk("SRC=%p6 DST=%p6 ", &ih->saddr, &ih->daddr); + printk("SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr); /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */ printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ", diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index b165a273c6c..727b9530448 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -56,7 +56,7 @@ static bool ipv6_invert_tuple(struct nf_conntrack_tuple *tuple, static int ipv6_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { - return seq_printf(s, "src=%p6 dst=%p6 ", + return seq_printf(s, "src=%pI6 dst=%pI6 ", tuple->src.u3.ip6, tuple->dst.u3.ip6); } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 483550cfdf3..984276463a8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -872,7 +872,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) if (genhash || memcmp(hash_location, newhash, 16) != 0) { if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash %s for (%p6, %u)->(%p6, %u)\n", + printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n", genhash ? 
"failed" : "mismatch", &ip6h->saddr, ntohs(th->source), &ip6h->daddr, ntohs(th->dest)); diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 89bf65be6f2..60aba45023f 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -820,7 +820,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) - seq_printf(seq, "%-3s %p6 %04X %p6 %04X %p6 %04X %-11s %7lu\n", + seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %7lu\n", ip_vs_proto_name(cp->protocol), &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), @@ -881,7 +881,7 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v) #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) - seq_printf(seq, "%-3s %p6 %04X %p6 %04X %p6 %04X %-11s %-6s %7lu\n", + seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n", ip_vs_proto_name(cp->protocol), &cp->caddr.in6, ntohs(cp->cport), &cp->vaddr.in6, ntohs(cp->vport), diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 9400587a01e..c3c68443b5b 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -805,7 +805,7 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related) if (ic == NULL) return NF_DROP; - IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %p6->%p6\n", + IP_VS_DBG(12, "Outgoing ICMPv6 (%d,%d) %pI6->%pI6\n", ic->icmp6_type, ntohs(icmpv6_id(ic)), &iph->saddr, &iph->daddr); @@ -1175,7 +1175,7 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) if (ic == NULL) return NF_DROP; - IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %p6->%p6\n", + IP_VS_DBG(12, "Incoming ICMPv6 (%d,%d) %pI6->%pI6\n", ic->icmp6_type, ntohs(icmpv6_id(ic)), &iph->saddr, &iph->daddr); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 28c47c0d514..76db27ec963 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1867,7 +1867,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 if (svc->af == AF_INET6) - seq_printf(seq, "%s [%p6]:%04X %s ", + seq_printf(seq, "%s [%pI6]:%04X %s ", ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), @@ -1895,7 +1895,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) #ifdef CONFIG_IP_VS_IPV6 if (dest->af == AF_INET6) seq_printf(seq, - " -> [%p6]:%04X" + " -> [%pI6]:%04X" " %-7s %-6d %-10d %-10d\n", &dest->addr.in6, ntohs(dest->port), diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index d7ce4f1839c..54cd67fbfe7 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -203,7 +203,7 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, if (ih == NULL) sprintf(buf, "%s TRUNCATED", pp->name); else if (ih->nexthdr == IPPROTO_FRAGMENT) - sprintf(buf, "%s %p6->%p6 frag", + sprintf(buf, "%s %pI6->%pI6 frag", pp->name, &ih->saddr, &ih->daddr); else { __be16 _ports[2], *pptr; @@ -211,10 +211,10 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp, pptr = skb_header_pointer(skb, offset + sizeof(struct ipv6hdr), sizeof(_ports), _ports); if (pptr == NULL) - sprintf(buf, "%s TRUNCATED %p6->%p6", + sprintf(buf, "%s TRUNCATED %pI6->%pI6", pp->name, &ih->saddr, &ih->daddr); else - sprintf(buf, "%s %p6:%u->%p6:%u", + sprintf(buf, "%s %pI6:%u->%pI6:%u", pp->name, &ih->saddr, ntohs(pptr[0]), &ih->daddr, ntohs(pptr[1])); diff --git 
a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c index 59f2d11b683..6ede8881204 100644 --- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c +++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c @@ -154,7 +154,7 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb, if (ih == NULL) sprintf(buf, "%s TRUNCATED", pp->name); else - sprintf(buf, "%s %p6->%p6", + sprintf(buf, "%s %pI6->%pI6", pp->name, &ih->saddr, &ih->daddr); printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index be34c335cab..fc342dda950 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -141,12 +141,12 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) NULL, &fl); if (!rt) { spin_unlock(&dest->dst_lock); - IP_VS_DBG_RL("ip6_route_output error, dest: %p6\n", + IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", &dest->addr.in6); return NULL; } __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); - IP_VS_DBG(10, "new dst %p6, refcnt=%d\n", + IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n", &dest->addr.in6, atomic_read(&rt->u.dst.__refcnt)); } @@ -166,7 +166,7 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp) rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); if (!rt) { - IP_VS_DBG_RL("ip6_route_output error, dest: %p6\n", + IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", &cp->daddr.in6); return NULL; } @@ -300,7 +300,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); if (!rt) { - IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, dest: %p6\n", + IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, dest: %pI6\n", &iph->daddr); goto tx_error_icmp; } diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 05bf82d345c..8cab6d59590 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c @@ -467,7 +467,7 @@ static int help(struct sk_buff *skb, NIPQUAD(cmd.u3.ip), NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip)); } else { - pr_debug("conntrack_ftp: NOT RECORDING: %p6 != %p6\n", + pr_debug("conntrack_ftp: NOT RECORDING: %pI6 != %pI6\n", cmd.u3.ip6, ct->tuplehash[dir].tuple.src.u3.ip6); } diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 29e49b1c80b..99bc803d1dd 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -850,7 +850,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct, get_h225_addr(ct, *data, &setup->destCallSignalAddress, &addr, &port) && memcmp(&addr, &ct->tuplehash[!dir].tuple.src.u3, sizeof(addr))) { - pr_debug("nf_ct_q931: set destCallSignalAddress %p6:%hu->%p6:%hu\n", + pr_debug("nf_ct_q931: set destCallSignalAddress %pI6:%hu->%pI6:%hu\n", &addr, ntohs(port), &ct->tuplehash[!dir].tuple.src.u3, ntohs(ct->tuplehash[!dir].tuple.src.u.tcp.port)); ret = set_h225_addr(skb, data, dataoff, @@ -866,7 +866,7 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct, get_h225_addr(ct, *data, &setup->sourceCallSignalAddress, &addr, &port) && memcmp(&addr, &ct->tuplehash[!dir].tuple.dst.u3, sizeof(addr))) { - pr_debug("nf_ct_q931: set sourceCallSignalAddress %p6:%hu->%p6:%hu\n", + pr_debug("nf_ct_q931: set sourceCallSignalAddress %pI6:%hu->%pI6:%hu\n", &addr, ntohs(port), &ct->tuplehash[!dir].tuple.dst.u3, ntohs(ct->tuplehash[!dir].tuple.dst.u.tcp.port)); ret = set_h225_addr(skb, data, dataoff, diff 
--git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index f04c6ed4367..6379717f904 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -904,7 +904,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family, ent->rateinfo.cost); #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) case NFPROTO_IPV6: - return seq_printf(s, "%ld %p6:%u->%p6:%u %u %u %u\n", + return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n", (long)(ent->expires - jiffies)/HZ, &ent->dst.ip6.src, ntohs(ent->dst.src_port), diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index a377ea333e1..b785727a5bf 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -426,7 +426,7 @@ static int recent_seq_show(struct seq_file *seq, void *v) "oldest_pkt: %u", NIPQUAD(e->addr.ip), e->ttl, e->stamps[i], e->index); else - seq_printf(seq, "src=%p6 ttl: %u last_seen: %lu oldest_pkt: %u", + seq_printf(seq, "src=%pI6 ttl: %u last_seen: %lu oldest_pkt: %u", &e->addr.in6, e->ttl, e->stamps[i], e->index); for (i = 0; i < e->nstamps; i++) seq_printf(seq, "%s %lu", i ? "," : "", e->stamps[i]); diff --git a/net/netlabel/netlabel_addrlist.c b/net/netlabel/netlabel_addrlist.c index 614c95ec39d..8b1c58b0820 100644 --- a/net/netlabel/netlabel_addrlist.c +++ b/net/netlabel/netlabel_addrlist.c @@ -370,7 +370,7 @@ void netlbl_af6list_audit_addr(struct audit_buffer *audit_buf, if (dev != NULL) audit_log_format(audit_buf, " netif=%s", dev); - audit_log_format(audit_buf, " %s=%p6", dir, addr); + audit_log_format(audit_buf, " %s=%pI6", dir, addr); if (ntohl(mask->s6_addr32[3]) != 0xffffffff) { u32 mask_len = 0; u32 mask_val; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e82668bd2b5..ceaa4aa066e 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -223,7 +223,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) ipv6_addr_copy(&fl.fl6_dst, rt0->addr); } - SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%p6 dst:%p6\n", + SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, &fl.fl6_src, &fl.fl6_dst); @@ -251,18 +251,18 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, fl.oif = daddr->v6.sin6_scope_id; - SCTP_DEBUG_PRINTK("%s: DST=%p6 ", __func__, &fl.fl6_dst); + SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl.fl6_dst); if (saddr) { ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr); - SCTP_DEBUG_PRINTK("SRC=%p6 - ", &fl.fl6_src); + SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl.fl6_src); } dst = ip6_route_output(&init_net, NULL, &fl); if (!dst->error) { struct rt6_info *rt; rt = (struct rt6_info *)dst; - SCTP_DEBUG_PRINTK("rt6_dst:%p6 rt6_src:%p6\n", + SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr, &rt->rt6i_src.addr); return dst; } @@ -309,7 +309,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, __u8 matchlen = 0; __u8 bmatchlen; - SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p daddr:%p6 ", + SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p daddr:%pI6 ", __func__, asoc, dst, &daddr->v6.sin6_addr); if (!asoc) { @@ -318,7 +318,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, &daddr->v6.sin6_addr, inet6_sk(&sk->inet.sk)->srcprefs, &saddr->v6.sin6_addr); - SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %p6\n", + SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n", &saddr->v6.sin6_addr); return; } @@ -347,10 +347,10 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk, if (baddr) { memcpy(saddr, baddr, sizeof(union sctp_addr)); - 
SCTP_DEBUG_PRINTK("saddr: %p6\n", &saddr->v6.sin6_addr); + SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr); } else { printk(KERN_ERR "%s: asoc:%p Could not find a valid source " - "address for the dest:%p6\n", + "address for the dest:%pI6\n", __func__, asoc, &daddr->v6.sin6_addr); } @@ -720,7 +720,7 @@ static int sctp_v6_is_ce(const struct sk_buff *skb) /* Dump the v6 addr to the seq file. */ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) { - seq_printf(seq, "%p6 ", &addr->v6.sin6_addr); + seq_printf(seq, "%pI6 ", &addr->v6.sin6_addr); } static void sctp_v6_ecn_capable(struct sock *sk) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 9f370964e73..d07b484b873 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1123,7 +1123,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, if (from_addr.sa.sa_family == AF_INET6) { if (net_ratelimit()) printk(KERN_WARNING - "%s association %p could not find address %p6\n", + "%s association %p could not find address %pI6\n", __func__, asoc, &from_addr.v6.sin6_addr); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 26f61fd11ea..8f067497c21 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -278,7 +278,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) case AF_INET6: { struct sockaddr_in6 *sin = (struct sockaddr_in6 *)args->address; - snprintf(servername, sizeof(servername), "%p6", + snprintf(servername, sizeof(servername), "%pI6", &sin->sin6_addr); break; } diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 968ec1f66bc..4c8adadc214 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -305,7 +305,7 @@ static int rpcb_register_netid6(struct sockaddr_in6 *address_to_register, snprintf(buf, sizeof(buf), "::.%u.%u", port >> 8, port & 0xff); else - snprintf(buf, sizeof(buf), "%p6.%u.%u", + snprintf(buf, sizeof(buf), "%pI6.%u.%u", &address_to_register->sin6_addr, port >> 8, port & 0xff); map->r_addr = buf; diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index eb640c1a1bc..16f714a247b 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c @@ -168,7 +168,7 @@ static void ip_map_request(struct cache_detail *cd, ntohl(im->m_addr.s6_addr32[3]) >> 8 & 0xff, ntohl(im->m_addr.s6_addr32[3]) >> 0 & 0xff); } else { - snprintf(text_addr, 40, "%p6", &im->m_addr); + snprintf(text_addr, 40, "%pI6", &im->m_addr); } qword_add(bpp, blen, im->m_class); qword_add(bpp, blen, text_addr); @@ -286,7 +286,7 @@ static int ip_map_show(struct seq_file *m, ntohl(addr.s6_addr32[3]) >> 0 & 0xff, dom); } else { - seq_printf(m, "%s %p6 %s\n", im->m_class, &addr, dom); + seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom); } return 0; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3c9aff58457..f9ce3c9949d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -341,7 +341,7 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt, buf = kzalloc(40, GFP_KERNEL); if (buf) { - snprintf(buf, 40, "%p6",&addr->sin6_addr); + snprintf(buf, 40, "%pI6",&addr->sin6_addr); } xprt->address_strings[RPC_DISPLAY_ADDR] = buf; @@ -356,7 +356,7 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt *xprt, buf = kzalloc(64, GFP_KERNEL); if (buf) { - snprintf(buf, 64, "addr=%p6 port=%u proto=%s", + snprintf(buf, 64, "addr=%pI6 port=%u proto=%s", &addr->sin6_addr, ntohs(addr->sin6_port), protocol); @@ -378,7 +378,7 @@ static void xs_format_ipv6_peer_addresses(struct rpc_xprt 
*xprt, buf = kzalloc(50, GFP_KERNEL); if (buf) { - snprintf(buf, 50, "%p6.%u.%u", + snprintf(buf, 50, "%pI6.%u.%u", &addr->sin6_addr, ntohs(addr->sin6_port) >> 8, ntohs(addr->sin6_port) & 0xff); @@ -1407,7 +1407,7 @@ static int xs_bind6(struct sock_xprt *transport, struct socket *sock) if (port > last) nloop++; } while (err == -EADDRINUSE && nloop != 2); - dprintk("RPC: xs_bind6 %p6:%u: %s (%d)\n", + dprintk("RPC: xs_bind6 %pI6:%u: %s (%d)\n", &myaddr.sin6_addr, port, err ? "failed" : "ok", err); return err; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f052b069f98..80b13eea30e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2467,11 +2467,11 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, sel->prefixlen_d); break; case AF_INET6: - audit_log_format(audit_buf, " src=%p6", sel->saddr.a6); + audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); if (sel->prefixlen_s != 128) audit_log_format(audit_buf, " src_prefixlen=%d", sel->prefixlen_s); - audit_log_format(audit_buf, " dst=%p6", sel->daddr.a6); + audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); if (sel->prefixlen_d != 128) audit_log_format(audit_buf, " dst_prefixlen=%d", sel->prefixlen_d); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 7944861fb9b..304eca4ac97 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2115,7 +2115,7 @@ static void xfrm_audit_helper_sainfo(struct xfrm_state *x, NIPQUAD(x->id.daddr.a4)); break; case AF_INET6: - audit_log_format(audit_buf, " src=%p6 dst=%p6", + audit_log_format(audit_buf, " src=%pI6 dst=%pI6", x->props.saddr.a6, x->id.daddr.a6); break; } @@ -2140,7 +2140,7 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, case AF_INET6: iph6 = ipv6_hdr(skb); audit_log_format(audit_buf, - " src=%p6 dst=%p6 flowlbl=0x%x%02x%02x", + " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x", &iph6->saddr,&iph6->daddr, iph6->flow_lbl[0] & 0x0f, iph6->flow_lbl[1], diff --git a/security/selinux/avc.c b/security/selinux/avc.c index c91008f438a..ed6af12cdf4 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -495,7 +495,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab, char *name1, char *name2) { if (!ipv6_addr_any(addr)) - audit_log_format(ab, " %s=%p6", name1, addr); + audit_log_format(ab, " %s=%pI6", name1, addr); if (port) audit_log_format(ab, " %s=%d", name2, ntohs(port)); } -- cgit v1.2.3-70-g09d2 From 07c8f67587724b417f60bffb32c448dd94647b54 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 16:11:59 +1100 Subject: [XFS] Make use of the init-once slab optimisation. To avoid having to initialise some fields of the XFS inode on every allocation, we can use the slab init-once feature to initialise them. All we have to guarantee is that when we free the inode, all it's entries are in the initial state. Add asserts where possible to ensure debug kernels check this initial state before freeing and after allocation. 
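A minimal sketch of the init-once pattern this patch describes, using a hypothetical foo cache rather than the real xfs_inode fields (the names below are illustrative, not part of the patch), built on the standard kmem_cache_create() constructor hook: fields whose state is identical at free time and at the next allocation (locks, list heads, wait queues) are set up once in the constructor, everything else is reinitialised per allocation.

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/string.h>

struct foo {
	spinlock_t lock;          /* idempotent: constructor sets it up once */
	struct list_head reclaim; /* must be empty again whenever the object is freed */
	int state;                /* non-idempotent: reset on every allocation */
};

static struct kmem_cache *foo_cache;

/* Runs only when the slab populates a fresh object, not on every allocation. */
static void foo_init_once(void *obj)
{
	struct foo *f = obj;

	memset(f, 0, sizeof(*f));
	spin_lock_init(&f->lock);
	INIT_LIST_HEAD(&f->reclaim);
}

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				      SLAB_HWCACHE_ALIGN, foo_init_once);
	return foo_cache ? 0 : -ENOMEM;
}

On allocation only the non-idempotent fields (here "state") need to be set, and on free the object must go back with the lock unheld and the list empty, which is exactly the state the asserts added by the patch check.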
SGI-PV: 981498 SGI-Modid: xfs-linux-melb:xfs-kern:31925a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 37 ++++++++++++++- fs/xfs/xfs_iget.c | 15 ------ fs/xfs/xfs_inode.c | 111 +++++++++++++++++++++++++++++-------------- fs/xfs/xfs_inode.h | 1 + fs/xfs/xfs_itable.c | 14 +++--- 5 files changed, 119 insertions(+), 59 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 37ebe36056e..d1c4dec51a3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -887,6 +887,41 @@ xfs_fs_inode_init_once( inode_init_once((struct inode *)vnode); } + +/* + * Slab object creation initialisation for the XFS inode. + * This covers only the idempotent fields in the XFS inode; + * all other fields need to be initialised on allocation + * from the slab. This avoids the need to repeatedly intialise + * fields in the xfs inode that left in the initialise state + * when freeing the inode. + */ +void +xfs_inode_init_once( + kmem_zone_t *zone, + void *inode) +{ + struct xfs_inode *ip = inode; + + memset(ip, 0, sizeof(struct xfs_inode)); + atomic_set(&ip->i_iocount, 0); + atomic_set(&ip->i_pincount, 0); + spin_lock_init(&ip->i_flags_lock); + INIT_LIST_HEAD(&ip->i_reclaim); + init_waitqueue_head(&ip->i_ipin_wait); + /* + * Because we want to use a counting completion, complete + * the flush completion once to allow a single access to + * the flush completion without blocking. + */ + init_completion(&ip->i_flush); + complete(&ip->i_flush); + + mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, + "xfsino", ip->i_ino); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); +} + /* * Attempt to flush the inode, this will actually fail * if the inode is pinned, but we dirty the inode again @@ -2018,7 +2053,7 @@ xfs_init_zones(void) xfs_inode_zone = kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | - KM_ZONE_SPREAD, NULL); + KM_ZONE_SPREAD, xfs_inode_init_once); if (!xfs_inode_zone) goto out_destroy_efi_zone; diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index e229e9e001c..5be89d760a9 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -210,21 +210,6 @@ finish_inode: xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); - - mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, - "xfsino", ip->i_ino); - mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); - init_waitqueue_head(&ip->i_ipin_wait); - atomic_set(&ip->i_pincount, 0); - - /* - * Because we want to use a counting completion, complete - * the flush completion once to allow a single access to - * the flush completion without blocking. - */ - init_completion(&ip->i_flush); - complete(&ip->i_flush); - if (lock_flags) xfs_ilock(ip, lock_flags); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index dbd9cef852e..b0c604e1bd4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -787,6 +787,70 @@ xfs_dic2xflags( (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); } +/* + * Allocate and initialise an xfs_inode. + */ +struct xfs_inode * +xfs_inode_alloc( + struct xfs_mount *mp, + xfs_ino_t ino) +{ + struct xfs_inode *ip; + + /* + * if this didn't occur in transactions, we could use + * KM_MAYFAIL and return NULL here on ENOMEM. Set the + * code up to do this anyway. 
+ */ + ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); + if (!ip) + return NULL; + + ASSERT(atomic_read(&ip->i_iocount) == 0); + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!spin_is_locked(&ip->i_flags_lock)); + ASSERT(list_empty(&ip->i_reclaim)); + + ip->i_ino = ino; + ip->i_mount = mp; + ip->i_blkno = 0; + ip->i_len = 0; + ip->i_boffset =0; + ip->i_afp = NULL; + memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); + ip->i_flags = 0; + ip->i_update_core = 0; + ip->i_update_size = 0; + ip->i_delayed_blks = 0; + memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); + ip->i_size = 0; + ip->i_new_size = 0; + + /* + * Initialize inode's trace buffers. + */ +#ifdef XFS_INODE_TRACE + ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_BMAP_TRACE + ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_BMBT_TRACE + ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_RW_TRACE + ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_ILOCK_TRACE + ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_DIR2_TRACE + ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); +#endif + + return ip; +} + /* * Given a mount structure and an inode number, return a pointer * to a newly allocated in-core inode corresponding to the given @@ -809,13 +873,9 @@ xfs_iread( xfs_inode_t *ip; int error; - ASSERT(xfs_inode_zone != NULL); - - ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP); - ip->i_ino = ino; - ip->i_mount = mp; - atomic_set(&ip->i_iocount, 0); - spin_lock_init(&ip->i_flags_lock); + ip = xfs_inode_alloc(mp, ino); + if (!ip) + return ENOMEM; /* * Get pointer's to the on-disk inode and the buffer containing it. @@ -830,35 +890,12 @@ xfs_iread( return error; } - /* - * Initialize inode's trace buffers. - * Do this before xfs_iformat in case it adds entries. - */ -#ifdef XFS_INODE_TRACE - ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BMAP_TRACE - ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BMBT_TRACE - ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_RW_TRACE - ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_ILOCK_TRACE - ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_DIR2_TRACE - ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); -#endif - /* * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. */ if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) { - kmem_zone_free(xfs_inode_zone, ip); + xfs_idestroy(ip); xfs_trans_brelse(tp, bp); #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " @@ -881,7 +918,7 @@ xfs_iread( xfs_dinode_from_disk(&ip->i_d, &dip->di_core); error = xfs_iformat(ip, dip); if (error) { - kmem_zone_free(xfs_inode_zone, ip); + xfs_idestroy(ip); xfs_trans_brelse(tp, bp); #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " @@ -911,8 +948,6 @@ xfs_iread( XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); } - INIT_LIST_HEAD(&ip->i_reclaim); - /* * The inode format changed when we moved the link count and * made it 32 bits long. 
If this is an old format inode, @@ -2631,8 +2666,6 @@ xfs_idestroy( } if (ip->i_afp) xfs_idestroy_fork(ip, XFS_ATTR_FORK); - mrfree(&ip->i_lock); - mrfree(&ip->i_iolock); #ifdef XFS_INODE_TRACE ktrace_free(ip->i_trace); @@ -2671,7 +2704,13 @@ xfs_idestroy( spin_unlock(&mp->m_ail_lock); } xfs_inode_item_destroy(ip); + ip->i_itemp = NULL; } + /* asserts to verify all state is correct here */ + ASSERT(atomic_read(&ip->i_iocount) == 0); + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!spin_is_locked(&ip->i_flags_lock)); + ASSERT(list_empty(&ip->i_reclaim)); kmem_zone_free(xfs_inode_zone, ip); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1420c49674d..3af1f6dd149 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -513,6 +513,7 @@ int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, xfs_fsize_t, int, int); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); +struct xfs_inode * xfs_inode_alloc(struct xfs_mount *, xfs_ino_t); void xfs_idestroy_fork(xfs_inode_t *, int); void xfs_idestroy(xfs_inode_t *); void xfs_idata_realloc(xfs_inode_t *, int, int); diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index cf6754a3c5b..4f4c9394106 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -594,21 +594,21 @@ xfs_bulkstat( /* * Get the inode cluster buffer */ - ASSERT(xfs_inode_zone != NULL); - ip = kmem_zone_zalloc(xfs_inode_zone, - KM_SLEEP); - ip->i_ino = ino; - ip->i_mount = mp; - spin_lock_init(&ip->i_flags_lock); if (bp) xfs_buf_relse(bp); + ip = xfs_inode_alloc(mp, ino); + if (!ip) { + bp = NULL; + rval = ENOMEM; + break; + } error = xfs_itobp(mp, NULL, ip, &dip, &bp, bno, XFS_IMAP_BULKSTAT, XFS_BUF_LOCK); if (!error) clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; - kmem_zone_free(xfs_inode_zone, ip); + xfs_idestroy(ip); if (XFS_TEST_ERROR(error != 0, mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, XFS_RANDOM_BULKSTAT_READ_CHUNK)) { -- cgit v1.2.3-70-g09d2 From be8b78a626dd9bc92c12e9ac34f3bc3db1204d25 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 16:42:34 +1100 Subject: [XFS] Remove kmem_zone_t argument from xfs_inode_init_once() kmem cache constructor no longer takes a kmem_zone_t argument. SGI-PV: 957103 SGI-Modid: xfs-linux-melb:xfs-kern:32254a Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_super.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index d1c4dec51a3..9bfb26066a8 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -898,7 +898,6 @@ xfs_fs_inode_init_once( */ void xfs_inode_init_once( - kmem_zone_t *zone, void *inode) { struct xfs_inode *ip = inode; -- cgit v1.2.3-70-g09d2 From d07c60e54fb7647d8247ae392f128e8ee8f3e5f3 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 16:50:35 +1100 Subject: [XFS] Use xfs_idestroy() to cleanup an inode. 
SGI-PV: 981498 SGI-Modid: xfs-linux-melb:xfs-kern:31927a Signed-off-by: Lachlan McIlroy Signed-off-by: David Chinner --- fs/xfs/xfs_inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b0c604e1bd4..2a158a26286 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -886,7 +886,7 @@ xfs_iread( */ error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK); if (error) { - kmem_zone_free(xfs_inode_zone, ip); + xfs_idestroy(ip); return error; } -- cgit v1.2.3-70-g09d2 From 46039928c9abe466ed1bc0da20c2e596b1d41236 Mon Sep 17 00:00:00 2001 From: Barry Naujok Date: Thu, 30 Oct 2008 16:52:35 +1100 Subject: [XFS] Remove final remnants of dirv1 macros and other stuff SGI-PV: 981498 SGI-Modid: xfs-linux-melb:xfs-kern:32002a Signed-off-by: Barry Naujok Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_da_btree.h | 20 -------------------- fs/xfs/xfs_mount.h | 1 - 2 files changed, 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 8be0b00ede9..599e270e695 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -72,27 +72,7 @@ typedef struct xfs_da_intnode { typedef struct xfs_da_node_hdr xfs_da_node_hdr_t; typedef struct xfs_da_node_entry xfs_da_node_entry_t; -#define XFS_DA_MAXHASH ((xfs_dahash_t)-1) /* largest valid hash value */ - #define XFS_LBSIZE(mp) (mp)->m_sb.sb_blocksize -#define XFS_LBLOG(mp) (mp)->m_sb.sb_blocklog - -#define XFS_DA_MAKE_BNOENTRY(mp,bno,entry) \ - (((bno) << (mp)->m_dircook_elog) | (entry)) -#define XFS_DA_MAKE_COOKIE(mp,bno,entry,hash) \ - (((xfs_off_t)XFS_DA_MAKE_BNOENTRY(mp, bno, entry) << 32) | (hash)) -#define XFS_DA_COOKIE_HASH(mp,cookie) ((xfs_dahash_t)cookie) -#define XFS_DA_COOKIE_BNO(mp,cookie) \ - ((((xfs_off_t)(cookie) >> 31) == -1LL ? \ - (xfs_dablk_t)0 : \ - (xfs_dablk_t)((xfs_off_t)(cookie) >> \ - ((mp)->m_dircook_elog + 32)))) -#define XFS_DA_COOKIE_ENTRY(mp,cookie) \ - ((((xfs_off_t)(cookie) >> 31) == -1LL ? \ - (xfs_dablk_t)0 : \ - (xfs_dablk_t)(((xfs_off_t)(cookie) >> 32) & \ - ((1 << (mp)->m_dircook_elog) - 1)))) - /*======================================================================== * Btree searching and modification structure definitions. diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index f3c1024b124..49d647e730e 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -267,7 +267,6 @@ typedef struct xfs_mount { xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ - __uint8_t m_dircook_elog; /* log d-cookie entry bits */ __uint8_t m_blkbit_log; /* blocklog + NBBY */ __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ __uint8_t m_agno_log; /* log #ag's */ -- cgit v1.2.3-70-g09d2 From a357a1215602f79182abdde27aaddc7166dbd709 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 16:53:25 +1100 Subject: [XFS] Fix use-after-free with log and quotas Destroying the quota stuff on unmount can access the log - ie XFS_QM_DONE() ends up in xfs_dqunlock() which calls xfs_trans_unlocked_item() and then xfs_log_move_tail(). By this time the log has already been destroyed. Just move the cleanup of the quota code earlier in xfs_unmountfs() before the call to xfs_log_unmount(). Moving XFS_QM_DONE() up near XFS_QM_DQPURGEALL() seems like a good spot. 
SGI-PV: 987086 SGI-Modid: xfs-linux-melb:xfs-kern:32148a Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig Signed-off-by: Peter Leckie --- fs/xfs/xfs_mount.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index a4503f5e949..15f5dd22fbb 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1245,6 +1245,9 @@ xfs_unmountfs( XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); + if (mp->m_quotainfo) + XFS_QM_DONE(mp); + /* * Flush out the log synchronously so that we know for sure * that nothing is pinned. This is important because bflush() @@ -1297,8 +1300,6 @@ xfs_unmountfs( xfs_errortag_clearall(mp, 0); #endif xfs_free_perag(mp); - if (mp->m_quotainfo) - XFS_QM_DONE(mp); } STATIC void -- cgit v1.2.3-70-g09d2 From f338f9036400e453ab553b16639a9cc838b02d44 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 16:53:38 +1100 Subject: [XFS] Unlock inode before calling xfs_idestroy() Lock debugging reported the ilock was being destroyed without being unlocked. We don't need to lock the inode until we are going to insert it into the radix tree. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32159a Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_iget.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 5be89d760a9..4c92d190b3b 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -210,9 +210,6 @@ finish_inode: xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); - if (lock_flags) - xfs_ilock(ip, lock_flags); - if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { xfs_idestroy(ip); xfs_put_perag(mp, pag); @@ -228,6 +225,10 @@ finish_inode: delay(1); goto again; } + + if (lock_flags) + xfs_ilock(ip, lock_flags); + mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = agino & mask; write_lock(&pag->pag_ici_lock); @@ -239,6 +240,8 @@ finish_inode: BUG_ON(error != -EEXIST); write_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); + if (lock_flags) + xfs_iunlock(ip, lock_flags); xfs_idestroy(ip); XFS_STATS_INC(xs_ig_dup); goto again; -- cgit v1.2.3-70-g09d2 From f2277f06e626d694e61bb356524ff536ced24acf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:53:47 +1100 Subject: [XFS] kill struct xfs_btree_hdr This type is only embedded in struct xfs_btree_block and never used directly. By moving the fields directly into struct xfs_btree_block a lot of the macros for struct xfs_btree_sblock and struct xfs_btree_lblock can be used for struct xfs_btree_block too now which helps greatly with some of the migrations during implementing the generic btree code. 
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32174a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_btree.c | 12 ++++++------ fs/xfs/xfs_btree.h | 7 +------ 2 files changed, 7 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index cc593a84c34..31002093bfb 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -62,13 +62,13 @@ xfs_btree_maxrecs( case XFS_BTNUM_BNO: case XFS_BTNUM_CNT: return (int)XFS_ALLOC_BLOCK_MAXRECS( - be16_to_cpu(block->bb_h.bb_level), cur); + be16_to_cpu(block->bb_level), cur); case XFS_BTNUM_BMAP: return (int)XFS_BMAP_BLOCK_IMAXRECS( - be16_to_cpu(block->bb_h.bb_level), cur); + be16_to_cpu(block->bb_level), cur); case XFS_BTNUM_INO: return (int)XFS_INOBT_BLOCK_MAXRECS( - be16_to_cpu(block->bb_h.bb_level), cur); + be16_to_cpu(block->bb_level), cur); default: ASSERT(0); return 0; @@ -634,7 +634,7 @@ xfs_btree_firstrec( /* * It's empty, there is no such record. */ - if (!block->bb_h.bb_numrecs) + if (!block->bb_numrecs) return 0; /* * Set the ptr value to 1, that's the first record/key. @@ -663,12 +663,12 @@ xfs_btree_lastrec( /* * It's empty, there is no such record. */ - if (!block->bb_h.bb_numrecs) + if (!block->bb_numrecs) return 0; /* * Set the ptr value to numrecs, that's the last record/key. */ - cur->bc_ptrs[level] = be16_to_cpu(block->bb_h.bb_numrecs); + cur->bc_ptrs[level] = be16_to_cpu(block->bb_numrecs); return 1; } diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 1f528a2a375..332b9f1da20 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -63,15 +63,10 @@ typedef struct xfs_btree_lblock { /* * Combined header and structure, used by common code. */ -typedef struct xfs_btree_hdr -{ +typedef struct xfs_btree_block { __be32 bb_magic; /* magic number for block type */ __be16 bb_level; /* 0 is a leaf */ __be16 bb_numrecs; /* current # of data records */ -} xfs_btree_hdr_t; - -typedef struct xfs_btree_block { - xfs_btree_hdr_t bb_h; /* header */ union { struct { __be32 bb_leftsib; -- cgit v1.2.3-70-g09d2 From 561f7d17390d00444e6cd0b02b7516c91528082e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:53:59 +1100 Subject: [XFS] split up xfs_btree_init_cursor xfs_btree_init_cursor contains close to little shared code for the different btrees and will get even more non-common code in the future. Split it up into one routine per btree type. Because xfs_btree_dup_cursor needs to call the init routine for a generic btree cursor add a new btree operation vector that contains a dup_cursor method that initializes a new cursor based on an existing one. The btree operations vector is based on an idea and code from Dave Chinner and will grow more entries later during this series. 
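[Editor's illustration -- the operations-vector pattern introduced here, reduced to a stand-alone C sketch. All names are invented; the real vector is struct xfs_btree_ops and the real per-type constructors are xfs_allocbt/bmbt/inobt_init_cursor in the diff below.]

#include <stdlib.h>

struct demo_cur;

struct demo_cur_ops {
	struct demo_cur *(*dup_cursor)(struct demo_cur *cur);
};

struct demo_cur {
	const struct demo_cur_ops *ops;
	int level;			/* stand-in for per-btree state */
};

/* Generic code no longer switches on the btree type... */
static struct demo_cur *demo_dup_cursor(struct demo_cur *cur)
{
	return cur->ops->dup_cursor(cur);
}

/* ...because each btree type registers its own duplication routine. */
static struct demo_cur *demo_allocbt_dup(struct demo_cur *cur)
{
	struct demo_cur *new = malloc(sizeof(*new));

	if (new)
		*new = *cur;
	return new;
}

static const struct demo_cur_ops demo_allocbt_ops = {
	.dup_cursor = demo_allocbt_dup,
};
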
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32176a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 34 +++++++-------- fs/xfs/xfs_alloc_btree.c | 45 +++++++++++++++++++ fs/xfs/xfs_alloc_btree.h | 5 +++ fs/xfs/xfs_bmap.c | 17 +++----- fs/xfs/xfs_bmap_btree.c | 59 +++++++++++++++++++++++++ fs/xfs/xfs_bmap_btree.h | 4 ++ fs/xfs/xfs_btree.c | 107 ++-------------------------------------------- fs/xfs/xfs_btree.h | 20 +++------ fs/xfs/xfs_ialloc.c | 12 ++---- fs/xfs/xfs_ialloc_btree.c | 41 ++++++++++++++++++ fs/xfs/xfs_ialloc_btree.h | 4 ++ fs/xfs/xfs_itable.c | 6 +-- 12 files changed, 196 insertions(+), 158 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 1956f83489f..69833eb1de4 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -640,8 +640,8 @@ xfs_alloc_ag_vextent_exact( /* * Allocate/initialize a cursor for the by-number freespace btree. */ - bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_BNO, NULL, 0); + bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO); /* * Lookup bno and minlen in the btree (minlen is irrelevant, really). * Look for the closest free block <= bno, it must contain bno @@ -696,8 +696,8 @@ xfs_alloc_ag_vextent_exact( * We are allocating agbno for rlen [agbno .. end] * Allocate/initialize a cursor for the by-size btree. */ - cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_CNT, NULL, 0); + cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT); ASSERT(args->agbno + args->len <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, @@ -759,8 +759,8 @@ xfs_alloc_ag_vextent_near( /* * Get a cursor for the by-size btree. */ - cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_CNT, NULL, 0); + cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT); ltlen = 0; bno_cur_lt = bno_cur_gt = NULL; /* @@ -886,8 +886,8 @@ xfs_alloc_ag_vextent_near( /* * Set up a cursor for the by-bno tree. */ - bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, - args->agbp, args->agno, XFS_BTNUM_BNO, NULL, 0); + bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, + args->agbp, args->agno, XFS_BTNUM_BNO); /* * Fix up the btree entries. */ @@ -914,8 +914,8 @@ xfs_alloc_ag_vextent_near( /* * Allocate and initialize the cursor for the leftward search. */ - bno_cur_lt = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_BNO, NULL, 0); + bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO); /* * Lookup <= bno to find the leftward search's starting point. */ @@ -1267,8 +1267,8 @@ xfs_alloc_ag_vextent_size( /* * Allocate and initialize a cursor for the by-size btree. */ - cnt_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_CNT, NULL, 0); + cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_CNT); bno_cur = NULL; /* * Look for an entry >= maxlen+alignment-1 blocks. @@ -1372,8 +1372,8 @@ xfs_alloc_ag_vextent_size( /* * Allocate and initialize a cursor for the by-block tree. 
*/ - bno_cur = xfs_btree_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_BNO, NULL, 0); + bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, + args->agno, XFS_BTNUM_BNO); if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, rbno, rlen, XFSA_FIXUP_CNT_OK))) goto error0; @@ -1515,8 +1515,7 @@ xfs_free_ag_extent( /* * Allocate and initialize a cursor for the by-block btree. */ - bno_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO, NULL, - 0); + bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO); cnt_cur = NULL; /* * Look for a neighboring block on the left (lower block numbers) @@ -1575,8 +1574,7 @@ xfs_free_ag_extent( /* * Now allocate and initialize a cursor for the by-size tree. */ - cnt_cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT, NULL, - 0); + cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT); /* * Have both left and right contiguous neighbors. * Merge all three into a single free block. diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 3ce2645508a..60c121f1e81 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -2209,3 +2209,48 @@ xfs_alloc_update( } return 0; } + +STATIC struct xfs_btree_cur * +xfs_allocbt_dup_cursor( + struct xfs_btree_cur *cur) +{ + return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agbp, cur->bc_private.a.agno, + cur->bc_btnum); +} + +static const struct xfs_btree_ops xfs_allocbt_ops = { + .dup_cursor = xfs_allocbt_dup_cursor, +}; + +/* + * Allocate a new allocation btree cursor. + */ +struct xfs_btree_cur * /* new alloc btree cursor */ +xfs_allocbt_init_cursor( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* buffer for agf structure */ + xfs_agnumber_t agno, /* allocation group number */ + xfs_btnum_t btnum) /* btree identifier */ +{ + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); + struct xfs_btree_cur *cur; + + ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT); + + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); + + cur->bc_tp = tp; + cur->bc_mp = mp; + cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]); + cur->bc_btnum = btnum; + cur->bc_blocklog = mp->m_sb.sb_blocklog; + + cur->bc_ops = &xfs_allocbt_ops; + + cur->bc_private.a.agbp = agbp; + cur->bc_private.a.agno = agno; + + return cur; +} diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 5bd1a2c8bd0..60735384a4c 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -152,4 +152,9 @@ extern int xfs_alloc_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno, extern int xfs_alloc_update(struct xfs_btree_cur *cur, xfs_agblock_t bno, xfs_extlen_t len); + +extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, + struct xfs_trans *, struct xfs_buf *, + xfs_agnumber_t, xfs_btnum_t); + #endif /* __XFS_ALLOC_BTREE_H__ */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index a1aab9275d5..a84d0c30b48 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -422,8 +422,7 @@ xfs_bmap_add_attrfork_btree( if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip)) *flags |= XFS_ILOG_DBROOT; else { - cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, - XFS_DATA_FORK); + cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK); cur->bc_private.b.flist = flist; cur->bc_private.b.firstblock = *firstblock; if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat))) @@ -3441,8 +3440,7 @@ 
xfs_bmap_extents_to_btree( * Need a cursor. Can't allocate until bb_level is filled in. */ mp = ip->i_mount; - cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, - whichfork); + cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); cur->bc_private.b.firstblock = *firstblock; cur->bc_private.b.flist = flist; cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0; @@ -5029,8 +5027,7 @@ xfs_bmapi( if (abno == NULLFSBLOCK) break; if ((ifp->if_flags & XFS_IFBROOT) && !cur) { - cur = xfs_btree_init_cursor(mp, - tp, NULL, 0, XFS_BTNUM_BMAP, + cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); cur->bc_private.b.firstblock = *firstblock; @@ -5147,9 +5144,8 @@ xfs_bmapi( */ ASSERT(mval->br_blockcount <= len); if ((ifp->if_flags & XFS_IFBROOT) && !cur) { - cur = xfs_btree_init_cursor(mp, - tp, NULL, 0, XFS_BTNUM_BMAP, - ip, whichfork); + cur = xfs_bmbt_init_cursor(mp, + tp, ip, whichfork); cur->bc_private.b.firstblock = *firstblock; cur->bc_private.b.flist = flist; @@ -5440,8 +5436,7 @@ xfs_bunmapi( logflags = 0; if (ifp->if_flags & XFS_IFBROOT) { ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); - cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip, - whichfork); + cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); cur->bc_private.b.firstblock = *firstblock; cur->bc_private.b.flist = flist; cur->bc_private.b.flags = 0; diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 23efad29a5c..cfbdd00045c 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2608,3 +2608,62 @@ xfs_check_nostate_extents( } return 0; } + + +STATIC struct xfs_btree_cur * +xfs_bmbt_dup_cursor( + struct xfs_btree_cur *cur) +{ + struct xfs_btree_cur *new; + + new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp, + cur->bc_private.b.ip, cur->bc_private.b.whichfork); + + /* + * Copy the firstblock, flist, and flags values, + * since init cursor doesn't get them. + */ + new->bc_private.b.firstblock = cur->bc_private.b.firstblock; + new->bc_private.b.flist = cur->bc_private.b.flist; + new->bc_private.b.flags = cur->bc_private.b.flags; + + return new; +} + +static const struct xfs_btree_ops xfs_bmbt_ops = { + .dup_cursor = xfs_bmbt_dup_cursor, +}; + +/* + * Allocate a new bmap btree cursor. + */ +struct xfs_btree_cur * /* new bmap btree cursor */ +xfs_bmbt_init_cursor( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_inode *ip, /* inode owning the btree */ + int whichfork) /* data or attr fork */ +{ + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); + struct xfs_btree_cur *cur; + + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); + + cur->bc_tp = tp; + cur->bc_mp = mp; + cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1; + cur->bc_btnum = XFS_BTNUM_BMAP; + cur->bc_blocklog = mp->m_sb.sb_blocklog; + + cur->bc_ops = &xfs_bmbt_ops; + + cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); + cur->bc_private.b.ip = ip; + cur->bc_private.b.firstblock = NULLFSBLOCK; + cur->bc_private.b.flist = NULL; + cur->bc_private.b.allocated = 0; + cur->bc_private.b.flags = 0; + cur->bc_private.b.whichfork = whichfork; + + return cur; +} diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index cd0d4b4bb81..4f12fff5497 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -24,6 +24,7 @@ struct xfs_btree_cur; struct xfs_btree_lblock; struct xfs_mount; struct xfs_inode; +struct xfs_trans; /* * Bmap root header, on-disk form only. 
@@ -300,6 +301,9 @@ extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int); extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t, xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t); +extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *, + struct xfs_trans *, struct xfs_inode *, int); + #endif /* __KERNEL__ */ #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 31002093bfb..074f7f6aa27 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -387,16 +387,17 @@ xfs_btree_dup_cursor( tp = cur->bc_tp; mp = cur->bc_mp; + /* * Allocate a new cursor like the old one. */ - new = xfs_btree_init_cursor(mp, tp, cur->bc_private.a.agbp, - cur->bc_private.a.agno, cur->bc_btnum, cur->bc_private.b.ip, - cur->bc_private.b.whichfork); + new = cur->bc_ops->dup_cursor(cur); + /* * Copy the record currently in the cursor. */ new->bc_rec = cur->bc_rec; + /* * For each level current, re-get the buffer and copy the ptr value. */ @@ -416,15 +417,6 @@ xfs_btree_dup_cursor( } else new->bc_bufs[i] = NULL; } - /* - * For bmap btrees, copy the firstblock, flist, and flags values, - * since init cursor doesn't get them. - */ - if (new->bc_btnum == XFS_BTNUM_BMAP) { - new->bc_private.b.firstblock = cur->bc_private.b.firstblock; - new->bc_private.b.flist = cur->bc_private.b.flist; - new->bc_private.b.flags = cur->bc_private.b.flags; - } *ncur = new; return 0; } @@ -504,97 +496,6 @@ xfs_btree_get_bufs( return bp; } -/* - * Allocate a new btree cursor. - * The cursor is either for allocation (A) or bmap (B) or inodes (I). - */ -xfs_btree_cur_t * /* new btree cursor */ -xfs_btree_init_cursor( - xfs_mount_t *mp, /* file system mount point */ - xfs_trans_t *tp, /* transaction pointer */ - xfs_buf_t *agbp, /* (A only) buffer for agf structure */ - /* (I only) buffer for agi structure */ - xfs_agnumber_t agno, /* (AI only) allocation group number */ - xfs_btnum_t btnum, /* btree identifier */ - xfs_inode_t *ip, /* (B only) inode owning the btree */ - int whichfork) /* (B only) data or attr fork */ -{ - xfs_agf_t *agf; /* (A) allocation group freespace */ - xfs_agi_t *agi; /* (I) allocation group inodespace */ - xfs_btree_cur_t *cur; /* return value */ - xfs_ifork_t *ifp; /* (I) inode fork pointer */ - int nlevels=0; /* number of levels in the btree */ - - ASSERT(xfs_btree_cur_zone != NULL); - /* - * Allocate a new cursor. - */ - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); - /* - * Deduce the number of btree levels from the arguments. - */ - switch (btnum) { - case XFS_BTNUM_BNO: - case XFS_BTNUM_CNT: - agf = XFS_BUF_TO_AGF(agbp); - nlevels = be32_to_cpu(agf->agf_levels[btnum]); - break; - case XFS_BTNUM_BMAP: - ifp = XFS_IFORK_PTR(ip, whichfork); - nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1; - break; - case XFS_BTNUM_INO: - agi = XFS_BUF_TO_AGI(agbp); - nlevels = be32_to_cpu(agi->agi_level); - break; - default: - ASSERT(0); - } - /* - * Fill in the common fields. - */ - cur->bc_tp = tp; - cur->bc_mp = mp; - cur->bc_nlevels = nlevels; - cur->bc_btnum = btnum; - cur->bc_blocklog = mp->m_sb.sb_blocklog; - /* - * Fill in private fields. - */ - switch (btnum) { - case XFS_BTNUM_BNO: - case XFS_BTNUM_CNT: - /* - * Allocation btree fields. - */ - cur->bc_private.a.agbp = agbp; - cur->bc_private.a.agno = agno; - break; - case XFS_BTNUM_INO: - /* - * Inode allocation btree fields. - */ - cur->bc_private.a.agbp = agbp; - cur->bc_private.a.agno = agno; - break; - case XFS_BTNUM_BMAP: - /* - * Bmap btree fields. 
- */ - cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); - cur->bc_private.b.ip = ip; - cur->bc_private.b.firstblock = NULLFSBLOCK; - cur->bc_private.b.flist = NULL; - cur->bc_private.b.allocated = 0; - cur->bc_private.b.flags = 0; - cur->bc_private.b.whichfork = whichfork; - break; - default: - ASSERT(0); - } - return cur; -} - /* * Check for the cursor referring to the last block at the given level. */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 332b9f1da20..d30ee749860 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -131,6 +131,11 @@ extern const __uint32_t xfs_magics[]; #define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */ +struct xfs_btree_ops { + /* cursor operations */ + struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); +}; + /* * Btree cursor structure. * This collects all information needed by the btree code in one place. @@ -139,6 +144,7 @@ typedef struct xfs_btree_cur { struct xfs_trans *bc_tp; /* transaction we're in, if any */ struct xfs_mount *bc_mp; /* file system mount struct */ + const struct xfs_btree_ops *bc_ops; union { xfs_alloc_rec_incore_t a; xfs_bmbt_irec_t b; @@ -307,20 +313,6 @@ xfs_btree_get_bufs( xfs_agblock_t agbno, /* allocation group block number */ uint lock); /* lock flags for get_buf */ -/* - * Allocate a new btree cursor. - * The cursor is either for allocation (A) or bmap (B). - */ -xfs_btree_cur_t * /* new btree cursor */ -xfs_btree_init_cursor( - struct xfs_mount *mp, /* file system mount point */ - struct xfs_trans *tp, /* transaction pointer */ - struct xfs_buf *agbp, /* (A only) buffer for agf structure */ - xfs_agnumber_t agno, /* (A only) allocation group number */ - xfs_btnum_t btnum, /* btree identifier */ - struct xfs_inode *ip, /* (B only) inode owning the btree */ - int whichfork); /* (B only) data/attr fork */ - /* * Check for the cursor referring to the last block at the given level. */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index aad8c5da38a..11bb169561c 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -335,8 +335,7 @@ xfs_ialloc_ag_alloc( /* * Insert records describing the new inode chunk into the btree. */ - cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno, - XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno); for (thisino = newino; thisino < newino + newlen; thisino += XFS_INODES_PER_CHUNK) { @@ -676,8 +675,7 @@ nextag: */ agno = tagno; *IO_agbp = NULL; - cur = xfs_btree_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno), - XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno)); /* * If pagino is 0 (this is the root inode allocation) use newino. * This must work because we've just allocated some. @@ -1022,8 +1020,7 @@ xfs_difree( /* * Initialize the cursor. 
*/ - cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, - (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); #ifdef DEBUG if (cur->bc_nlevels == 1) { int freecount = 0; @@ -1259,8 +1256,7 @@ xfs_dilocate( #endif /* DEBUG */ return error; } - cur = xfs_btree_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO, - (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 83502f3edef..8c0c4748a8d 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -2076,3 +2076,44 @@ xfs_inobt_update( } return 0; } + +STATIC struct xfs_btree_cur * +xfs_inobt_dup_cursor( + struct xfs_btree_cur *cur) +{ + return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agbp, cur->bc_private.a.agno); +} + +static const struct xfs_btree_ops xfs_inobt_ops = { + .dup_cursor = xfs_inobt_dup_cursor, +}; + +/* + * Allocate a new inode btree cursor. + */ +struct xfs_btree_cur * /* new inode btree cursor */ +xfs_inobt_init_cursor( + struct xfs_mount *mp, /* file system mount point */ + struct xfs_trans *tp, /* transaction pointer */ + struct xfs_buf *agbp, /* buffer for agi structure */ + xfs_agnumber_t agno) /* allocation group number */ +{ + struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); + struct xfs_btree_cur *cur; + + cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP); + + cur->bc_tp = tp; + cur->bc_mp = mp; + cur->bc_nlevels = be32_to_cpu(agi->agi_level); + cur->bc_btnum = XFS_BTNUM_INO; + cur->bc_blocklog = mp->m_sb.sb_blocklog; + + cur->bc_ops = &xfs_inobt_ops; + + cur->bc_private.a.agbp = agbp; + cur->bc_private.a.agno = agno; + + return cur; +} diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 8efc4a5b8b9..eea409349eb 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -175,4 +175,8 @@ extern int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, extern int xfs_inobt_update(struct xfs_btree_cur *cur, xfs_agino_t ino, __int32_t fcnt, xfs_inofree_t free); + +extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, + struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); + #endif /* __XFS_IALLOC_BTREE_H__ */ diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 4f4c9394106..a5f02f0e4c2 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -416,8 +416,7 @@ xfs_bulkstat( /* * Allocate and initialize a btree cursor for ialloc btree. */ - cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_INO, - (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno); irbp = irbuf; irbufend = irbuf + nirbuf; end_of_ag = 0; @@ -842,8 +841,7 @@ xfs_inumbers( agino = 0; continue; } - cur = xfs_btree_init_cursor(mp, NULL, agbp, agno, - XFS_BTNUM_INO, (xfs_inode_t *)0, 0); + cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno); error = xfs_inobt_lookup_ge(cur, agino, 0, 0, &tmp); if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); -- cgit v1.2.3-70-g09d2 From de227dd9604934d2a6d33cd332d1be431719c93e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:54:12 +1100 Subject: [XFS] add generic btree types Add generic union types for btree pointers, keys and records. The generic btree pointer contains either a 32 and 64bit big endian scalar for short and long form btrees, and the key and record contain the relevant type for each possible btree. 
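[Editor's illustration -- a hedged sketch of how such a pointer union is typically consumed; the helper below is invented (later patches in this series add the real accessors). In the kernel the members are big-endian (__be32/__be64) and go through be32_to_cpu()/be64_to_cpu(); plain integers keep this sketch stand-alone.]

#include <stdint.h>

union demo_ptr {
	uint32_t s;		/* short form: AG-relative block number */
	uint64_t l;		/* long form: global block number */
};

/* The cursor's long/short flag selects which member to decode. */
static uint64_t demo_ptr_value(int long_ptrs, const union demo_ptr *ptr)
{
	return long_ptrs ? ptr->l : ptr->s;
}
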
Split out from a bigger patch from Dave Chinner and simplified a little further. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32178a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_btree.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index d30ee749860..428e81f0e27 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -79,6 +79,31 @@ typedef struct xfs_btree_block { } bb_u; /* rest */ } xfs_btree_block_t; +/* + * Generic key, ptr and record wrapper structures. + * + * These are disk format structures, and are converted where necessary + * by the btree specific code that needs to interpret them. + */ +union xfs_btree_ptr { + __be32 s; /* short form ptr */ + __be64 l; /* long form ptr */ +}; + +union xfs_btree_key { + xfs_bmbt_key_t bmbt; + xfs_bmdr_key_t bmbr; /* bmbt root block */ + xfs_alloc_key_t alloc; + xfs_inobt_key_t inobt; +}; + +union xfs_btree_rec { + xfs_bmbt_rec_t bmbt; + xfs_bmdr_rec_t bmbr; /* bmbt root block */ + xfs_alloc_rec_t alloc; + xfs_inobt_rec_t inobt; +}; + /* * For logging record fields. */ -- cgit v1.2.3-70-g09d2 From 8186e517fab1854554c48955cdbcbb6710e7baef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:54:22 +1100 Subject: [XFS] make btree root in inode support generic The bmap btree is rooted in the inode and not in a disk block. Make the support for this feature more generic by adding a btree flag to for this feature instead of relying on the XFS_BTNUM_BMAP btnum check. Also clean up xfs_btree_get_block where this new flag is used. Based upon a patch from Dave Chinner. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32180a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_bmap_btree.c | 1 + fs/xfs/xfs_btree.c | 47 +++++++++++++++++++++++++++-------------------- fs/xfs/xfs_btree.h | 5 +++++ 3 files changed, 33 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index cfbdd00045c..d9bbed676e0 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2656,6 +2656,7 @@ xfs_bmbt_init_cursor( cur->bc_blocklog = mp->m_sb.sb_blocklog; cur->bc_ops = &xfs_bmbt_ops; + cur->bc_flags = XFS_BTREE_ROOT_IN_INODE; cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); cur->bc_private.b.ip = ip; diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 074f7f6aa27..57e858fbf68 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -421,33 +421,40 @@ xfs_btree_dup_cursor( return 0; } +/* + * Get a the root block which is stored in the inode. + * + * For now this btree implementation assumes the btree root is always + * stored in the if_broot field of an inode fork. + */ +STATIC struct xfs_btree_block * +xfs_btree_get_iroot( + struct xfs_btree_cur *cur) +{ + struct xfs_ifork *ifp; + + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork); + return (struct xfs_btree_block *)ifp->if_broot; +} + /* * Retrieve the block pointer from the cursor at the given level. - * This may be a bmap btree root or from a buffer. + * This may be an inode btree root or from a buffer. 
*/ -STATIC xfs_btree_block_t * /* generic btree block pointer */ +STATIC struct xfs_btree_block * /* generic btree block pointer */ xfs_btree_get_block( - xfs_btree_cur_t *cur, /* btree cursor */ + struct xfs_btree_cur *cur, /* btree cursor */ int level, /* level in btree */ - xfs_buf_t **bpp) /* buffer containing the block */ + struct xfs_buf **bpp) /* buffer containing the block */ { - xfs_btree_block_t *block; /* return value */ - xfs_buf_t *bp; /* return buffer */ - xfs_ifork_t *ifp; /* inode fork pointer */ - int whichfork; /* data or attr fork */ - - if (cur->bc_btnum == XFS_BTNUM_BMAP && level == cur->bc_nlevels - 1) { - whichfork = cur->bc_private.b.whichfork; - ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, whichfork); - block = (xfs_btree_block_t *)ifp->if_broot; - bp = NULL; - } else { - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_BLOCK(bp); + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + (level == cur->bc_nlevels - 1)) { + *bpp = NULL; + return xfs_btree_get_iroot(cur); } - ASSERT(block != NULL); - *bpp = bp; - return block; + + *bpp = cur->bc_bufs[level]; + return XFS_BUF_TO_BLOCK(*bpp); } /* diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 428e81f0e27..fefbc69e500 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -170,6 +170,7 @@ typedef struct xfs_btree_cur struct xfs_trans *bc_tp; /* transaction we're in, if any */ struct xfs_mount *bc_mp; /* file system mount struct */ const struct xfs_btree_ops *bc_ops; + uint bc_flags; /* btree features - below */ union { xfs_alloc_rec_incore_t a; xfs_bmbt_irec_t b; @@ -201,6 +202,10 @@ typedef struct xfs_btree_cur } bc_private; /* per-btree type data */ } xfs_btree_cur_t; +/* cursor flags */ +#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */ + + #define XFS_BTREE_NOERROR 0 #define XFS_BTREE_ERROR 1 -- cgit v1.2.3-70-g09d2 From e99ab90d6a9e8ac92f05d2c31d44aa7feee15394 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:54:33 +1100 Subject: [XFS] add a long pointers flag to xfs_btree_cur Add a flag to the xfs btree cursor when using long (64bit) block pointers instead of checking btnum == XFS_BTNUM_BMAP. 
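[Editor's illustration -- the feature-flag test reduced to a sketch. The flag value mirrors XFS_BTREE_LONG_PTRS from the diff below; everything else is invented.]

#define DEMO_BTREE_LONG_PTRS	(1 << 0)

struct flag_cur {
	unsigned int flags;
};

/* Any btree that sets the flag uses 64-bit sibling pointers, rather than
 * hard-coding "is this the bmap btree?". */
static int demo_has_long_ptrs(const struct flag_cur *cur)
{
	return (cur->flags & DEMO_BTREE_LONG_PTRS) != 0;
}
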
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32181a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_bmap_btree.c | 2 +- fs/xfs/xfs_btree.c | 6 +++--- fs/xfs/xfs_btree.h | 6 +----- 3 files changed, 5 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index d9bbed676e0..1ec494e111b 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2656,7 +2656,7 @@ xfs_bmbt_init_cursor( cur->bc_blocklog = mp->m_sb.sb_blocklog; cur->bc_ops = &xfs_bmbt_ops; - cur->bc_flags = XFS_BTREE_ROOT_IN_INODE; + cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE; cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork); cur->bc_private.b.ip = ip; diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 57e858fbf68..59796b42e9c 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -90,7 +90,7 @@ xfs_btree_check_block( int level, /* level of the btree block */ xfs_buf_t *bp) /* buffer containing block, if any */ { - if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level, bp); else @@ -516,7 +516,7 @@ xfs_btree_islastblock( block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); - if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; else return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; @@ -808,7 +808,7 @@ xfs_btree_setbuf( if (!bp) return; b = XFS_BUF_TO_BLOCK(bp); - if (XFS_BTREE_LONG_PTRS(cur->bc_btnum)) { + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index fefbc69e500..dd93fd39c56 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -115,11 +115,6 @@ union xfs_btree_rec { #define XFS_BB_NUM_BITS 5 #define XFS_BB_ALL_BITS ((1 << XFS_BB_NUM_BITS) - 1) -/* - * Boolean to select which form of xfs_btree_block_t.bb_u to use. - */ -#define XFS_BTREE_LONG_PTRS(btnum) ((btnum) == XFS_BTNUM_BMAP) - /* * Magic numbers for btree blocks. */ @@ -203,6 +198,7 @@ typedef struct xfs_btree_cur } xfs_btree_cur_t; /* cursor flags */ +#define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */ #define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */ -- cgit v1.2.3-70-g09d2 From b524bfeee2152fa64b6210f28ced80489b9d2439 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:54:43 +1100 Subject: [XFS] refactor xfs_btree_readahead From: Dave Chinner Refactor xfs_btree_readahead to make it more readable: (a) remove the inline xfs_btree_readahead wrapper and move all checks out of line into the main routine. 
(b) factor out helpers for short/long form btrees (c) move check for root in inodes from the callers into xfs_btree_readahead [hch: split out from a big patch and minor cleanups] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32182a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_bmap_btree.c | 15 +++--- fs/xfs/xfs_btree.c | 118 ++++++++++++++++++++++++++++-------------------- fs/xfs/xfs_btree.h | 15 +----- 3 files changed, 76 insertions(+), 72 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 1ec494e111b..519249e2053 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -1721,8 +1721,9 @@ xfs_bmbt_decrement( XFS_BMBT_TRACE_CURSOR(cur, ENTRY); XFS_BMBT_TRACE_ARGI(cur, level); ASSERT(level < cur->bc_nlevels); - if (level < cur->bc_nlevels - 1) - xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + if (--cur->bc_ptrs[level] > 0) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); *stat = 1; @@ -1743,8 +1744,7 @@ xfs_bmbt_decrement( for (lev = level + 1; lev < cur->bc_nlevels; lev++) { if (--cur->bc_ptrs[lev] > 0) break; - if (lev < cur->bc_nlevels - 1) - xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); } if (lev == cur->bc_nlevels) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); @@ -1995,8 +1995,8 @@ xfs_bmbt_increment( XFS_BMBT_TRACE_CURSOR(cur, ENTRY); XFS_BMBT_TRACE_ARGI(cur, level); ASSERT(level < cur->bc_nlevels); - if (level < cur->bc_nlevels - 1) - xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); block = xfs_bmbt_get_block(cur, level, &bp); #ifdef DEBUG if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { @@ -2024,8 +2024,7 @@ xfs_bmbt_increment( #endif if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) break; - if (lev < cur->bc_nlevels - 1) - xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); } if (lev == cur->bc_nlevels) { XFS_BMBT_TRACE_CURSOR(cur, EXIT); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 59796b42e9c..4d793e4ccdc 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -725,66 +725,84 @@ xfs_btree_reada_bufs( xfs_baread(mp->m_ddev_targp, d, mp->m_bsize * count); } +STATIC int +xfs_btree_readahead_lblock( + struct xfs_btree_cur *cur, + int lr, + struct xfs_btree_block *block) +{ + int rval = 0; + xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); + xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); + + if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, left, 1); + rval++; + } + + if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) { + xfs_btree_reada_bufl(cur->bc_mp, right, 1); + rval++; + } + + return rval; +} + +STATIC int +xfs_btree_readahead_sblock( + struct xfs_btree_cur *cur, + int lr, + struct xfs_btree_block *block) +{ + int rval = 0; + xfs_agblock_t left = be32_to_cpu(block->bb_u.s.bb_leftsib); + xfs_agblock_t right = be32_to_cpu(block->bb_u.s.bb_rightsib); + + + if ((lr & XFS_BTCUR_LEFTRA) && left != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + left, 1); + rval++; + } + + if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLAGBLOCK) { + xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, + right, 1); + rval++; + } + + return rval; +} + /* * Read-ahead btree blocks, at the given level. 
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. */ int -xfs_btree_readahead_core( - xfs_btree_cur_t *cur, /* btree cursor */ +xfs_btree_readahead( + struct xfs_btree_cur *cur, /* btree cursor */ int lev, /* level in btree */ int lr) /* left/right bits */ { - xfs_alloc_block_t *a; - xfs_bmbt_block_t *b; - xfs_inobt_block_t *i; - int rval = 0; + struct xfs_btree_block *block; + + /* + * No readahead needed if we are at the root level and the + * btree root is stored in the inode. + */ + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + (lev == cur->bc_nlevels - 1)) + return 0; + + if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev]) + return 0; - ASSERT(cur->bc_bufs[lev] != NULL); cur->bc_ra[lev] |= lr; - switch (cur->bc_btnum) { - case XFS_BTNUM_BNO: - case XFS_BTNUM_CNT: - a = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(a->bb_leftsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - be32_to_cpu(a->bb_leftsib), 1); - rval++; - } - if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(a->bb_rightsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - be32_to_cpu(a->bb_rightsib), 1); - rval++; - } - break; - case XFS_BTNUM_BMAP: - b = XFS_BUF_TO_BMBT_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && be64_to_cpu(b->bb_leftsib) != NULLDFSBNO) { - xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_leftsib), 1); - rval++; - } - if ((lr & XFS_BTCUR_RIGHTRA) && be64_to_cpu(b->bb_rightsib) != NULLDFSBNO) { - xfs_btree_reada_bufl(cur->bc_mp, be64_to_cpu(b->bb_rightsib), 1); - rval++; - } - break; - case XFS_BTNUM_INO: - i = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); - if ((lr & XFS_BTCUR_LEFTRA) && be32_to_cpu(i->bb_leftsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - be32_to_cpu(i->bb_leftsib), 1); - rval++; - } - if ((lr & XFS_BTCUR_RIGHTRA) && be32_to_cpu(i->bb_rightsib) != NULLAGBLOCK) { - xfs_btree_reada_bufs(cur->bc_mp, cur->bc_private.a.agno, - be32_to_cpu(i->bb_rightsib), 1); - rval++; - } - break; - default: - ASSERT(0); - } - return rval; + block = XFS_BUF_TO_BLOCK(cur->bc_bufs[lev]); + + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + return xfs_btree_readahead_lblock(cur, lr, block); + return xfs_btree_readahead_sblock(cur, lr, block); } /* diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index dd93fd39c56..8be838f0154 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -421,23 +421,10 @@ xfs_btree_reada_bufs( * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. */ int /* readahead block count */ -xfs_btree_readahead_core( - xfs_btree_cur_t *cur, /* btree cursor */ - int lev, /* level in btree */ - int lr); /* left/right bits */ - -static inline int /* readahead block count */ xfs_btree_readahead( xfs_btree_cur_t *cur, /* btree cursor */ int lev, /* level in btree */ - int lr) /* left/right bits */ -{ - if ((cur->bc_ra[lev] | lr) == cur->bc_ra[lev]) - return 0; - - return xfs_btree_readahead_core(cur, lev, lr); -} - + int lr); /* left/right bits */ /* * Set the buffer for level "lev" in the cursor to bp, releasing -- cgit v1.2.3-70-g09d2 From a23f6ef8ce966abc0f6e24a81ceb6a74ed30693b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:54:53 +1100 Subject: [XFS] refactor btree validation helpers Move the various btree validation helpers around in xfs_btree.c so that they are close to each other and in common #ifdef DEBUG sections. 
Also add a new xfs_btree_check_ptr helper to check a btree ptr that can be either long or short form. Split out from a bigger patch from Dave Chinner with various small changes applied by me. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32183a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_btree.c | 196 +++++++++++++++++++++++++++-------------------------- fs/xfs/xfs_btree.h | 97 ++++++++++++++------------ 2 files changed, 155 insertions(+), 138 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 4d793e4ccdc..966d58d50fa 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -80,24 +80,6 @@ xfs_btree_maxrecs( */ #ifdef DEBUG -/* - * Debug routine: check that block header is ok. - */ -void -xfs_btree_check_block( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_block_t *block, /* generic btree block pointer */ - int level, /* level of the btree block */ - xfs_buf_t *bp) /* buffer containing block, if any */ -{ - if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - xfs_btree_check_lblock(cur, (xfs_btree_lblock_t *)block, level, - bp); - else - xfs_btree_check_sblock(cur, (xfs_btree_sblock_t *)block, level, - bp); -} - /* * Debug routine: check that keys are in the right order. */ @@ -150,65 +132,7 @@ xfs_btree_check_key( ASSERT(0); } } -#endif /* DEBUG */ - -/* - * Checking routine: check that long form block header is ok. - */ -/* ARGSUSED */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_lblock( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_lblock_t *block, /* btree long form block pointer */ - int level, /* level of the btree block */ - xfs_buf_t *bp) /* buffer for block, if any */ -{ - int lblock_ok; /* block passes checks */ - xfs_mount_t *mp; /* file system mount point */ - - mp = cur->bc_mp; - lblock_ok = - be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && - be16_to_cpu(block->bb_level) == level && - be16_to_cpu(block->bb_numrecs) <= - xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && - block->bb_leftsib && - (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && - block->bb_rightsib && - (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib))); - if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, - XFS_RANDOM_BTREE_CHECK_LBLOCK))) { - if (bp) - xfs_buftrace("LBTREE ERROR", bp); - XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, - mp); - return XFS_ERROR(EFSCORRUPTED); - } - return 0; -} -/* - * Checking routine: check that (long) pointer is ok. - */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_lptr( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_dfsbno_t ptr, /* btree block disk address */ - int level) /* btree block level */ -{ - xfs_mount_t *mp; /* file system mount point */ - - mp = cur->bc_mp; - XFS_WANT_CORRUPTED_RETURN( - level > 0 && - ptr != NULLDFSBNO && - XFS_FSB_SANITY_CHECK(mp, ptr)); - return 0; -} - -#ifdef DEBUG /* * Debug routine: check that records are in the right order. */ @@ -268,19 +192,49 @@ xfs_btree_check_rec( } #endif /* DEBUG */ -/* - * Checking routine: check that block header is ok. 
- */ -/* ARGSUSED */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_lblock *block, /* btree long form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp) /* buffer for block, if any */ +{ + int lblock_ok; /* block passes checks */ + struct xfs_mount *mp; /* file system mount point */ + + mp = cur->bc_mp; + lblock_ok = + be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && + be16_to_cpu(block->bb_level) == level && + be16_to_cpu(block->bb_numrecs) <= + xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + block->bb_leftsib && + (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && + block->bb_rightsib && + (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib))); + if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, + XFS_ERRTAG_BTREE_CHECK_LBLOCK, + XFS_RANDOM_BTREE_CHECK_LBLOCK))) { + if (bp) + xfs_buftrace("LBTREE ERROR", bp); + XFS_ERROR_REPORT("xfs_btree_check_lblock", XFS_ERRLEVEL_LOW, + mp); + return XFS_ERROR(EFSCORRUPTED); + } + return 0; +} + int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sblock( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_sblock_t *block, /* btree short form block pointer */ + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_sblock *block, /* btree short form block pointer */ int level, /* level of the btree block */ - xfs_buf_t *bp) /* buffer containing block */ + struct xfs_buf *bp) /* buffer containing block */ { - xfs_buf_t *agbp; /* buffer for ag. freespace struct */ - xfs_agf_t *agf; /* ag. freespace structure */ + struct xfs_buf *agbp; /* buffer for ag. freespace struct */ + struct xfs_agf *agf; /* ag. freespace structure */ xfs_agblock_t agflen; /* native ag. freespace length */ int sblock_ok; /* block passes checks */ @@ -311,26 +265,78 @@ xfs_btree_check_sblock( } /* - * Checking routine: check that (short) pointer is ok. + * Debug routine: check that block header is ok. + */ +int +xfs_btree_check_block( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_block *block, /* generic btree block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp) /* buffer containing block, if any */ +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + return xfs_btree_check_lblock(cur, + (struct xfs_btree_lblock *)block, level, bp); + } else { + return xfs_btree_check_sblock(cur, + (struct xfs_btree_sblock *)block, level, bp); + } +} + +/* + * Check that (long) pointer is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lptr( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_dfsbno_t bno, /* btree block disk address */ + int level) /* btree block level */ +{ + XFS_WANT_CORRUPTED_RETURN( + level > 0 && + bno != NULLDFSBNO && + XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); + return 0; +} + +/* + * Check that (short) pointer is ok. */ int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sptr( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t ptr, /* btree block disk address */ - int level) /* btree block level */ + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* btree block disk address */ + int level) /* btree block level */ { - xfs_buf_t *agbp; /* buffer for ag. freespace struct */ - xfs_agf_t *agf; /* ag. 
freespace structure */ + xfs_agblock_t agblocks = cur->bc_mp->m_sb.sb_agblocks; - agbp = cur->bc_private.a.agbp; - agf = XFS_BUF_TO_AGF(agbp); XFS_WANT_CORRUPTED_RETURN( level > 0 && - ptr != NULLAGBLOCK && ptr != 0 && - ptr < be32_to_cpu(agf->agf_length)); + bno != NULLAGBLOCK && + bno != 0 && + bno < agblocks); return 0; } +/* + * Check that block ptr is ok. + */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_ptr( + struct xfs_btree_cur *cur, /* btree cursor */ + union xfs_btree_ptr *ptr, /* btree block disk address */ + int index, /* offset from ptr to check */ + int level) /* btree block level */ +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + return xfs_btree_check_lptr(cur, + be64_to_cpu((&ptr->l)[index]), level); + } else { + return xfs_btree_check_sptr(cur, + be32_to_cpu((&ptr->s)[index]), level); + } +} + /* * Delete the btree cursor. */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 8be838f0154..a57918276d9 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -215,81 +215,92 @@ typedef struct xfs_btree_cur #ifdef __KERNEL__ -#ifdef DEBUG /* - * Debug routine: check that block header is ok. + * Check that long form block header is ok. */ -void -xfs_btree_check_block( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_block_t *block, /* generic btree block pointer */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_lblock( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_lblock *block, /* btree long form block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp); /* buffer containing block, if any */ /* - * Debug routine: check that keys are in the right order. + * Check that short form block header is ok. */ -void -xfs_btree_check_key( - xfs_btnum_t btnum, /* btree identifier */ - void *ak1, /* pointer to left (lower) key */ - void *ak2); /* pointer to right (higher) key */ - -/* - * Debug routine: check that records are in the right order. - */ -void -xfs_btree_check_rec( - xfs_btnum_t btnum, /* btree identifier */ - void *ar1, /* pointer to left (lower) record */ - void *ar2); /* pointer to right (higher) record */ -#else -#define xfs_btree_check_block(a,b,c,d) -#define xfs_btree_check_key(a,b,c) -#define xfs_btree_check_rec(a,b,c) -#endif /* DEBUG */ +int /* error (0 or EFSCORRUPTED) */ +xfs_btree_check_sblock( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_sblock *block, /* btree short form block pointer */ + int level, /* level of the btree block */ + struct xfs_buf *bp); /* buffer containing block */ /* - * Checking routine: check that long form block header is ok. + * Check that block header is ok. */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_lblock( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_lblock_t *block, /* btree long form block pointer */ +int +xfs_btree_check_block( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_btree_block *block, /* generic btree block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp); /* buffer containing block, if any */ /* - * Checking routine: check that (long) pointer is ok. + * Check that (long) pointer is ok. 
*/ int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_lptr( - xfs_btree_cur_t *cur, /* btree cursor */ + struct xfs_btree_cur *cur, /* btree cursor */ xfs_dfsbno_t ptr, /* btree block disk address */ int level); /* btree block level */ #define xfs_btree_check_lptr_disk(cur, ptr, level) \ xfs_btree_check_lptr(cur, be64_to_cpu(ptr), level) + /* - * Checking routine: check that short form block header is ok. + * Check that (short) pointer is ok. */ int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_sblock( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_sblock_t *block, /* btree short form block pointer */ - int level, /* level of the btree block */ - struct xfs_buf *bp); /* buffer containing block */ +xfs_btree_check_sptr( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t ptr, /* btree block disk address */ + int level); /* btree block level */ /* - * Checking routine: check that (short) pointer is ok. + * Check that (short) pointer is ok. */ int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_sptr( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t ptr, /* btree block disk address */ +xfs_btree_check_ptr( + struct xfs_btree_cur *cur, /* btree cursor */ + union xfs_btree_ptr *ptr, /* btree block disk address */ + int index, /* offset from ptr to check */ int level); /* btree block level */ +#ifdef DEBUG + +/* + * Debug routine: check that keys are in the right order. + */ +void +xfs_btree_check_key( + xfs_btnum_t btnum, /* btree identifier */ + void *ak1, /* pointer to left (lower) key */ + void *ak2); /* pointer to right (higher) key */ + +/* + * Debug routine: check that records are in the right order. + */ +void +xfs_btree_check_rec( + xfs_btnum_t btnum, /* btree identifier */ + void *ar1, /* pointer to left (lower) record */ + void *ar2); /* pointer to right (higher) record */ +#else +#define xfs_btree_check_key(a, b, c) +#define xfs_btree_check_rec(a, b, c) +#endif /* DEBUG */ + /* * Delete the btree cursor. */ -- cgit v1.2.3-70-g09d2 From 854929f05831d3a290a802815ee955b96c740c61 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 16:55:03 +1100 Subject: [XFS] add new btree statistics From: Dave Chinner Introduce statistics coverage of all the btrees and cover all the btree operations, not just some. Invaluable for determining test code coverage of all the btree operations.... 
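For illustration only (not part of the patch): a minimal userspace sketch that picks out the four new version-2 btree counter groups. It assumes the counters are exported through the existing /proc/fs/xfs/stat interface, one "name value value ..." line per group, as the xfs_read_xfsstats table in the diff below suggests.

#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/fs/xfs/stat", "r");   /* assumed export path */
        char line[1024];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                /* the four groups added by this patch */
                if (!strncmp(line, "abtb2 ", 6) || !strncmp(line, "abtc2 ", 6) ||
                    !strncmp(line, "bmbt2 ", 6) || !strncmp(line, "ibt2 ", 5))
                        fputs(line, stdout);
        }
        fclose(f);
        return 0;
}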
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32184a Signed-off-by: David Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell --- fs/xfs/linux-2.6/xfs_stats.c | 4 +++ fs/xfs/linux-2.6/xfs_stats.h | 65 ++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 28 +++++++++++++++++++ 3 files changed, 97 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index 3d5b67c075c..64f4ec90b8b 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c @@ -53,6 +53,10 @@ xfs_read_xfsstats( { "icluster", XFSSTAT_END_INODE_CLUSTER }, { "vnodes", XFSSTAT_END_VNODE_OPS }, { "buf", XFSSTAT_END_BUF }, + { "abtb2", XFSSTAT_END_ABTB_V2 }, + { "abtc2", XFSSTAT_END_ABTC_V2 }, + { "bmbt2", XFSSTAT_END_BMBT_V2 }, + { "ibt2", XFSSTAT_END_IBT_V2 }, }; /* Loop over all stats groups */ diff --git a/fs/xfs/linux-2.6/xfs_stats.h b/fs/xfs/linux-2.6/xfs_stats.h index e83820febc9..736854b1ca1 100644 --- a/fs/xfs/linux-2.6/xfs_stats.h +++ b/fs/xfs/linux-2.6/xfs_stats.h @@ -118,6 +118,71 @@ struct xfsstats { __uint32_t xb_page_retries; __uint32_t xb_page_found; __uint32_t xb_get_read; +/* Version 2 btree counters */ +#define XFSSTAT_END_ABTB_V2 (XFSSTAT_END_BUF+15) + __uint32_t xs_abtb_2_lookup; + __uint32_t xs_abtb_2_compare; + __uint32_t xs_abtb_2_insrec; + __uint32_t xs_abtb_2_delrec; + __uint32_t xs_abtb_2_newroot; + __uint32_t xs_abtb_2_killroot; + __uint32_t xs_abtb_2_increment; + __uint32_t xs_abtb_2_decrement; + __uint32_t xs_abtb_2_lshift; + __uint32_t xs_abtb_2_rshift; + __uint32_t xs_abtb_2_split; + __uint32_t xs_abtb_2_join; + __uint32_t xs_abtb_2_alloc; + __uint32_t xs_abtb_2_free; + __uint32_t xs_abtb_2_moves; +#define XFSSTAT_END_ABTC_V2 (XFSSTAT_END_ABTB_V2+15) + __uint32_t xs_abtc_2_lookup; + __uint32_t xs_abtc_2_compare; + __uint32_t xs_abtc_2_insrec; + __uint32_t xs_abtc_2_delrec; + __uint32_t xs_abtc_2_newroot; + __uint32_t xs_abtc_2_killroot; + __uint32_t xs_abtc_2_increment; + __uint32_t xs_abtc_2_decrement; + __uint32_t xs_abtc_2_lshift; + __uint32_t xs_abtc_2_rshift; + __uint32_t xs_abtc_2_split; + __uint32_t xs_abtc_2_join; + __uint32_t xs_abtc_2_alloc; + __uint32_t xs_abtc_2_free; + __uint32_t xs_abtc_2_moves; +#define XFSSTAT_END_BMBT_V2 (XFSSTAT_END_ABTC_V2+15) + __uint32_t xs_bmbt_2_lookup; + __uint32_t xs_bmbt_2_compare; + __uint32_t xs_bmbt_2_insrec; + __uint32_t xs_bmbt_2_delrec; + __uint32_t xs_bmbt_2_newroot; + __uint32_t xs_bmbt_2_killroot; + __uint32_t xs_bmbt_2_increment; + __uint32_t xs_bmbt_2_decrement; + __uint32_t xs_bmbt_2_lshift; + __uint32_t xs_bmbt_2_rshift; + __uint32_t xs_bmbt_2_split; + __uint32_t xs_bmbt_2_join; + __uint32_t xs_bmbt_2_alloc; + __uint32_t xs_bmbt_2_free; + __uint32_t xs_bmbt_2_moves; +#define XFSSTAT_END_IBT_V2 (XFSSTAT_END_BMBT_V2+15) + __uint32_t xs_ibt_2_lookup; + __uint32_t xs_ibt_2_compare; + __uint32_t xs_ibt_2_insrec; + __uint32_t xs_ibt_2_delrec; + __uint32_t xs_ibt_2_newroot; + __uint32_t xs_ibt_2_killroot; + __uint32_t xs_ibt_2_increment; + __uint32_t xs_ibt_2_decrement; + __uint32_t xs_ibt_2_lshift; + __uint32_t xs_ibt_2_rshift; + __uint32_t xs_ibt_2_split; + __uint32_t xs_ibt_2_join; + __uint32_t xs_ibt_2_alloc; + __uint32_t xs_ibt_2_free; + __uint32_t xs_ibt_2_moves; /* Extra precision counters */ __uint64_t xs_xstrat_bytes; __uint64_t xs_write_bytes; diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index a57918276d9..57d3bd37526 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -120,6 +120,34 @@ union 
xfs_btree_rec { */ extern const __uint32_t xfs_magics[]; +/* + * Generic stats interface + */ +#define __XFS_BTREE_STATS_INC(type, stat) \ + XFS_STATS_INC(xs_ ## type ## _2_ ## stat) +#define XFS_BTREE_STATS_INC(cur, stat) \ +do { \ + switch (cur->bc_btnum) { \ + case XFS_BTNUM_BNO: __XFS_BTREE_STATS_INC(abtb, stat); break; \ + case XFS_BTNUM_CNT: __XFS_BTREE_STATS_INC(abtc, stat); break; \ + case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(bmbt, stat); break; \ + case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(ibt, stat); break; \ + case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \ + } \ +} while (0) + +#define __XFS_BTREE_STATS_ADD(type, stat, val) \ + XFS_STATS_ADD(xs_ ## type ## _2_ ## stat, val) +#define XFS_BTREE_STATS_ADD(cur, stat, val) \ +do { \ + switch (cur->bc_btnum) { \ + case XFS_BTNUM_BNO: __XFS_BTREE_STATS_ADD(abtb, stat, val); break; \ + case XFS_BTNUM_CNT: __XFS_BTREE_STATS_ADD(abtc, stat, val); break; \ + case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_ADD(bmbt, stat, val); break; \ + case XFS_BTNUM_INO: __XFS_BTREE_STATS_ADD(ibt, stat, val); break; \ + case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \ + } \ +} while (0) /* * Maximum and minimum records in a btree block. * Given block size, type prefix, and leaf flag (0 or 1). -- cgit v1.2.3-70-g09d2 From 8c4ed633e65d0bd0a25d45aad9b4646e3236cad7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:55:13 +1100 Subject: [XFS] make btree tracing generic Make the existing bmap btree tracing generic so that it applies to all btree types. Some fragments lifted from a patch by Dave Chinner. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32187a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/Makefile | 3 +- fs/xfs/linux-2.6/xfs_super.c | 24 ++- fs/xfs/xfs.h | 2 +- fs/xfs/xfs_alloc_btree.c | 73 +++++++++ fs/xfs/xfs_bmap_btree.c | 356 ++++++++++++++----------------------------- fs/xfs/xfs_bmap_btree.h | 18 --- fs/xfs/xfs_btree.h | 19 +++ fs/xfs/xfs_ialloc_btree.c | 73 +++++++++ fs/xfs/xfs_inode.c | 5 +- fs/xfs/xfs_inode.h | 2 +- 10 files changed, 309 insertions(+), 266 deletions(-) (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 737c9a42536..75b2be72c39 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -91,7 +91,8 @@ xfs-y += xfs_alloc.o \ xfs_dmops.o \ xfs_qmops.o -xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o +xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o \ + xfs_dir2_trace.o # Objects in linux/ xfs-y += $(addprefix $(XFS_LINUX)/, \ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9bfb26066a8..0c71c215853 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -36,6 +36,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" @@ -1916,10 +1917,19 @@ xfs_alloc_trace_bufs(void) if (!xfs_bmap_trace_buf) goto out_free_alloc_trace; #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE + xfs_allocbt_trace_buf = ktrace_alloc(XFS_ALLOCBT_TRACE_SIZE, + KM_MAYFAIL); + if (!xfs_allocbt_trace_buf) + goto out_free_bmap_trace; + + xfs_inobt_trace_buf = ktrace_alloc(XFS_INOBT_TRACE_SIZE, KM_MAYFAIL); + if (!xfs_inobt_trace_buf) + goto out_free_allocbt_trace; + xfs_bmbt_trace_buf = ktrace_alloc(XFS_BMBT_TRACE_SIZE, KM_MAYFAIL); if (!xfs_bmbt_trace_buf) - goto out_free_bmap_trace; + goto out_free_inobt_trace; #endif #ifdef XFS_ATTR_TRACE 
xfs_attr_trace_buf = ktrace_alloc(XFS_ATTR_TRACE_SIZE, KM_MAYFAIL); @@ -1941,8 +1951,12 @@ xfs_alloc_trace_bufs(void) ktrace_free(xfs_attr_trace_buf); out_free_bmbt_trace: #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE ktrace_free(xfs_bmbt_trace_buf); + out_free_inobt_trace: + ktrace_free(xfs_inobt_trace_buf); + out_free_allocbt_trace: + ktrace_free(xfs_allocbt_trace_buf); out_free_bmap_trace: #endif #ifdef XFS_BMAP_TRACE @@ -1965,8 +1979,10 @@ xfs_free_trace_bufs(void) #ifdef XFS_ATTR_TRACE ktrace_free(xfs_attr_trace_buf); #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE ktrace_free(xfs_bmbt_trace_buf); + ktrace_free(xfs_inobt_trace_buf); + ktrace_free(xfs_allocbt_trace_buf); #endif #ifdef XFS_BMAP_TRACE ktrace_free(xfs_bmap_trace_buf); diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 540e4c98982..17254b529c5 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -30,7 +30,7 @@ #define XFS_ATTR_TRACE 1 #define XFS_BLI_TRACE 1 #define XFS_BMAP_TRACE 1 -#define XFS_BMBT_TRACE 1 +#define XFS_BTREE_TRACE 1 #define XFS_DIR2_TRACE 1 #define XFS_DQUOT_TRACE 1 #define XFS_ILOCK_TRACE 1 diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 60c121f1e81..9c91dfcb6f2 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -2219,8 +2219,81 @@ xfs_allocbt_dup_cursor( cur->bc_btnum); } +#ifdef XFS_BTREE_TRACE +ktrace_t *xfs_allocbt_trace_buf; + +STATIC void +xfs_allocbt_trace_enter( + struct xfs_btree_cur *cur, + const char *func, + char *s, + int type, + int line, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6, + __psunsigned_t a7, + __psunsigned_t a8, + __psunsigned_t a9, + __psunsigned_t a10) +{ + ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type, + (void *)func, (void *)s, NULL, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); +} + +STATIC void +xfs_allocbt_trace_cursor( + struct xfs_btree_cur *cur, + __uint32_t *s0, + __uint64_t *l0, + __uint64_t *l1) +{ + *s0 = cur->bc_private.a.agno; + *l0 = cur->bc_rec.a.ar_startblock; + *l1 = cur->bc_rec.a.ar_blockcount; +} + +STATIC void +xfs_allocbt_trace_key( + struct xfs_btree_cur *cur, + union xfs_btree_key *key, + __uint64_t *l0, + __uint64_t *l1) +{ + *l0 = be32_to_cpu(key->alloc.ar_startblock); + *l1 = be32_to_cpu(key->alloc.ar_blockcount); +} + +STATIC void +xfs_allocbt_trace_record( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec, + __uint64_t *l0, + __uint64_t *l1, + __uint64_t *l2) +{ + *l0 = be32_to_cpu(rec->alloc.ar_startblock); + *l1 = be32_to_cpu(rec->alloc.ar_blockcount); + *l2 = 0; +} +#endif /* XFS_BTREE_TRACE */ + static const struct xfs_btree_ops xfs_allocbt_ops = { .dup_cursor = xfs_allocbt_dup_cursor, + +#ifdef XFS_BTREE_TRACE + .trace_enter = xfs_allocbt_trace_enter, + .trace_cursor = xfs_allocbt_trace_cursor, + .trace_key = xfs_allocbt_trace_key, + .trace_record = xfs_allocbt_trace_record, +#endif }; /* diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 519249e2053..16f2fde6433 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -37,16 +37,13 @@ #include "xfs_inode_item.h" #include "xfs_alloc.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_error.h" #include "xfs_quota.h" -#if defined(XFS_BMBT_TRACE) -ktrace_t *xfs_bmbt_trace_buf; -#endif - /* * Prototypes for 
internal btree functions. */ @@ -61,245 +58,33 @@ STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, __uint64_t *, xfs_btree_cur_t **, int *); STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int); - -#if defined(XFS_BMBT_TRACE) - -static char ARGS[] = "args"; -static char ENTRY[] = "entry"; -static char ERROR[] = "error"; #undef EXIT -static char EXIT[] = "exit"; -/* - * Add a trace buffer entry for the arguments given to the routine, - * generic form. - */ -STATIC void -xfs_bmbt_trace_enter( - const char *func, - xfs_btree_cur_t *cur, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - xfs_inode_t *ip; - int whichfork; +#define ENTRY XBT_ENTRY +#define ERROR XBT_ERROR +#define EXIT XBT_EXIT - ip = cur->bc_private.b.ip; - whichfork = cur->bc_private.b.whichfork; - ktrace_enter(xfs_bmbt_trace_buf, - (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), - (void *)func, (void *)s, (void *)ip, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); - ASSERT(ip->i_btrace); - ktrace_enter(ip->i_btrace, - (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), - (void *)func, (void *)s, (void *)ip, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} /* - * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. + * Keep the XFS_BMBT_TRACE_ names around for now until all code using them + * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names. */ -STATIC void -xfs_bmbt_trace_argbi( - const char *func, - xfs_btree_cur_t *cur, - xfs_buf_t *b, - int i, - int line) -{ - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBI, line, - (__psunsigned_t)b, i, 0, 0, - 0, 0, 0, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for a buffer & 2 integer args. - */ -STATIC void -xfs_bmbt_trace_argbii( - const char *func, - xfs_btree_cur_t *cur, - xfs_buf_t *b, - int i0, - int i1, - int line) -{ - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGBII, line, - (__psunsigned_t)b, i0, i1, 0, - 0, 0, 0, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for 3 block-length args - * and an integer arg. - */ -STATIC void -xfs_bmbt_trace_argfffi( - const char *func, - xfs_btree_cur_t *cur, - xfs_dfiloff_t o, - xfs_dfsbno_t b, - xfs_dfilblks_t i, - int j, - int line) -{ - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGFFFI, line, - o >> 32, (int)o, b >> 32, (int)b, - i >> 32, (int)i, (int)j, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for one integer arg. - */ -STATIC void -xfs_bmbt_trace_argi( - const char *func, - xfs_btree_cur_t *cur, - int i, - int line) -{ - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGI, line, - i, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, key. 
- */ -STATIC void -xfs_bmbt_trace_argifk( - const char *func, - xfs_btree_cur_t *cur, - int i, - xfs_fsblock_t f, - xfs_dfiloff_t o, - int line) -{ - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, - i, (xfs_dfsbno_t)f >> 32, (int)f, o >> 32, - (int)o, 0, 0, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, rec. - */ -STATIC void -xfs_bmbt_trace_argifr( - const char *func, - xfs_btree_cur_t *cur, - int i, - xfs_fsblock_t f, - xfs_bmbt_rec_t *r, - int line) -{ - xfs_dfsbno_t b; - xfs_dfilblks_t c; - xfs_dfsbno_t d; - xfs_dfiloff_t o; - xfs_bmbt_irec_t s; - - d = (xfs_dfsbno_t)f; - xfs_bmbt_disk_get_all(r, &s); - o = (xfs_dfiloff_t)s.br_startoff; - b = (xfs_dfsbno_t)s.br_startblock; - c = s.br_blockcount; - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFR, line, - i, d >> 32, (int)d, o >> 32, - (int)o, b >> 32, (int)b, c >> 32, - (int)c, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, key. - */ -STATIC void -xfs_bmbt_trace_argik( - const char *func, - xfs_btree_cur_t *cur, - int i, - xfs_bmbt_key_t *k, - int line) -{ - xfs_dfiloff_t o; - - o = be64_to_cpu(k->br_startoff); - xfs_bmbt_trace_enter(func, cur, ARGS, XFS_BMBT_KTRACE_ARGIFK, line, - i, o >> 32, (int)o, 0, - 0, 0, 0, 0, - 0, 0, 0); -} - -/* - * Add a trace buffer entry for the cursor/operation. - */ -STATIC void -xfs_bmbt_trace_cursor( - const char *func, - xfs_btree_cur_t *cur, - char *s, - int line) -{ - xfs_bmbt_rec_host_t r; - - xfs_bmbt_set_all(&r, &cur->bc_rec.b); - xfs_bmbt_trace_enter(func, cur, s, XFS_BMBT_KTRACE_CUR, line, - (cur->bc_nlevels << 24) | (cur->bc_private.b.flags << 16) | - cur->bc_private.b.allocated, - r.l0 >> 32, (int)r.l0, - r.l1 >> 32, (int)r.l1, - (unsigned long)cur->bc_bufs[0], (unsigned long)cur->bc_bufs[1], - (unsigned long)cur->bc_bufs[2], (unsigned long)cur->bc_bufs[3], - (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], - (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); -} - -#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ - xfs_bmbt_trace_argbi(__func__, c, b, i, __LINE__) -#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ - xfs_bmbt_trace_argbii(__func__, c, b, i, j, __LINE__) -#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ - xfs_bmbt_trace_argfffi(__func__, c, o, b, i, j, __LINE__) -#define XFS_BMBT_TRACE_ARGI(c,i) \ - xfs_bmbt_trace_argi(__func__, c, i, __LINE__) -#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ - xfs_bmbt_trace_argifk(__func__, c, i, f, s, __LINE__) -#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ - xfs_bmbt_trace_argifr(__func__, c, i, f, r, __LINE__) -#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ - xfs_bmbt_trace_argik(__func__, c, i, k, __LINE__) -#define XFS_BMBT_TRACE_CURSOR(c,s) \ - xfs_bmbt_trace_cursor(__func__, c, s, __LINE__) -#else -#define XFS_BMBT_TRACE_ARGBI(c,b,i) -#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) -#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) -#define XFS_BMBT_TRACE_ARGI(c,i) -#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) -#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) -#define XFS_BMBT_TRACE_ARGIK(c,i,k) -#define XFS_BMBT_TRACE_CURSOR(c,s) -#endif /* XFS_BMBT_TRACE */ +#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ + XFS_BTREE_TRACE_ARGBI(c,b,i) +#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ + XFS_BTREE_TRACE_ARGBII(c,b,i,j) +#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ + XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j) +#define XFS_BMBT_TRACE_ARGI(c,i) \ + XFS_BTREE_TRACE_ARGI(c,i) +#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ + XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s) +#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ + XFS_BTREE_TRACE_ARGIPR(c,i, \ + (union 
xfs_btree_ptr)f, (union xfs_btree_rec *)r) +#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ + XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k) +#define XFS_BMBT_TRACE_CURSOR(c,s) \ + XFS_BTREE_TRACE_CURSOR(c,s) /* @@ -1485,7 +1270,8 @@ xfs_bmbt_split( xfs_bmbt_rec_t *rrp; /* right record pointer */ XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff); + // disable until merged into common code +// XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff); args.tp = cur->bc_tp; args.mp = cur->bc_mp; lbp = cur->bc_bufs[level]; @@ -2629,8 +2415,100 @@ xfs_bmbt_dup_cursor( return new; } +#ifdef XFS_BTREE_TRACE +ktrace_t *xfs_bmbt_trace_buf; + +STATIC void +xfs_bmbt_trace_enter( + struct xfs_btree_cur *cur, + const char *func, + char *s, + int type, + int line, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6, + __psunsigned_t a7, + __psunsigned_t a8, + __psunsigned_t a9, + __psunsigned_t a10) +{ + struct xfs_inode *ip = cur->bc_private.b.ip; + int whichfork = cur->bc_private.b.whichfork; + + ktrace_enter(xfs_bmbt_trace_buf, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); + ktrace_enter(ip->i_btrace, + (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), + (void *)func, (void *)s, (void *)ip, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); +} + +STATIC void +xfs_bmbt_trace_cursor( + struct xfs_btree_cur *cur, + __uint32_t *s0, + __uint64_t *l0, + __uint64_t *l1) +{ + struct xfs_bmbt_rec_host r; + + xfs_bmbt_set_all(&r, &cur->bc_rec.b); + + *s0 = (cur->bc_nlevels << 24) | + (cur->bc_private.b.flags << 16) | + cur->bc_private.b.allocated; + *l0 = r.l0; + *l1 = r.l1; +} + +STATIC void +xfs_bmbt_trace_key( + struct xfs_btree_cur *cur, + union xfs_btree_key *key, + __uint64_t *l0, + __uint64_t *l1) +{ + *l0 = be64_to_cpu(key->bmbt.br_startoff); + *l1 = 0; +} + +STATIC void +xfs_bmbt_trace_record( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec, + __uint64_t *l0, + __uint64_t *l1, + __uint64_t *l2) +{ + struct xfs_bmbt_irec irec; + + xfs_bmbt_disk_get_all(&rec->bmbt, &irec); + *l0 = irec.br_startoff; + *l1 = irec.br_startblock; + *l2 = irec.br_blockcount; +} +#endif /* XFS_BTREE_TRACE */ + static const struct xfs_btree_ops xfs_bmbt_ops = { .dup_cursor = xfs_bmbt_dup_cursor, + +#ifdef XFS_BTREE_TRACE + .trace_enter = xfs_bmbt_trace_enter, + .trace_cursor = xfs_bmbt_trace_cursor, + .trace_key = xfs_bmbt_trace_key, + .trace_record = xfs_bmbt_trace_record, +#endif }; /* diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 4f12fff5497..5628d89cea4 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -233,24 +233,6 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #ifdef __KERNEL__ -#if defined(XFS_BMBT_TRACE) -/* - * Trace buffer entry types. 
- */ -#define XFS_BMBT_KTRACE_ARGBI 1 -#define XFS_BMBT_KTRACE_ARGBII 2 -#define XFS_BMBT_KTRACE_ARGFFFI 3 -#define XFS_BMBT_KTRACE_ARGI 4 -#define XFS_BMBT_KTRACE_ARGIFK 5 -#define XFS_BMBT_KTRACE_ARGIFR 6 -#define XFS_BMBT_KTRACE_ARGIK 7 -#define XFS_BMBT_KTRACE_CUR 8 - -#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */ -#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */ -extern ktrace_t *xfs_bmbt_trace_buf; -#endif - /* * Prototypes for xfs_bmap.c to call. */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 57d3bd37526..0647a0eff0d 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -182,6 +182,25 @@ do { \ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + + /* btree tracing */ +#ifdef XFS_BTREE_TRACE + void (*trace_enter)(struct xfs_btree_cur *, const char *, + char *, int, int, __psunsigned_t, + __psunsigned_t, __psunsigned_t, + __psunsigned_t, __psunsigned_t, + __psunsigned_t, __psunsigned_t, + __psunsigned_t, __psunsigned_t, + __psunsigned_t, __psunsigned_t); + void (*trace_cursor)(struct xfs_btree_cur *, __uint32_t *, + __uint64_t *, __uint64_t *); + void (*trace_key)(struct xfs_btree_cur *, + union xfs_btree_key *, __uint64_t *, + __uint64_t *); + void (*trace_record)(struct xfs_btree_cur *, + union xfs_btree_rec *, __uint64_t *, + __uint64_t *, __uint64_t *); +#endif }; /* diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 8c0c4748a8d..fc99524b17a 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -2085,8 +2085,81 @@ xfs_inobt_dup_cursor( cur->bc_private.a.agbp, cur->bc_private.a.agno); } +#ifdef XFS_BTREE_TRACE +ktrace_t *xfs_inobt_trace_buf; + +STATIC void +xfs_inobt_trace_enter( + struct xfs_btree_cur *cur, + const char *func, + char *s, + int type, + int line, + __psunsigned_t a0, + __psunsigned_t a1, + __psunsigned_t a2, + __psunsigned_t a3, + __psunsigned_t a4, + __psunsigned_t a5, + __psunsigned_t a6, + __psunsigned_t a7, + __psunsigned_t a8, + __psunsigned_t a9, + __psunsigned_t a10) +{ + ktrace_enter(xfs_inobt_trace_buf, (void *)(__psint_t)type, + (void *)func, (void *)s, NULL, (void *)cur, + (void *)a0, (void *)a1, (void *)a2, (void *)a3, + (void *)a4, (void *)a5, (void *)a6, (void *)a7, + (void *)a8, (void *)a9, (void *)a10); +} + +STATIC void +xfs_inobt_trace_cursor( + struct xfs_btree_cur *cur, + __uint32_t *s0, + __uint64_t *l0, + __uint64_t *l1) +{ + *s0 = cur->bc_private.a.agno; + *l0 = cur->bc_rec.i.ir_startino; + *l1 = cur->bc_rec.i.ir_free; +} + +STATIC void +xfs_inobt_trace_key( + struct xfs_btree_cur *cur, + union xfs_btree_key *key, + __uint64_t *l0, + __uint64_t *l1) +{ + *l0 = be32_to_cpu(key->inobt.ir_startino); + *l1 = 0; +} + +STATIC void +xfs_inobt_trace_record( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec, + __uint64_t *l0, + __uint64_t *l1, + __uint64_t *l2) +{ + *l0 = be32_to_cpu(rec->inobt.ir_startino); + *l1 = be32_to_cpu(rec->inobt.ir_freecount); + *l2 = be64_to_cpu(rec->inobt.ir_free); +} +#endif /* XFS_BTREE_TRACE */ + static const struct xfs_btree_ops xfs_inobt_ops = { .dup_cursor = xfs_inobt_dup_cursor, + +#ifdef XFS_BTREE_TRACE + .trace_enter = xfs_inobt_trace_enter, + .trace_cursor = xfs_inobt_trace_cursor, + .trace_key = xfs_inobt_trace_key, + .trace_record = xfs_inobt_trace_record, +#endif }; /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 2a158a26286..cc0474ddd2d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -41,6 +41,7 @@ #include 
"xfs_buf_item.h" #include "xfs_inode_item.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" @@ -835,7 +836,7 @@ xfs_inode_alloc( #ifdef XFS_BMAP_TRACE ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); #endif #ifdef XFS_RW_TRACE @@ -2673,7 +2674,7 @@ xfs_idestroy( #ifdef XFS_BMAP_TRACE ktrace_free(ip->i_xtrace); #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE ktrace_free(ip->i_btrace); #endif #ifdef XFS_RW_TRACE diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 3af1f6dd149..2a69a7dee22 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -245,7 +245,7 @@ typedef struct xfs_inode { #ifdef XFS_BMAP_TRACE struct ktrace *i_xtrace; /* inode extent list trace */ #endif -#ifdef XFS_BMBT_TRACE +#ifdef XFS_BTREE_TRACE struct ktrace *i_btrace; /* inode bmap btree trace */ #endif #ifdef XFS_RW_TRACE -- cgit v1.2.3-70-g09d2 From ce5e42db421a41b1ad0cfd68c6058566b963e14b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:55:23 +1100 Subject: [XFS] add get_maxrecs btree operation Factor xfs_btree_maxrecs into a per-btree operation. The get_maxrecs method is based on a patch from Dave Chinner. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32188a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 9 +++++++++ fs/xfs/xfs_bmap_btree.c | 9 +++++++++ fs/xfs/xfs_btree.c | 29 ++--------------------------- fs/xfs/xfs_btree.h | 3 +++ fs/xfs/xfs_ialloc_btree.c | 9 +++++++++ 5 files changed, 32 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 9c91dfcb6f2..1f268b6f436 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -2219,6 +2219,14 @@ xfs_allocbt_dup_cursor( cur->bc_btnum); } +STATIC int +xfs_allocbt_get_maxrecs( + struct xfs_btree_cur *cur, + int level) +{ + return cur->bc_mp->m_alloc_mxr[level != 0]; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_allocbt_trace_buf; @@ -2287,6 +2295,7 @@ xfs_allocbt_trace_record( static const struct xfs_btree_ops xfs_allocbt_ops = { .dup_cursor = xfs_allocbt_dup_cursor, + .get_maxrecs = xfs_allocbt_get_maxrecs, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_allocbt_trace_enter, diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 16f2fde6433..bdcfbea1e06 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2415,6 +2415,14 @@ xfs_bmbt_dup_cursor( return new; } +STATIC int +xfs_bmbt_get_maxrecs( + struct xfs_btree_cur *cur, + int level) +{ + return XFS_BMAP_BLOCK_IMAXRECS(level, cur); +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_bmbt_trace_buf; @@ -2502,6 +2510,7 @@ xfs_bmbt_trace_record( static const struct xfs_btree_ops xfs_bmbt_ops = { .dup_cursor = xfs_bmbt_dup_cursor, + .get_maxrecs = xfs_bmbt_get_maxrecs, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_bmbt_trace_enter, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 966d58d50fa..893e86f2ad5 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -50,31 +50,6 @@ const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC }; -/* - * Checking routine: return maxrecs for the block. 
- */ -STATIC int /* number of records fitting in block */ -xfs_btree_maxrecs( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_btree_block_t *block) /* generic btree block pointer */ -{ - switch (cur->bc_btnum) { - case XFS_BTNUM_BNO: - case XFS_BTNUM_CNT: - return (int)XFS_ALLOC_BLOCK_MAXRECS( - be16_to_cpu(block->bb_level), cur); - case XFS_BTNUM_BMAP: - return (int)XFS_BMAP_BLOCK_IMAXRECS( - be16_to_cpu(block->bb_level), cur); - case XFS_BTNUM_INO: - return (int)XFS_INOBT_BLOCK_MAXRECS( - be16_to_cpu(block->bb_level), cur); - default: - ASSERT(0); - return 0; - } -} - /* * External routines. */ @@ -207,7 +182,7 @@ xfs_btree_check_lblock( be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= - xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + cur->bc_ops->get_maxrecs(cur, level) && block->bb_leftsib && (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && @@ -245,7 +220,7 @@ xfs_btree_check_sblock( be32_to_cpu(block->bb_magic) == xfs_magics[cur->bc_btnum] && be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= - xfs_btree_maxrecs(cur, (xfs_btree_block_t *)block) && + cur->bc_ops->get_maxrecs(cur, level) && (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK || be32_to_cpu(block->bb_leftsib) < agflen) && block->bb_leftsib && diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 0647a0eff0d..5398cd0d4d4 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -183,6 +183,9 @@ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + /* records in block/level */ + int (*get_maxrecs)(struct xfs_btree_cur *cur, int level); + /* btree tracing */ #ifdef XFS_BTREE_TRACE void (*trace_enter)(struct xfs_btree_cur *, const char *, diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index fc99524b17a..18867f1aaca 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -2085,6 +2085,14 @@ xfs_inobt_dup_cursor( cur->bc_private.a.agbp, cur->bc_private.a.agno); } +STATIC int +xfs_inobt_get_maxrecs( + struct xfs_btree_cur *cur, + int level) +{ + return cur->bc_mp->m_inobt_mxr[level != 0]; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_inobt_trace_buf; @@ -2153,6 +2161,7 @@ xfs_inobt_trace_record( static const struct xfs_btree_ops xfs_inobt_ops = { .dup_cursor = xfs_inobt_dup_cursor, + .get_maxrecs = xfs_inobt_get_maxrecs, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_inobt_trace_enter, -- cgit v1.2.3-70-g09d2 From 65f1eaeac0efc968797f3ac955b85ba3f5d4f9c8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:55:34 +1100 Subject: [XFS] add helpers for addressing entities inside a btree block Add new helpers in xfs_btree.c to find the record, key and block pointer entries inside a btree block. To implement this genericly the ->get_maxrecs methods and two new xfs_btree_ops entries for the key and record sizes are used. Also add a big comment describing how the addressing inside a btree block works. Note that these helpers are unused until users are introduced in the next patches and this patch will thus cause some harmless compiler warnings. 
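As an aside, a small standalone sketch (not from the patch, and with made-up sizes) of the offset arithmetic the new helpers perform for a short-form block; indices are 1-based, and in a non-leaf block the pointers start only after get_maxrecs() keys.

#include <stdio.h>
#include <stddef.h>

/* hypothetical geometry for one btree level */
#define HDR_LEN 16      /* plays the role of xfs_btree_block_len() */
#define KEY_LEN 8       /* cur->bc_ops->key_len */
#define REC_LEN 16      /* cur->bc_ops->rec_len */
#define PTR_LEN 4       /* short-form pointers are __be32 */
#define MAXRECS 10      /* cur->bc_ops->get_maxrecs(cur, level) */

static size_t rec_offset(int n) { return HDR_LEN + (n - 1) * REC_LEN; }
static size_t key_offset(int n) { return HDR_LEN + (n - 1) * KEY_LEN; }
static size_t ptr_offset(int n) { return HDR_LEN + MAXRECS * KEY_LEN + (n - 1) * PTR_LEN; }

int main(void)
{
        /* leaf block: header followed by records */
        printf("rec 3 starts at byte %zu\n", rec_offset(3));
        /* non-leaf block: header, MAXRECS keys, then the pointers */
        printf("key 3 at byte %zu, ptr 3 at byte %zu\n", key_offset(3), ptr_offset(3));
        return 0;
}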
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32189a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 3 ++ fs/xfs/xfs_bmap_btree.c | 3 ++ fs/xfs/xfs_btree.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 13 +++++ fs/xfs/xfs_ialloc_btree.c | 3 ++ 5 files changed, 152 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 1f268b6f436..9e2421c31a3 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -2294,6 +2294,9 @@ xfs_allocbt_trace_record( #endif /* XFS_BTREE_TRACE */ static const struct xfs_btree_ops xfs_allocbt_ops = { + .rec_len = sizeof(xfs_alloc_rec_t), + .key_len = sizeof(xfs_alloc_key_t), + .dup_cursor = xfs_allocbt_dup_cursor, .get_maxrecs = xfs_allocbt_get_maxrecs, diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index bdcfbea1e06..a71010abf6e 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -2509,6 +2509,9 @@ xfs_bmbt_trace_record( #endif /* XFS_BTREE_TRACE */ static const struct xfs_btree_ops xfs_bmbt_ops = { + .rec_len = sizeof(xfs_bmbt_rec_t), + .key_len = sizeof(xfs_bmbt_key_t), + .dup_cursor = xfs_bmbt_dup_cursor, .get_maxrecs = xfs_bmbt_get_maxrecs, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 893e86f2ad5..4aec7c7d5ba 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -402,6 +402,136 @@ xfs_btree_dup_cursor( return 0; } +/* + * XFS btree block layout and addressing: + * + * There are two types of blocks in the btree: leaf and non-leaf blocks. + * + * The leaf record start with a header then followed by records containing + * the values. A non-leaf block also starts with the same header, and + * then first contains lookup keys followed by an equal number of pointers + * to the btree blocks at the previous level. + * + * +--------+-------+-------+-------+-------+-------+-------+ + * Leaf: | header | rec 1 | rec 2 | rec 3 | rec 4 | rec 5 | rec N | + * +--------+-------+-------+-------+-------+-------+-------+ + * + * +--------+-------+-------+-------+-------+-------+-------+ + * Non-Leaf: | header | key 1 | key 2 | key N | ptr 1 | ptr 2 | ptr N | + * +--------+-------+-------+-------+-------+-------+-------+ + * + * The header is called struct xfs_btree_block for reasons better left unknown + * and comes in different versions for short (32bit) and long (64bit) block + * pointers. The record and key structures are defined by the btree instances + * and opaque to the btree core. The block pointers are simple disk endian + * integers, available in a short (32bit) and long (64bit) variant. + * + * The helpers below calculate the offset of a given record, key or pointer + * into a btree block (xfs_btree_*_offset) or return a pointer to the given + * record, key or pointer (xfs_btree_*_addr). Note that all addressing + * inside the btree block is done using indices starting at one, not zero! + */ + +/* + * Return size of the btree block header for this btree instance. + */ +static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) +{ + return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? + sizeof(struct xfs_btree_lblock) : + sizeof(struct xfs_btree_sblock); +} + +/* + * Return size of btree block pointers for this btree instance. + */ +static inline size_t xfs_btree_ptr_len(struct xfs_btree_cur *cur) +{ + return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? 
+ sizeof(__be64) : sizeof(__be32); +} + +/* + * Calculate offset of the n-th record in a btree block. + */ +STATIC size_t +xfs_btree_rec_offset( + struct xfs_btree_cur *cur, + int n) +{ + return xfs_btree_block_len(cur) + + (n - 1) * cur->bc_ops->rec_len; +} + +/* + * Calculate offset of the n-th key in a btree block. + */ +STATIC size_t +xfs_btree_key_offset( + struct xfs_btree_cur *cur, + int n) +{ + return xfs_btree_block_len(cur) + + (n - 1) * cur->bc_ops->key_len; +} + +/* + * Calculate offset of the n-th block pointer in a btree block. + */ +STATIC size_t +xfs_btree_ptr_offset( + struct xfs_btree_cur *cur, + int n, + int level) +{ + return xfs_btree_block_len(cur) + + cur->bc_ops->get_maxrecs(cur, level) * cur->bc_ops->key_len + + (n - 1) * xfs_btree_ptr_len(cur); +} + +/* + * Return a pointer to the n-th record in the btree block. + */ +STATIC union xfs_btree_rec * +xfs_btree_rec_addr( + struct xfs_btree_cur *cur, + int n, + struct xfs_btree_block *block) +{ + return (union xfs_btree_rec *) + ((char *)block + xfs_btree_rec_offset(cur, n)); +} + +/* + * Return a pointer to the n-th key in the btree block. + */ +STATIC union xfs_btree_key * +xfs_btree_key_addr( + struct xfs_btree_cur *cur, + int n, + struct xfs_btree_block *block) +{ + return (union xfs_btree_key *) + ((char *)block + xfs_btree_key_offset(cur, n)); +} + +/* + * Return a pointer to the n-th block pointer in the btree block. + */ +STATIC union xfs_btree_ptr * +xfs_btree_ptr_addr( + struct xfs_btree_cur *cur, + int n, + struct xfs_btree_block *block) +{ + int level = xfs_btree_get_level(block); + + ASSERT(block->bb_level != 0); + + return (union xfs_btree_ptr *) + ((char *)block + xfs_btree_ptr_offset(cur, n, level)); +} + /* * Get a the root block which is stored in the inode. * diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 5398cd0d4d4..593f82b01b6 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -180,6 +180,10 @@ do { \ #define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */ struct xfs_btree_ops { + /* size of the key and record structures */ + size_t key_len; + size_t rec_len; + /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); @@ -497,6 +501,15 @@ xfs_btree_setbuf( int lev, /* level in btree */ struct xfs_buf *bp); /* new buffer to set */ + +/* + * Helpers. + */ +static inline int xfs_btree_get_level(struct xfs_btree_block *block) +{ + return be16_to_cpu(block->bb_level); +} + #endif /* __KERNEL__ */ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 18867f1aaca..fc6db94492d 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -2160,6 +2160,9 @@ xfs_inobt_trace_record( #endif /* XFS_BTREE_TRACE */ static const struct xfs_btree_ops xfs_inobt_ops = { + .rec_len = sizeof(xfs_inobt_rec_t), + .key_len = sizeof(xfs_inobt_key_t), + .dup_cursor = xfs_inobt_dup_cursor, .get_maxrecs = xfs_inobt_get_maxrecs, -- cgit v1.2.3-70-g09d2 From 637aa50f461b8ea6b1e8bf9877b0d13d00085043 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:55:45 +1100 Subject: [XFS] implement generic xfs_btree_increment From: Dave Chinner Because this is the first major generic btree routine this patch includes some infrastrucure, first a few routines to deal with a btree block that can be either in short or long form, second xfs_btree_read_buf_block, which is the new central routine to read a btree block given a cursor, and third the new xfs_btree_ptr_addr routine to calculate the address for a given btree pointer record. 
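A caller-side sketch of the loop shape the series converts to (illustrative only: example_walk_right is a made-up name, error handling is trimmed, and the cursor is assumed to have been positioned by a prior lookup), matching the do/while loops in the xfs_ialloc.c hunks below.

STATIC int
example_walk_right(
        struct xfs_btree_cur    *cur)   /* positioned btree cursor */
{
        int                     error;  /* error return value */
        int                     i;      /* non-zero while a record exists */

        do {
                /* ... examine the record the cursor currently points at ... */

                /* step one leaf record to the right; i becomes 0 at the edge */
                error = xfs_btree_increment(cur, 0, &i);
                if (error)
                        return error;
        } while (i == 1);

        return 0;
}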
[hch: split out from bigger patch and minor adaptions] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32190a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 12 +-- fs/xfs/xfs_alloc_btree.c | 99 +-------------------- fs/xfs/xfs_alloc_btree.h | 6 -- fs/xfs/xfs_bmap.c | 4 +- fs/xfs/xfs_bmap_btree.c | 88 +----------------- fs/xfs/xfs_bmap_btree.h | 1 - fs/xfs/xfs_btree.c | 222 ++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 10 +++ fs/xfs/xfs_ialloc.c | 14 +-- fs/xfs/xfs_ialloc_btree.c | 99 +-------------------- fs/xfs/xfs_ialloc_btree.h | 6 -- fs/xfs/xfs_itable.c | 6 +- 12 files changed, 262 insertions(+), 305 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 69833eb1de4..b8bb694b7da 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -818,7 +818,7 @@ xfs_alloc_ag_vextent_near( XFS_WANT_CORRUPTED_GOTO(i == 1, error0); if (ltlen >= args->minlen) break; - if ((error = xfs_alloc_increment(cnt_cur, 0, &i))) + if ((error = xfs_btree_increment(cnt_cur, 0, &i))) goto error0; } while (i); ASSERT(ltlen >= args->minlen); @@ -828,7 +828,7 @@ xfs_alloc_ag_vextent_near( i = cnt_cur->bc_ptrs[0]; for (j = 1, blen = 0, bdiff = 0; !error && j && (blen < args->maxlen || bdiff > 0); - error = xfs_alloc_increment(cnt_cur, 0, &j)) { + error = xfs_btree_increment(cnt_cur, 0, &j)) { /* * For each entry, decide if it's better than * the previous best entry. @@ -938,7 +938,7 @@ xfs_alloc_ag_vextent_near( * Increment the cursor, so we will point at the entry just right * of the leftward entry if any, or to the leftmost entry. */ - if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) goto error0; if (!i) { /* @@ -977,7 +977,7 @@ xfs_alloc_ag_vextent_near( args->minlen, >bnoa, >lena); if (gtlena >= args->minlen) break; - if ((error = xfs_alloc_increment(bno_cur_gt, 0, &i))) + if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) goto error0; if (!i) { xfs_btree_del_cursor(bno_cur_gt, @@ -1066,7 +1066,7 @@ xfs_alloc_ag_vextent_near( /* * Fell off the right end. */ - if ((error = xfs_alloc_increment( + if ((error = xfs_btree_increment( bno_cur_gt, 0, &i))) goto error0; if (!i) { @@ -1548,7 +1548,7 @@ xfs_free_ag_extent( * Look for a neighboring block on the right (higher block numbers) * that is contiguous with this space. */ - if ((error = xfs_alloc_increment(bno_cur, 0, &haveright))) + if ((error = xfs_btree_increment(bno_cur, 0, &haveright))) goto error0; if (haveright) { /* diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 9e2421c31a3..febc2d5ea29 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -303,7 +303,7 @@ xfs_alloc_delrec( */ i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_increment(tcur, level, &i))) + if ((error = xfs_btree_increment(tcur, level, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); i = xfs_btree_lastrec(tcur, level); @@ -517,7 +517,7 @@ xfs_alloc_delrec( * us, increment the cursor at that level. */ else if (level + 1 < cur->bc_nlevels && - (error = xfs_alloc_increment(cur, level + 1, &i))) + (error = xfs_btree_increment(cur, level + 1, &i))) return error; /* * Fix up the number of records in the surviving block. 
@@ -1134,7 +1134,7 @@ xfs_alloc_lookup( int i; cur->bc_ptrs[0] = keyno; - if ((error = xfs_alloc_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); *stat = 1; @@ -1570,7 +1570,7 @@ xfs_alloc_rshift( return error; i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_increment(tcur, level, &i)) || + if ((error = xfs_btree_increment(tcur, level, &i)) || (error = xfs_alloc_updkey(tcur, rkp, level + 1))) goto error0; xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); @@ -1942,97 +1942,6 @@ xfs_alloc_get_rec( return 0; } -/* - * Increment cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -int /* error */ -xfs_alloc_increment( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level in btree, 0 is leaf */ - int *stat) /* success/failure */ -{ - xfs_alloc_block_t *block; /* btree block */ - xfs_buf_t *bp; /* tree block buffer */ - int error; /* error return value */ - int lev; /* btree level */ - - ASSERT(level < cur->bc_nlevels); - /* - * Read-ahead to the right at this level. - */ - xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); - /* - * Get a pointer to the btree block. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - /* - * Increment the ptr at this level. If we're still in the block - * then we're done. - */ - if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { - *stat = 1; - return 0; - } - /* - * If we just went off the right edge of the tree, return failure. - */ - if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * March up the tree incrementing pointers. - * Stop when we don't go off the right edge of a block. - */ - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - bp = cur->bc_bufs[lev]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; -#endif - if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) - break; - /* - * Read-ahead the right block, we're going to read it - * in the next loop. - */ - xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); - } - /* - * If we went off the root then we are seriously confused. - */ - ASSERT(lev < cur->bc_nlevels); - /* - * Now walk back down the tree, fixing up the cursor's buffer - * pointers and key numbers. - */ - for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_ALLOC_BLOCK(bp); - lev > level; ) { - xfs_agblock_t agbno; /* block number of btree block */ - - agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, - XFS_ALLOC_BTREE_REF))) - return error; - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; - cur->bc_ptrs[lev] = 1; - } - *stat = 1; - return 0; -} - /* * Insert the current record at the point referenced by cur. * The cursor may be inconsistent on return if splits have been done. 
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 60735384a4c..643cfabbf67 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -113,12 +113,6 @@ extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat); extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno, xfs_extlen_t *len, int *stat); -/* - * Increment cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -extern int xfs_alloc_increment(struct xfs_btree_cur *cur, int level, int *stat); - /* * Insert the current record at the point referenced by cur. * The cursor may be inconsistent on return if splits have been done. diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index a84d0c30b48..4b1ec44c80a 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1646,7 +1646,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_blockcount - new->br_blockcount, oldext))) goto done; - if ((error = xfs_bmbt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto done; if ((error = xfs_bmbt_update(cur, new->br_startoff, new->br_startblock, @@ -3253,7 +3253,7 @@ xfs_bmap_del_extent( got.br_startblock, temp, got.br_state))) goto done; - if ((error = xfs_bmbt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto done; cur->bc_rec.b = new; error = xfs_bmbt_insert(cur, &i); diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index a71010abf6e..2d29a4980cf 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -254,7 +254,7 @@ xfs_bmbt_delrec( if (rbno != NULLFSBLOCK) { i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_bmbt_increment(tcur, level, &i))) { + if ((error = xfs_btree_increment(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -445,7 +445,7 @@ xfs_bmbt_delrec( cur->bc_bufs[level] = lbp; cur->bc_ptrs[level] += lrecs; cur->bc_ra[level] = 0; - } else if ((error = xfs_bmbt_increment(cur, level + 1, &i))) { + } else if ((error = xfs_btree_increment(cur, level + 1, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -929,7 +929,7 @@ xfs_bmbt_lookup( if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) && be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) { cur->bc_ptrs[0] = keyno; - if ((error = xfs_bmbt_increment(cur, 0, &i))) { + if ((error = xfs_btree_increment(cur, 0, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } @@ -1202,7 +1202,7 @@ xfs_bmbt_rshift( } i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_bmbt_increment(tcur, level, &i))) { + if ((error = xfs_btree_increment(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(tcur, ERROR); goto error1; } @@ -1760,86 +1760,6 @@ xfs_bmbt_disk_get_startoff( XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; } -/* - * Increment cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. 
- */ -int /* error */ -xfs_bmbt_increment( - xfs_btree_cur_t *cur, - int level, - int *stat) /* success/failure */ -{ - xfs_bmbt_block_t *block; - xfs_buf_t *bp; - int error; /* error return value */ - xfs_fsblock_t fsbno; - int lev; - xfs_mount_t *mp; - xfs_trans_t *tp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, level); - ASSERT(level < cur->bc_nlevels); - - xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); - block = xfs_bmbt_get_block(cur, level, &bp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - if (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - block = xfs_bmbt_get_block(cur, lev, &bp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) - break; - xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); - } - if (lev == cur->bc_nlevels) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - tp = cur->bc_tp; - mp = cur->bc_mp; - for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { - fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, - XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_BMBT_BLOCK(bp); - if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - cur->bc_ptrs[lev] = 1; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -} - /* * Insert the current record at the point referenced by cur. 
* diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 5628d89cea4..a45be38d9a3 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -251,7 +251,6 @@ extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r); extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r); -extern int xfs_bmbt_increment(struct xfs_btree_cur *, int, int *); extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *); extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 4aec7c7d5ba..e9ab86b7990 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -35,6 +35,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_error.h" @@ -949,3 +950,224 @@ xfs_btree_setbuf( cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } } + +STATIC int +xfs_btree_ptr_is_null( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + return be64_to_cpu(ptr->l) == NULLFSBLOCK; + else + return be32_to_cpu(ptr->s) == NULLAGBLOCK; +} + +/* + * Get/set/init sibling pointers + */ +STATIC void +xfs_btree_get_sibling( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + union xfs_btree_ptr *ptr, + int lr) +{ + ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB); + + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + if (lr == XFS_BB_RIGHTSIB) + ptr->l = block->bb_u.l.bb_rightsib; + else + ptr->l = block->bb_u.l.bb_leftsib; + } else { + if (lr == XFS_BB_RIGHTSIB) + ptr->s = block->bb_u.s.bb_rightsib; + else + ptr->s = block->bb_u.s.bb_leftsib; + } +} + +STATIC xfs_daddr_t +xfs_btree_ptr_to_daddr( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + ASSERT(be64_to_cpu(ptr->l) != NULLFSBLOCK); + + return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); + } else { + ASSERT(cur->bc_private.a.agno != NULLAGNUMBER); + ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK); + + return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno, + be32_to_cpu(ptr->s)); + } +} + +STATIC void +xfs_btree_set_refs( + struct xfs_btree_cur *cur, + struct xfs_buf *bp) +{ + switch (cur->bc_btnum) { + case XFS_BTNUM_BNO: + case XFS_BTNUM_CNT: + XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF); + break; + case XFS_BTNUM_INO: + XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF); + break; + case XFS_BTNUM_BMAP: + XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF); + break; + default: + ASSERT(0); + } +} + +/* + * Read in the buffer at the given ptr and return the buffer and + * the block pointer within the buffer. 
+ */ +STATIC int +xfs_btree_read_buf_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + int level, + int flags, + struct xfs_btree_block **block, + struct xfs_buf **bpp) +{ + struct xfs_mount *mp = cur->bc_mp; + xfs_daddr_t d; + int error; + + /* need to sort out how callers deal with failures first */ + ASSERT(!(flags & XFS_BUF_TRYLOCK)); + + d = xfs_btree_ptr_to_daddr(cur, ptr); + error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d, + mp->m_bsize, flags, bpp); + if (error) + return error; + + ASSERT(*bpp != NULL); + ASSERT(!XFS_BUF_GETERROR(*bpp)); + + xfs_btree_set_refs(cur, *bpp); + *block = XFS_BUF_TO_BLOCK(*bpp); + + error = xfs_btree_check_block(cur, *block, level, *bpp); + if (error) + xfs_trans_brelse(cur->bc_tp, *bpp); + return error; +} + +/* + * Increment cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_btree_increment( + struct xfs_btree_cur *cur, + int level, + int *stat) /* success/failure */ +{ + struct xfs_btree_block *block; + union xfs_btree_ptr ptr; + struct xfs_buf *bp; + int error; /* error return value */ + int lev; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, level); + + ASSERT(level < cur->bc_nlevels); + + /* Read-ahead to the right at this level. */ + xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); + + /* Get a pointer to the btree block. */ + block = xfs_btree_get_block(cur, level, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) + goto error0; +#endif + + /* We're done if we remain in the block after the increment. */ + if (++cur->bc_ptrs[level] <= xfs_btree_get_numrecs(block)) + goto out1; + + /* Fail if we just went off the right edge of the tree. */ + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); + if (xfs_btree_ptr_is_null(cur, &ptr)) + goto out0; + + XFS_BTREE_STATS_INC(cur, increment); + + /* + * March up the tree incrementing pointers. + * Stop when we don't go off the right edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + block = xfs_btree_get_block(cur, lev, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, lev, bp); + if (error) + goto error0; +#endif + + if (++cur->bc_ptrs[lev] <= xfs_btree_get_numrecs(block)) + break; + + /* Read-ahead the right block for the next loop. */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); + } + + /* + * If we went off the root then we are either seriously + * confused or have the tree root in an inode. + */ + if (lev == cur->bc_nlevels) { + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) + goto out0; + ASSERT(0); + error = EFSCORRUPTED; + goto error0; + } + ASSERT(lev < cur->bc_nlevels); + + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. 
+ */ + for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { + union xfs_btree_ptr *ptrp; + + ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block); + error = xfs_btree_read_buf_block(cur, ptrp, --lev, + 0, &block, &bp); + if (error) + goto error0; + + xfs_btree_setbuf(cur, lev, bp); + cur->bc_ptrs[lev] = 1; + } +out1: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 593f82b01b6..f5a4b8ec4cd 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -502,9 +502,19 @@ xfs_btree_setbuf( struct xfs_buf *bp); /* new buffer to set */ +/* + * Common btree core entry points. + */ +int xfs_btree_increment(struct xfs_btree_cur *, int, int *); + /* * Helpers. */ +static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block) +{ + return be16_to_cpu(block->bb_numrecs); +} + static inline int xfs_btree_get_level(struct xfs_btree_block *block) { return be16_to_cpu(block->bb_level); diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 11bb169561c..3a8f0670e07 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -695,7 +695,7 @@ nextag: goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); freecount += rec.ir_freecount; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error0; } while (i == 1); @@ -753,7 +753,7 @@ nextag: /* * Search right with cur, go forward 1 record. */ - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error1; doneright = !i; if (!doneright) { @@ -835,7 +835,7 @@ nextag: * further right. */ else { - if ((error = xfs_inobt_increment(cur, 0, + if ((error = xfs_btree_increment(cur, 0, &i))) goto error1; doneright = !i; @@ -890,7 +890,7 @@ nextag: XFS_WANT_CORRUPTED_GOTO(i == 1, error0); if (rec.ir_freecount > 0) break; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); } @@ -924,7 +924,7 @@ nextag: goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); freecount += rec.ir_freecount; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error0; } while (i == 1); ASSERT(freecount == be32_to_cpu(agi->agi_freecount) || @@ -1033,7 +1033,7 @@ xfs_difree( goto error0; if (i) { freecount += rec.ir_freecount; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error0; } } while (i == 1); @@ -1138,7 +1138,7 @@ xfs_difree( goto error0; if (i) { freecount += rec.ir_freecount; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) goto error0; } } while (i == 1); diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index fc6db94492d..41717da6369 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -253,7 +253,7 @@ xfs_inobt_delrec( */ i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_inobt_increment(tcur, level, &i))) + if ((error = xfs_btree_increment(tcur, level, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); i = xfs_btree_lastrec(tcur, level); @@ -463,7 +463,7 @@ xfs_inobt_delrec( * us, increment the cursor at that level. 
*/ else if (level + 1 < cur->bc_nlevels && - (error = xfs_alloc_increment(cur, level + 1, &i))) + (error = xfs_btree_increment(cur, level + 1, &i))) return error; /* * Fix up the number of records in the surviving block. @@ -1014,7 +1014,7 @@ xfs_inobt_lookup( int i; cur->bc_ptrs[0] = keyno; - if ((error = xfs_inobt_increment(cur, 0, &i))) + if ((error = xfs_btree_increment(cur, 0, &i))) return error; ASSERT(i == 1); *stat = 1; @@ -1443,7 +1443,7 @@ xfs_inobt_rshift( if ((error = xfs_btree_dup_cursor(cur, &tcur))) return error; xfs_btree_lastrec(tcur, level); - if ((error = xfs_inobt_increment(tcur, level, &i)) || + if ((error = xfs_btree_increment(tcur, level, &i)) || (error = xfs_inobt_updkey(tcur, rkp, level + 1))) { xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); return error; @@ -1820,97 +1820,6 @@ xfs_inobt_get_rec( return 0; } -/* - * Increment cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -int /* error */ -xfs_inobt_increment( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level in btree, 0 is leaf */ - int *stat) /* success/failure */ -{ - xfs_inobt_block_t *block; /* btree block */ - xfs_buf_t *bp; /* buffer containing btree block */ - int error; /* error return value */ - int lev; /* btree level */ - - ASSERT(level < cur->bc_nlevels); - /* - * Read-ahead to the right at this level. - */ - xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA); - /* - * Get a pointer to the btree block. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - /* - * Increment the ptr at this level. If we're still in the block - * then we're done. - */ - if (++cur->bc_ptrs[level] <= be16_to_cpu(block->bb_numrecs)) { - *stat = 1; - return 0; - } - /* - * If we just went off the right edge of the tree, return failure. - */ - if (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * March up the tree incrementing pointers. - * Stop when we don't go off the right edge of a block. - */ - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - bp = cur->bc_bufs[lev]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; -#endif - if (++cur->bc_ptrs[lev] <= be16_to_cpu(block->bb_numrecs)) - break; - /* - * Read-ahead the right block, we're going to read it - * in the next loop. - */ - xfs_btree_readahead(cur, lev, XFS_BTCUR_RIGHTRA); - } - /* - * If we went off the root then we are seriously confused. - */ - ASSERT(lev < cur->bc_nlevels); - /* - * Now walk back down the tree, fixing up the cursor's buffer - * pointers and key numbers. - */ - for (bp = cur->bc_bufs[lev], block = XFS_BUF_TO_INOBT_BLOCK(bp); - lev > level; ) { - xfs_agblock_t agbno; /* block number of btree block */ - - agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, - XFS_INO_BTREE_REF))) - return error; - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_INOBT_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; - cur->bc_ptrs[lev] = 1; - } - *stat = 1; - return 0; -} - /* * Insert the current record at the point referenced by cur. * The cursor may be inconsistent on return if splits have been done. 
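For reference, a minimal caller-side sketch of the new generic increment, walking every inode-btree record left to right the same way the DEBUG freecount loops in xfs_ialloc.c above now do. The helper name is hypothetical; xfs_inobt_lookup_ge(), xfs_inobt_get_rec() and xfs_btree_increment() are used with the signatures that appear in this series, assuming a cursor already constructed for the inode btree:

STATIC int					/* error */
xfs_inobt_count_free(
	struct xfs_btree_cur	*cur,		/* inode btree cursor */
	int			*freecount)	/* out: free inodes seen */
{
	xfs_agino_t		ino;
	__int32_t		fcnt;
	xfs_inofree_t		free;
	int			error;
	int			i;

	*freecount = 0;
	/* Position at the leftmost record in the tree. */
	if ((error = xfs_inobt_lookup_ge(cur, 0, 0, 0, &i)))
		return error;
	while (i) {
		if ((error = xfs_inobt_get_rec(cur, &ino, &fcnt, &free, &i)))
			return error;
		if (!i)
			break;
		*freecount += fcnt;
		/* Step right; i goes to 0 off the right edge of the tree. */
		if ((error = xfs_btree_increment(cur, 0, &i)))
			return error;
	}
	return 0;
}

The per-btree callers above keep exactly this shape; only the function name changes from xfs_inobt_increment() to xfs_btree_increment().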
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index eea409349eb..07fed62bcb7 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -135,12 +135,6 @@ extern int xfs_inobt_delete(struct xfs_btree_cur *cur, int *stat); extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, __int32_t *fcnt, xfs_inofree_t *free, int *stat); -/* - * Increment cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -extern int xfs_inobt_increment(struct xfs_btree_cur *cur, int level, int *stat); - /* * Insert the current record at the point referenced by cur. * The cursor may be inconsistent on return if splits have been done. diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index a5f02f0e4c2..42a214b8df9 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -471,7 +471,7 @@ xfs_bulkstat( * In any case, increment to the next record. */ if (!error) - error = xfs_inobt_increment(cur, 0, &tmp); + error = xfs_btree_increment(cur, 0, &tmp); } else { /* * Start of ag. Lookup the first inode chunk. @@ -538,7 +538,7 @@ xfs_bulkstat( * Set agino to after this chunk and bump the cursor. */ agino = gino + XFS_INODES_PER_CHUNK; - error = xfs_inobt_increment(cur, 0, &tmp); + error = xfs_btree_increment(cur, 0, &tmp); cond_resched(); } /* @@ -885,7 +885,7 @@ xfs_inumbers( bufidx = 0; } if (left) { - error = xfs_inobt_increment(cur, 0, &tmp); + error = xfs_btree_increment(cur, 0, &tmp); if (error) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); cur = NULL; -- cgit v1.2.3-70-g09d2 From 8df4da4a0a642d3a016028c0d922bcb4d5a4a6d7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:55:58 +1100 Subject: [XFS] implement generic xfs_btree_decrement From: Dave Chinner [hch: split out from bigger patch and minor adaptions] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32191a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 12 +++--- fs/xfs/xfs_alloc_btree.c | 98 ++++------------------------------------------ fs/xfs/xfs_alloc_btree.h | 6 --- fs/xfs/xfs_bmap.c | 14 +++---- fs/xfs/xfs_bmap_btree.c | 90 ++++-------------------------------------- fs/xfs/xfs_bmap_btree.h | 1 - fs/xfs/xfs_btree.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 1 + fs/xfs/xfs_ialloc.c | 4 +- fs/xfs/xfs_ialloc_btree.c | 98 ++++------------------------------------------ fs/xfs/xfs_ialloc_btree.h | 6 --- 11 files changed, 137 insertions(+), 292 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index b8bb694b7da..7ca6903e235 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -961,7 +961,7 @@ xfs_alloc_ag_vextent_near( args->minlen, <bnoa, <lena); if (ltlena >= args->minlen) break; - if ((error = xfs_alloc_decrement(bno_cur_lt, 0, &i))) + if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i))) goto error0; if (!i) { xfs_btree_del_cursor(bno_cur_lt, @@ -1162,7 +1162,7 @@ xfs_alloc_ag_vextent_near( /* * Fell off the left end. 
*/ - if ((error = xfs_alloc_decrement( + if ((error = xfs_btree_decrement( bno_cur_lt, 0, &i))) goto error0; if (!i) { @@ -1321,7 +1321,7 @@ xfs_alloc_ag_vextent_size( bestflen = flen; bestfbno = fbno; for (;;) { - if ((error = xfs_alloc_decrement(cnt_cur, 0, &i))) + if ((error = xfs_btree_decrement(cnt_cur, 0, &i))) goto error0; if (i == 0) break; @@ -1416,7 +1416,7 @@ xfs_alloc_ag_vextent_small( xfs_extlen_t flen; int i; - if ((error = xfs_alloc_decrement(ccur, 0, &i))) + if ((error = xfs_btree_decrement(ccur, 0, &i))) goto error0; if (i) { if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i))) @@ -1607,7 +1607,7 @@ xfs_free_ag_extent( /* * Move the by-block cursor back to the left neighbor. */ - if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); #ifdef DEBUG @@ -1653,7 +1653,7 @@ xfs_free_ag_extent( * Back up the by-block cursor to the left neighbor, and * update its length. */ - if ((error = xfs_alloc_decrement(bno_cur, 0, &i))) + if ((error = xfs_btree_decrement(bno_cur, 0, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); nbno = ltbno; diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index febc2d5ea29..6b45481ad5b 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -256,7 +256,7 @@ xfs_alloc_delrec( xfs_btree_setbuf(cur, level, NULL); cur->bc_nlevels--; } else if (level > 0 && - (error = xfs_alloc_decrement(cur, level, &i))) + (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -272,7 +272,7 @@ xfs_alloc_delrec( * the minimum, we're done. */ if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -336,7 +336,7 @@ xfs_alloc_delrec( xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); if (level > 0 && - (error = xfs_alloc_decrement(cur, level, + (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; @@ -352,7 +352,7 @@ xfs_alloc_delrec( if (lbno != NULLAGBLOCK) { i = xfs_btree_firstrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_decrement(tcur, level, &i))) + if ((error = xfs_btree_decrement(tcur, level, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); } @@ -368,7 +368,7 @@ xfs_alloc_delrec( */ i = xfs_btree_firstrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_decrement(tcur, level, &i))) + if ((error = xfs_btree_decrement(tcur, level, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); xfs_btree_firstrec(tcur, level); @@ -468,7 +468,7 @@ xfs_alloc_delrec( * Just return. This is probably a logic error, but it's not fatal. */ else { - if (level > 0 && (error = xfs_alloc_decrement(cur, level, &i))) + if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -1779,90 +1779,6 @@ xfs_alloc_updkey( * Externally visible routines. */ -/* - * Decrement cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. 
- */ -int /* error */ -xfs_alloc_decrement( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level in btree, 0 is leaf */ - int *stat) /* success/failure */ -{ - xfs_alloc_block_t *block; /* btree block */ - int error; /* error return value */ - int lev; /* btree level */ - - ASSERT(level < cur->bc_nlevels); - /* - * Read-ahead to the left at this level. - */ - xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); - /* - * Decrement the ptr at this level. If we're still in the block - * then we're done. - */ - if (--cur->bc_ptrs[level] > 0) { - *stat = 1; - return 0; - } - /* - * Get a pointer to the btree block. - */ - block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[level]); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, - cur->bc_bufs[level]))) - return error; -#endif - /* - * If we just went off the left edge of the tree, return failure. - */ - if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * March up the tree decrementing pointers. - * Stop when we don't go off the left edge of a block. - */ - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - if (--cur->bc_ptrs[lev] > 0) - break; - /* - * Read-ahead the left block, we're going to read it - * in the next loop. - */ - xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); - } - /* - * If we went off the root then we are seriously confused. - */ - ASSERT(lev < cur->bc_nlevels); - /* - * Now walk back down the tree, fixing up the cursor's buffer - * pointers and key numbers. - */ - for (block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[lev]); lev > level; ) { - xfs_agblock_t agbno; /* block number of btree block */ - xfs_buf_t *bp; /* buffer pointer for block */ - - agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, - XFS_ALLOC_BTREE_REF))) - return error; - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; - cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); - } - *stat = 1; - return 0; -} - /* * Delete the record pointed to by cur. * The cursor refers to the place where the record was (could be inserted) @@ -1889,7 +1805,7 @@ xfs_alloc_delete( if (i == 0) { for (level = 1; level < cur->bc_nlevels; level++) { if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_alloc_decrement(cur, level, &i))) + if ((error = xfs_btree_decrement(cur, level, &i))) return error; break; } diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 643cfabbf67..b59d7fc78fe 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -94,12 +94,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur)) -/* - * Decrement cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -extern int xfs_alloc_decrement(struct xfs_btree_cur *cur, int level, int *stat); - /* * Delete the record pointed to by cur. 
* The cursor refers to the place where the record was (could be inserted) diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 4b1ec44c80a..bdbab54948f 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -820,7 +820,7 @@ xfs_bmap_add_extent_delay_real( if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, @@ -1381,13 +1381,13 @@ xfs_bmap_add_extent_unwritten_real( if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, @@ -1430,7 +1430,7 @@ xfs_bmap_add_extent_unwritten_real( if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, LEFT.br_startoff, @@ -1473,7 +1473,7 @@ xfs_bmap_add_extent_unwritten_real( if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, new->br_startoff, @@ -1556,7 +1556,7 @@ xfs_bmap_add_extent_unwritten_real( PREV.br_blockcount - new->br_blockcount, oldext))) goto done; - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; if (xfs_bmbt_update(cur, LEFT.br_startoff, LEFT.br_startblock, @@ -2108,7 +2108,7 @@ xfs_bmap_add_extent_hole_real( if ((error = xfs_bmbt_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_decrement(cur, 0, &i))) + if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_bmbt_update(cur, left.br_startoff, diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 2d29a4980cf..7b5181d34a5 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -203,7 +203,7 @@ xfs_bmbt_delrec( XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } - if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -216,7 +216,7 @@ xfs_bmbt_delrec( goto error0; } if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &j))) { + if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -237,7 +237,7 @@ xfs_bmbt_delrec( XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } - if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -282,7 +282,7 @@ xfs_bmbt_delrec( xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); tcur = 
NULL; if (level > 0) { - if ((error = xfs_bmbt_decrement(cur, + if ((error = xfs_btree_decrement(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); @@ -298,7 +298,7 @@ xfs_bmbt_delrec( if (lbno != NULLFSBLOCK) { i = xfs_btree_firstrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + if ((error = xfs_btree_decrement(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -311,7 +311,7 @@ xfs_bmbt_delrec( /* * decrement to last in block */ - if ((error = xfs_bmbt_decrement(tcur, level, &i))) { + if ((error = xfs_btree_decrement(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -383,7 +383,7 @@ xfs_bmbt_delrec( } lrecs = be16_to_cpu(left->bb_numrecs); } else { - if (level > 0 && (error = xfs_bmbt_decrement(cur, level, &i))) { + if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -1486,80 +1486,6 @@ xfs_bmdr_to_bmbt( memcpy(tpp, fpp, sizeof(*fpp) * dmxr); } -/* - * Decrement cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -int /* error */ -xfs_bmbt_decrement( - xfs_btree_cur_t *cur, - int level, - int *stat) /* success/failure */ -{ - xfs_bmbt_block_t *block; - xfs_buf_t *bp; - int error; /* error return value */ - xfs_fsblock_t fsbno; - int lev; - xfs_mount_t *mp; - xfs_trans_t *tp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, level); - ASSERT(level < cur->bc_nlevels); - - xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); - - if (--cur->bc_ptrs[level] > 0) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - block = xfs_bmbt_get_block(cur, level, &bp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - if (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - if (--cur->bc_ptrs[lev] > 0) - break; - xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); - } - if (lev == cur->bc_nlevels) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - tp = cur->bc_tp; - mp = cur->bc_mp; - for (block = xfs_bmbt_get_block(cur, lev, &bp); lev > level; ) { - fsbno = be64_to_cpu(*XFS_BMAP_PTR_IADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufl(mp, tp, fsbno, 0, &bp, - XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_BMBT_BLOCK(bp); - if ((error = xfs_btree_check_lblock(cur, block, lev, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -} - /* * Delete the record pointed to by cur. */ @@ -1582,7 +1508,7 @@ xfs_bmbt_delete( if (i == 0) { for (level = 1; level < cur->bc_nlevels; level++) { if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_bmbt_decrement(cur, level, + if ((error = xfs_btree_decrement(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index a45be38d9a3..1e0f1d10505 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -237,7 +237,6 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; * Prototypes for xfs_bmap.c to call. 
*/ extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int); -extern int xfs_bmbt_decrement(struct xfs_btree_cur *, int, int *); extern int xfs_bmbt_delete(struct xfs_btree_cur *, int *); extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); extern xfs_bmbt_block_t *xfs_bmbt_get_block(struct xfs_btree_cur *cur, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index e9ab86b7990..3d561f8f78d 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -1171,3 +1171,102 @@ error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } + +/* + * Decrement cursor by one record at the level. + * For nonzero levels the leaf-ward information is untouched. + */ +int /* error */ +xfs_btree_decrement( + struct xfs_btree_cur *cur, + int level, + int *stat) /* success/failure */ +{ + struct xfs_btree_block *block; + xfs_buf_t *bp; + int error; /* error return value */ + int lev; + union xfs_btree_ptr ptr; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, level); + + ASSERT(level < cur->bc_nlevels); + + /* Read-ahead to the left at this level. */ + xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); + + /* We're done if we remain in the block after the decrement. */ + if (--cur->bc_ptrs[level] > 0) + goto out1; + + /* Get a pointer to the btree block. */ + block = xfs_btree_get_block(cur, level, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) + goto error0; +#endif + + /* Fail if we just went off the left edge of the tree. */ + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); + if (xfs_btree_ptr_is_null(cur, &ptr)) + goto out0; + + XFS_BTREE_STATS_INC(cur, decrement); + + /* + * March up the tree decrementing pointers. + * Stop when we don't go off the left edge of a block. + */ + for (lev = level + 1; lev < cur->bc_nlevels; lev++) { + if (--cur->bc_ptrs[lev] > 0) + break; + /* Read-ahead the left block for the next loop. */ + xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); + } + + /* + * If we went off the root then we are seriously confused. + * or the root of the tree is in an inode. + */ + if (lev == cur->bc_nlevels) { + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) + goto out0; + ASSERT(0); + error = EFSCORRUPTED; + goto error0; + } + ASSERT(lev < cur->bc_nlevels); + + /* + * Now walk back down the tree, fixing up the cursor's buffer + * pointers and key numbers. + */ + for (block = xfs_btree_get_block(cur, lev, &bp); lev > level; ) { + union xfs_btree_ptr *ptrp; + + ptrp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[lev], block); + error = xfs_btree_read_buf_block(cur, ptrp, --lev, + 0, &block, &bp); + if (error) + goto error0; + xfs_btree_setbuf(cur, lev, bp); + cur->bc_ptrs[lev] = xfs_btree_get_numrecs(block); + } +out1: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index f5a4b8ec4cd..52b2da6ab32 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -506,6 +506,7 @@ xfs_btree_setbuf( * Common btree core entry points. */ int xfs_btree_increment(struct xfs_btree_cur *, int, int *); +int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); /* * Helpers. 
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 3a8f0670e07..d36b42bf3ff 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -739,7 +739,7 @@ nextag: /* * Search left with tcur, back up 1 record. */ - if ((error = xfs_inobt_decrement(tcur, 0, &i))) + if ((error = xfs_btree_decrement(tcur, 0, &i))) goto error1; doneleft = !i; if (!doneleft) { @@ -815,7 +815,7 @@ nextag: * further left. */ if (useleft) { - if ((error = xfs_inobt_decrement(tcur, 0, + if ((error = xfs_btree_decrement(tcur, 0, &i))) goto error1; doneleft = !i; diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 41717da6369..9099a32f997 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -205,7 +205,7 @@ xfs_inobt_delrec( cur->bc_bufs[level] = NULL; cur->bc_nlevels--; } else if (level > 0 && - (error = xfs_inobt_decrement(cur, level, &i))) + (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -222,7 +222,7 @@ xfs_inobt_delrec( */ if (numrecs >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { if (level > 0 && - (error = xfs_inobt_decrement(cur, level, &i))) + (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -286,7 +286,7 @@ xfs_inobt_delrec( xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); if (level > 0 && - (error = xfs_inobt_decrement(cur, level, + (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; @@ -301,7 +301,7 @@ xfs_inobt_delrec( rrecs = be16_to_cpu(right->bb_numrecs); if (lbno != NULLAGBLOCK) { xfs_btree_firstrec(tcur, level); - if ((error = xfs_inobt_decrement(tcur, level, &i))) + if ((error = xfs_btree_decrement(tcur, level, &i))) goto error0; } } @@ -315,7 +315,7 @@ xfs_inobt_delrec( * previous block. */ xfs_btree_firstrec(tcur, level); - if ((error = xfs_inobt_decrement(tcur, level, &i))) + if ((error = xfs_btree_decrement(tcur, level, &i))) goto error0; xfs_btree_firstrec(tcur, level); /* @@ -414,7 +414,7 @@ xfs_inobt_delrec( * Just return. This is probably a logic error, but it's not fatal. */ else { - if (level > 0 && (error = xfs_inobt_decrement(cur, level, &i))) + if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) return error; *stat = 1; return 0; @@ -1655,90 +1655,6 @@ xfs_inobt_updkey( * Externally visible routines. */ -/* - * Decrement cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -int /* error */ -xfs_inobt_decrement( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level in btree, 0 is leaf */ - int *stat) /* success/failure */ -{ - xfs_inobt_block_t *block; /* btree block */ - int error; - int lev; /* btree level */ - - ASSERT(level < cur->bc_nlevels); - /* - * Read-ahead to the left at this level. - */ - xfs_btree_readahead(cur, level, XFS_BTCUR_LEFTRA); - /* - * Decrement the ptr at this level. If we're still in the block - * then we're done. - */ - if (--cur->bc_ptrs[level] > 0) { - *stat = 1; - return 0; - } - /* - * Get a pointer to the btree block. - */ - block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[level]); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, - cur->bc_bufs[level]))) - return error; -#endif - /* - * If we just went off the left edge of the tree, return failure. - */ - if (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * March up the tree decrementing pointers. - * Stop when we don't go off the left edge of a block. 
- */ - for (lev = level + 1; lev < cur->bc_nlevels; lev++) { - if (--cur->bc_ptrs[lev] > 0) - break; - /* - * Read-ahead the left block, we're going to read it - * in the next loop. - */ - xfs_btree_readahead(cur, lev, XFS_BTCUR_LEFTRA); - } - /* - * If we went off the root then we are seriously confused. - */ - ASSERT(lev < cur->bc_nlevels); - /* - * Now walk back down the tree, fixing up the cursor's buffer - * pointers and key numbers. - */ - for (block = XFS_BUF_TO_INOBT_BLOCK(cur->bc_bufs[lev]); lev > level; ) { - xfs_agblock_t agbno; /* block number of btree block */ - xfs_buf_t *bp; /* buffer containing btree block */ - - agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, cur->bc_ptrs[lev], cur)); - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, agbno, 0, &bp, - XFS_INO_BTREE_REF))) - return error; - lev--; - xfs_btree_setbuf(cur, lev, bp); - block = XFS_BUF_TO_INOBT_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, lev, bp))) - return error; - cur->bc_ptrs[lev] = be16_to_cpu(block->bb_numrecs); - } - *stat = 1; - return 0; -} - /* * Delete the record pointed to by cur. * The cursor refers to the place where the record was (could be inserted) @@ -1765,7 +1681,7 @@ xfs_inobt_delete( if (i == 0) { for (level = 1; level < cur->bc_nlevels; level++) { if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_inobt_decrement(cur, level, &i))) + if ((error = xfs_btree_decrement(cur, level, &i))) return error; break; } diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 07fed62bcb7..84554595d28 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -116,12 +116,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; (XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \ i, XFS_INOBT_BLOCK_MAXRECS(1, cur))) -/* - * Decrement cursor by one record at the level. - * For nonzero levels the leaf-ward information is untouched. - */ -extern int xfs_inobt_decrement(struct xfs_btree_cur *cur, int level, int *stat); - /* * Delete the record pointed to by cur. * The cursor refers to the place where the record was (could be inserted) -- cgit v1.2.3-70-g09d2 From fe033cc848489851f0c7de48f0b1bab5d744ad8a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:56:09 +1100 Subject: [XFS] implement generic xfs_btree_lookup From: Dave Chinner [hch: split out from bigger patch and minor adaptions] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32192a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 48 +++++++ fs/xfs/xfs_alloc_btree.c | 312 +++++++--------------------------------------- fs/xfs/xfs_alloc_btree.h | 20 --- fs/xfs/xfs_bmap.c | 29 +++++ fs/xfs/xfs_bmap_btree.c | 197 +++++------------------------ fs/xfs/xfs_bmap_btree.h | 4 - fs/xfs/xfs_btree.c | 219 ++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 11 ++ fs/xfs/xfs_ialloc.c | 53 ++++++++ fs/xfs/xfs_ialloc.h | 15 +++ fs/xfs/xfs_ialloc_btree.c | 294 ++++++------------------------------------- fs/xfs/xfs_ialloc_btree.h | 20 --- 12 files changed, 487 insertions(+), 735 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 7ca6903e235..6bda0ae26c2 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -89,6 +89,54 @@ STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *, * Internal functions. */ +/* + * Lookup the record equal to [bno, len] in the btree given by cur. 
+ */ +STATIC int /* error */ +xfs_alloc_lookup_eq( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to [bno, len] + * in the btree given by cur. + */ +STATIC int /* error */ +xfs_alloc_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to [bno, len] + * in the btree given by cur. + */ +STATIC int /* error */ +xfs_alloc_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat) /* success/failure */ +{ + cur->bc_rec.a.ar_startblock = bno; + cur->bc_rec.a.ar_blockcount = len; + return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); +} + + /* * Compute aligned version of the found extent. * Takes alignment and min length into account. diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 6b45481ad5b..b81fbf1216e 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -937,223 +937,6 @@ xfs_alloc_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Lookup the record. The cursor is made to point to it, based on dir. - * Return 0 if can't find any such record, 1 for success. - */ -STATIC int /* error */ -xfs_alloc_lookup( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_lookup_t dir, /* <=, ==, or >= */ - int *stat) /* success/failure */ -{ - xfs_agblock_t agbno; /* a.g. relative btree block number */ - xfs_agnumber_t agno; /* allocation group number */ - xfs_alloc_block_t *block=NULL; /* current btree block */ - int diff; /* difference for the current key */ - int error; /* error return value */ - int keyno=0; /* current key number */ - int level; /* level in the btree */ - xfs_mount_t *mp; /* file system mount point */ - - XFS_STATS_INC(xs_abt_lookup); - /* - * Get the allocation group header, and the root block number. - */ - mp = cur->bc_mp; - - { - xfs_agf_t *agf; /* a.g. freespace header */ - - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - agno = be32_to_cpu(agf->agf_seqno); - agbno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); - } - /* - * Iterate over each level in the btree, starting at the root. - * For each level above the leaves, find the key we need, based - * on the lookup record, then follow the corresponding block - * pointer down to the next level. - */ - for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { - xfs_buf_t *bp; /* buffer pointer for btree block */ - xfs_daddr_t d; /* disk address of btree block */ - - /* - * Get the disk address we're looking for. - */ - d = XFS_AGB_TO_DADDR(mp, agno, agbno); - /* - * If the old buffer at this level is for a different block, - * throw it away, otherwise just use it. - */ - bp = cur->bc_bufs[level]; - if (bp && XFS_BUF_ADDR(bp) != d) - bp = NULL; - if (!bp) { - /* - * Need to get a new buffer. Read it, then - * set it in the cursor, releasing the old one. 
- */ - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, agno, - agbno, 0, &bp, XFS_ALLOC_BTREE_REF))) - return error; - xfs_btree_setbuf(cur, level, bp); - /* - * Point to the btree block, now that we have the buffer - */ - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, level, - bp))) - return error; - } else - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - /* - * If we already had a key match at a higher level, we know - * we need to use the first entry in this block. - */ - if (diff == 0) - keyno = 1; - /* - * Otherwise we need to search this block. Do a binary search. - */ - else { - int high; /* high entry number */ - xfs_alloc_key_t *kkbase=NULL;/* base of keys in block */ - xfs_alloc_rec_t *krbase=NULL;/* base of records in block */ - int low; /* low entry number */ - - /* - * Get a pointer to keys or records. - */ - if (level > 0) - kkbase = XFS_ALLOC_KEY_ADDR(block, 1, cur); - else - krbase = XFS_ALLOC_REC_ADDR(block, 1, cur); - /* - * Set low and high entry numbers, 1-based. - */ - low = 1; - if (!(high = be16_to_cpu(block->bb_numrecs))) { - /* - * If the block is empty, the tree must - * be an empty leaf. - */ - ASSERT(level == 0 && cur->bc_nlevels == 1); - cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; - *stat = 0; - return 0; - } - /* - * Binary search the block. - */ - while (low <= high) { - xfs_extlen_t blockcount; /* key value */ - xfs_agblock_t startblock; /* key value */ - - XFS_STATS_INC(xs_abt_compare); - /* - * keyno is average of low and high. - */ - keyno = (low + high) >> 1; - /* - * Get startblock & blockcount. - */ - if (level > 0) { - xfs_alloc_key_t *kkp; - - kkp = kkbase + keyno - 1; - startblock = be32_to_cpu(kkp->ar_startblock); - blockcount = be32_to_cpu(kkp->ar_blockcount); - } else { - xfs_alloc_rec_t *krp; - - krp = krbase + keyno - 1; - startblock = be32_to_cpu(krp->ar_startblock); - blockcount = be32_to_cpu(krp->ar_blockcount); - } - /* - * Compute difference to get next direction. - */ - if (cur->bc_btnum == XFS_BTNUM_BNO) - diff = (int)startblock - - (int)cur->bc_rec.a.ar_startblock; - else if (!(diff = (int)blockcount - - (int)cur->bc_rec.a.ar_blockcount)) - diff = (int)startblock - - (int)cur->bc_rec.a.ar_startblock; - /* - * Less than, move right. - */ - if (diff < 0) - low = keyno + 1; - /* - * Greater than, move left. - */ - else if (diff > 0) - high = keyno - 1; - /* - * Equal, we're done. - */ - else - break; - } - } - /* - * If there are more levels, set up for the next level - * by getting the block number and filling in the cursor. - */ - if (level > 0) { - /* - * If we moved left, need the previous key number, - * unless there isn't one. - */ - if (diff > 0 && --keyno < 1) - keyno = 1; - agbno = be32_to_cpu(*XFS_ALLOC_PTR_ADDR(block, keyno, cur)); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, agbno, level))) - return error; -#endif - cur->bc_ptrs[level] = keyno; - } - } - /* - * Done with the search. - * See if we need to adjust the results. - */ - if (dir != XFS_LOOKUP_LE && diff < 0) { - keyno++; - /* - * If ge search and we went off the end of the block, but it's - * not the last block, we're in the wrong block. 
- */ - if (dir == XFS_LOOKUP_GE && - keyno > be16_to_cpu(block->bb_numrecs) && - be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { - int i; - - cur->bc_ptrs[0] = keyno; - if ((error = xfs_btree_increment(cur, 0, &i))) - return error; - XFS_WANT_CORRUPTED_RETURN(i == 1); - *stat = 1; - return 0; - } - } - else if (dir == XFS_LOOKUP_LE && diff > 0) - keyno--; - cur->bc_ptrs[0] = keyno; - /* - * Return if we succeeded or not. - */ - if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) - *stat = 0; - else - *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); - return 0; -} - /* * Move 1 record left from cur/level if possible. * Update cur to reflect the new path. @@ -1918,53 +1701,6 @@ xfs_alloc_insert( return 0; } -/* - * Lookup the record equal to [bno, len] in the btree given by cur. - */ -int /* error */ -xfs_alloc_lookup_eq( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t bno, /* starting block of extent */ - xfs_extlen_t len, /* length of extent */ - int *stat) /* success/failure */ -{ - cur->bc_rec.a.ar_startblock = bno; - cur->bc_rec.a.ar_blockcount = len; - return xfs_alloc_lookup(cur, XFS_LOOKUP_EQ, stat); -} - -/* - * Lookup the first record greater than or equal to [bno, len] - * in the btree given by cur. - */ -int /* error */ -xfs_alloc_lookup_ge( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t bno, /* starting block of extent */ - xfs_extlen_t len, /* length of extent */ - int *stat) /* success/failure */ -{ - cur->bc_rec.a.ar_startblock = bno; - cur->bc_rec.a.ar_blockcount = len; - return xfs_alloc_lookup(cur, XFS_LOOKUP_GE, stat); -} - -/* - * Lookup the first record less than or equal to [bno, len] - * in the btree given by cur. - */ -int /* error */ -xfs_alloc_lookup_le( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t bno, /* starting block of extent */ - xfs_extlen_t len, /* length of extent */ - int *stat) /* success/failure */ -{ - cur->bc_rec.a.ar_startblock = bno; - cur->bc_rec.a.ar_blockcount = len; - return xfs_alloc_lookup(cur, XFS_LOOKUP_LE, stat); -} - /* * Update the record referred to by cur, to the value given by [bno, len]. * This either works (return 0) or gets an EFSCORRUPTED error. 
@@ -2052,6 +1788,51 @@ xfs_allocbt_get_maxrecs( return cur->bc_mp->m_alloc_mxr[level != 0]; } +STATIC void +xfs_allocbt_init_key_from_rec( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + ASSERT(rec->alloc.ar_startblock != 0); + + key->alloc.ar_startblock = rec->alloc.ar_startblock; + key->alloc.ar_blockcount = rec->alloc.ar_blockcount; +} + +STATIC void +xfs_allocbt_init_ptr_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + + ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno)); + ASSERT(agf->agf_roots[cur->bc_btnum] != 0); + + ptr->s = agf->agf_roots[cur->bc_btnum]; +} + +STATIC __int64_t +xfs_allocbt_key_diff( + struct xfs_btree_cur *cur, + union xfs_btree_key *key) +{ + xfs_alloc_rec_incore_t *rec = &cur->bc_rec.a; + xfs_alloc_key_t *kp = &key->alloc; + __int64_t diff; + + if (cur->bc_btnum == XFS_BTNUM_BNO) { + return (__int64_t)be32_to_cpu(kp->ar_startblock) - + rec->ar_startblock; + } + + diff = (__int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount; + if (diff) + return diff; + + return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_allocbt_trace_buf; @@ -2124,6 +1905,9 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .dup_cursor = xfs_allocbt_dup_cursor, .get_maxrecs = xfs_allocbt_get_maxrecs, + .init_key_from_rec = xfs_allocbt_init_key_from_rec, + .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, + .key_diff = xfs_allocbt_key_diff, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_allocbt_trace_enter, diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index b59d7fc78fe..aa110ff4feb 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -113,26 +113,6 @@ extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno, */ extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat); -/* - * Lookup the record equal to [bno, len] in the btree given by cur. - */ -extern int xfs_alloc_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno, - xfs_extlen_t len, int *stat); - -/* - * Lookup the first record greater than or equal to [bno, len] - * in the btree given by cur. - */ -extern int xfs_alloc_lookup_ge(struct xfs_btree_cur *cur, xfs_agblock_t bno, - xfs_extlen_t len, int *stat); - -/* - * Lookup the first record less than or equal to [bno, len] - * in the btree given by cur. - */ -extern int xfs_alloc_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno, - xfs_extlen_t len, int *stat); - /* * Update the record referred to by cur, to the value given by [bno, len]. * This either works (return 0) or gets an EFSCORRUPTED error. diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index bdbab54948f..1296b4102e9 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -402,6 +402,35 @@ xfs_bmap_disk_count_leaves( * Bmap internal routines. 
*/ +STATIC int /* error */ +xfs_bmbt_lookup_eq( + struct xfs_btree_cur *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +STATIC int /* error */ +xfs_bmbt_lookup_ge( + struct xfs_btree_cur *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + int *stat) /* success/failure */ +{ + cur->bc_rec.b.br_startoff = off; + cur->bc_rec.b.br_startblock = bno; + cur->bc_rec.b.br_blockcount = len; + return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); +} + + /* * Called from xfs_bmap_add_attrfork to handle btree format files. */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 7b5181d34a5..8403d154ae0 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -812,146 +812,6 @@ xfs_bmbt_log_ptrs( XFS_BMBT_TRACE_CURSOR(cur, EXIT); } -/* - * Lookup the record. The cursor is made to point to it, based on dir. - */ -STATIC int /* error */ -xfs_bmbt_lookup( - xfs_btree_cur_t *cur, - xfs_lookup_t dir, - int *stat) /* success/failure */ -{ - xfs_bmbt_block_t *block=NULL; - xfs_buf_t *bp; - xfs_daddr_t d; - xfs_sfiloff_t diff; - int error; /* error return value */ - xfs_fsblock_t fsbno=0; - int high; - int i; - int keyno=0; - xfs_bmbt_key_t *kkbase=NULL; - xfs_bmbt_key_t *kkp; - xfs_bmbt_rec_t *krbase=NULL; - xfs_bmbt_rec_t *krp; - int level; - int low; - xfs_mount_t *mp; - xfs_bmbt_ptr_t *pp; - xfs_bmbt_irec_t *rp; - xfs_fileoff_t startoff; - xfs_trans_t *tp; - - XFS_STATS_INC(xs_bmbt_lookup); - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, (int)dir); - tp = cur->bc_tp; - mp = cur->bc_mp; - rp = &cur->bc_rec.b; - for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { - if (level < cur->bc_nlevels - 1) { - d = XFS_FSB_TO_DADDR(mp, fsbno); - bp = cur->bc_bufs[level]; - if (bp && XFS_BUF_ADDR(bp) != d) - bp = NULL; - if (!bp) { - if ((error = xfs_btree_read_bufl(mp, tp, fsbno, - 0, &bp, XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - xfs_btree_setbuf(cur, level, bp); - block = XFS_BUF_TO_BMBT_BLOCK(bp); - if ((error = xfs_btree_check_lblock(cur, block, - level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } else - block = XFS_BUF_TO_BMBT_BLOCK(bp); - } else - block = xfs_bmbt_get_block(cur, level, &bp); - if (diff == 0) - keyno = 1; - else { - if (level > 0) - kkbase = XFS_BMAP_KEY_IADDR(block, 1, cur); - else - krbase = XFS_BMAP_REC_IADDR(block, 1, cur); - low = 1; - if (!(high = be16_to_cpu(block->bb_numrecs))) { - ASSERT(level == 0); - cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - while (low <= high) { - XFS_STATS_INC(xs_bmbt_compare); - keyno = (low + high) >> 1; - if (level > 0) { - kkp = kkbase + keyno - 1; - startoff = be64_to_cpu(kkp->br_startoff); - } else { - krp = krbase + keyno - 1; - startoff = xfs_bmbt_disk_get_startoff(krp); - } - diff = (xfs_sfiloff_t) - (startoff - rp->br_startoff); - if (diff < 0) - low = keyno + 1; - else if (diff > 0) - high = keyno - 1; - else - break; - } - } - if (level > 0) { - if (diff > 0 && --keyno < 1) - keyno = 1; - pp = XFS_BMAP_PTR_IADDR(block, keyno, cur); - fsbno = be64_to_cpu(*pp); -#ifdef DEBUG - if ((error = xfs_btree_check_lptr(cur, fsbno, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - cur->bc_ptrs[level] = keyno; - } - 
} - if (dir != XFS_LOOKUP_LE && diff < 0) { - keyno++; - /* - * If ge search and we went off the end of the block, but it's - * not the last block, we're in the wrong block. - */ - if (dir == XFS_LOOKUP_GE && keyno > be16_to_cpu(block->bb_numrecs) && - be64_to_cpu(block->bb_rightsib) != NULLDFSBNO) { - cur->bc_ptrs[0] = keyno; - if ((error = xfs_btree_increment(cur, 0, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - XFS_WANT_CORRUPTED_RETURN(i == 1); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - } - else if (dir == XFS_LOOKUP_LE && diff > 0) - keyno--; - cur->bc_ptrs[0] = keyno; - if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - } else { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); - } - return 0; -} - /* * Move 1 record left from cur/level if possible. * Update cur to reflect the new path. @@ -1809,34 +1669,6 @@ xfs_bmbt_log_recs( XFS_BMBT_TRACE_CURSOR(cur, EXIT); } -int /* error */ -xfs_bmbt_lookup_eq( - xfs_btree_cur_t *cur, - xfs_fileoff_t off, - xfs_fsblock_t bno, - xfs_filblks_t len, - int *stat) /* success/failure */ -{ - cur->bc_rec.b.br_startoff = off; - cur->bc_rec.b.br_startblock = bno; - cur->bc_rec.b.br_blockcount = len; - return xfs_bmbt_lookup(cur, XFS_LOOKUP_EQ, stat); -} - -int /* error */ -xfs_bmbt_lookup_ge( - xfs_btree_cur_t *cur, - xfs_fileoff_t off, - xfs_fsblock_t bno, - xfs_filblks_t len, - int *stat) /* success/failure */ -{ - cur->bc_rec.b.br_startoff = off; - cur->bc_rec.b.br_startblock = bno; - cur->bc_rec.b.br_blockcount = len; - return xfs_bmbt_lookup(cur, XFS_LOOKUP_GE, stat); -} - /* * Give the bmap btree a new root block. Copy the old broot contents * down into a real block and make the broot point to it. @@ -2269,6 +2101,32 @@ xfs_bmbt_get_maxrecs( return XFS_BMAP_BLOCK_IMAXRECS(level, cur); } +STATIC void +xfs_bmbt_init_key_from_rec( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + key->bmbt.br_startoff = + cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt)); +} + +STATIC void +xfs_bmbt_init_ptr_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + ptr->l = 0; +} + +STATIC __int64_t +xfs_bmbt_key_diff( + struct xfs_btree_cur *cur, + union xfs_btree_key *key) +{ + return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) - + cur->bc_rec.b.br_startoff; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_bmbt_trace_buf; @@ -2360,6 +2218,9 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .dup_cursor = xfs_bmbt_dup_cursor, .get_maxrecs = xfs_bmbt_get_maxrecs, + .init_key_from_rec = xfs_bmbt_init_key_from_rec, + .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, + .key_diff = xfs_bmbt_key_diff, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_bmbt_trace_enter, diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 1e0f1d10505..d04198cdc4b 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -254,10 +254,6 @@ extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *); extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int); -extern int xfs_bmbt_lookup_eq(struct xfs_btree_cur *, xfs_fileoff_t, - xfs_fsblock_t, xfs_filblks_t, int *); -extern int xfs_bmbt_lookup_ge(struct xfs_btree_cur *, xfs_fileoff_t, - xfs_fsblock_t, xfs_filblks_t, int *); /* * Give the bmap btree a new root block. 
Copy the old broot contents diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 3d561f8f78d..41912a01bec 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -1270,3 +1270,222 @@ error0: return error; } + +STATIC int +xfs_btree_lookup_get_block( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level in the btree */ + union xfs_btree_ptr *pp, /* ptr to btree block */ + struct xfs_btree_block **blkp) /* return btree block */ +{ + struct xfs_buf *bp; /* buffer pointer for btree block */ + int error = 0; + + /* special case the root block if in an inode */ + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + (level == cur->bc_nlevels - 1)) { + *blkp = xfs_btree_get_iroot(cur); + return 0; + } + + /* + * If the old buffer at this level for the disk address we are + * looking for re-use it. + * + * Otherwise throw it away and get a new one. + */ + bp = cur->bc_bufs[level]; + if (bp && XFS_BUF_ADDR(bp) == xfs_btree_ptr_to_daddr(cur, pp)) { + *blkp = XFS_BUF_TO_BLOCK(bp); + return 0; + } + + error = xfs_btree_read_buf_block(cur, pp, level, 0, blkp, &bp); + if (error) + return error; + + xfs_btree_setbuf(cur, level, bp); + return 0; +} + +/* + * Get current search key. For level 0 we don't actually have a key + * structure so we make one up from the record. For all other levels + * we just return the right key. + */ +STATIC union xfs_btree_key * +xfs_lookup_get_search_key( + struct xfs_btree_cur *cur, + int level, + int keyno, + struct xfs_btree_block *block, + union xfs_btree_key *kp) +{ + if (level == 0) { + cur->bc_ops->init_key_from_rec(kp, + xfs_btree_rec_addr(cur, keyno, block)); + return kp; + } + + return xfs_btree_key_addr(cur, keyno, block); +} + +/* + * Lookup the record. The cursor is made to point to it, based on dir. + * Return 0 if can't find any such record, 1 for success. + */ +int /* error */ +xfs_btree_lookup( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_lookup_t dir, /* <=, ==, or >= */ + int *stat) /* success/failure */ +{ + struct xfs_btree_block *block; /* current btree block */ + __int64_t diff; /* difference for the current key */ + int error; /* error return value */ + int keyno; /* current key number */ + int level; /* level in the btree */ + union xfs_btree_ptr *pp; /* ptr to btree block */ + union xfs_btree_ptr ptr; /* ptr to btree block */ + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, dir); + + XFS_BTREE_STATS_INC(cur, lookup); + + block = NULL; + keyno = 0; + + /* initialise start pointer from cursor */ + cur->bc_ops->init_ptr_from_cur(cur, &ptr); + pp = &ptr; + + /* + * Iterate over each level in the btree, starting at the root. + * For each level above the leaves, find the key we need, based + * on the lookup record, then follow the corresponding block + * pointer down to the next level. + */ + for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { + /* Get the block we need to do the lookup on. */ + error = xfs_btree_lookup_get_block(cur, level, pp, &block); + if (error) + goto error0; + + if (diff == 0) { + /* + * If we already had a key match at a higher level, we + * know we need to use the first entry in this block. + */ + keyno = 1; + } else { + /* Otherwise search this block. Do a binary search. */ + + int high; /* high entry number */ + int low; /* low entry number */ + + /* Set low and high entry numbers, 1-based. */ + low = 1; + high = xfs_btree_get_numrecs(block); + if (!high) { + /* Block is empty, must be an empty leaf. 
*/ + ASSERT(level == 0 && cur->bc_nlevels == 1); + + cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + + /* Binary search the block. */ + while (low <= high) { + union xfs_btree_key key; + union xfs_btree_key *kp; + + XFS_BTREE_STATS_INC(cur, compare); + + /* keyno is average of low and high. */ + keyno = (low + high) >> 1; + + /* Get current search key */ + kp = xfs_lookup_get_search_key(cur, level, + keyno, block, &key); + + /* + * Compute difference to get next direction: + * - less than, move right + * - greater than, move left + * - equal, we're done + */ + diff = cur->bc_ops->key_diff(cur, kp); + if (diff < 0) + low = keyno + 1; + else if (diff > 0) + high = keyno - 1; + else + break; + } + } + + /* + * If there are more levels, set up for the next level + * by getting the block number and filling in the cursor. + */ + if (level > 0) { + /* + * If we moved left, need the previous key number, + * unless there isn't one. + */ + if (diff > 0 && --keyno < 1) + keyno = 1; + pp = xfs_btree_ptr_addr(cur, keyno, block); + +#ifdef DEBUG + error = xfs_btree_check_ptr(cur, pp, 0, level); + if (error) + goto error0; +#endif + cur->bc_ptrs[level] = keyno; + } + } + + /* Done with the search. See if we need to adjust the results. */ + if (dir != XFS_LOOKUP_LE && diff < 0) { + keyno++; + /* + * If ge search and we went off the end of the block, but it's + * not the last block, we're in the wrong block. + */ + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); + if (dir == XFS_LOOKUP_GE && + keyno > xfs_btree_get_numrecs(block) && + !xfs_btree_ptr_is_null(cur, &ptr)) { + int i; + + cur->bc_ptrs[0] = keyno; + error = xfs_btree_increment(cur, 0, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_RETURN(i == 1); + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + } + } else if (dir == XFS_LOOKUP_LE && diff > 0) + keyno--; + cur->bc_ptrs[0] = keyno; + + /* Return if we succeeded or not. */ + if (keyno == 0 || keyno > xfs_btree_get_numrecs(block)) + *stat = 0; + else if (dir != XFS_LOOKUP_EQ || diff == 0) + *stat = 1; + else + *stat = 0; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 52b2da6ab32..c151175a5fd 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -190,6 +190,16 @@ struct xfs_btree_ops { /* records in block/level */ int (*get_maxrecs)(struct xfs_btree_cur *cur, int level); + /* init values of btree structures */ + void (*init_key_from_rec)(union xfs_btree_key *key, + union xfs_btree_rec *rec); + void (*init_ptr_from_cur)(struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr); + + /* difference between key value and cursor value */ + __int64_t (*key_diff)(struct xfs_btree_cur *cur, + union xfs_btree_key *key); + /* btree tracing */ #ifdef XFS_BTREE_TRACE void (*trace_enter)(struct xfs_btree_cur *, const char *, @@ -507,6 +517,7 @@ xfs_btree_setbuf( */ int xfs_btree_increment(struct xfs_btree_cur *, int, int *); int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); +int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); /* * Helpers. diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index d36b42bf3ff..bbf537f64c4 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -118,6 +118,59 @@ xfs_ialloc_cluster_alignment( return 1; } +/* + * Lookup the record equal to ino in the btree given by cur. 
+ */ +STATIC int /* error */ +xfs_inobt_lookup_eq( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat); +} + +/* + * Lookup the first record greater than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_ge( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); +} + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int /* error */ +xfs_inobt_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free, /* free inode mask */ + int *stat) /* success/failure */ +{ + cur->bc_rec.i.ir_startino = ino; + cur->bc_rec.i.ir_freecount = fcnt; + cur->bc_rec.i.ir_free = free; + return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); +} + /* * Allocate new inodes in the allocation group specified by agbp. * Return 0 for success, else error code. diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index 4e30ec1d13b..4026578bc26 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h @@ -154,6 +154,21 @@ xfs_ialloc_pagi_init( struct xfs_trans *tp, /* transaction pointer */ xfs_agnumber_t agno); /* allocation group number */ +/* + * Lookup the first record greater than or equal to ino + * in the btree given by cur. + */ +int xfs_inobt_lookup_ge(struct xfs_btree_cur *cur, xfs_agino_t ino, + __int32_t fcnt, xfs_inofree_t free, int *stat); + +/* + * Lookup the first record less than or equal to ino + * in the btree given by cur. + */ +int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, + __int32_t fcnt, xfs_inofree_t free, int *stat); + + #endif /* __KERNEL__ */ #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 9099a32f997..161c3b2e245 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -828,212 +828,6 @@ xfs_inobt_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Lookup the record. The cursor is made to point to it, based on dir. - * Return 0 if can't find any such record, 1 for success. - */ -STATIC int /* error */ -xfs_inobt_lookup( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_lookup_t dir, /* <=, ==, or >= */ - int *stat) /* success/failure */ -{ - xfs_agblock_t agbno; /* a.g. relative btree block number */ - xfs_agnumber_t agno; /* allocation group number */ - xfs_inobt_block_t *block=NULL; /* current btree block */ - __int64_t diff; /* difference for the current key */ - int error; /* error return value */ - int keyno=0; /* current key number */ - int level; /* level in the btree */ - xfs_mount_t *mp; /* file system mount point */ - - /* - * Get the allocation group header, and the root block number. - */ - mp = cur->bc_mp; - { - xfs_agi_t *agi; /* a.g. 
inode header */ - - agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); - agno = be32_to_cpu(agi->agi_seqno); - agbno = be32_to_cpu(agi->agi_root); - } - /* - * Iterate over each level in the btree, starting at the root. - * For each level above the leaves, find the key we need, based - * on the lookup record, then follow the corresponding block - * pointer down to the next level. - */ - for (level = cur->bc_nlevels - 1, diff = 1; level >= 0; level--) { - xfs_buf_t *bp; /* buffer pointer for btree block */ - xfs_daddr_t d; /* disk address of btree block */ - - /* - * Get the disk address we're looking for. - */ - d = XFS_AGB_TO_DADDR(mp, agno, agbno); - /* - * If the old buffer at this level is for a different block, - * throw it away, otherwise just use it. - */ - bp = cur->bc_bufs[level]; - if (bp && XFS_BUF_ADDR(bp) != d) - bp = NULL; - if (!bp) { - /* - * Need to get a new buffer. Read it, then - * set it in the cursor, releasing the old one. - */ - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - agno, agbno, 0, &bp, XFS_INO_BTREE_REF))) - return error; - xfs_btree_setbuf(cur, level, bp); - /* - * Point to the btree block, now that we have the buffer - */ - block = XFS_BUF_TO_INOBT_BLOCK(bp); - if ((error = xfs_btree_check_sblock(cur, block, level, - bp))) - return error; - } else - block = XFS_BUF_TO_INOBT_BLOCK(bp); - /* - * If we already had a key match at a higher level, we know - * we need to use the first entry in this block. - */ - if (diff == 0) - keyno = 1; - /* - * Otherwise we need to search this block. Do a binary search. - */ - else { - int high; /* high entry number */ - xfs_inobt_key_t *kkbase=NULL;/* base of keys in block */ - xfs_inobt_rec_t *krbase=NULL;/* base of records in block */ - int low; /* low entry number */ - - /* - * Get a pointer to keys or records. - */ - if (level > 0) - kkbase = XFS_INOBT_KEY_ADDR(block, 1, cur); - else - krbase = XFS_INOBT_REC_ADDR(block, 1, cur); - /* - * Set low and high entry numbers, 1-based. - */ - low = 1; - if (!(high = be16_to_cpu(block->bb_numrecs))) { - /* - * If the block is empty, the tree must - * be an empty leaf. - */ - ASSERT(level == 0 && cur->bc_nlevels == 1); - cur->bc_ptrs[0] = dir != XFS_LOOKUP_LE; - *stat = 0; - return 0; - } - /* - * Binary search the block. - */ - while (low <= high) { - xfs_agino_t startino; /* key value */ - - /* - * keyno is average of low and high. - */ - keyno = (low + high) >> 1; - /* - * Get startino. - */ - if (level > 0) { - xfs_inobt_key_t *kkp; - - kkp = kkbase + keyno - 1; - startino = be32_to_cpu(kkp->ir_startino); - } else { - xfs_inobt_rec_t *krp; - - krp = krbase + keyno - 1; - startino = be32_to_cpu(krp->ir_startino); - } - /* - * Compute difference to get next direction. - */ - diff = (__int64_t) - startino - cur->bc_rec.i.ir_startino; - /* - * Less than, move right. - */ - if (diff < 0) - low = keyno + 1; - /* - * Greater than, move left. - */ - else if (diff > 0) - high = keyno - 1; - /* - * Equal, we're done. - */ - else - break; - } - } - /* - * If there are more levels, set up for the next level - * by getting the block number and filling in the cursor. - */ - if (level > 0) { - /* - * If we moved left, need the previous key number, - * unless there isn't one. - */ - if (diff > 0 && --keyno < 1) - keyno = 1; - agbno = be32_to_cpu(*XFS_INOBT_PTR_ADDR(block, keyno, cur)); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, agbno, level))) - return error; -#endif - cur->bc_ptrs[level] = keyno; - } - } - /* - * Done with the search. - * See if we need to adjust the results. 
- */ - if (dir != XFS_LOOKUP_LE && diff < 0) { - keyno++; - /* - * If ge search and we went off the end of the block, but it's - * not the last block, we're in the wrong block. - */ - if (dir == XFS_LOOKUP_GE && - keyno > be16_to_cpu(block->bb_numrecs) && - be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { - int i; - - cur->bc_ptrs[0] = keyno; - if ((error = xfs_btree_increment(cur, 0, &i))) - return error; - ASSERT(i == 1); - *stat = 1; - return 0; - } - } - else if (dir == XFS_LOOKUP_LE && diff > 0) - keyno--; - cur->bc_ptrs[0] = keyno; - /* - * Return if we succeeded or not. - */ - if (keyno == 0 || keyno > be16_to_cpu(block->bb_numrecs)) - *stat = 0; - else - *stat = ((dir != XFS_LOOKUP_EQ) || (diff == 0)); - return 0; -} - /* * Move 1 record left from cur/level if possible. * Update cur to reflect the new path. @@ -1797,59 +1591,6 @@ xfs_inobt_insert( return 0; } -/* - * Lookup the record equal to ino in the btree given by cur. - */ -int /* error */ -xfs_inobt_lookup_eq( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ - int *stat) /* success/failure */ -{ - cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_inobt_lookup(cur, XFS_LOOKUP_EQ, stat); -} - -/* - * Lookup the first record greater than or equal to ino - * in the btree given by cur. - */ -int /* error */ -xfs_inobt_lookup_ge( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ - int *stat) /* success/failure */ -{ - cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_inobt_lookup(cur, XFS_LOOKUP_GE, stat); -} - -/* - * Lookup the first record less than or equal to ino - * in the btree given by cur. - */ -int /* error */ -xfs_inobt_lookup_le( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free, /* free inode mask */ - int *stat) /* success/failure */ -{ - cur->bc_rec.i.ir_startino = ino; - cur->bc_rec.i.ir_freecount = fcnt; - cur->bc_rec.i.ir_free = free; - return xfs_inobt_lookup(cur, XFS_LOOKUP_LE, stat); -} - /* * Update the record referred to by cur, to the value given * by [ino, fcnt, free]. 
@@ -1918,6 +1659,38 @@ xfs_inobt_get_maxrecs( return cur->bc_mp->m_inobt_mxr[level != 0]; } +STATIC void +xfs_inobt_init_key_from_rec( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + key->inobt.ir_startino = rec->inobt.ir_startino; +} + +/* + * intial value of ptr for lookup + */ +STATIC void +xfs_inobt_init_ptr_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + struct xfs_agi *agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); + + ASSERT(cur->bc_private.a.agno == be32_to_cpu(agi->agi_seqno)); + + ptr->s = agi->agi_root; +} + +STATIC __int64_t +xfs_inobt_key_diff( + struct xfs_btree_cur *cur, + union xfs_btree_key *key) +{ + return (__int64_t)be32_to_cpu(key->inobt.ir_startino) - + cur->bc_rec.i.ir_startino; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_inobt_trace_buf; @@ -1990,6 +1763,9 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .dup_cursor = xfs_inobt_dup_cursor, .get_maxrecs = xfs_inobt_get_maxrecs, + .init_key_from_rec = xfs_inobt_init_key_from_rec, + .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, + .key_diff = xfs_inobt_key_diff, #ifdef XFS_BTREE_TRACE .trace_enter = xfs_inobt_trace_enter, diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 84554595d28..674b459521f 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -135,26 +135,6 @@ extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, */ extern int xfs_inobt_insert(struct xfs_btree_cur *cur, int *stat); -/* - * Lookup the record equal to ino in the btree given by cur. - */ -extern int xfs_inobt_lookup_eq(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free, int *stat); - -/* - * Lookup the first record greater than or equal to ino - * in the btree given by cur. - */ -extern int xfs_inobt_lookup_ge(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free, int *stat); - -/* - * Lookup the first record less than or equal to ino - * in the btree given by cur. - */ -extern int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free, int *stat); - /* * Update the record referred to by cur, to the value given * by [ino, fcnt, free]. -- cgit v1.2.3-70-g09d2 From 38bb74237d2d94c1aced2ec626d7d0f317e360da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:56:22 +1100 Subject: [XFS] implement generic xfs_btree_updkey From: Dave Chinner Note that there are many > 80 char lines introduced due to the xfs_btree_key casts. But the places where this happens is throw-away code once the whole btree code gets merged into a common implementation. The same is true for the temporary xfs_alloc_log_keys define to the new name. All old users will be gone after a few patches. 
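For reference, this is the cast-heavy call-site pattern the note above refers
to, copied verbatim from the xfs_alloc_delrec() hunk in xfs_alloc_btree.c
further below (the bmap and inobt btrees are converted the same way): the
per-btree key pointer is cast to the generic key union so the shared helper
can take it, which is exactly where the over-80-column lines come from.

	if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1)))
		return error;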
[hch: split out from bigger patch and minor adaptions] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32193a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 50 +++------------------------ fs/xfs/xfs_bmap_btree.c | 49 +++----------------------- fs/xfs/xfs_btree.c | 87 +++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 1 + fs/xfs/xfs_ialloc_btree.c | 50 +++------------------------ 5 files changed, 103 insertions(+), 134 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index b81fbf1216e..28c6a698f56 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -52,7 +52,6 @@ STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_alloc_key_t *, xfs_btree_cur_t **, int *); -STATIC int xfs_alloc_updkey(xfs_btree_cur_t *, xfs_alloc_key_t *, int); /* * Internal functions. @@ -265,7 +264,7 @@ xfs_alloc_delrec( * If we deleted the leftmost entry in the block, update the * key values above us in the tree. */ - if (ptr == 1 && (error = xfs_alloc_updkey(cur, lkp, level + 1))) + if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1))) return error; /* * If the number of records remaining in the block is at least @@ -798,7 +797,7 @@ xfs_alloc_insrec( /* * If we inserted at the start of a block, update the parents' keys. */ - if (optr == 1 && (error = xfs_alloc_updkey(cur, &key, level + 1))) + if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) return error; /* * Look to see if the longest extent in the allocation group @@ -1068,7 +1067,7 @@ xfs_alloc_lshift( /* * Update the parent key values of right. */ - if ((error = xfs_alloc_updkey(cur, rkp, level + 1))) + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) return error; /* * Slide the cursor value left one. @@ -1354,7 +1353,7 @@ xfs_alloc_rshift( i = xfs_btree_lastrec(tcur, level); XFS_WANT_CORRUPTED_GOTO(i == 1, error0); if ((error = xfs_btree_increment(tcur, level, &i)) || - (error = xfs_alloc_updkey(tcur, rkp, level + 1))) + (error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) goto error0; xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); *stat = 1; @@ -1519,45 +1518,6 @@ xfs_alloc_split( return 0; } -/* - * Update keys at all levels from here to the root along the cursor's path. - */ -STATIC int /* error */ -xfs_alloc_updkey( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_alloc_key_t *keyp, /* new key value to update to */ - int level) /* starting level for update */ -{ - int ptr; /* index of key in block */ - - /* - * Go up the tree from this level toward the root. - * At each level, update the key value to the value input. - * Stop when we reach a level where the cursor isn't pointing - * at the first entry in the block. 
- */ - for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { - xfs_alloc_block_t *block; /* btree block */ - xfs_buf_t *bp; /* buffer for block */ -#ifdef DEBUG - int error; /* error return value */ -#endif - xfs_alloc_key_t *kp; /* ptr to btree block keys */ - - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - ptr = cur->bc_ptrs[level]; - kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); - *kp = *keyp; - xfs_alloc_log_keys(cur, bp, ptr, ptr); - } - return 0; -} - /* * Externally visible routines. */ @@ -1765,7 +1725,7 @@ xfs_alloc_update( key.ar_startblock = cpu_to_be32(bno); key.ar_blockcount = cpu_to_be32(len); - if ((error = xfs_alloc_updkey(cur, &key, 1))) + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) return error; } return 0; diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 8403d154ae0..0a56257b7fd 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -56,7 +56,6 @@ STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, __uint64_t *, xfs_btree_cur_t **, int *); -STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int); #undef EXIT @@ -211,7 +210,7 @@ xfs_bmbt_delrec( *stat = 1; return 0; } - if (ptr == 1 && (error = xfs_bmbt_updkey(cur, kp, level + 1))) { + if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -635,7 +634,7 @@ xfs_bmbt_insrec( kp + ptr); } #endif - if (optr == 1 && (error = xfs_bmbt_updkey(cur, &key, level + 1))) { + if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } @@ -935,7 +934,7 @@ xfs_bmbt_lshift( key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); rkp = &key; } - if ((error = xfs_bmbt_updkey(cur, rkp, level + 1))) { + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } @@ -1067,7 +1066,7 @@ xfs_bmbt_rshift( goto error1; } XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_bmbt_updkey(tcur, rkp, level + 1))) { + if ((error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) { XFS_BMBT_TRACE_CURSOR(tcur, ERROR); goto error1; } @@ -1276,44 +1275,6 @@ xfs_bmbt_split( return 0; } - -/* - * Update keys for the record. - */ -STATIC int -xfs_bmbt_updkey( - xfs_btree_cur_t *cur, - xfs_bmbt_key_t *keyp, /* on-disk format */ - int level) -{ - xfs_bmbt_block_t *block; - xfs_buf_t *bp; -#ifdef DEBUG - int error; -#endif - xfs_bmbt_key_t *kp; - int ptr; - - ASSERT(level >= 1); - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGIK(cur, level, keyp); - for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { - block = xfs_bmbt_get_block(cur, level, &bp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - ptr = cur->bc_ptrs[level]; - kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); - *kp = *keyp; - xfs_bmbt_log_keys(cur, bp, ptr, ptr); - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; -} - /* * Convert on-disk form of btree root to in-memory form. 
*/ @@ -2039,7 +2000,7 @@ xfs_bmbt_update( return 0; } key.br_startoff = cpu_to_be64(off); - if ((error = xfs_bmbt_updkey(cur, &key, 1))) { + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 41912a01bec..1459a2b9a72 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -34,6 +34,7 @@ #include "xfs_attr_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" +#include "xfs_inode_item.h" #include "xfs_btree.h" #include "xfs_btree_trace.h" #include "xfs_ialloc.h" @@ -1064,6 +1065,45 @@ xfs_btree_read_buf_block( return error; } +/* + * Copy keys from one btree block to another. + */ +STATIC void +xfs_btree_copy_keys( + struct xfs_btree_cur *cur, + union xfs_btree_key *dst_key, + union xfs_btree_key *src_key, + int numkeys) +{ + ASSERT(numkeys >= 0); + memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); +} + +/* + * Log key values from the btree block. + */ +STATIC void +xfs_btree_log_keys( + struct xfs_btree_cur *cur, + struct xfs_buf *bp, + int first, + int last) +{ + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); + + if (bp) { + xfs_trans_log_buf(cur->bc_tp, bp, + xfs_btree_key_offset(cur, first), + xfs_btree_key_offset(cur, last + 1) - 1); + } else { + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, + xfs_ilog_fbroot(cur->bc_private.b.whichfork)); + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); +} + /* * Increment cursor by one record at the level. * For nonzero levels the leaf-ward information is untouched. @@ -1489,3 +1529,50 @@ error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } + +/* + * Update keys at all levels from here to the root along the cursor's path. + */ +int +xfs_btree_updkey( + struct xfs_btree_cur *cur, + union xfs_btree_key *keyp, + int level) +{ + struct xfs_btree_block *block; + struct xfs_buf *bp; + union xfs_btree_key *kp; + int ptr; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGIK(cur, level, keyp); + + ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1); + + /* + * Go up the tree from this level toward the root. + * At each level, update the key value to the value input. + * Stop when we reach a level where the cursor isn't pointing + * at the first entry in the block. + */ + for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { +#ifdef DEBUG + int error; +#endif + block = xfs_btree_get_block(cur, level, &bp); +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } +#endif + ptr = cur->bc_ptrs[level]; + kp = xfs_btree_key_addr(cur, ptr, block); + xfs_btree_copy_keys(cur, kp, keyp, 1); + xfs_btree_log_keys(cur, bp, ptr, ptr); + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index c151175a5fd..ac3f527b0ac 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -518,6 +518,7 @@ xfs_btree_setbuf( int xfs_btree_increment(struct xfs_btree_cur *, int, int *); int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); +int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); /* * Helpers. 
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 161c3b2e245..cd8bb519cb5 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -48,7 +48,6 @@ STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); STATIC int xfs_inobt_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_inobt_key_t *, xfs_btree_cur_t **, int *); -STATIC int xfs_inobt_updkey(xfs_btree_cur_t *, xfs_inobt_key_t *, int); /* * Single level of the xfs_inobt_delete record deletion routine. @@ -214,7 +213,7 @@ xfs_inobt_delrec( * If we deleted the leftmost entry in the block, update the * key values above us in the tree. */ - if (ptr == 1 && (error = xfs_inobt_updkey(cur, kp, level + 1))) + if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) return error; /* * If the number of records remaining in the block is at least @@ -723,7 +722,7 @@ xfs_inobt_insrec( /* * If we inserted at the start of a block, update the parents' keys. */ - if (optr == 1 && (error = xfs_inobt_updkey(cur, &key, level + 1))) + if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) return error; /* * Return the new block number, if any. @@ -960,7 +959,7 @@ xfs_inobt_lshift( /* * Update the parent key values of right. */ - if ((error = xfs_inobt_updkey(cur, rkp, level + 1))) + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) return error; /* * Slide the cursor value left one. @@ -1238,7 +1237,7 @@ xfs_inobt_rshift( return error; xfs_btree_lastrec(tcur, level); if ((error = xfs_btree_increment(tcur, level, &i)) || - (error = xfs_inobt_updkey(tcur, rkp, level + 1))) { + (error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) { xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); return error; } @@ -1406,45 +1405,6 @@ xfs_inobt_split( return 0; } -/* - * Update keys at all levels from here to the root along the cursor's path. - */ -STATIC int /* error */ -xfs_inobt_updkey( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_inobt_key_t *keyp, /* new key value to update to */ - int level) /* starting level for update */ -{ - int ptr; /* index of key in block */ - - /* - * Go up the tree from this level toward the root. - * At each level, update the key value to the value input. - * Stop when we reach a level where the cursor isn't pointing - * at the first entry in the block. - */ - for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) { - xfs_buf_t *bp; /* buffer for block */ - xfs_inobt_block_t *block; /* btree block */ -#ifdef DEBUG - int error; /* error return value */ -#endif - xfs_inobt_key_t *kp; /* ptr to btree block keys */ - - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - ptr = cur->bc_ptrs[level]; - kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); - *kp = *keyp; - xfs_inobt_log_keys(cur, bp, ptr, ptr); - } - return 0; -} - /* * Externally visible routines. 
*/ @@ -1637,7 +1597,7 @@ xfs_inobt_update( xfs_inobt_key_t key; /* key containing [ino] */ key.ir_startino = cpu_to_be32(ino); - if ((error = xfs_inobt_updkey(cur, &key, 1))) + if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) return error; } return 0; -- cgit v1.2.3-70-g09d2 From 278d0ca14e889c3932a05d1a68675252a12b3466 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:56:32 +1100 Subject: [XFS] implement generic xfs_btree_update From: Dave Chinner The most complicated part here is the lastrec tracking for the alloc btree. Most logic is in the update_lastrec method which has to do some hopefully good enough dirty magic to maintain it. [hch: split out from bigger patch and a rework of the lastrec logic] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32194a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 17 +++++++ fs/xfs/xfs_alloc_btree.c | 106 +++++++++++++++------------------------- fs/xfs/xfs_alloc_btree.h | 7 --- fs/xfs/xfs_bmap.c | 18 +++++++ fs/xfs/xfs_bmap_btree.c | 45 ----------------- fs/xfs/xfs_bmap_btree.h | 2 - fs/xfs/xfs_btree.c | 121 ++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 14 ++++++ fs/xfs/xfs_ialloc.c | 20 ++++++++ fs/xfs/xfs_ialloc_btree.c | 52 -------------------- fs/xfs/xfs_ialloc_btree.h | 8 --- 11 files changed, 228 insertions(+), 182 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 6bda0ae26c2..875e1bae194 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -136,6 +136,23 @@ xfs_alloc_lookup_le( return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); } +/* + * Update the record referred to by cur to the value given + * by [bno, len]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +STATIC int /* error */ +xfs_alloc_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len) /* length of extent */ +{ + union xfs_btree_rec rec; + + rec.alloc.ar_startblock = cpu_to_be32(bno); + rec.alloc.ar_blockcount = cpu_to_be32(len); + return xfs_btree_update(cur, &rec); +} /* * Compute aligned version of the found extent. diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 28c6a698f56..c5c32999b81 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -1661,83 +1661,50 @@ xfs_alloc_insert( return 0; } +STATIC struct xfs_btree_cur * +xfs_allocbt_dup_cursor( + struct xfs_btree_cur *cur) +{ + return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp, + cur->bc_private.a.agbp, cur->bc_private.a.agno, + cur->bc_btnum); +} + /* - * Update the record referred to by cur, to the value given by [bno, len]. - * This either works (return 0) or gets an EFSCORRUPTED error. + * Update the longest extent in the AGF */ -int /* error */ -xfs_alloc_update( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t bno, /* starting block of extent */ - xfs_extlen_t len) /* length of extent */ +STATIC void +xfs_allocbt_update_lastrec( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + union xfs_btree_rec *rec, + int ptr, + int reason) { - xfs_alloc_block_t *block; /* btree block to update */ - int error; /* error return value */ - int ptr; /* current record number (updating) */ + struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); + xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); + __be32 len; - ASSERT(len > 0); - /* - * Pick up the a.g. 
freelist struct and the current block. - */ - block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) - return error; -#endif - /* - * Get the address of the rec to be updated. - */ - ptr = cur->bc_ptrs[0]; - { - xfs_alloc_rec_t *rp; /* pointer to updated record */ + ASSERT(cur->bc_btnum == XFS_BTNUM_CNT); - rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); + switch (reason) { + case LASTREC_UPDATE: /* - * Fill in the new contents and log them. + * If this is the last leaf block and it's the last record, + * then update the size of the longest extent in the AG. */ - rp->ar_startblock = cpu_to_be32(bno); - rp->ar_blockcount = cpu_to_be32(len); - xfs_alloc_log_recs(cur, cur->bc_bufs[0], ptr, ptr); - } - /* - * If it's the by-size btree and it's the last leaf block and - * it's the last record... then update the size of the longest - * extent in the a.g., which we cache in the a.g. freelist header. - */ - if (cur->bc_btnum == XFS_BTNUM_CNT && - be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && - ptr == be16_to_cpu(block->bb_numrecs)) { - xfs_agf_t *agf; /* a.g. freespace header */ - xfs_agnumber_t seqno; - - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - seqno = be32_to_cpu(agf->agf_seqno); - cur->bc_mp->m_perag[seqno].pagf_longest = len; - agf->agf_longest = cpu_to_be32(len); - xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, - XFS_AGF_LONGEST); - } - /* - * Updating first record in leaf. Pass new key value up to our parent. - */ - if (ptr == 1) { - xfs_alloc_key_t key; /* key containing [bno, len] */ - - key.ar_startblock = cpu_to_be32(bno); - key.ar_blockcount = cpu_to_be32(len); - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) - return error; + if (ptr != xfs_btree_get_numrecs(block)) + return; + len = rec->alloc.ar_blockcount; + break; + default: + ASSERT(0); + return; } - return 0; -} -STATIC struct xfs_btree_cur * -xfs_allocbt_dup_cursor( - struct xfs_btree_cur *cur) -{ - return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agbp, cur->bc_private.a.agno, - cur->bc_btnum); + agf->agf_longest = len; + cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len); + xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } STATIC int @@ -1864,6 +1831,7 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .key_len = sizeof(xfs_alloc_key_t), .dup_cursor = xfs_allocbt_dup_cursor, + .update_lastrec = xfs_allocbt_update_lastrec, .get_maxrecs = xfs_allocbt_get_maxrecs, .init_key_from_rec = xfs_allocbt_init_key_from_rec, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, @@ -1902,6 +1870,8 @@ xfs_allocbt_init_cursor( cur->bc_blocklog = mp->m_sb.sb_blocklog; cur->bc_ops = &xfs_allocbt_ops; + if (btnum == XFS_BTNUM_CNT) + cur->bc_flags = XFS_BTREE_LASTREC_UPDATE; cur->bc_private.a.agbp = agbp; cur->bc_private.a.agno = agno; diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index aa110ff4feb..81e2f360781 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -113,13 +113,6 @@ extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno, */ extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat); -/* - * Update the record referred to by cur, to the value given by [bno, len]. - * This either works (return 0) or gets an EFSCORRUPTED error. 
- */ -extern int xfs_alloc_update(struct xfs_btree_cur *cur, xfs_agblock_t bno, - xfs_extlen_t len); - extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 1296b4102e9..7d6c4ace805 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -430,6 +430,24 @@ xfs_bmbt_lookup_ge( return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat); } +/* +* Update the record referred to by cur to the value given + * by [off, bno, len, state]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +STATIC int +xfs_bmbt_update( + struct xfs_btree_cur *cur, + xfs_fileoff_t off, + xfs_fsblock_t bno, + xfs_filblks_t len, + xfs_exntst_t state) +{ + union xfs_btree_rec rec; + + xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state); + return xfs_btree_update(cur, &rec); +} /* * Called from xfs_bmap_add_attrfork to handle btree format files. diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 0a56257b7fd..99200a9898f 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -1963,51 +1963,6 @@ xfs_bmbt_to_bmdr( memcpy(tpp, fpp, sizeof(*fpp) * dmxr); } -/* - * Update the record to the passed values. - */ -int -xfs_bmbt_update( - xfs_btree_cur_t *cur, - xfs_fileoff_t off, - xfs_fsblock_t bno, - xfs_filblks_t len, - xfs_exntst_t state) -{ - xfs_bmbt_block_t *block; - xfs_buf_t *bp; - int error; - xfs_bmbt_key_t key; - int ptr; - xfs_bmbt_rec_t *rp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGFFFI(cur, (xfs_dfiloff_t)off, (xfs_dfsbno_t)bno, - (xfs_dfilblks_t)len, (int)state); - block = xfs_bmbt_get_block(cur, 0, &bp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, 0, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - ptr = cur->bc_ptrs[0]; - rp = XFS_BMAP_REC_IADDR(block, ptr, cur); - xfs_bmbt_disk_set_allf(rp, off, bno, len, state); - xfs_bmbt_log_recs(cur, bp, ptr, ptr); - if (ptr > 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; - } - key.br_startoff = cpu_to_be64(off); - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; -} - /* * Check extent records, which have just been read, for * any bit in the extent flag field. ASSERT on debug diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index d04198cdc4b..6bfd62ec54f 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -274,8 +274,6 @@ extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int); -extern int xfs_bmbt_update(struct xfs_btree_cur *, xfs_fileoff_t, - xfs_fsblock_t, xfs_filblks_t, xfs_exntst_t); extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, int); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 1459a2b9a72..205272f282d 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -988,6 +988,30 @@ xfs_btree_get_sibling( } } +/* + * Return true if ptr is the last record in the btree and + * we need to track updateѕ to this record. The decision + * will be further refined in the update_lastrec method. 
+ */ +STATIC int +xfs_btree_is_lastrec( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + int level) +{ + union xfs_btree_ptr ptr; + + if (level > 0) + return 0; + if (!(cur->bc_flags & XFS_BTREE_LASTREC_UPDATE)) + return 0; + + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); + if (!xfs_btree_ptr_is_null(cur, &ptr)) + return 0; + return 1; +} + STATIC xfs_daddr_t xfs_btree_ptr_to_daddr( struct xfs_btree_cur *cur, @@ -1079,6 +1103,20 @@ xfs_btree_copy_keys( memcpy(dst_key, src_key, numkeys * cur->bc_ops->key_len); } +/* + * Copy records from one btree block to another. + */ +STATIC void +xfs_btree_copy_recs( + struct xfs_btree_cur *cur, + union xfs_btree_rec *dst_rec, + union xfs_btree_rec *src_rec, + int numrecs) +{ + ASSERT(numrecs >= 0); + memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); +} + /* * Log key values from the btree block. */ @@ -1104,6 +1142,26 @@ xfs_btree_log_keys( XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); } +/* + * Log record values from the btree block. + */ +STATIC void +xfs_btree_log_recs( + struct xfs_btree_cur *cur, + struct xfs_buf *bp, + int first, + int last) +{ + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); + + xfs_trans_log_buf(cur->bc_tp, bp, + xfs_btree_rec_offset(cur, first), + xfs_btree_rec_offset(cur, last + 1) - 1); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); +} + /* * Increment cursor by one record at the level. * For nonzero levels the leaf-ward information is untouched. @@ -1576,3 +1634,66 @@ xfs_btree_updkey( XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); return 0; } + +/* + * Update the record referred to by cur to the value in the + * given record. This either works (return 0) or gets an + * EFSCORRUPTED error. + */ +int +xfs_btree_update( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + struct xfs_btree_block *block; + struct xfs_buf *bp; + int error; + int ptr; + union xfs_btree_rec *rp; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGR(cur, rec); + + /* Pick up the current block. */ + block = xfs_btree_get_block(cur, 0, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, 0, bp); + if (error) + goto error0; +#endif + /* Get the address of the rec to be updated. */ + ptr = cur->bc_ptrs[0]; + rp = xfs_btree_rec_addr(cur, ptr, block); + + /* Fill in the new contents and log them. */ + xfs_btree_copy_recs(cur, rp, rec, 1); + xfs_btree_log_recs(cur, bp, ptr, ptr); + + /* + * If we are tracking the last record in the tree and + * we are at the far right edge of the tree, update it. + */ + if (xfs_btree_is_lastrec(cur, block, 0)) { + cur->bc_ops->update_lastrec(cur, block, rec, + ptr, LASTREC_UPDATE); + } + + /* Updating first rec in leaf. Pass new key value up to our parent. 
*/ + if (ptr == 1) { + union xfs_btree_key key; + + cur->bc_ops->init_key_from_rec(&key, rec); + error = xfs_btree_updkey(cur, &key, 1); + if (error) + goto error0; + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index ac3f527b0ac..c3bfa5556c1 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -187,6 +187,12 @@ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + /* update last record information */ + void (*update_lastrec)(struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + union xfs_btree_rec *rec, + int ptr, int reason); + /* records in block/level */ int (*get_maxrecs)(struct xfs_btree_cur *cur, int level); @@ -220,6 +226,12 @@ struct xfs_btree_ops { #endif }; +/* + * Reasons for the update_lastrec method to be called. + */ +#define LASTREC_UPDATE 0 + + /* * Btree cursor structure. * This collects all information needed by the btree code in one place. @@ -264,6 +276,7 @@ typedef struct xfs_btree_cur /* cursor flags */ #define XFS_BTREE_LONG_PTRS (1<<0) /* pointers are 64bits long */ #define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */ +#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */ #define XFS_BTREE_NOERROR 0 @@ -519,6 +532,7 @@ int xfs_btree_increment(struct xfs_btree_cur *, int, int *); int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); +int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); /* * Helpers. diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index bbf537f64c4..138651afd44 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -171,6 +171,26 @@ xfs_inobt_lookup_le( return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat); } +/* + * Update the record referred to by cur to the value given + * by [ino, fcnt, free]. + * This either works (return 0) or gets an EFSCORRUPTED error. + */ +STATIC int /* error */ +xfs_inobt_update( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t ino, /* starting inode of chunk */ + __int32_t fcnt, /* free inode count */ + xfs_inofree_t free) /* free inode mask */ +{ + union xfs_btree_rec rec; + + rec.inobt.ir_startino = cpu_to_be32(ino); + rec.inobt.ir_freecount = cpu_to_be32(fcnt); + rec.inobt.ir_free = cpu_to_be64(free); + return xfs_btree_update(cur, &rec); +} + /* * Allocate new inodes in the allocation group specified by agbp. * Return 0 for success, else error code. diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index cd8bb519cb5..d080a6833a8 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -1551,58 +1551,6 @@ xfs_inobt_insert( return 0; } -/* - * Update the record referred to by cur, to the value given - * by [ino, fcnt, free]. - * This either works (return 0) or gets an EFSCORRUPTED error. 
- */ -int /* error */ -xfs_inobt_update( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agino_t ino, /* starting inode of chunk */ - __int32_t fcnt, /* free inode count */ - xfs_inofree_t free) /* free inode mask */ -{ - xfs_inobt_block_t *block; /* btree block to update */ - xfs_buf_t *bp; /* buffer containing btree block */ - int error; /* error return value */ - int ptr; /* current record number (updating) */ - xfs_inobt_rec_t *rp; /* pointer to updated record */ - - /* - * Pick up the current block. - */ - bp = cur->bc_bufs[0]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, 0, bp))) - return error; -#endif - /* - * Get the address of the rec to be updated. - */ - ptr = cur->bc_ptrs[0]; - rp = XFS_INOBT_REC_ADDR(block, ptr, cur); - /* - * Fill in the new contents and log them. - */ - rp->ir_startino = cpu_to_be32(ino); - rp->ir_freecount = cpu_to_be32(fcnt); - rp->ir_free = cpu_to_be64(free); - xfs_inobt_log_recs(cur, bp, ptr, ptr); - /* - * Updating first record in leaf. Pass new key value up to our parent. - */ - if (ptr == 1) { - xfs_inobt_key_t key; /* key containing [ino] */ - - key.ir_startino = cpu_to_be32(ino); - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, 1))) - return error; - } - return 0; -} - STATIC struct xfs_btree_cur * xfs_inobt_dup_cursor( struct xfs_btree_cur *cur) diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 674b459521f..7f77549e82a 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -135,14 +135,6 @@ extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, */ extern int xfs_inobt_insert(struct xfs_btree_cur *cur, int *stat); -/* - * Update the record referred to by cur, to the value given - * by [ino, fcnt, free]. - * This either works (return 0) or gets an EFSCORRUPTED error. - */ -extern int xfs_inobt_update(struct xfs_btree_cur *cur, xfs_agino_t ino, - __int32_t fcnt, xfs_inofree_t free); - extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); -- cgit v1.2.3-70-g09d2 From 9eaead51bed957af0070a277d945744a76df0c8b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:56:43 +1100 Subject: [XFS] implement generic xfs_btree_rshift Make the btree right shift code generic. Based on a patch from David Chinner with lots of changes to follow the original btree implementations more closely. While this loses some of the generic helper routines for inserting/moving/removing records it also solves some of the one off bugs in the original code and makes it easier to verify. 
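To illustrate how mechanical the conversion is for callers, each per-btree
right-shift call simply becomes the shared helper; for example, from the
xfs_alloc_delrec() hunk in xfs_alloc_btree.c below:

	if ((error = xfs_btree_rshift(tcur, level, &i)))
		goto error0;

The generic xfs_btree_rshift() added in xfs_btree.c works purely through the
cursor's xfs_btree_ops callbacks (get_maxrecs, init_key_from_rec) and the new
copy/shift/log helpers, which is what lets the per-btree variants be deleted.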
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32196a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 136 +------------------- fs/xfs/xfs_bmap_btree.c | 142 +-------------------- fs/xfs/xfs_btree.c | 319 +++++++++++++++++++++++++++++++++++++++++++++- fs/xfs/xfs_btree.h | 7 + fs/xfs/xfs_ialloc_btree.c | 135 +------------------- 5 files changed, 331 insertions(+), 408 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index c5c32999b81..31e42891fc9 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -49,7 +49,6 @@ STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); -STATIC int xfs_alloc_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_alloc_key_t *, xfs_btree_cur_t **, int *); @@ -391,7 +390,7 @@ xfs_alloc_delrec( */ if (be16_to_cpu(left->bb_numrecs) - 1 >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_alloc_rshift(tcur, level, &i))) + if ((error = xfs_btree_rshift(tcur, level, &i))) goto error0; if (i) { ASSERT(be16_to_cpu(block->bb_numrecs) >= @@ -683,7 +682,7 @@ xfs_alloc_insrec( /* * First, try shifting an entry to the right neighbor. */ - if ((error = xfs_alloc_rshift(cur, level, &i))) + if ((error = xfs_btree_rshift(cur, level, &i))) return error; if (i) { /* nothing */ @@ -1232,137 +1231,6 @@ xfs_alloc_newroot( return 0; } -/* - * Move 1 record right from cur/level if possible. - * Update cur to reflect the new path. - */ -STATIC int /* error */ -xfs_alloc_rshift( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to shift record on */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* loop index */ - xfs_alloc_key_t key; /* key value for leaf level upward */ - xfs_buf_t *lbp; /* buffer for left (current) block */ - xfs_alloc_block_t *left; /* left (current) btree block */ - xfs_buf_t *rbp; /* buffer for right neighbor block */ - xfs_alloc_block_t *right; /* right neighbor btree block */ - xfs_alloc_key_t *rkp; /* key pointer for right block */ - xfs_btree_cur_t *tcur; /* temporary cursor */ - - /* - * Set up variables for this block as "left". - */ - lbp = cur->bc_bufs[level]; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; -#endif - /* - * If we've got no right sibling then we can't shift an entry right. - */ - if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * If the cursor entry is the one that would be moved, don't - * do it... it's too complicated. - */ - if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { - *stat = 0; - return 0; - } - /* - * Set up the right neighbor as "right". - */ - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), - 0, &rbp, XFS_ALLOC_BTREE_REF))) - return error; - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; - /* - * If it's full, it can't take another entry. 
- */ - if (be16_to_cpu(right->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { - *stat = 0; - return 0; - } - /* - * Make a hole at the start of the right neighbor block, then - * copy the last left block entry to the hole. - */ - if (level > 0) { - xfs_alloc_key_t *lkp; /* key pointer for left block */ - xfs_alloc_ptr_t *lpp; /* address pointer for left block */ - xfs_alloc_ptr_t *rpp; /* address pointer for right block */ - - lkp = XFS_ALLOC_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - lpp = XFS_ALLOC_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); - rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) - return error; - } -#endif - memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) - return error; -#endif - *rkp = *lkp; - *rpp = *lpp; - xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); - } else { - xfs_alloc_rec_t *lrp; /* record pointer for left block */ - xfs_alloc_rec_t *rrp; /* record pointer for right block */ - - lrp = XFS_ALLOC_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - *rrp = *lrp; - xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - key.ar_startblock = rrp->ar_startblock; - key.ar_blockcount = rrp->ar_blockcount; - rkp = &key; - xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); - } - /* - * Decrement and log left's numrecs, bump and log right's numrecs. - */ - be16_add_cpu(&left->bb_numrecs, -1); - xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); - be16_add_cpu(&right->bb_numrecs, 1); - xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); - /* - * Using a temporary cursor, update the parent key values of the - * block on the right. - */ - if ((error = xfs_btree_dup_cursor(cur, &tcur))) - return error; - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_increment(tcur, level, &i)) || - (error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) - goto error0; - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - *stat = 1; - return 0; -error0: - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; -} - /* * Split cur/level block in half. * Return new block number and its first record (to be inserted into parent). 
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 99200a9898f..9d18fa8aa1d 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -53,7 +53,6 @@ STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); -STATIC int xfs_bmbt_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, __uint64_t *, xfs_btree_cur_t **, int *); @@ -327,7 +326,7 @@ xfs_bmbt_delrec( bno = be64_to_cpu(left->bb_rightsib); if (be16_to_cpu(left->bb_numrecs) - 1 >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if ((error = xfs_bmbt_rshift(tcur, level, &i))) { + if ((error = xfs_btree_rshift(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -538,7 +537,7 @@ xfs_bmbt_insrec( logflags); block = xfs_bmbt_get_block(cur, level, &bp); } else { - if ((error = xfs_bmbt_rshift(cur, level, &i))) { + if ((error = xfs_btree_rshift(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } @@ -944,143 +943,6 @@ xfs_bmbt_lshift( return 0; } -/* - * Move 1 record right from cur/level if possible. - * Update cur to reflect the new path. - */ -STATIC int /* error */ -xfs_bmbt_rshift( - xfs_btree_cur_t *cur, - int level, - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* loop counter */ - xfs_bmbt_key_t key; /* bmap btree key */ - xfs_buf_t *lbp; /* left buffer pointer */ - xfs_bmbt_block_t *left; /* left btree block */ - xfs_bmbt_key_t *lkp; /* left btree key */ - xfs_bmbt_ptr_t *lpp; /* left address pointer */ - xfs_bmbt_rec_t *lrp; /* left record pointer */ - xfs_mount_t *mp; /* file system mount point */ - xfs_buf_t *rbp; /* right buffer pointer */ - xfs_bmbt_block_t *right; /* right btree block */ - xfs_bmbt_key_t *rkp; /* right btree key */ - xfs_bmbt_ptr_t *rpp; /* right address pointer */ - xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ - struct xfs_btree_cur *tcur; /* temporary btree cursor */ - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, level); - if (level == cur->bc_nlevels - 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - lbp = cur->bc_bufs[level]; - left = XFS_BUF_TO_BMBT_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - if (be64_to_cpu(left->bb_rightsib) == NULLDFSBNO) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - mp = cur->bc_mp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(left->bb_rightsib), 0, - &rbp, XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - right = XFS_BUF_TO_BMBT_BLOCK(rbp); - if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (be16_to_cpu(right->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - if (level > 0) { - lkp = XFS_BMAP_KEY_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); - lpp = XFS_BMAP_PTR_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); - rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); -#ifdef DEBUG - for (i = 
be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { - if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); -#ifdef DEBUG - if ((error = xfs_btree_check_lptr_disk(cur, *lpp, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - *rkp = *lkp; - *rpp = *lpp; - xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - } else { - lrp = XFS_BMAP_REC_IADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rrp = XFS_BMAP_REC_IADDR(right, 1, cur); - memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - *rrp = *lrp; - xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); - rkp = &key; - } - be16_add_cpu(&left->bb_numrecs, -1); - xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); - be16_add_cpu(&right->bb_numrecs, 1); -#ifdef DEBUG - if (level > 0) - xfs_btree_check_key(XFS_BTNUM_BMAP, rkp, rkp + 1); - else - xfs_btree_check_rec(XFS_BTNUM_BMAP, rrp, rrp + 1); -#endif - xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); - if ((error = xfs_btree_dup_cursor(cur, &tcur))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_increment(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(tcur, ERROR); - goto error1; - } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) { - XFS_BMBT_TRACE_CURSOR(tcur, ERROR); - goto error1; - } - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -error0: - XFS_BMBT_TRACE_CURSOR(cur, ERROR); -error1: - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; -} - /* * Determine the extent state. */ diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 205272f282d..e1a21378184 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -1117,6 +1117,77 @@ xfs_btree_copy_recs( memcpy(dst_rec, src_rec, numrecs * cur->bc_ops->rec_len); } +/* + * Copy block pointers from one btree block to another. + */ +STATIC void +xfs_btree_copy_ptrs( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *dst_ptr, + union xfs_btree_ptr *src_ptr, + int numptrs) +{ + ASSERT(numptrs >= 0); + memcpy(dst_ptr, src_ptr, numptrs * xfs_btree_ptr_len(cur)); +} + +/* + * Shift keys one index left/right inside a single btree block. + */ +STATIC void +xfs_btree_shift_keys( + struct xfs_btree_cur *cur, + union xfs_btree_key *key, + int dir, + int numkeys) +{ + char *dst_key; + + ASSERT(numkeys >= 0); + ASSERT(dir == 1 || dir == -1); + + dst_key = (char *)key + (dir * cur->bc_ops->key_len); + memmove(dst_key, key, numkeys * cur->bc_ops->key_len); +} + +/* + * Shift records one index left/right inside a single btree block. + */ +STATIC void +xfs_btree_shift_recs( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec, + int dir, + int numrecs) +{ + char *dst_rec; + + ASSERT(numrecs >= 0); + ASSERT(dir == 1 || dir == -1); + + dst_rec = (char *)rec + (dir * cur->bc_ops->rec_len); + memmove(dst_rec, rec, numrecs * cur->bc_ops->rec_len); +} + +/* + * Shift block pointers one index left/right inside a single btree block. 
+ */ +STATIC void +xfs_btree_shift_ptrs( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + int dir, + int numptrs) +{ + char *dst_ptr; + + ASSERT(numptrs >= 0); + ASSERT(dir == 1 || dir == -1); + + dst_ptr = (char *)ptr + (dir * xfs_btree_ptr_len(cur)); + memmove(dst_ptr, ptr, numptrs * xfs_btree_ptr_len(cur)); +} + /* * Log key values from the btree block. */ @@ -1162,6 +1233,79 @@ xfs_btree_log_recs( XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); } +/* + * Log block pointer fields from a btree block (nonleaf). + */ +STATIC void +xfs_btree_log_ptrs( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_buf *bp, /* buffer containing btree block */ + int first, /* index of first pointer to log */ + int last) /* index of last pointer to log */ +{ + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGBII(cur, bp, first, last); + + if (bp) { + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + int level = xfs_btree_get_level(block); + + xfs_trans_log_buf(cur->bc_tp, bp, + xfs_btree_ptr_offset(cur, first, level), + xfs_btree_ptr_offset(cur, last + 1, level) - 1); + } else { + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, + xfs_ilog_fbroot(cur->bc_private.b.whichfork)); + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); +} + +/* + * Log fields from a btree block header. + */ +STATIC void +xfs_btree_log_block( + struct xfs_btree_cur *cur, /* btree cursor */ + struct xfs_buf *bp, /* buffer containing btree block */ + int fields) /* mask of fields: XFS_BB_... */ +{ + int first; /* first byte offset logged */ + int last; /* last byte offset logged */ + static const short soffsets[] = { /* table of offsets (short) */ + offsetof(struct xfs_btree_sblock, bb_magic), + offsetof(struct xfs_btree_sblock, bb_level), + offsetof(struct xfs_btree_sblock, bb_numrecs), + offsetof(struct xfs_btree_sblock, bb_leftsib), + offsetof(struct xfs_btree_sblock, bb_rightsib), + sizeof(struct xfs_btree_sblock) + }; + static const short loffsets[] = { /* table of offsets (long) */ + offsetof(struct xfs_btree_lblock, bb_magic), + offsetof(struct xfs_btree_lblock, bb_level), + offsetof(struct xfs_btree_lblock, bb_numrecs), + offsetof(struct xfs_btree_lblock, bb_leftsib), + offsetof(struct xfs_btree_lblock, bb_rightsib), + sizeof(struct xfs_btree_lblock) + }; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGBI(cur, bp, fields); + + if (bp) { + xfs_btree_offsets(fields, + (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? + loffsets : soffsets, + XFS_BB_NUM_BITS, &first, &last); + xfs_trans_log_buf(cur->bc_tp, bp, first, last); + } else { + xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, + xfs_ilog_fbroot(cur->bc_private.b.whichfork)); + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); +} + /* * Increment cursor by one record at the level. * For nonzero levels the leaf-ward information is untouched. @@ -1368,7 +1512,6 @@ error0: return error; } - STATIC int xfs_btree_lookup_get_block( struct xfs_btree_cur *cur, /* btree cursor */ @@ -1697,3 +1840,177 @@ error0: return error; } +/* + * Move 1 record right from cur/level if possible. + * Update cur to reflect the new path. 
+ */ +int /* error */ +xfs_btree_rshift( + struct xfs_btree_cur *cur, + int level, + int *stat) /* success/failure */ +{ + union xfs_btree_key key; /* btree key */ + struct xfs_buf *lbp; /* left buffer pointer */ + struct xfs_btree_block *left; /* left btree block */ + struct xfs_buf *rbp; /* right buffer pointer */ + struct xfs_btree_block *right; /* right btree block */ + struct xfs_btree_cur *tcur; /* temporary btree cursor */ + union xfs_btree_ptr rptr; /* right block pointer */ + union xfs_btree_key *rkp; /* right btree key */ + int rrecs; /* right record count */ + int lrecs; /* left record count */ + int error; /* error return value */ + int i; /* loop counter */ + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, level); + + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + (level == cur->bc_nlevels - 1)) + goto out0; + + /* Set up variables for this block as "left". */ + left = xfs_btree_get_block(cur, level, &lbp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, left, level, lbp); + if (error) + goto error0; +#endif + + /* If we've got no right sibling then we can't shift an entry right. */ + xfs_btree_get_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); + if (xfs_btree_ptr_is_null(cur, &rptr)) + goto out0; + + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + lrecs = xfs_btree_get_numrecs(left); + if (cur->bc_ptrs[level] >= lrecs) + goto out0; + + /* Set up the right neighbor as "right". */ + error = xfs_btree_read_buf_block(cur, &rptr, level, 0, &right, &rbp); + if (error) + goto error0; + + /* If it's full, it can't take another entry. */ + rrecs = xfs_btree_get_numrecs(right); + if (rrecs == cur->bc_ops->get_maxrecs(cur, level)) + goto out0; + + XFS_BTREE_STATS_INC(cur, rshift); + XFS_BTREE_STATS_ADD(cur, moves, rrecs); + + /* + * Make a hole at the start of the right neighbor block, then + * copy the last left block entry to the hole. + */ + if (level > 0) { + /* It's a nonleaf. make a hole in the keys and ptrs */ + union xfs_btree_key *lkp; + union xfs_btree_ptr *lpp; + union xfs_btree_ptr *rpp; + + lkp = xfs_btree_key_addr(cur, lrecs, left); + lpp = xfs_btree_ptr_addr(cur, lrecs, left); + rkp = xfs_btree_key_addr(cur, 1, right); + rpp = xfs_btree_ptr_addr(cur, 1, right); + +#ifdef DEBUG + for (i = rrecs - 1; i >= 0; i--) { + error = xfs_btree_check_ptr(cur, rpp, i, level); + if (error) + goto error0; + } +#endif + + xfs_btree_shift_keys(cur, rkp, 1, rrecs); + xfs_btree_shift_ptrs(cur, rpp, 1, rrecs); + +#ifdef DEBUG + error = xfs_btree_check_ptr(cur, lpp, 0, level); + if (error) + goto error0; +#endif + + /* Now put the new data in, and log it. */ + xfs_btree_copy_keys(cur, rkp, lkp, 1); + xfs_btree_copy_ptrs(cur, rpp, lpp, 1); + + xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); + xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); + + xfs_btree_check_key(cur->bc_btnum, rkp, + xfs_btree_key_addr(cur, 2, right)); + } else { + /* It's a leaf. make a hole in the records */ + union xfs_btree_rec *lrp; + union xfs_btree_rec *rrp; + + lrp = xfs_btree_rec_addr(cur, lrecs, left); + rrp = xfs_btree_rec_addr(cur, 1, right); + + xfs_btree_shift_recs(cur, rrp, 1, rrecs); + + /* Now put the new data in, and log it. */ + xfs_btree_copy_recs(cur, rrp, lrp, 1); + xfs_btree_log_recs(cur, rbp, 1, rrecs + 1); + + cur->bc_ops->init_key_from_rec(&key, rrp); + rkp = &key; + + xfs_btree_check_rec(cur->bc_btnum, rrp, + xfs_btree_rec_addr(cur, 2, right)); + } + + /* + * Decrement and log left's numrecs, bump and log right's numrecs. 
+ */ + xfs_btree_set_numrecs(left, --lrecs); + xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); + + xfs_btree_set_numrecs(right, ++rrecs); + xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); + + /* + * Using a temporary cursor, update the parent key values of the + * block on the right. + */ + error = xfs_btree_dup_cursor(cur, &tcur); + if (error) + goto error0; + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + error = xfs_btree_increment(tcur, level, &i); + if (error) + goto error1; + + error = xfs_btree_updkey(tcur, rkp, level + 1); + if (error) + goto error1; + + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + +error1: + XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR); + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index c3bfa5556c1..04311dbeff1 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -533,6 +533,7 @@ int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); +int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); /* * Helpers. @@ -542,6 +543,12 @@ static inline int xfs_btree_get_numrecs(struct xfs_btree_block *block) return be16_to_cpu(block->bb_numrecs); } +static inline void xfs_btree_set_numrecs(struct xfs_btree_block *block, + __uint16_t numrecs) +{ + block->bb_numrecs = cpu_to_be16(numrecs); +} + static inline int xfs_btree_get_level(struct xfs_btree_block *block) { return be16_to_cpu(block->bb_level); diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index d080a6833a8..457f88a76e1 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -45,7 +45,6 @@ STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC int xfs_inobt_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); -STATIC int xfs_inobt_rshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_inobt_key_t *, xfs_btree_cur_t **, int *); @@ -337,7 +336,7 @@ xfs_inobt_delrec( */ if (be16_to_cpu(left->bb_numrecs) - 1 >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_inobt_rshift(tcur, level, &i))) + if ((error = xfs_btree_rshift(tcur, level, &i))) goto error0; if (i) { ASSERT(be16_to_cpu(block->bb_numrecs) >= @@ -608,7 +607,7 @@ xfs_inobt_insrec( /* * First, try shifting an entry to the right neighbor. */ - if ((error = xfs_inobt_rshift(cur, level, &i))) + if ((error = xfs_btree_rshift(cur, level, &i))) return error; if (i) { /* nothing */ @@ -1116,136 +1115,6 @@ xfs_inobt_newroot( return 0; } -/* - * Move 1 record right from cur/level if possible. - * Update cur to reflect the new path. 
- */ -STATIC int /* error */ -xfs_inobt_rshift( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to shift record on */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* loop index */ - xfs_inobt_key_t key; /* key value for leaf level upward */ - xfs_buf_t *lbp; /* buffer for left (current) block */ - xfs_inobt_block_t *left; /* left (current) btree block */ - xfs_inobt_key_t *lkp; /* key pointer for left block */ - xfs_inobt_ptr_t *lpp; /* address pointer for left block */ - xfs_inobt_rec_t *lrp; /* record pointer for left block */ - xfs_buf_t *rbp; /* buffer for right neighbor block */ - xfs_inobt_block_t *right; /* right neighbor btree block */ - xfs_inobt_key_t *rkp; /* key pointer for right block */ - xfs_inobt_ptr_t *rpp; /* address pointer for right block */ - xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ - xfs_btree_cur_t *tcur; /* temporary cursor */ - - /* - * Set up variables for this block as "left". - */ - lbp = cur->bc_bufs[level]; - left = XFS_BUF_TO_INOBT_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; -#endif - /* - * If we've got no right sibling then we can't shift an entry right. - */ - if (be32_to_cpu(left->bb_rightsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * If the cursor entry is the one that would be moved, don't - * do it... it's too complicated. - */ - if (cur->bc_ptrs[level] >= be16_to_cpu(left->bb_numrecs)) { - *stat = 0; - return 0; - } - /* - * Set up the right neighbor as "right". - */ - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), - 0, &rbp, XFS_INO_BTREE_REF))) - return error; - right = XFS_BUF_TO_INOBT_BLOCK(rbp); - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; - /* - * If it's full, it can't take another entry. - */ - if (be16_to_cpu(right->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { - *stat = 0; - return 0; - } - /* - * Make a hole at the start of the right neighbor block, then - * copy the last left block entry to the hole. - */ - if (level > 0) { - lkp = XFS_INOBT_KEY_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - lpp = XFS_INOBT_PTR_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); - rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = be16_to_cpu(right->bb_numrecs) - 1; i >= 0; i--) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) - return error; - } -#endif - memmove(rkp + 1, rkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memmove(rpp + 1, rpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*lpp), level))) - return error; -#endif - *rkp = *lkp; - *rpp = *lpp; - xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - } else { - lrp = XFS_INOBT_REC_ADDR(left, be16_to_cpu(left->bb_numrecs), cur); - rrp = XFS_INOBT_REC_ADDR(right, 1, cur); - memmove(rrp + 1, rrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - *rrp = *lrp; - xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs) + 1); - key.ir_startino = rrp->ir_startino; - rkp = &key; - } - /* - * Decrement and log left's numrecs, bump and log right's numrecs. 
- */ - be16_add_cpu(&left->bb_numrecs, -1); - xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); - be16_add_cpu(&right->bb_numrecs, 1); -#ifdef DEBUG - if (level > 0) - xfs_btree_check_key(cur->bc_btnum, rkp, rkp + 1); - else - xfs_btree_check_rec(cur->bc_btnum, rrp, rrp + 1); -#endif - xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); - /* - * Using a temporary cursor, update the parent key values of the - * block on the right. - */ - if ((error = xfs_btree_dup_cursor(cur, &tcur))) - return error; - xfs_btree_lastrec(tcur, level); - if ((error = xfs_btree_increment(tcur, level, &i)) || - (error = xfs_btree_updkey(tcur, (union xfs_btree_key *)rkp, level + 1))) { - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; - } - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - *stat = 1; - return 0; -} - /* * Split cur/level block in half. * Return new block number and its first record (to be inserted into parent). -- cgit v1.2.3-70-g09d2 From 687b890a184fef263ebb773926e1f4aa69240d01 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:56:53 +1100 Subject: [XFS] implement generic xfs_btree_lshift Make the btree left shift code generic. Based on a patch from David Chinner with lots of changes to follow the original btree implementations more closely. While this loses some of the generic helper routines for inserting/moving/removing records it also solves some of the one off bugs in the original code and makes it easier to verify. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32197a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 146 +----------------------------------- fs/xfs/xfs_bmap_btree.c | 138 +--------------------------------- fs/xfs/xfs_btree.c | 185 ++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 1 + fs/xfs/xfs_ialloc_btree.c | 147 +----------------------------------- 5 files changed, 192 insertions(+), 425 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 31e42891fc9..974a412ebc8 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -47,7 +47,6 @@ STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int); STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_alloc_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_alloc_key_t *, xfs_btree_cur_t **, int *); @@ -326,7 +325,7 @@ xfs_alloc_delrec( */ if (be16_to_cpu(right->bb_numrecs) - 1 >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_alloc_lshift(tcur, level, &i))) + if ((error = xfs_btree_lshift(tcur, level, &i))) goto error0; if (i) { ASSERT(be16_to_cpu(block->bb_numrecs) >= @@ -691,7 +690,7 @@ xfs_alloc_insrec( * Next, try shifting an entry to the left neighbor. */ else { - if ((error = xfs_alloc_lshift(cur, level, &i))) + if ((error = xfs_btree_lshift(cur, level, &i))) return error; if (i) optr = ptr = cur->bc_ptrs[level]; @@ -935,147 +934,6 @@ xfs_alloc_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Move 1 record left from cur/level if possible. - * Update cur to reflect the new path. 
- */ -STATIC int /* error */ -xfs_alloc_lshift( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to shift record on */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ -#ifdef DEBUG - int i; /* loop index */ -#endif - xfs_alloc_key_t key; /* key value for leaf level upward */ - xfs_buf_t *lbp; /* buffer for left neighbor block */ - xfs_alloc_block_t *left; /* left neighbor btree block */ - int nrec; /* new number of left block entries */ - xfs_buf_t *rbp; /* buffer for right (current) block */ - xfs_alloc_block_t *right; /* right (current) btree block */ - xfs_alloc_key_t *rkp=NULL; /* key pointer for right block */ - xfs_alloc_ptr_t *rpp=NULL; /* address pointer for right block */ - xfs_alloc_rec_t *rrp=NULL; /* record pointer for right block */ - - /* - * Set up variables for this block as "right". - */ - rbp = cur->bc_bufs[level]; - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; -#endif - /* - * If we've got no left sibling then we can't shift an entry left. - */ - if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * If the cursor entry is the one that would be moved, don't - * do it... it's too complicated. - */ - if (cur->bc_ptrs[level] <= 1) { - *stat = 0; - return 0; - } - /* - * Set up the left neighbor as "left". - */ - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), - 0, &lbp, XFS_ALLOC_BTREE_REF))) - return error; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; - /* - * If it's full, it can't take another entry. - */ - if (be16_to_cpu(left->bb_numrecs) == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { - *stat = 0; - return 0; - } - nrec = be16_to_cpu(left->bb_numrecs) + 1; - /* - * If non-leaf, copy a key and a ptr to the left block. - */ - if (level > 0) { - xfs_alloc_key_t *lkp; /* key pointer for left block */ - xfs_alloc_ptr_t *lpp; /* address pointer for left block */ - - lkp = XFS_ALLOC_KEY_ADDR(left, nrec, cur); - rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); - *lkp = *rkp; - xfs_alloc_log_keys(cur, lbp, nrec, nrec); - lpp = XFS_ALLOC_PTR_ADDR(left, nrec, cur); - rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) - return error; -#endif - *lpp = *rpp; - xfs_alloc_log_ptrs(cur, lbp, nrec, nrec); - xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); - } - /* - * If leaf, copy a record to the left block. - */ - else { - xfs_alloc_rec_t *lrp; /* record pointer for left block */ - - lrp = XFS_ALLOC_REC_ADDR(left, nrec, cur); - rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - *lrp = *rrp; - xfs_alloc_log_recs(cur, lbp, nrec, nrec); - xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); - } - /* - * Bump and log left's numrecs, decrement and log right's numrecs. - */ - be16_add_cpu(&left->bb_numrecs, 1); - xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); - be16_add_cpu(&right->bb_numrecs, -1); - xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); - /* - * Slide the contents of right down one entry. 
- */ - if (level > 0) { -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), - level))) - return error; - } -#endif - memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); - xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - } else { - memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - key.ar_startblock = rrp->ar_startblock; - key.ar_blockcount = rrp->ar_blockcount; - rkp = &key; - } - /* - * Update the parent key values of right. - */ - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) - return error; - /* - * Slide the cursor value left one. - */ - cur->bc_ptrs[level]--; - *stat = 1; - return 0; -} - /* * Allocate a new root block, fill it in. */ diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 9d18fa8aa1d..809bd6ee177 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -52,7 +52,6 @@ STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_bmbt_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, __uint64_t *, xfs_btree_cur_t **, int *); @@ -270,7 +269,7 @@ xfs_bmbt_delrec( bno = be64_to_cpu(right->bb_leftsib); if (be16_to_cpu(right->bb_numrecs) - 1 >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if ((error = xfs_bmbt_lshift(tcur, level, &i))) { + if ((error = xfs_btree_lshift(tcur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -544,7 +543,7 @@ xfs_bmbt_insrec( if (i) { /* nothing */ } else { - if ((error = xfs_bmbt_lshift(cur, level, &i))) { + if ((error = xfs_btree_lshift(cur, level, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } @@ -810,139 +809,6 @@ xfs_bmbt_log_ptrs( XFS_BMBT_TRACE_CURSOR(cur, EXIT); } -/* - * Move 1 record left from cur/level if possible. - * Update cur to reflect the new path. 
- */ -STATIC int /* error */ -xfs_bmbt_lshift( - xfs_btree_cur_t *cur, - int level, - int *stat) /* success/failure */ -{ - int error; /* error return value */ -#ifdef DEBUG - int i; /* loop counter */ -#endif - xfs_bmbt_key_t key; /* bmap btree key */ - xfs_buf_t *lbp; /* left buffer pointer */ - xfs_bmbt_block_t *left; /* left btree block */ - xfs_bmbt_key_t *lkp=NULL; /* left btree key */ - xfs_bmbt_ptr_t *lpp; /* left address pointer */ - int lrecs; /* left record count */ - xfs_bmbt_rec_t *lrp=NULL; /* left record pointer */ - xfs_mount_t *mp; /* file system mount point */ - xfs_buf_t *rbp; /* right buffer pointer */ - xfs_bmbt_block_t *right; /* right btree block */ - xfs_bmbt_key_t *rkp=NULL; /* right btree key */ - xfs_bmbt_ptr_t *rpp=NULL; /* right address pointer */ - xfs_bmbt_rec_t *rrp=NULL; /* right record pointer */ - int rrecs; /* right record count */ - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, level); - if (level == cur->bc_nlevels - 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - rbp = cur->bc_bufs[level]; - right = XFS_BUF_TO_BMBT_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - if (be64_to_cpu(right->bb_leftsib) == NULLDFSBNO) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - if (cur->bc_ptrs[level] <= 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - mp = cur->bc_mp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, be64_to_cpu(right->bb_leftsib), 0, - &lbp, XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - left = XFS_BUF_TO_BMBT_BLOCK(lbp); - if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (be16_to_cpu(left->bb_numrecs) == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - lrecs = be16_to_cpu(left->bb_numrecs) + 1; - if (level > 0) { - lkp = XFS_BMAP_KEY_IADDR(left, lrecs, cur); - rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); - *lkp = *rkp; - xfs_bmbt_log_keys(cur, lbp, lrecs, lrecs); - lpp = XFS_BMAP_PTR_IADDR(left, lrecs, cur); - rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); -#ifdef DEBUG - if ((error = xfs_btree_check_lptr_disk(cur, *rpp, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - *lpp = *rpp; - xfs_bmbt_log_ptrs(cur, lbp, lrecs, lrecs); - } else { - lrp = XFS_BMAP_REC_IADDR(left, lrecs, cur); - rrp = XFS_BMAP_REC_IADDR(right, 1, cur); - *lrp = *rrp; - xfs_bmbt_log_recs(cur, lbp, lrecs, lrecs); - } - left->bb_numrecs = cpu_to_be16(lrecs); - xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS); -#ifdef DEBUG - if (level > 0) - xfs_btree_check_key(XFS_BTNUM_BMAP, lkp - 1, lkp); - else - xfs_btree_check_rec(XFS_BTNUM_BMAP, lrp - 1, lrp); -#endif - rrecs = be16_to_cpu(right->bb_numrecs) - 1; - right->bb_numrecs = cpu_to_be16(rrecs); - xfs_bmbt_log_block(cur, rbp, XFS_BB_NUMRECS); - if (level > 0) { -#ifdef DEBUG - for (i = 0; i < rrecs; i++) { - if ((error = xfs_btree_check_lptr_disk(cur, rpp[i + 1], - level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memmove(rkp, rkp + 1, rrecs * sizeof(*rkp)); - memmove(rpp, rpp + 1, rrecs * sizeof(*rpp)); - xfs_bmbt_log_keys(cur, rbp, 1, rrecs); - xfs_bmbt_log_ptrs(cur, rbp, 1, rrecs); - } else { - memmove(rrp, rrp + 1, rrecs * sizeof(*rrp)); - xfs_bmbt_log_recs(cur, rbp, 1, rrecs); - key.br_startoff = 
cpu_to_be64(xfs_bmbt_disk_get_startoff(rrp)); - rkp = &key; - } - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - cur->bc_ptrs[level]--; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -} - /* * Determine the extent state. */ diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index e1a21378184..2b0d1422c4c 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -1840,6 +1840,191 @@ error0: return error; } +/* + * Move 1 record left from cur/level if possible. + * Update cur to reflect the new path. + */ +int /* error */ +xfs_btree_lshift( + struct xfs_btree_cur *cur, + int level, + int *stat) /* success/failure */ +{ + union xfs_btree_key key; /* btree key */ + struct xfs_buf *lbp; /* left buffer pointer */ + struct xfs_btree_block *left; /* left btree block */ + int lrecs; /* left record count */ + struct xfs_buf *rbp; /* right buffer pointer */ + struct xfs_btree_block *right; /* right btree block */ + int rrecs; /* right record count */ + union xfs_btree_ptr lptr; /* left btree pointer */ + union xfs_btree_key *rkp = NULL; /* right btree key */ + union xfs_btree_ptr *rpp = NULL; /* right address pointer */ + union xfs_btree_rec *rrp = NULL; /* right record pointer */ + int error; /* error return value */ + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, level); + + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + level == cur->bc_nlevels - 1) + goto out0; + + /* Set up variables for this block as "right". */ + right = xfs_btree_get_block(cur, level, &rbp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, right, level, rbp); + if (error) + goto error0; +#endif + + /* If we've got no left sibling then we can't shift an entry left. */ + xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); + if (xfs_btree_ptr_is_null(cur, &lptr)) + goto out0; + + /* + * If the cursor entry is the one that would be moved, don't + * do it... it's too complicated. + */ + if (cur->bc_ptrs[level] <= 1) + goto out0; + + /* Set up the left neighbor as "left". */ + error = xfs_btree_read_buf_block(cur, &lptr, level, 0, &left, &lbp); + if (error) + goto error0; + + /* If it's full, it can't take another entry. */ + lrecs = xfs_btree_get_numrecs(left); + if (lrecs == cur->bc_ops->get_maxrecs(cur, level)) + goto out0; + + rrecs = xfs_btree_get_numrecs(right); + + /* + * We add one entry to the left side and remove one for the right side. + * Accout for it here, the changes will be updated on disk and logged + * later. + */ + lrecs++; + rrecs--; + + XFS_BTREE_STATS_INC(cur, lshift); + XFS_BTREE_STATS_ADD(cur, moves, 1); + + /* + * If non-leaf, copy a key and a ptr to the left block. + * Log the changes to the left block. + */ + if (level > 0) { + /* It's a non-leaf. Move keys and pointers. */ + union xfs_btree_key *lkp; /* left btree key */ + union xfs_btree_ptr *lpp; /* left address pointer */ + + lkp = xfs_btree_key_addr(cur, lrecs, left); + rkp = xfs_btree_key_addr(cur, 1, right); + + lpp = xfs_btree_ptr_addr(cur, lrecs, left); + rpp = xfs_btree_ptr_addr(cur, 1, right); +#ifdef DEBUG + error = xfs_btree_check_ptr(cur, rpp, 0, level); + if (error) + goto error0; +#endif + xfs_btree_copy_keys(cur, lkp, rkp, 1); + xfs_btree_copy_ptrs(cur, lpp, rpp, 1); + + xfs_btree_log_keys(cur, lbp, lrecs, lrecs); + xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); + + xfs_btree_check_key(cur->bc_btnum, + xfs_btree_key_addr(cur, lrecs - 1, left), + lkp); + } else { + /* It's a leaf. Move records. 
*/ + union xfs_btree_rec *lrp; /* left record pointer */ + + lrp = xfs_btree_rec_addr(cur, lrecs, left); + rrp = xfs_btree_rec_addr(cur, 1, right); + + xfs_btree_copy_recs(cur, lrp, rrp, 1); + xfs_btree_log_recs(cur, lbp, lrecs, lrecs); + + xfs_btree_check_rec(cur->bc_btnum, + xfs_btree_rec_addr(cur, lrecs - 1, left), + lrp); + } + + xfs_btree_set_numrecs(left, lrecs); + xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS); + + xfs_btree_set_numrecs(right, rrecs); + xfs_btree_log_block(cur, rbp, XFS_BB_NUMRECS); + + /* + * Slide the contents of right down one entry. + */ + XFS_BTREE_STATS_ADD(cur, moves, rrecs - 1); + if (level > 0) { + /* It's a nonleaf. operate on keys and ptrs */ +#ifdef DEBUG + int i; /* loop index */ + + for (i = 0; i < rrecs; i++) { + error = xfs_btree_check_ptr(cur, rpp, i + 1, level); + if (error) + goto error0; + } +#endif + xfs_btree_shift_keys(cur, + xfs_btree_key_addr(cur, 2, right), + -1, rrecs); + xfs_btree_shift_ptrs(cur, + xfs_btree_ptr_addr(cur, 2, right), + -1, rrecs); + + xfs_btree_log_keys(cur, rbp, 1, rrecs); + xfs_btree_log_ptrs(cur, rbp, 1, rrecs); + } else { + /* It's a leaf. operate on records */ + xfs_btree_shift_recs(cur, + xfs_btree_rec_addr(cur, 2, right), + -1, rrecs); + xfs_btree_log_recs(cur, rbp, 1, rrecs); + + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). + */ + cur->bc_ops->init_key_from_rec(&key, + xfs_btree_rec_addr(cur, 1, right)); + rkp = &key; + } + + /* Update the parent key values of right. */ + error = xfs_btree_updkey(cur, rkp, level + 1); + if (error) + goto error0; + + /* Slide the cursor value left one. */ + cur->bc_ptrs[level]--; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + /* * Move 1 record right from cur/level if possible. * Update cur to reflect the new path. 
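Both generic shift routines lean on the xfs_btree_shift_keys/recs/ptrs helpers added with the rshift patch: dir = 1 opens a one-entry hole at the front of the destination block, dir = -1 closes the hole left behind once an entry has been copied out. A minimal, self-contained sketch of that memmove pattern on a plain integer array (simplified illustration only, not kernel code and not part of the patch):

/*
 * Standalone illustration of the shift pattern used by the generic
 * lshift/rshift code: slide 'num' fixed-size entries at 'p' one slot
 * left (dir == -1) or right (dir == 1) within the same block.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void shift_entries(int *p, int dir, int num)
{
	assert(dir == 1 || dir == -1);
	memmove(p + dir, p, num * sizeof(*p));
}

int main(void)
{
	int recs[6] = { 10, 20, 30, 40, 50, 0 };
	int i;

	/* rshift: open a hole at slot 0, then copy in the entry from the left */
	shift_entries(&recs[0], 1, 5);
	recs[0] = 5;

	/* lshift: slot 0 was copied to the left sibling, close the hole */
	shift_entries(&recs[1], -1, 5);

	for (i = 0; i < 6; i++)
		printf("%d ", recs[i]);	/* 10 20 30 40 50 50 (last slot now stale) */
	printf("\n");
	return 0;
}

In the real helpers the entry size comes from cur->bc_ops->key_len, cur->bc_ops->rec_len or xfs_btree_ptr_len(cur), and the stale trailing slot is harmless because bb_numrecs is adjusted at the same time.
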
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 04311dbeff1..7cde287b5c9 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -533,6 +533,7 @@ int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); +int xfs_btree_lshift(struct xfs_btree_cur *, int, int *); int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); /* diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 457f88a76e1..60f5db5d6df 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -43,7 +43,6 @@ STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int); STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_inobt_lshift(xfs_btree_cur_t *, int, int *); STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, xfs_inobt_key_t *, xfs_btree_cur_t **, int *); @@ -276,7 +275,7 @@ xfs_inobt_delrec( */ if (be16_to_cpu(right->bb_numrecs) - 1 >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_inobt_lshift(tcur, level, &i))) + if ((error = xfs_btree_lshift(tcur, level, &i))) goto error0; if (i) { ASSERT(be16_to_cpu(block->bb_numrecs) >= @@ -616,7 +615,7 @@ xfs_inobt_insrec( * Next, try shifting an entry to the left neighbor. */ else { - if ((error = xfs_inobt_lshift(cur, level, &i))) + if ((error = xfs_btree_lshift(cur, level, &i))) return error; if (i) { optr = ptr = cur->bc_ptrs[level]; @@ -826,148 +825,6 @@ xfs_inobt_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Move 1 record left from cur/level if possible. - * Update cur to reflect the new path. - */ -STATIC int /* error */ -xfs_inobt_lshift( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to shift record on */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ -#ifdef DEBUG - int i; /* loop index */ -#endif - xfs_inobt_key_t key; /* key value for leaf level upward */ - xfs_buf_t *lbp; /* buffer for left neighbor block */ - xfs_inobt_block_t *left; /* left neighbor btree block */ - xfs_inobt_key_t *lkp=NULL; /* key pointer for left block */ - xfs_inobt_ptr_t *lpp; /* address pointer for left block */ - xfs_inobt_rec_t *lrp=NULL; /* record pointer for left block */ - int nrec; /* new number of left block entries */ - xfs_buf_t *rbp; /* buffer for right (current) block */ - xfs_inobt_block_t *right; /* right (current) btree block */ - xfs_inobt_key_t *rkp=NULL; /* key pointer for right block */ - xfs_inobt_ptr_t *rpp=NULL; /* address pointer for right block */ - xfs_inobt_rec_t *rrp=NULL; /* record pointer for right block */ - - /* - * Set up variables for this block as "right". - */ - rbp = cur->bc_bufs[level]; - right = XFS_BUF_TO_INOBT_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; -#endif - /* - * If we've got no left sibling then we can't shift an entry left. - */ - if (be32_to_cpu(right->bb_leftsib) == NULLAGBLOCK) { - *stat = 0; - return 0; - } - /* - * If the cursor entry is the one that would be moved, don't - * do it... it's too complicated. 
- */ - if (cur->bc_ptrs[level] <= 1) { - *stat = 0; - return 0; - } - /* - * Set up the left neighbor as "left". - */ - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(right->bb_leftsib), - 0, &lbp, XFS_INO_BTREE_REF))) - return error; - left = XFS_BUF_TO_INOBT_BLOCK(lbp); - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; - /* - * If it's full, it can't take another entry. - */ - if (be16_to_cpu(left->bb_numrecs) == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { - *stat = 0; - return 0; - } - nrec = be16_to_cpu(left->bb_numrecs) + 1; - /* - * If non-leaf, copy a key and a ptr to the left block. - */ - if (level > 0) { - lkp = XFS_INOBT_KEY_ADDR(left, nrec, cur); - rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); - *lkp = *rkp; - xfs_inobt_log_keys(cur, lbp, nrec, nrec); - lpp = XFS_INOBT_PTR_ADDR(left, nrec, cur); - rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(*rpp), level))) - return error; -#endif - *lpp = *rpp; - xfs_inobt_log_ptrs(cur, lbp, nrec, nrec); - } - /* - * If leaf, copy a record to the left block. - */ - else { - lrp = XFS_INOBT_REC_ADDR(left, nrec, cur); - rrp = XFS_INOBT_REC_ADDR(right, 1, cur); - *lrp = *rrp; - xfs_inobt_log_recs(cur, lbp, nrec, nrec); - } - /* - * Bump and log left's numrecs, decrement and log right's numrecs. - */ - be16_add_cpu(&left->bb_numrecs, 1); - xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS); -#ifdef DEBUG - if (level > 0) - xfs_btree_check_key(cur->bc_btnum, lkp - 1, lkp); - else - xfs_btree_check_rec(cur->bc_btnum, lrp - 1, lrp); -#endif - be16_add_cpu(&right->bb_numrecs, -1); - xfs_inobt_log_block(cur->bc_tp, rbp, XFS_BB_NUMRECS); - /* - * Slide the contents of right down one entry. - */ - if (level > 0) { -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i + 1]), - level))) - return error; - } -#endif - memmove(rkp, rkp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memmove(rpp, rpp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); - xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - } else { - memmove(rrp, rrp + 1, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - key.ir_startino = rrp->ir_startino; - rkp = &key; - } - /* - * Update the parent key values of right. - */ - if ((error = xfs_btree_updkey(cur, (union xfs_btree_key *)rkp, level + 1))) - return error; - /* - * Slide the cursor value left one. - */ - cur->bc_ptrs[level]--; - *stat = 1; - return 0; -} - /* * Allocate a new root block, fill it in. */ -- cgit v1.2.3-70-g09d2 From f5eb8e7ca58bc1e92436614444006120d21668ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:57:03 +1100 Subject: [XFS] implement generic xfs_btree_split Make the btree split code generic. Based on a patch from David Chinner with lots of changes to follow the original btree implementations more closely. While this loses some of the generic helper routines for inserting/moving/removing records it also solves some of the one off bugs in the original code and makes it easier to verify. 
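One detail worth noting before the diff: the heart of the generic split is the arithmetic that picks the split point. Roughly half of the left block's entries move to the new right block, and when the count is odd the split is biased by the cursor position so that both blocks hold the same number of entries once the pending insert lands. A simplified, standalone sketch of that calculation (variable names follow the patch, but this is illustration only, not part of the patch):

/*
 * Simplified sketch of the split-point arithmetic in the generic
 * xfs_btree_split(): lrecs entries currently sit in the left block,
 * cur_ptr is the 1-based slot the pending insert is aimed at.
 */
#include <stdio.h>

static void split_counts(int lrecs, int cur_ptr)
{
	int rrecs = lrecs / 2;		/* start from an even split */
	int src_index;

	/*
	 * Odd count and the insert will land on the left side: give the
	 * new right block the extra entry so both end up equal after
	 * the insert.
	 */
	if ((lrecs & 1) && cur_ptr <= rrecs + 1)
		rrecs++;

	src_index = lrecs - rrecs + 1;	/* first entry that moves right */

	printf("lrecs=%d cursor=%d: entries %d..%d move right, %d stay left\n",
	       lrecs, cur_ptr, src_index, lrecs, lrecs - rrecs);
}

int main(void)
{
	split_counts(7, 3);	/* odd, insert lands left: 3 stay, 4 move  */
	split_counts(7, 6);	/* odd, insert lands right: 4 stay, 3 move */
	split_counts(8, 5);	/* even count: 4 stay, 4 move              */
	return 0;
}

The code added below carries the same logic, with XFS_BTREE_STATS_ADD(cur, moves, rrecs) accounting for the entries that change blocks.
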
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32198a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 200 +++++++-------------------------- fs/xfs/xfs_bmap_btree.c | 275 ++++++++++++++++------------------------------ fs/xfs/xfs_btree.c | 268 ++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 8 ++ fs/xfs/xfs_ialloc_btree.c | 212 +++++++++-------------------------- 5 files changed, 460 insertions(+), 503 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 974a412ebc8..8a8d1aeec52 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -35,6 +35,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" @@ -48,8 +49,6 @@ STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); -STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *, - xfs_alloc_key_t *, xfs_btree_cur_t **, int *); /* * Internal functions. @@ -695,15 +694,18 @@ xfs_alloc_insrec( if (i) optr = ptr = cur->bc_ptrs[level]; else { + union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) }; /* * Next, try splitting the current block in * half. If this works we have to re-set our * variables because we could be in a * different block now. */ - if ((error = xfs_alloc_split(cur, level, &nbno, - &nkey, &ncur, &i))) + if ((error = xfs_btree_split(cur, level, &bno, + (union xfs_btree_key *)&nkey, + &ncur, &i))) return error; + nbno = be32_to_cpu(bno.s); if (i) { bp = cur->bc_bufs[level]; block = XFS_BUF_TO_ALLOC_BLOCK(bp); @@ -1089,160 +1091,6 @@ xfs_alloc_newroot( return 0; } -/* - * Split cur/level block in half. - * Return new block number and its first record (to be inserted into parent). - */ -STATIC int /* error */ -xfs_alloc_split( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to split */ - xfs_agblock_t *bnop, /* output: block number allocated */ - xfs_alloc_key_t *keyp, /* output: first key of new block */ - xfs_btree_cur_t **curp, /* output: new cursor */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* loop index/record number */ - xfs_agblock_t lbno; /* left (current) block number */ - xfs_buf_t *lbp; /* buffer for left block */ - xfs_alloc_block_t *left; /* left (current) btree block */ - xfs_agblock_t rbno; /* right (new) block number */ - xfs_buf_t *rbp; /* buffer for right block */ - xfs_alloc_block_t *right; /* right (new) btree block */ - - /* - * Allocate the new block from the freelist. - * If we can't do it, we're toast. Give up. - */ - error = xfs_alloc_get_freelist(cur->bc_tp, - cur->bc_private.a.agbp, &rbno, 1); - if (error) - return error; - if (rbno == NULLAGBLOCK) { - *stat = 0; - return 0; - } - xfs_trans_agbtree_delta(cur->bc_tp, 1); - rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno, - rbno, 0); - /* - * Set up the new block as "right". - */ - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); - /* - * "Left" is the current (according to the cursor) block. 
- */ - lbp = cur->bc_bufs[level]; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; -#endif - /* - * Fill in the btree header for the new block. - */ - right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); - right->bb_level = left->bb_level; - right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); - /* - * Make sure that if there's an odd number of entries now, that - * each new block will have the same number of entries. - */ - if ((be16_to_cpu(left->bb_numrecs) & 1) && - cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) - be16_add_cpu(&right->bb_numrecs, 1); - i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; - /* - * For non-leaf blocks, copy keys and addresses over to the new block. - */ - if (level > 0) { - xfs_alloc_key_t *lkp; /* left btree key pointer */ - xfs_alloc_ptr_t *lpp; /* left btree address pointer */ - xfs_alloc_key_t *rkp; /* right btree key pointer */ - xfs_alloc_ptr_t *rpp; /* right btree address pointer */ - - lkp = XFS_ALLOC_KEY_ADDR(left, i, cur); - lpp = XFS_ALLOC_PTR_ADDR(left, i, cur); - rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); - rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) - return error; - } -#endif - memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); - xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - *keyp = *rkp; - } - /* - * For leaf blocks, copy records over to the new block. - */ - else { - xfs_alloc_rec_t *lrp; /* left btree record pointer */ - xfs_alloc_rec_t *rrp; /* right btree record pointer */ - - lrp = XFS_ALLOC_REC_ADDR(left, i, cur); - rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - keyp->ar_startblock = rrp->ar_startblock; - keyp->ar_blockcount = rrp->ar_blockcount; - } - /* - * Find the left block number by looking in the buffer. - * Adjust numrecs, sibling pointers. - */ - lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp)); - be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); - right->bb_rightsib = left->bb_rightsib; - left->bb_rightsib = cpu_to_be32(rbno); - right->bb_leftsib = cpu_to_be32(lbno); - xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS); - xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); - /* - * If there's a block to the new block's right, make that block - * point back to right instead of to left. - */ - if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { - xfs_alloc_block_t *rrblock; /* rr btree block */ - xfs_buf_t *rrbp; /* buffer for rrblock */ - - if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0, - &rrbp, XFS_ALLOC_BTREE_REF))) - return error; - rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); - if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) - return error; - rrblock->bb_leftsib = cpu_to_be32(rbno); - xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); - } - /* - * If the cursor is really in the right block, move it there. - * If it's just pointing past the last entry in left, then we'll - * insert there, so don't change anything in that case. 
- */ - if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { - xfs_btree_setbuf(cur, level, rbp); - cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); - } - /* - * If there are more levels, we'll need another cursor which refers to - * the right block, no matter where this cursor was. - */ - if (level + 1 < cur->bc_nlevels) { - if ((error = xfs_btree_dup_cursor(cur, curp))) - return error; - (*curp)->bc_ptrs[level + 1]++; - } - *bnop = rbno; - *stat = 1; - return 0; -} /* * Externally visible routines. @@ -1396,6 +1244,41 @@ xfs_allocbt_dup_cursor( cur->bc_btnum); } +STATIC int +xfs_allocbt_alloc_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *start, + union xfs_btree_ptr *new, + int length, + int *stat) +{ + int error; + xfs_agblock_t bno; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + + /* Allocate the new block from the freelist. If we can't, give up. */ + error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp, + &bno, 1); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } + + if (bno == NULLAGBLOCK) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + + xfs_trans_agbtree_delta(cur->bc_tp, 1); + new->s = cpu_to_be32(bno); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; +} + /* * Update the longest extent in the AGF */ @@ -1557,6 +1440,7 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .key_len = sizeof(xfs_alloc_key_t), .dup_cursor = xfs_allocbt_dup_cursor, + .alloc_block = xfs_allocbt_alloc_block, .update_lastrec = xfs_allocbt_update_lastrec, .get_maxrecs = xfs_allocbt_get_maxrecs, .init_key_from_rec = xfs_allocbt_init_key_from_rec, diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 809bd6ee177..e7539263457 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -52,8 +52,6 @@ STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *, - __uint64_t *, xfs_btree_cur_t **, int *); #undef EXIT @@ -550,13 +548,17 @@ xfs_bmbt_insrec( if (i) { optr = ptr = cur->bc_ptrs[level]; } else { - if ((error = xfs_bmbt_split(cur, level, - &nbno, &startoff, &ncur, + union xfs_btree_ptr bno = { .l = cpu_to_be64(nbno) }; + union xfs_btree_key skey; + if ((error = xfs_btree_split(cur, level, + &bno, &skey, &ncur, &i))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; } + nbno = be64_to_cpu(bno.l); + startoff = be64_to_cpu(skey.bmbt.br_startoff); if (i) { block = xfs_bmbt_get_block( cur, level, &bp); @@ -825,184 +827,6 @@ xfs_extent_state( return XFS_EXT_NORM; } - -/* - * Split cur/level block in half. - * Return new block number and its first record (to be inserted into parent). 
- */ -STATIC int /* error */ -xfs_bmbt_split( - xfs_btree_cur_t *cur, - int level, - xfs_fsblock_t *bnop, - __uint64_t *startoff, - xfs_btree_cur_t **curp, - int *stat) /* success/failure */ -{ - xfs_alloc_arg_t args; /* block allocation args */ - int error; /* error return value */ - int i; /* loop counter */ - xfs_fsblock_t lbno; /* left sibling block number */ - xfs_buf_t *lbp; /* left buffer pointer */ - xfs_bmbt_block_t *left; /* left btree block */ - xfs_bmbt_key_t *lkp; /* left btree key */ - xfs_bmbt_ptr_t *lpp; /* left address pointer */ - xfs_bmbt_rec_t *lrp; /* left record pointer */ - xfs_buf_t *rbp; /* right buffer pointer */ - xfs_bmbt_block_t *right; /* right btree block */ - xfs_bmbt_key_t *rkp; /* right btree key */ - xfs_bmbt_ptr_t *rpp; /* right address pointer */ - xfs_bmbt_block_t *rrblock; /* right-right btree block */ - xfs_buf_t *rrbp; /* right-right buffer pointer */ - xfs_bmbt_rec_t *rrp; /* right record pointer */ - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - // disable until merged into common code -// XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff); - args.tp = cur->bc_tp; - args.mp = cur->bc_mp; - lbp = cur->bc_bufs[level]; - lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp)); - left = XFS_BUF_TO_BMBT_BLOCK(lbp); - args.fsbno = cur->bc_private.b.firstblock; - args.firstblock = args.fsbno; - args.minleft = 0; - if (args.fsbno == NULLFSBLOCK) { - args.fsbno = lbno; - args.type = XFS_ALLOCTYPE_START_BNO; - /* - * Make sure there is sufficient room left in the AG to - * complete a full tree split for an extent insert. If - * we are converting the middle part of an extent then - * we may need space for two tree splits. - * - * We are relying on the caller to make the correct block - * reservation for this operation to succeed. If the - * reservation amount is insufficient then we may fail a - * block allocation here and corrupt the filesystem. - */ - args.minleft = xfs_trans_get_block_res(args.tp); - } else if (cur->bc_private.b.flist->xbf_low) - args.type = XFS_ALLOCTYPE_START_BNO; - else - args.type = XFS_ALLOCTYPE_NEAR_BNO; - args.mod = args.alignment = args.total = args.isfl = - args.userdata = args.minalignslop = 0; - args.minlen = args.maxlen = args.prod = 1; - args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; - if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return XFS_ERROR(ENOSPC); - } - if ((error = xfs_alloc_vextent(&args))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (args.fsbno == NULLFSBLOCK && args.minleft) { - /* - * Could not find an AG with enough free space to satisfy - * a full btree split. Try again without minleft and if - * successful activate the lowspace algorithm. 
- */ - args.fsbno = 0; - args.type = XFS_ALLOCTYPE_FIRST_AG; - args.minleft = 0; - if ((error = xfs_alloc_vextent(&args))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - cur->bc_private.b.flist->xbf_low = 1; - } - if (args.fsbno == NULLFSBLOCK) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - ASSERT(args.len == 1); - cur->bc_private.b.firstblock = args.fsbno; - cur->bc_private.b.allocated++; - cur->bc_private.b.ip->i_d.di_nblocks++; - xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); - XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, - XFS_TRANS_DQ_BCOUNT, 1L); - rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0); - right = XFS_BUF_TO_BMBT_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); - right->bb_level = left->bb_level; - right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); - if ((be16_to_cpu(left->bb_numrecs) & 1) && - cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) - be16_add_cpu(&right->bb_numrecs, 1); - i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; - if (level > 0) { - lkp = XFS_BMAP_KEY_IADDR(left, i, cur); - lpp = XFS_BMAP_PTR_IADDR(left, i, cur); - rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); - rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { - if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); - xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - *startoff = be64_to_cpu(rkp->br_startoff); - } else { - lrp = XFS_BMAP_REC_IADDR(left, i, cur); - rrp = XFS_BMAP_REC_IADDR(right, 1, cur); - memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - *startoff = xfs_bmbt_disk_get_startoff(rrp); - } - be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); - right->bb_rightsib = left->bb_rightsib; - left->bb_rightsib = cpu_to_be64(args.fsbno); - right->bb_leftsib = cpu_to_be64(lbno); - xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS); - xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); - if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) { - if ((error = xfs_btree_read_bufl(args.mp, args.tp, - be64_to_cpu(right->bb_rightsib), 0, &rrbp, - XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); - if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - rrblock->bb_leftsib = cpu_to_be64(args.fsbno); - xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); - } - if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { - xfs_btree_setbuf(cur, level, rbp); - cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); - } - if (level + 1 < cur->bc_nlevels) { - if ((error = xfs_btree_dup_cursor(cur, curp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - (*curp)->bc_ptrs[level + 1]++; - } - *bnop = args.fsbno; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -} - /* * Convert on-disk form of btree root to in-memory form. 
*/ @@ -1737,6 +1561,92 @@ xfs_bmbt_dup_cursor( return new; } +STATIC int +xfs_bmbt_alloc_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *start, + union xfs_btree_ptr *new, + int length, + int *stat) +{ + xfs_alloc_arg_t args; /* block allocation args */ + int error; /* error return value */ + + memset(&args, 0, sizeof(args)); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + args.fsbno = cur->bc_private.b.firstblock; + args.firstblock = args.fsbno; + + if (args.fsbno == NULLFSBLOCK) { + args.fsbno = be64_to_cpu(start->l); + args.type = XFS_ALLOCTYPE_START_BNO; + /* + * Make sure there is sufficient room left in the AG to + * complete a full tree split for an extent insert. If + * we are converting the middle part of an extent then + * we may need space for two tree splits. + * + * We are relying on the caller to make the correct block + * reservation for this operation to succeed. If the + * reservation amount is insufficient then we may fail a + * block allocation here and corrupt the filesystem. + */ + args.minleft = xfs_trans_get_block_res(args.tp); + } else if (cur->bc_private.b.flist->xbf_low) { + args.type = XFS_ALLOCTYPE_START_BNO; + } else { + args.type = XFS_ALLOCTYPE_NEAR_BNO; + } + + args.minlen = args.maxlen = args.prod = 1; + args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; + if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { + error = XFS_ERROR(ENOSPC); + goto error0; + } + error = xfs_alloc_vextent(&args); + if (error) + goto error0; + + if (args.fsbno == NULLFSBLOCK && args.minleft) { + /* + * Could not find an AG with enough free space to satisfy + * a full btree split. Try again without minleft and if + * successful activate the lowspace algorithm. + */ + args.fsbno = 0; + args.type = XFS_ALLOCTYPE_FIRST_AG; + args.minleft = 0; + error = xfs_alloc_vextent(&args); + if (error) + goto error0; + cur->bc_private.b.flist->xbf_low = 1; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + cur->bc_private.b.firstblock = args.fsbno; + cur->bc_private.b.allocated++; + cur->bc_private.b.ip->i_d.di_nblocks++; + xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, + XFS_TRANS_DQ_BCOUNT, 1L); + + new->l = cpu_to_be64(args.fsbno); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + + error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + STATIC int xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, @@ -1861,6 +1771,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .key_len = sizeof(xfs_bmbt_key_t), .dup_cursor = xfs_bmbt_dup_cursor, + .alloc_block = xfs_bmbt_alloc_block, .get_maxrecs = xfs_bmbt_get_maxrecs, .init_key_from_rec = xfs_bmbt_init_key_from_rec, .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 2b0d1422c4c..80576695fbe 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -988,6 +988,48 @@ xfs_btree_get_sibling( } } +STATIC void +xfs_btree_set_sibling( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + union xfs_btree_ptr *ptr, + int lr) +{ + ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB); + + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + if (lr == XFS_BB_RIGHTSIB) + block->bb_u.l.bb_rightsib = ptr->l; + else + block->bb_u.l.bb_leftsib = ptr->l; + } else { + if (lr == XFS_BB_RIGHTSIB) + block->bb_u.s.bb_rightsib = ptr->s; + else + block->bb_u.s.bb_leftsib = 
ptr->s; + } +} + +STATIC void +xfs_btree_init_block( + struct xfs_btree_cur *cur, + int level, + int numrecs, + struct xfs_btree_block *new) /* new block */ +{ + new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); + new->bb_level = cpu_to_be16(level); + new->bb_numrecs = cpu_to_be16(numrecs); + + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + new->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); + new->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); + } else { + new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); + new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); + } +} + /* * Return true if ptr is the last record in the btree and * we need to track updateѕ to this record. The decision @@ -1012,6 +1054,21 @@ xfs_btree_is_lastrec( return 1; } +STATIC void +xfs_btree_buf_to_ptr( + struct xfs_btree_cur *cur, + struct xfs_buf *bp, + union xfs_btree_ptr *ptr) +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp, + XFS_BUF_ADDR(bp))); + else { + ptr->s = cpu_to_be32(XFS_DADDR_TO_AGBNO(cur->bc_mp, + XFS_BUF_ADDR(bp))); + } +} + STATIC xfs_daddr_t xfs_btree_ptr_to_daddr( struct xfs_btree_cur *cur, @@ -1051,6 +1108,31 @@ xfs_btree_set_refs( } } +STATIC int +xfs_btree_get_buf_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + int flags, + struct xfs_btree_block **block, + struct xfs_buf **bpp) +{ + struct xfs_mount *mp = cur->bc_mp; + xfs_daddr_t d; + + /* need to sort out how callers deal with failures first */ + ASSERT(!(flags & XFS_BUF_TRYLOCK)); + + d = xfs_btree_ptr_to_daddr(cur, ptr); + *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d, + mp->m_bsize, flags); + + ASSERT(*bpp); + ASSERT(!XFS_BUF_GETERROR(*bpp)); + + *block = XFS_BUF_TO_BLOCK(*bpp); + return 0; +} + /* * Read in the buffer at the given ptr and return the buffer and * the block pointer within the buffer. @@ -2199,3 +2281,189 @@ error1: xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); return error; } + +/* + * Split cur/level block in half. + * Return new block number and the key to its first + * record (to be inserted into parent). + */ +int /* error */ +xfs_btree_split( + struct xfs_btree_cur *cur, + int level, + union xfs_btree_ptr *ptrp, + union xfs_btree_key *key, + struct xfs_btree_cur **curp, + int *stat) /* success/failure */ +{ + union xfs_btree_ptr lptr; /* left sibling block ptr */ + struct xfs_buf *lbp; /* left buffer pointer */ + struct xfs_btree_block *left; /* left btree block */ + union xfs_btree_ptr rptr; /* right sibling block ptr */ + struct xfs_buf *rbp; /* right buffer pointer */ + struct xfs_btree_block *right; /* right btree block */ + union xfs_btree_ptr rrptr; /* right-right sibling ptr */ + struct xfs_buf *rrbp; /* right-right buffer pointer */ + struct xfs_btree_block *rrblock; /* right-right btree block */ + int lrecs; + int rrecs; + int src_index; + int error; /* error return value */ +#ifdef DEBUG + int i; +#endif + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key); + + XFS_BTREE_STATS_INC(cur, split); + + /* Set up left block (current one). */ + left = xfs_btree_get_block(cur, level, &lbp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, left, level, lbp); + if (error) + goto error0; +#endif + + xfs_btree_buf_to_ptr(cur, lbp, &lptr); + + /* Allocate the new block. If we can't do it, we're toast. Give up. 
*/ + error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, 1, stat); + if (error) + goto error0; + if (*stat == 0) + goto out0; + XFS_BTREE_STATS_INC(cur, alloc); + + /* Set up the new block as "right". */ + error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp); + if (error) + goto error0; + + /* Fill in the btree header for the new right block. */ + xfs_btree_init_block(cur, xfs_btree_get_level(left), 0, right); + + /* + * Split the entries between the old and the new block evenly. + * Make sure that if there's an odd number of entries now, that + * each new block will have the same number of entries. + */ + lrecs = xfs_btree_get_numrecs(left); + rrecs = lrecs / 2; + if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1) + rrecs++; + src_index = (lrecs - rrecs + 1); + + XFS_BTREE_STATS_ADD(cur, moves, rrecs); + + /* + * Copy btree block entries from the left block over to the + * new block, the right. Update the right block and log the + * changes. + */ + if (level > 0) { + /* It's a non-leaf. Move keys and pointers. */ + union xfs_btree_key *lkp; /* left btree key */ + union xfs_btree_ptr *lpp; /* left address pointer */ + union xfs_btree_key *rkp; /* right btree key */ + union xfs_btree_ptr *rpp; /* right address pointer */ + + lkp = xfs_btree_key_addr(cur, src_index, left); + lpp = xfs_btree_ptr_addr(cur, src_index, left); + rkp = xfs_btree_key_addr(cur, 1, right); + rpp = xfs_btree_ptr_addr(cur, 1, right); + +#ifdef DEBUG + for (i = src_index; i < rrecs; i++) { + error = xfs_btree_check_ptr(cur, lpp, i, level); + if (error) + goto error0; + } +#endif + + xfs_btree_copy_keys(cur, rkp, lkp, rrecs); + xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs); + + xfs_btree_log_keys(cur, rbp, 1, rrecs); + xfs_btree_log_ptrs(cur, rbp, 1, rrecs); + + /* Grab the keys to the entries moved to the right block */ + xfs_btree_copy_keys(cur, key, rkp, 1); + } else { + /* It's a leaf. Move records. */ + union xfs_btree_rec *lrp; /* left record pointer */ + union xfs_btree_rec *rrp; /* right record pointer */ + + lrp = xfs_btree_rec_addr(cur, src_index, left); + rrp = xfs_btree_rec_addr(cur, 1, right); + + xfs_btree_copy_recs(cur, rrp, lrp, rrecs); + xfs_btree_log_recs(cur, rbp, 1, rrecs); + + cur->bc_ops->init_key_from_rec(key, + xfs_btree_rec_addr(cur, 1, right)); + } + + + /* + * Find the left block number by looking in the buffer. + * Adjust numrecs, sibling pointers. + */ + xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB); + xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB); + xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); + xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB); + + lrecs -= rrecs; + xfs_btree_set_numrecs(left, lrecs); + xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs); + + xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS); + xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + + /* + * If there's a block to the new block's right, make that block + * point back to right instead of to left. + */ + if (!xfs_btree_ptr_is_null(cur, &rrptr)) { + error = xfs_btree_read_buf_block(cur, &rrptr, level, + 0, &rrblock, &rrbp); + if (error) + goto error0; + xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB); + xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + /* + * If the cursor is really in the right block, move it there. + * If it's just pointing past the last entry in left, then we'll + * insert there, so don't change anything in that case. 
+ */ + if (cur->bc_ptrs[level] > lrecs + 1) { + xfs_btree_setbuf(cur, level, rbp); + cur->bc_ptrs[level] -= lrecs; + } + /* + * If there are more levels, we'll need another cursor which refers + * the right block, no matter where this cursor was. + */ + if (level + 1 < cur->bc_nlevels) { + error = xfs_btree_dup_cursor(cur, curp); + if (error) + goto error0; + (*curp)->bc_ptrs[level + 1]++; + } + *ptrp = rptr; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 7cde287b5c9..354a6656fad 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -187,6 +187,12 @@ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + /* block allocation / freeing */ + int (*alloc_block)(struct xfs_btree_cur *cur, + union xfs_btree_ptr *start_bno, + union xfs_btree_ptr *new_bno, + int length, int *stat); + /* update last record information */ void (*update_lastrec)(struct xfs_btree_cur *cur, struct xfs_btree_block *block, @@ -535,6 +541,8 @@ int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); int xfs_btree_lshift(struct xfs_btree_cur *, int, int *); int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); +int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, + union xfs_btree_key *, struct xfs_btree_cur **, int *); /* * Helpers. diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 60f5db5d6df..c76190a83e4 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -35,6 +35,7 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" +#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" @@ -44,8 +45,6 @@ STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); -STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *, - xfs_inobt_key_t *, xfs_btree_cur_t **, int *); /* * Single level of the xfs_inobt_delete record deletion routine. @@ -620,15 +619,18 @@ xfs_inobt_insrec( if (i) { optr = ptr = cur->bc_ptrs[level]; } else { + union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) }; /* * Next, try splitting the current block * in half. If this works we have to * re-set our variables because * we could be in a different block now. */ - if ((error = xfs_inobt_split(cur, level, &nbno, - &nkey, &ncur, &i))) + if ((error = xfs_btree_split(cur, level, &bno, + (union xfs_btree_key *)&nkey, + &ncur, &i))) return error; + nbno = be32_to_cpu(bno.s); if (i) { bp = cur->bc_bufs[level]; block = XFS_BUF_TO_INOBT_BLOCK(bp); @@ -972,165 +974,6 @@ xfs_inobt_newroot( return 0; } -/* - * Split cur/level block in half. - * Return new block number and its first record (to be inserted into parent). 
- */ -STATIC int /* error */ -xfs_inobt_split( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to split */ - xfs_agblock_t *bnop, /* output: block number allocated */ - xfs_inobt_key_t *keyp, /* output: first key of new block */ - xfs_btree_cur_t **curp, /* output: new cursor */ - int *stat) /* success/failure */ -{ - xfs_alloc_arg_t args; /* allocation argument structure */ - int error; /* error return value */ - int i; /* loop index/record number */ - xfs_agblock_t lbno; /* left (current) block number */ - xfs_buf_t *lbp; /* buffer for left block */ - xfs_inobt_block_t *left; /* left (current) btree block */ - xfs_inobt_key_t *lkp; /* left btree key pointer */ - xfs_inobt_ptr_t *lpp; /* left btree address pointer */ - xfs_inobt_rec_t *lrp; /* left btree record pointer */ - xfs_buf_t *rbp; /* buffer for right block */ - xfs_inobt_block_t *right; /* right (new) btree block */ - xfs_inobt_key_t *rkp; /* right btree key pointer */ - xfs_inobt_ptr_t *rpp; /* right btree address pointer */ - xfs_inobt_rec_t *rrp; /* right btree record pointer */ - - /* - * Set up left block (current one). - */ - lbp = cur->bc_bufs[level]; - args.tp = cur->bc_tp; - args.mp = cur->bc_mp; - lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); - /* - * Allocate the new block. - * If we can't do it, we're toast. Give up. - */ - args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno); - args.mod = args.minleft = args.alignment = args.total = args.wasdel = - args.isfl = args.userdata = args.minalignslop = 0; - args.minlen = args.maxlen = args.prod = 1; - args.type = XFS_ALLOCTYPE_NEAR_BNO; - if ((error = xfs_alloc_vextent(&args))) - return error; - if (args.fsbno == NULLFSBLOCK) { - *stat = 0; - return 0; - } - ASSERT(args.len == 1); - rbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); - /* - * Set up the new block as "right". - */ - right = XFS_BUF_TO_INOBT_BLOCK(rbp); - /* - * "Left" is the current (according to the cursor) block. - */ - left = XFS_BUF_TO_INOBT_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; -#endif - /* - * Fill in the btree header for the new block. - */ - right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); - right->bb_level = left->bb_level; - right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2); - /* - * Make sure that if there's an odd number of entries now, that - * each new block will have the same number of entries. - */ - if ((be16_to_cpu(left->bb_numrecs) & 1) && - cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1) - be16_add_cpu(&right->bb_numrecs, 1); - i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1; - /* - * For non-leaf blocks, copy keys and addresses over to the new block. - */ - if (level > 0) { - lkp = XFS_INOBT_KEY_ADDR(left, i, cur); - lpp = XFS_INOBT_PTR_ADDR(left, i, cur); - rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); - rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) - return error; - } -#endif - memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp)); - memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp)); - xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - *keyp = *rkp; - } - /* - * For leaf blocks, copy records over to the new block. 
- */ - else { - lrp = XFS_INOBT_REC_ADDR(left, i, cur); - rrp = XFS_INOBT_REC_ADDR(right, 1, cur); - memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp)); - xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs)); - keyp->ir_startino = rrp->ir_startino; - } - /* - * Find the left block number by looking in the buffer. - * Adjust numrecs, sibling pointers. - */ - be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs))); - right->bb_rightsib = left->bb_rightsib; - left->bb_rightsib = cpu_to_be32(args.agbno); - right->bb_leftsib = cpu_to_be32(lbno); - xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS); - xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); - /* - * If there's a block to the new block's right, make that block - * point back to right instead of to left. - */ - if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) { - xfs_inobt_block_t *rrblock; /* rr btree block */ - xfs_buf_t *rrbp; /* buffer for rrblock */ - - if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, - be32_to_cpu(right->bb_rightsib), 0, &rrbp, - XFS_INO_BTREE_REF))) - return error; - rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); - if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) - return error; - rrblock->bb_leftsib = cpu_to_be32(args.agbno); - xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB); - } - /* - * If the cursor is really in the right block, move it there. - * If it's just pointing past the last entry in left, then we'll - * insert there, so don't change anything in that case. - */ - if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) { - xfs_btree_setbuf(cur, level, rbp); - cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs); - } - /* - * If there are more levels, we'll need another cursor which refers - * the right block, no matter where this cursor was. - */ - if (level + 1 < cur->bc_nlevels) { - if ((error = xfs_btree_dup_cursor(cur, curp))) - return error; - (*curp)->bc_ptrs[level + 1]++; - } - *bnop = args.agbno; - *stat = 1; - return 0; -} - /* * Externally visible routines. 
*/ @@ -1285,6 +1128,48 @@ xfs_inobt_dup_cursor( cur->bc_private.a.agbp, cur->bc_private.a.agno); } +STATIC int +xfs_inobt_alloc_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *start, + union xfs_btree_ptr *new, + int length, + int *stat) +{ + xfs_alloc_arg_t args; /* block allocation args */ + int error; /* error return value */ + xfs_agblock_t sbno = be32_to_cpu(start->s); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + + memset(&args, 0, sizeof(args)); + args.tp = cur->bc_tp; + args.mp = cur->bc_mp; + args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno); + args.minlen = 1; + args.maxlen = 1; + args.prod = 1; + args.type = XFS_ALLOCTYPE_NEAR_BNO; + + error = xfs_alloc_vextent(&args); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } + if (args.fsbno == NULLFSBLOCK) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + + new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno)); + *stat = 1; + return 0; +} + + STATIC int xfs_inobt_get_maxrecs( struct xfs_btree_cur *cur, @@ -1396,6 +1281,7 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .key_len = sizeof(xfs_inobt_key_t), .dup_cursor = xfs_inobt_dup_cursor, + .alloc_block = xfs_inobt_alloc_block, .get_maxrecs = xfs_inobt_get_maxrecs, .init_key_from_rec = xfs_inobt_init_key_from_rec, .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, -- cgit v1.2.3-70-g09d2 From 344207ce8474b79be331eb93e6df4cb5bdd48ab2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:57:16 +1100 Subject: [XFS] implement semi-generic xfs_btree_new_root From: Dave Chinner Add a xfs_btree_new_root helper for the alloc and ialloc btrees. The bmap btree needs it's own version and is not converted. [hch: split out from bigger patch and minor adaptions] SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32200a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 179 ++++++---------------------------------------- fs/xfs/xfs_btree.c | 129 +++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 5 ++ fs/xfs/xfs_ialloc_btree.c | 164 +++++------------------------------------- 4 files changed, 172 insertions(+), 305 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 8a8d1aeec52..f21a3e9cc3d 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -48,7 +48,6 @@ STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int); STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); /* * Internal functions. @@ -628,7 +627,7 @@ xfs_alloc_insrec( */ if (level >= cur->bc_nlevels) { XFS_STATS_INC(xs_abt_insrec); - if ((error = xfs_alloc_newroot(cur, &i))) + if ((error = xfs_btree_new_root(cur, &i))) return error; *bnop = NULLAGBLOCK; *stat = i; @@ -936,161 +935,6 @@ xfs_alloc_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Allocate a new root block, fill it in. 
- */ -STATIC int /* error */ -xfs_alloc_newroot( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - xfs_agblock_t lbno; /* left block number */ - xfs_buf_t *lbp; /* left btree buffer */ - xfs_alloc_block_t *left; /* left btree block */ - xfs_mount_t *mp; /* mount structure */ - xfs_agblock_t nbno; /* new block number */ - xfs_buf_t *nbp; /* new (root) buffer */ - xfs_alloc_block_t *new; /* new (root) btree block */ - int nptr; /* new value for key index, 1 or 2 */ - xfs_agblock_t rbno; /* right block number */ - xfs_buf_t *rbp; /* right btree buffer */ - xfs_alloc_block_t *right; /* right btree block */ - - mp = cur->bc_mp; - - ASSERT(cur->bc_nlevels < XFS_AG_MAXLEVELS(mp)); - /* - * Get a buffer from the freelist blocks, for the new root. - */ - error = xfs_alloc_get_freelist(cur->bc_tp, - cur->bc_private.a.agbp, &nbno, 1); - if (error) - return error; - /* - * None available, we fail. - */ - if (nbno == NULLAGBLOCK) { - *stat = 0; - return 0; - } - xfs_trans_agbtree_delta(cur->bc_tp, 1); - nbp = xfs_btree_get_bufs(mp, cur->bc_tp, cur->bc_private.a.agno, nbno, - 0); - new = XFS_BUF_TO_ALLOC_BLOCK(nbp); - /* - * Set the root data in the a.g. freespace structure. - */ - { - xfs_agf_t *agf; /* a.g. freespace header */ - xfs_agnumber_t seqno; - - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - agf->agf_roots[cur->bc_btnum] = cpu_to_be32(nbno); - be32_add_cpu(&agf->agf_levels[cur->bc_btnum], 1); - seqno = be32_to_cpu(agf->agf_seqno); - mp->m_perag[seqno].pagf_levels[cur->bc_btnum]++; - xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, - XFS_AGF_ROOTS | XFS_AGF_LEVELS); - } - /* - * At the previous root level there are now two blocks: the old - * root, and the new block generated when it was split. - * We don't know which one the cursor is pointing at, so we - * set up variables "left" and "right" for each case. - */ - lbp = cur->bc_bufs[cur->bc_nlevels - 1]; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, cur->bc_nlevels - 1, lbp))) - return error; -#endif - if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { - /* - * Our block is left, pick up the right block. - */ - lbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(lbp)); - rbno = be32_to_cpu(left->bb_rightsib); - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, rbno, 0, &rbp, - XFS_ALLOC_BTREE_REF))) - return error; - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); - if ((error = xfs_btree_check_sblock(cur, right, - cur->bc_nlevels - 1, rbp))) - return error; - nptr = 1; - } else { - /* - * Our block is right, pick up the left block. - */ - rbp = lbp; - right = left; - rbno = XFS_DADDR_TO_AGBNO(mp, XFS_BUF_ADDR(rbp)); - lbno = be32_to_cpu(right->bb_leftsib); - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, lbno, 0, &lbp, - XFS_ALLOC_BTREE_REF))) - return error; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); - if ((error = xfs_btree_check_sblock(cur, left, - cur->bc_nlevels - 1, lbp))) - return error; - nptr = 2; - } - /* - * Fill in the new block's btree header and log it. - */ - new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); - new->bb_level = cpu_to_be16(cur->bc_nlevels); - new->bb_numrecs = cpu_to_be16(2); - new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); - new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); - xfs_alloc_log_block(cur->bc_tp, nbp, XFS_BB_ALL_BITS); - ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); - /* - * Fill in the key data in the new root. 
- */ - { - xfs_alloc_key_t *kp; /* btree key pointer */ - - kp = XFS_ALLOC_KEY_ADDR(new, 1, cur); - if (be16_to_cpu(left->bb_level) > 0) { - kp[0] = *XFS_ALLOC_KEY_ADDR(left, 1, cur); - kp[1] = *XFS_ALLOC_KEY_ADDR(right, 1, cur); - } else { - xfs_alloc_rec_t *rp; /* btree record pointer */ - - rp = XFS_ALLOC_REC_ADDR(left, 1, cur); - kp[0].ar_startblock = rp->ar_startblock; - kp[0].ar_blockcount = rp->ar_blockcount; - rp = XFS_ALLOC_REC_ADDR(right, 1, cur); - kp[1].ar_startblock = rp->ar_startblock; - kp[1].ar_blockcount = rp->ar_blockcount; - } - } - xfs_alloc_log_keys(cur, nbp, 1, 2); - /* - * Fill in the pointer data in the new root. - */ - { - xfs_alloc_ptr_t *pp; /* btree address pointer */ - - pp = XFS_ALLOC_PTR_ADDR(new, 1, cur); - pp[0] = cpu_to_be32(lbno); - pp[1] = cpu_to_be32(rbno); - } - xfs_alloc_log_ptrs(cur, nbp, 1, 2); - /* - * Fix up the cursor. - */ - xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); - cur->bc_ptrs[cur->bc_nlevels] = nptr; - cur->bc_nlevels++; - *stat = 1; - return 0; -} - /* * Externally visible routines. @@ -1244,6 +1088,26 @@ xfs_allocbt_dup_cursor( cur->bc_btnum); } +STATIC void +xfs_allocbt_set_root( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + int inc) +{ + struct xfs_buf *agbp = cur->bc_private.a.agbp; + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); + xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); + int btnum = cur->bc_btnum; + + ASSERT(ptr->s != 0); + + agf->agf_roots[btnum] = ptr->s; + be32_add_cpu(&agf->agf_levels[btnum], inc); + cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc; + + xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); +} + STATIC int xfs_allocbt_alloc_block( struct xfs_btree_cur *cur, @@ -1440,6 +1304,7 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .key_len = sizeof(xfs_alloc_key_t), .dup_cursor = xfs_allocbt_dup_cursor, + .set_root = xfs_allocbt_set_root, .alloc_block = xfs_allocbt_alloc_block, .update_lastrec = xfs_allocbt_update_lastrec, .get_maxrecs = xfs_allocbt_get_maxrecs, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 80576695fbe..8de884c4dab 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -2467,3 +2467,132 @@ error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } + +/* + * Allocate a new root block, fill it in. + */ +int /* error */ +xfs_btree_new_root( + struct xfs_btree_cur *cur, /* btree cursor */ + int *stat) /* success/failure */ +{ + struct xfs_btree_block *block; /* one half of the old root block */ + struct xfs_buf *bp; /* buffer containing block */ + int error; /* error return value */ + struct xfs_buf *lbp; /* left buffer pointer */ + struct xfs_btree_block *left; /* left btree block */ + struct xfs_buf *nbp; /* new (root) buffer */ + struct xfs_btree_block *new; /* new (root) btree block */ + int nptr; /* new value for key index, 1 or 2 */ + struct xfs_buf *rbp; /* right buffer pointer */ + struct xfs_btree_block *right; /* right btree block */ + union xfs_btree_ptr rptr; + union xfs_btree_ptr lptr; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_STATS_INC(cur, newroot); + + /* initialise our start point from the cursor */ + cur->bc_ops->init_ptr_from_cur(cur, &rptr); + + /* Allocate the new block. If we can't do it, we're toast. Give up. */ + error = cur->bc_ops->alloc_block(cur, &rptr, &lptr, 1, stat); + if (error) + goto error0; + if (*stat == 0) + goto out0; + XFS_BTREE_STATS_INC(cur, alloc); + + /* Set up the new block. 
*/ + error = xfs_btree_get_buf_block(cur, &lptr, 0, &new, &nbp); + if (error) + goto error0; + + /* Set the root in the holding structure increasing the level by 1. */ + cur->bc_ops->set_root(cur, &lptr, 1); + + /* + * At the previous root level there are now two blocks: the old root, + * and the new block generated when it was split. We don't know which + * one the cursor is pointing at, so we set up variables "left" and + * "right" for each case. + */ + block = xfs_btree_get_block(cur, cur->bc_nlevels - 1, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, cur->bc_nlevels - 1, bp); + if (error) + goto error0; +#endif + + xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); + if (!xfs_btree_ptr_is_null(cur, &rptr)) { + /* Our block is left, pick up the right block. */ + lbp = bp; + xfs_btree_buf_to_ptr(cur, lbp, &lptr); + left = block; + error = xfs_btree_read_buf_block(cur, &rptr, + cur->bc_nlevels - 1, 0, &right, &rbp); + if (error) + goto error0; + bp = rbp; + nptr = 1; + } else { + /* Our block is right, pick up the left block. */ + rbp = bp; + xfs_btree_buf_to_ptr(cur, rbp, &rptr); + right = block; + xfs_btree_get_sibling(cur, right, &lptr, XFS_BB_LEFTSIB); + error = xfs_btree_read_buf_block(cur, &lptr, + cur->bc_nlevels - 1, 0, &left, &lbp); + if (error) + goto error0; + bp = lbp; + nptr = 2; + } + /* Fill in the new block's btree header and log it. */ + xfs_btree_init_block(cur, cur->bc_nlevels, 2, new); + xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS); + ASSERT(!xfs_btree_ptr_is_null(cur, &lptr) && + !xfs_btree_ptr_is_null(cur, &rptr)); + + /* Fill in the key data in the new root. */ + if (xfs_btree_get_level(left) > 0) { + xfs_btree_copy_keys(cur, + xfs_btree_key_addr(cur, 1, new), + xfs_btree_key_addr(cur, 1, left), 1); + xfs_btree_copy_keys(cur, + xfs_btree_key_addr(cur, 2, new), + xfs_btree_key_addr(cur, 1, right), 1); + } else { + cur->bc_ops->init_key_from_rec( + xfs_btree_key_addr(cur, 1, new), + xfs_btree_rec_addr(cur, 1, left)); + cur->bc_ops->init_key_from_rec( + xfs_btree_key_addr(cur, 2, new), + xfs_btree_rec_addr(cur, 1, right)); + } + xfs_btree_log_keys(cur, nbp, 1, 2); + + /* Fill in the pointer data in the new root. */ + xfs_btree_copy_ptrs(cur, + xfs_btree_ptr_addr(cur, 1, new), &lptr, 1); + xfs_btree_copy_ptrs(cur, + xfs_btree_ptr_addr(cur, 2, new), &rptr, 1); + xfs_btree_log_ptrs(cur, nbp, 1, 2); + + /* Fix up the cursor. 
*/ + xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); + cur->bc_ptrs[cur->bc_nlevels] = nptr; + cur->bc_nlevels++; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 354a6656fad..18015392feb 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -187,6 +187,10 @@ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + /* update btree root pointer */ + void (*set_root)(struct xfs_btree_cur *cur, + union xfs_btree_ptr *nptr, int level_change); + /* block allocation / freeing */ int (*alloc_block)(struct xfs_btree_cur *cur, union xfs_btree_ptr *start_bno, @@ -543,6 +547,7 @@ int xfs_btree_lshift(struct xfs_btree_cur *, int, int *); int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, union xfs_btree_key *, struct xfs_btree_cur **, int *); +int xfs_btree_new_root(struct xfs_btree_cur *, int *); /* * Helpers. diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index c76190a83e4..7ba3c7bb398 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -44,7 +44,6 @@ STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int); STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); /* * Single level of the xfs_inobt_delete record deletion routine. @@ -556,7 +555,7 @@ xfs_inobt_insrec( * and we're done. */ if (level >= cur->bc_nlevels) { - error = xfs_inobt_newroot(cur, &i); + error = xfs_btree_new_root(cur, &i); *bnop = NULLAGBLOCK; *stat = i; return error; @@ -827,152 +826,6 @@ xfs_inobt_log_recs( xfs_trans_log_buf(cur->bc_tp, bp, first, last); } -/* - * Allocate a new root block, fill it in. - */ -STATIC int /* error */ -xfs_inobt_newroot( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - xfs_agi_t *agi; /* a.g. inode header */ - xfs_alloc_arg_t args; /* allocation argument structure */ - xfs_inobt_block_t *block; /* one half of the old root block */ - xfs_buf_t *bp; /* buffer containing block */ - int error; /* error return value */ - xfs_inobt_key_t *kp; /* btree key pointer */ - xfs_agblock_t lbno; /* left block number */ - xfs_buf_t *lbp; /* left buffer pointer */ - xfs_inobt_block_t *left; /* left btree block */ - xfs_buf_t *nbp; /* new (root) buffer */ - xfs_inobt_block_t *new; /* new (root) btree block */ - int nptr; /* new value for key index, 1 or 2 */ - xfs_inobt_ptr_t *pp; /* btree address pointer */ - xfs_agblock_t rbno; /* right block number */ - xfs_buf_t *rbp; /* right buffer pointer */ - xfs_inobt_block_t *right; /* right btree block */ - xfs_inobt_rec_t *rp; /* btree record pointer */ - - ASSERT(cur->bc_nlevels < XFS_IN_MAXLEVELS(cur->bc_mp)); - - /* - * Get a block & a buffer. 
- */ - agi = XFS_BUF_TO_AGI(cur->bc_private.a.agbp); - args.tp = cur->bc_tp; - args.mp = cur->bc_mp; - args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, - be32_to_cpu(agi->agi_root)); - args.mod = args.minleft = args.alignment = args.total = args.wasdel = - args.isfl = args.userdata = args.minalignslop = 0; - args.minlen = args.maxlen = args.prod = 1; - args.type = XFS_ALLOCTYPE_NEAR_BNO; - if ((error = xfs_alloc_vextent(&args))) - return error; - /* - * None available, we fail. - */ - if (args.fsbno == NULLFSBLOCK) { - *stat = 0; - return 0; - } - ASSERT(args.len == 1); - nbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0); - new = XFS_BUF_TO_INOBT_BLOCK(nbp); - /* - * Set the root data in the a.g. inode structure. - */ - agi->agi_root = cpu_to_be32(args.agbno); - be32_add_cpu(&agi->agi_level, 1); - xfs_ialloc_log_agi(args.tp, cur->bc_private.a.agbp, - XFS_AGI_ROOT | XFS_AGI_LEVEL); - /* - * At the previous root level there are now two blocks: the old - * root, and the new block generated when it was split. - * We don't know which one the cursor is pointing at, so we - * set up variables "left" and "right" for each case. - */ - bp = cur->bc_bufs[cur->bc_nlevels - 1]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, cur->bc_nlevels - 1, bp))) - return error; -#endif - if (be32_to_cpu(block->bb_rightsib) != NULLAGBLOCK) { - /* - * Our block is left, pick up the right block. - */ - lbp = bp; - lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp)); - left = block; - rbno = be32_to_cpu(left->bb_rightsib); - if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, - rbno, 0, &rbp, XFS_INO_BTREE_REF))) - return error; - bp = rbp; - right = XFS_BUF_TO_INOBT_BLOCK(rbp); - if ((error = xfs_btree_check_sblock(cur, right, - cur->bc_nlevels - 1, rbp))) - return error; - nptr = 1; - } else { - /* - * Our block is right, pick up the left block. - */ - rbp = bp; - rbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(rbp)); - right = block; - lbno = be32_to_cpu(right->bb_leftsib); - if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno, - lbno, 0, &lbp, XFS_INO_BTREE_REF))) - return error; - bp = lbp; - left = XFS_BUF_TO_INOBT_BLOCK(lbp); - if ((error = xfs_btree_check_sblock(cur, left, - cur->bc_nlevels - 1, lbp))) - return error; - nptr = 2; - } - /* - * Fill in the new block's btree header and log it. - */ - new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]); - new->bb_level = cpu_to_be16(cur->bc_nlevels); - new->bb_numrecs = cpu_to_be16(2); - new->bb_leftsib = cpu_to_be32(NULLAGBLOCK); - new->bb_rightsib = cpu_to_be32(NULLAGBLOCK); - xfs_inobt_log_block(args.tp, nbp, XFS_BB_ALL_BITS); - ASSERT(lbno != NULLAGBLOCK && rbno != NULLAGBLOCK); - /* - * Fill in the key data in the new root. - */ - kp = XFS_INOBT_KEY_ADDR(new, 1, cur); - if (be16_to_cpu(left->bb_level) > 0) { - kp[0] = *XFS_INOBT_KEY_ADDR(left, 1, cur); - kp[1] = *XFS_INOBT_KEY_ADDR(right, 1, cur); - } else { - rp = XFS_INOBT_REC_ADDR(left, 1, cur); - kp[0].ir_startino = rp->ir_startino; - rp = XFS_INOBT_REC_ADDR(right, 1, cur); - kp[1].ir_startino = rp->ir_startino; - } - xfs_inobt_log_keys(cur, nbp, 1, 2); - /* - * Fill in the pointer data in the new root. - */ - pp = XFS_INOBT_PTR_ADDR(new, 1, cur); - pp[0] = cpu_to_be32(lbno); - pp[1] = cpu_to_be32(rbno); - xfs_inobt_log_ptrs(cur, nbp, 1, 2); - /* - * Fix up the cursor. 
- */ - xfs_btree_setbuf(cur, cur->bc_nlevels, nbp); - cur->bc_ptrs[cur->bc_nlevels] = nptr; - cur->bc_nlevels++; - *stat = 1; - return 0; -} /* * Externally visible routines. @@ -1128,6 +981,20 @@ xfs_inobt_dup_cursor( cur->bc_private.a.agbp, cur->bc_private.a.agno); } +STATIC void +xfs_inobt_set_root( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *nptr, + int inc) /* level change */ +{ + struct xfs_buf *agbp = cur->bc_private.a.agbp; + struct xfs_agi *agi = XFS_BUF_TO_AGI(agbp); + + agi->agi_root = nptr->s; + be32_add_cpu(&agi->agi_level, inc); + xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL); +} + STATIC int xfs_inobt_alloc_block( struct xfs_btree_cur *cur, @@ -1281,6 +1148,7 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .key_len = sizeof(xfs_inobt_key_t), .dup_cursor = xfs_inobt_dup_cursor, + .set_root = xfs_inobt_set_root, .alloc_block = xfs_inobt_alloc_block, .get_maxrecs = xfs_inobt_get_maxrecs, .init_key_from_rec = xfs_inobt_init_key_from_rec, -- cgit v1.2.3-70-g09d2 From ea77b0a66e6c910ef265d9af522d6303ea6b3055 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:57:28 +1100 Subject: [XFS] move xfs_bmbt_newroot to common code xfs_bmbt_newroot is a mostly generic implementation of moving from an inode root to a real block based root. So move it to xfs_btree.c where it can use all the nice infrastructure there and make it pointer size agnostic The new name for it is xfs_btree_new_iroot, following the old naming but making it clear we're dealing with the root in inode case here, and to avoid confusion with xfs_btree_new_root which is used for the not inode rooted case. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32201a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_bmap.c | 2 +- fs/xfs/xfs_bmap_btree.c | 113 +----------------------------------------------- fs/xfs/xfs_bmap_btree.h | 6 --- fs/xfs/xfs_btree.c | 101 +++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 1 + 5 files changed, 104 insertions(+), 119 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 7d6c4ace805..315bc291268 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -476,7 +476,7 @@ xfs_bmap_add_attrfork_btree( goto error0; /* must be at least one entry */ XFS_WANT_CORRUPTED_GOTO(stat == 1, error0); - if ((error = xfs_bmbt_newroot(cur, flags, &stat))) + if ((error = xfs_btree_new_iroot(cur, flags, &stat))) goto error0; if (stat == 0) { xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index e7539263457..204f276aeaa 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -525,7 +525,7 @@ xfs_bmbt_insrec( cur->bc_private.b.whichfork); block = xfs_bmbt_get_block(cur, level, &bp); } else if (level == cur->bc_nlevels - 1) { - if ((error = xfs_bmbt_newroot(cur, &logflags, stat)) || + if ((error = xfs_btree_new_iroot(cur, &logflags, stat)) || *stat == 0) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); return error; @@ -1182,117 +1182,6 @@ xfs_bmbt_log_recs( XFS_BMBT_TRACE_CURSOR(cur, EXIT); } -/* - * Give the bmap btree a new root block. Copy the old broot contents - * down into a real block and make the broot point to it. 
- */ -int /* error */ -xfs_bmbt_newroot( - xfs_btree_cur_t *cur, /* btree cursor */ - int *logflags, /* logging flags for inode */ - int *stat) /* return status - 0 fail */ -{ - xfs_alloc_arg_t args; /* allocation arguments */ - xfs_bmbt_block_t *block; /* bmap btree block */ - xfs_buf_t *bp; /* buffer for block */ - xfs_bmbt_block_t *cblock; /* child btree block */ - xfs_bmbt_key_t *ckp; /* child key pointer */ - xfs_bmbt_ptr_t *cpp; /* child ptr pointer */ - int error; /* error return code */ -#ifdef DEBUG - int i; /* loop counter */ -#endif - xfs_bmbt_key_t *kp; /* pointer to bmap btree key */ - int level; /* btree level */ - xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - level = cur->bc_nlevels - 1; - block = xfs_bmbt_get_block(cur, level, &bp); - /* - * Copy the root into a real block. - */ - args.mp = cur->bc_mp; - pp = XFS_BMAP_PTR_IADDR(block, 1, cur); - args.tp = cur->bc_tp; - args.fsbno = cur->bc_private.b.firstblock; - args.mod = args.minleft = args.alignment = args.total = args.isfl = - args.userdata = args.minalignslop = 0; - args.minlen = args.maxlen = args.prod = 1; - args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; - args.firstblock = args.fsbno; - if (args.fsbno == NULLFSBLOCK) { -#ifdef DEBUG - if ((error = xfs_btree_check_lptr_disk(cur, *pp, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - args.fsbno = be64_to_cpu(*pp); - args.type = XFS_ALLOCTYPE_START_BNO; - } else if (cur->bc_private.b.flist->xbf_low) - args.type = XFS_ALLOCTYPE_START_BNO; - else - args.type = XFS_ALLOCTYPE_NEAR_BNO; - if ((error = xfs_alloc_vextent(&args))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (args.fsbno == NULLFSBLOCK) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - ASSERT(args.len == 1); - cur->bc_private.b.firstblock = args.fsbno; - cur->bc_private.b.allocated++; - cur->bc_private.b.ip->i_d.di_nblocks++; - XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip, - XFS_TRANS_DQ_BCOUNT, 1L); - bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0); - cblock = XFS_BUF_TO_BMBT_BLOCK(bp); - *cblock = *block; - be16_add_cpu(&block->bb_level, 1); - block->bb_numrecs = cpu_to_be16(1); - cur->bc_nlevels++; - cur->bc_ptrs[level + 1] = 1; - kp = XFS_BMAP_KEY_IADDR(block, 1, cur); - ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); - memcpy(ckp, kp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*kp)); - cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { - if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memcpy(cpp, pp, be16_to_cpu(cblock->bb_numrecs) * sizeof(*pp)); -#ifdef DEBUG - if ((error = xfs_btree_check_lptr(cur, args.fsbno, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - *pp = cpu_to_be64(args.fsbno); - xfs_iroot_realloc(cur->bc_private.b.ip, 1 - be16_to_cpu(cblock->bb_numrecs), - cur->bc_private.b.whichfork); - xfs_btree_setbuf(cur, level, bp); - /* - * Do all this logging at the end so that - * the root is at the right level. 
- */ - xfs_bmbt_log_block(cur, bp, XFS_BB_ALL_BITS); - xfs_bmbt_log_keys(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); - xfs_bmbt_log_ptrs(cur, bp, 1, be16_to_cpu(cblock->bb_numrecs)); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *logflags |= - XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); - *stat = 1; - return 0; -} - /* * Set all the fields in a bmap extent record from the arguments. */ diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 6bfd62ec54f..26fd8ace3e7 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -255,12 +255,6 @@ extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int); -/* - * Give the bmap btree a new root block. Copy the old broot contents - * down into a real block and make the broot point to it. - */ -extern int xfs_bmbt_newroot(struct xfs_btree_cur *cur, int *lflags, int *stat); - extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); extern void xfs_bmbt_set_allf(xfs_bmbt_rec_host_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 8de884c4dab..3b6e01dea66 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -2468,6 +2468,107 @@ error0: return error; } +/* + * Copy the old inode root contents into a real block and make the + * broot point to it. + */ +int /* error */ +xfs_btree_new_iroot( + struct xfs_btree_cur *cur, /* btree cursor */ + int *logflags, /* logging flags for inode */ + int *stat) /* return status - 0 fail */ +{ + struct xfs_buf *cbp; /* buffer for cblock */ + struct xfs_btree_block *block; /* btree block */ + struct xfs_btree_block *cblock; /* child btree block */ + union xfs_btree_key *ckp; /* child key pointer */ + union xfs_btree_ptr *cpp; /* child ptr pointer */ + union xfs_btree_key *kp; /* pointer to btree key */ + union xfs_btree_ptr *pp; /* pointer to block addr */ + union xfs_btree_ptr nptr; /* new block addr */ + int level; /* btree level */ + int error; /* error return code */ +#ifdef DEBUG + int i; /* loop counter */ +#endif + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_STATS_INC(cur, newroot); + + ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); + + level = cur->bc_nlevels - 1; + + block = xfs_btree_get_iroot(cur); + pp = xfs_btree_ptr_addr(cur, 1, block); + + /* Allocate the new block. If we can't do it, we're toast. Give up. */ + error = cur->bc_ops->alloc_block(cur, pp, &nptr, 1, stat); + if (error) + goto error0; + if (*stat == 0) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; + } + XFS_BTREE_STATS_INC(cur, alloc); + + /* Copy the root into a real block. 
*/ + error = xfs_btree_get_buf_block(cur, &nptr, 0, &cblock, &cbp); + if (error) + goto error0; + + memcpy(cblock, block, xfs_btree_block_len(cur)); + + be16_add_cpu(&block->bb_level, 1); + xfs_btree_set_numrecs(block, 1); + cur->bc_nlevels++; + cur->bc_ptrs[level + 1] = 1; + + kp = xfs_btree_key_addr(cur, 1, block); + ckp = xfs_btree_key_addr(cur, 1, cblock); + xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock)); + + cpp = xfs_btree_ptr_addr(cur, 1, cblock); +#ifdef DEBUG + for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { + error = xfs_btree_check_ptr(cur, pp, i, level); + if (error) + goto error0; + } +#endif + xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); + +#ifdef DEBUG + error = xfs_btree_check_ptr(cur, &nptr, 0, level); + if (error) + goto error0; +#endif + xfs_btree_copy_ptrs(cur, pp, &nptr, 1); + + xfs_iroot_realloc(cur->bc_private.b.ip, + 1 - xfs_btree_get_numrecs(cblock), + cur->bc_private.b.whichfork); + + xfs_btree_setbuf(cur, level, cbp); + + /* + * Do all this logging at the end so that + * the root is at the right level. + */ + xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); + xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); + xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); + + *logflags |= + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork); + *stat = 1; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + /* * Allocate a new root block, fill it in. */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 18015392feb..21eec863f00 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -548,6 +548,7 @@ int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, union xfs_btree_key *, struct xfs_btree_cur **, int *); int xfs_btree_new_root(struct xfs_btree_cur *, int *); +int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); /* * Helpers. -- cgit v1.2.3-70-g09d2 From 4b22a57188d87e873346b73c227607715be96399 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:57:40 +1100 Subject: [XFS] implement generic xfs_btree_insert/insrec Make the btree insert code generic. Based on a patch from David Chinner with lots of changes to follow the original btree implementations more closely. While this loses some of the generic helper routines for inserting/moving/removing records it also solves some of the one off bugs in the original code and makes it easier to verify. 
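For readers who want the control flow in isolation, the stand-alone C sketch below mimics the shape of the generic insert loop this patch introduces: insert at the leaf, and as long as a level reports that it had to split, carry the new block up and insert it at the next level. It is illustrative only, not kernel code; every demo_* name is invented for this example, and the per-level "split" decision is hard-coded so the upward propagation is visible.

#include <stdio.h>

#define DEMO_NULLBLOCK	(-1L)

struct demo_cursor {
	int	nlevels;		/* hypothetical tree height */
};

/*
 * Pretend per-level insert.  Returns the block number created by a split
 * (which the caller must then insert one level up), or DEMO_NULLBLOCK when
 * the record fit without splitting.  The two lowest levels always "split"
 * here purely so the demo shows the propagation.
 */
static long demo_insrec(struct demo_cursor *cur, int level)
{
	if (level < 2 && level < cur->nlevels - 1) {
		printf("level %d: block full, split produced block %ld\n",
		       level, 100L + level);
		return 100L + level;
	}
	printf("level %d: record inserted, no split\n", level);
	return DEMO_NULLBLOCK;
}

/*
 * Generic driver shaped like the insert loop: start at the leaf and keep
 * walking up while the level below reported a split.
 */
static void demo_insert(struct demo_cursor *cur)
{
	long	nbno;
	int	level = 0;

	do {
		nbno = demo_insrec(cur, level++);
	} while (nbno != DEMO_NULLBLOCK && level < cur->nlevels);
}

int main(void)
{
	struct demo_cursor cur = { .nlevels = 4 };

	demo_insert(&cur);
	return 0;
}

In the real routine each split also hands back a duplicated cursor and the first key of the new right block so the parent level knows what to insert; the sketch drops those details to keep the level-by-level loop readable.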
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32202a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 10 +- fs/xfs/xfs_alloc_btree.c | 339 ++++--------------------------------------- fs/xfs/xfs_alloc_btree.h | 6 - fs/xfs/xfs_bmap.c | 20 +-- fs/xfs/xfs_bmap_btree.c | 308 +++++++-------------------------------- fs/xfs/xfs_bmap_btree.h | 1 - fs/xfs/xfs_btree.c | 362 ++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 11 ++ fs/xfs/xfs_ialloc.c | 2 +- fs/xfs/xfs_ialloc_btree.c | 302 +++----------------------------------- fs/xfs/xfs_ialloc_btree.h | 6 - 11 files changed, 494 insertions(+), 873 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 875e1bae194..a983824c12b 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -408,7 +408,7 @@ xfs_alloc_fixup_trees( if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); - if ((error = xfs_alloc_insert(cnt_cur, &i))) + if ((error = xfs_btree_insert(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } @@ -416,7 +416,7 @@ xfs_alloc_fixup_trees( if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); - if ((error = xfs_alloc_insert(cnt_cur, &i))) + if ((error = xfs_btree_insert(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } @@ -444,7 +444,7 @@ xfs_alloc_fixup_trees( if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 0); - if ((error = xfs_alloc_insert(bno_cur, &i))) + if ((error = xfs_btree_insert(bno_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } @@ -1756,7 +1756,7 @@ xfs_free_ag_extent( else { nbno = bno; nlen = len; - if ((error = xfs_alloc_insert(bno_cur, &i))) + if ((error = xfs_btree_insert(bno_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); } @@ -1768,7 +1768,7 @@ xfs_free_ag_extent( if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 0, error0); - if ((error = xfs_alloc_insert(cnt_cur, &i))) + if ((error = xfs_btree_insert(cnt_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index f21a3e9cc3d..818adca77fc 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -582,256 +582,6 @@ error0: return error; } -/* - * Insert one record/level. Return information to the caller - * allowing the next level up to proceed if necessary. 
- */ -STATIC int /* error */ -xfs_alloc_insrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to insert record at */ - xfs_agblock_t *bnop, /* i/o: block number inserted */ - xfs_alloc_rec_t *recp, /* i/o: record data inserted */ - xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ - int *stat) /* output: success/failure */ -{ - xfs_agf_t *agf; /* allocation group freelist header */ - xfs_alloc_block_t *block; /* btree block record/key lives in */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop index */ - xfs_alloc_key_t key; /* key value being inserted */ - xfs_alloc_key_t *kp; /* pointer to btree keys */ - xfs_agblock_t nbno; /* block number of allocated block */ - xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ - xfs_alloc_key_t nkey; /* new key value, from split */ - xfs_alloc_rec_t nrec; /* new record value, for caller */ - int numrecs; - int optr; /* old ptr value */ - xfs_alloc_ptr_t *pp; /* pointer to btree addresses */ - int ptr; /* index in btree block for this rec */ - xfs_alloc_rec_t *rp; /* pointer to btree records */ - - ASSERT(be32_to_cpu(recp->ar_blockcount) > 0); - - /* - * GCC doesn't understand the (arguably complex) control flow in - * this function and complains about uninitialized structure fields - * without this. - */ - memset(&nrec, 0, sizeof(nrec)); - - /* - * If we made it to the root level, allocate a new root block - * and we're done. - */ - if (level >= cur->bc_nlevels) { - XFS_STATS_INC(xs_abt_insrec); - if ((error = xfs_btree_new_root(cur, &i))) - return error; - *bnop = NULLAGBLOCK; - *stat = i; - return 0; - } - /* - * Make a key out of the record data to be inserted, and save it. - */ - key.ar_startblock = recp->ar_startblock; - key.ar_blockcount = recp->ar_blockcount; - optr = ptr = cur->bc_ptrs[level]; - /* - * If we're off the left edge, return failure. - */ - if (ptr == 0) { - *stat = 0; - return 0; - } - XFS_STATS_INC(xs_abt_insrec); - /* - * Get pointers to the btree buffer and block. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - numrecs = be16_to_cpu(block->bb_numrecs); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; - /* - * Check that the new entry is being inserted in the right place. - */ - if (ptr <= numrecs) { - if (level == 0) { - rp = XFS_ALLOC_REC_ADDR(block, ptr, cur); - xfs_btree_check_rec(cur->bc_btnum, recp, rp); - } else { - kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur); - xfs_btree_check_key(cur->bc_btnum, &key, kp); - } - } -#endif - nbno = NULLAGBLOCK; - ncur = NULL; - /* - * If the block is full, we can't insert the new entry until we - * make the block un-full. - */ - if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { - /* - * First, try shifting an entry to the right neighbor. - */ - if ((error = xfs_btree_rshift(cur, level, &i))) - return error; - if (i) { - /* nothing */ - } - /* - * Next, try shifting an entry to the left neighbor. - */ - else { - if ((error = xfs_btree_lshift(cur, level, &i))) - return error; - if (i) - optr = ptr = cur->bc_ptrs[level]; - else { - union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) }; - /* - * Next, try splitting the current block in - * half. If this works we have to re-set our - * variables because we could be in a - * different block now. 
- */ - if ((error = xfs_btree_split(cur, level, &bno, - (union xfs_btree_key *)&nkey, - &ncur, &i))) - return error; - nbno = be32_to_cpu(bno.s); - if (i) { - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); -#ifdef DEBUG - if ((error = - xfs_btree_check_sblock(cur, - block, level, bp))) - return error; -#endif - ptr = cur->bc_ptrs[level]; - nrec.ar_startblock = nkey.ar_startblock; - nrec.ar_blockcount = nkey.ar_blockcount; - } - /* - * Otherwise the insert fails. - */ - else { - *stat = 0; - return 0; - } - } - } - } - /* - * At this point we know there's room for our new entry in the block - * we're pointing at. - */ - numrecs = be16_to_cpu(block->bb_numrecs); - if (level > 0) { - /* - * It's a non-leaf entry. Make a hole for the new data - * in the key and ptr regions of the block. - */ - kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); - pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); -#ifdef DEBUG - for (i = numrecs; i >= ptr; i--) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) - return error; - } -#endif - memmove(&kp[ptr], &kp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*kp)); - memmove(&pp[ptr], &pp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*pp)); -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, *bnop, level))) - return error; -#endif - /* - * Now stuff the new data in, bump numrecs and log the new data. - */ - kp[ptr - 1] = key; - pp[ptr - 1] = cpu_to_be32(*bnop); - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_alloc_log_keys(cur, bp, ptr, numrecs); - xfs_alloc_log_ptrs(cur, bp, ptr, numrecs); -#ifdef DEBUG - if (ptr < numrecs) - xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, - kp + ptr); -#endif - } else { - /* - * It's a leaf entry. Make a hole for the new record. - */ - rp = XFS_ALLOC_REC_ADDR(block, 1, cur); - memmove(&rp[ptr], &rp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*rp)); - /* - * Now stuff the new record in, bump numrecs - * and log the new data. - */ - rp[ptr - 1] = *recp; - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_alloc_log_recs(cur, bp, ptr, numrecs); -#ifdef DEBUG - if (ptr < numrecs) - xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, - rp + ptr); -#endif - } - /* - * Log the new number of records in the btree header. - */ - xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); - /* - * If we inserted at the start of a block, update the parents' keys. - */ - if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) - return error; - /* - * Look to see if the longest extent in the allocation group - * needs to be updated. - */ - - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - if (level == 0 && - cur->bc_btnum == XFS_BTNUM_CNT && - be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && - be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) { - /* - * If this is a leaf in the by-size btree and there - * is no right sibling block and this block is bigger - * than the previous longest block, update it. - */ - agf->agf_longest = recp->ar_blockcount; - cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest - = be32_to_cpu(recp->ar_blockcount); - xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, - XFS_AGF_LONGEST); - } - /* - * Return the new block number, if any. - * If there is one, give back a record value and a cursor too. - */ - *bnop = nbno; - if (nbno != NULLAGBLOCK) { - *recp = nrec; - *curp = ncur; - } - *stat = 1; - return 0; -} - /* * Log header fields from a btree block. 
*/ @@ -1019,65 +769,6 @@ xfs_alloc_get_rec( return 0; } -/* - * Insert the current record at the point referenced by cur. - * The cursor may be inconsistent on return if splits have been done. - */ -int /* error */ -xfs_alloc_insert( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* result value, 0 for failure */ - int level; /* current level number in btree */ - xfs_agblock_t nbno; /* new block number (split result) */ - xfs_btree_cur_t *ncur; /* new cursor (split result) */ - xfs_alloc_rec_t nrec; /* record being inserted this level */ - xfs_btree_cur_t *pcur; /* previous level's cursor */ - - level = 0; - nbno = NULLAGBLOCK; - nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); - nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); - ncur = NULL; - pcur = cur; - /* - * Loop going up the tree, starting at the leaf level. - * Stop when we don't get a split block, that must mean that - * the insert is finished with this level. - */ - do { - /* - * Insert nrec/nbno into this level of the tree. - * Note if we fail, nbno will be null. - */ - if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur, - &i))) { - if (pcur != cur) - xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); - return error; - } - /* - * See if the cursor we just used is trash. - * Can't trash the caller's cursor, but otherwise we should - * if ncur is a new cursor or we're about to be done. - */ - if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { - cur->bc_nlevels = pcur->bc_nlevels; - xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); - } - /* - * If we got a new cursor, switch to it. - */ - if (ncur) { - pcur = ncur; - ncur = NULL; - } - } while (nbno != NULLAGBLOCK); - *stat = i; - return 0; -} STATIC struct xfs_btree_cur * xfs_allocbt_dup_cursor( @@ -1170,6 +861,12 @@ xfs_allocbt_update_lastrec( return; len = rec->alloc.ar_blockcount; break; + case LASTREC_INSREC: + if (be32_to_cpu(rec->alloc.ar_blockcount) <= + be32_to_cpu(agf->agf_longest)) + return; + len = rec->alloc.ar_blockcount; + break; default: ASSERT(0); return; @@ -1199,6 +896,28 @@ xfs_allocbt_init_key_from_rec( key->alloc.ar_blockcount = rec->alloc.ar_blockcount; } +STATIC void +xfs_allocbt_init_rec_from_key( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + ASSERT(key->alloc.ar_startblock != 0); + + rec->alloc.ar_startblock = key->alloc.ar_startblock; + rec->alloc.ar_blockcount = key->alloc.ar_blockcount; +} + +STATIC void +xfs_allocbt_init_rec_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + ASSERT(cur->bc_rec.a.ar_startblock != 0); + + rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock); + rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount); +} + STATIC void xfs_allocbt_init_ptr_from_cur( struct xfs_btree_cur *cur, @@ -1309,6 +1028,8 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .update_lastrec = xfs_allocbt_update_lastrec, .get_maxrecs = xfs_allocbt_get_maxrecs, .init_key_from_rec = xfs_allocbt_init_key_from_rec, + .init_rec_from_key = xfs_allocbt_init_rec_from_key, + .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, .key_diff = xfs_allocbt_key_diff, diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 81e2f360781..2e340ef8025 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -107,12 +107,6 @@ extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat); extern int 
xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno, xfs_extlen_t *len, int *stat); -/* - * Insert the current record at the point referenced by cur. - * The cursor may be inconsistent on return if splits have been done. - */ -extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat); - extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 315bc291268..85e2e8b9cf4 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -977,7 +977,7 @@ xfs_bmap_add_extent_delay_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1053,7 +1053,7 @@ xfs_bmap_add_extent_delay_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1143,7 +1143,7 @@ xfs_bmap_add_extent_delay_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1198,7 +1198,7 @@ xfs_bmap_add_extent_delay_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1651,7 +1651,7 @@ xfs_bmap_add_extent_unwritten_real( oldext))) goto done; cur->bc_rec.b = *new; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1741,7 +1741,7 @@ xfs_bmap_add_extent_unwritten_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = XFS_EXT_NORM; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -1789,7 +1789,7 @@ xfs_bmap_add_extent_unwritten_real( cur->bc_rec.b = PREV; cur->bc_rec.b.br_blockcount = new->br_startoff - PREV.br_startoff; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); /* @@ -1804,7 +1804,7 @@ xfs_bmap_add_extent_unwritten_real( XFS_WANT_CORRUPTED_GOTO(i == 0, done); /* new middle extent - newext */ cur->bc_rec.b.br_state = new->br_state; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -2264,7 +2264,7 @@ xfs_bmap_add_extent_hole_real( goto done; XFS_WANT_CORRUPTED_GOTO(i == 0, done); cur->bc_rec.b.br_state = new->br_state; - if ((error = xfs_bmbt_insert(cur, &i))) + if ((error = xfs_btree_insert(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); } @@ -3303,7 +3303,7 @@ xfs_bmap_del_extent( if ((error = xfs_btree_increment(cur, 0, &i))) goto done; cur->bc_rec.b = new; - error = xfs_bmbt_insert(cur, &i); + error = xfs_btree_insert(cur, &i); if (error && error != ENOSPC) goto done; /* diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 204f276aeaa..2b15df32b7d 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -456,198 +456,6 @@ error0: return error; } -/* - * Insert one record/level. 
Return information to the caller - * allowing the next level up to proceed if necessary. - */ -STATIC int /* error */ -xfs_bmbt_insrec( - xfs_btree_cur_t *cur, - int level, - xfs_fsblock_t *bnop, - xfs_bmbt_rec_t *recp, - xfs_btree_cur_t **curp, - int *stat) /* no-go/done/continue */ -{ - xfs_bmbt_block_t *block; /* bmap btree block */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop index */ - xfs_bmbt_key_t key; /* bmap btree key */ - xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ - int logflags; /* inode logging flags */ - xfs_fsblock_t nbno; /* new block number */ - struct xfs_btree_cur *ncur; /* new btree cursor */ - __uint64_t startoff; /* new btree key value */ - xfs_bmbt_rec_t nrec; /* new record count */ - int optr; /* old key/record index */ - xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ - int ptr; /* key/record index */ - xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */ - int numrecs; - - ASSERT(level < cur->bc_nlevels); - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp); - ncur = NULL; - key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp)); - optr = ptr = cur->bc_ptrs[level]; - if (ptr == 0) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - XFS_STATS_INC(xs_bmbt_insrec); - block = xfs_bmbt_get_block(cur, level, &bp); - numrecs = be16_to_cpu(block->bb_numrecs); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (ptr <= numrecs) { - if (level == 0) { - rp = XFS_BMAP_REC_IADDR(block, ptr, cur); - xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp); - } else { - kp = XFS_BMAP_KEY_IADDR(block, ptr, cur); - xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp); - } - } -#endif - nbno = NULLFSBLOCK; - if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { - if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { - /* - * A root block, that can be made bigger. 
- */ - xfs_iroot_realloc(cur->bc_private.b.ip, 1, - cur->bc_private.b.whichfork); - block = xfs_bmbt_get_block(cur, level, &bp); - } else if (level == cur->bc_nlevels - 1) { - if ((error = xfs_btree_new_iroot(cur, &logflags, stat)) || - *stat == 0) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, - logflags); - block = xfs_bmbt_get_block(cur, level, &bp); - } else { - if ((error = xfs_btree_rshift(cur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (i) { - /* nothing */ - } else { - if ((error = xfs_btree_lshift(cur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - if (i) { - optr = ptr = cur->bc_ptrs[level]; - } else { - union xfs_btree_ptr bno = { .l = cpu_to_be64(nbno) }; - union xfs_btree_key skey; - if ((error = xfs_btree_split(cur, level, - &bno, &skey, &ncur, - &i))) { - XFS_BMBT_TRACE_CURSOR(cur, - ERROR); - return error; - } - nbno = be64_to_cpu(bno.l); - startoff = be64_to_cpu(skey.bmbt.br_startoff); - if (i) { - block = xfs_bmbt_get_block( - cur, level, &bp); -#ifdef DEBUG - if ((error = - xfs_btree_check_lblock(cur, - block, level, bp))) { - XFS_BMBT_TRACE_CURSOR( - cur, ERROR); - return error; - } -#endif - ptr = cur->bc_ptrs[level]; - xfs_bmbt_disk_set_allf(&nrec, - startoff, 0, 0, - XFS_EXT_NORM); - } else { - XFS_BMBT_TRACE_CURSOR(cur, - EXIT); - *stat = 0; - return 0; - } - } - } - } - } - numrecs = be16_to_cpu(block->bb_numrecs); - if (level > 0) { - kp = XFS_BMAP_KEY_IADDR(block, 1, cur); - pp = XFS_BMAP_PTR_IADDR(block, 1, cur); -#ifdef DEBUG - for (i = numrecs; i >= ptr; i--) { - if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1], - level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memmove(&kp[ptr], &kp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*kp)); - memmove(&pp[ptr], &pp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*pp)); -#ifdef DEBUG - if ((error = xfs_btree_check_lptr(cur, *bnop, level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } -#endif - kp[ptr - 1] = key; - pp[ptr - 1] = cpu_to_be64(*bnop); - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_bmbt_log_keys(cur, bp, ptr, numrecs); - xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs); - } else { - rp = XFS_BMAP_REC_IADDR(block, 1, cur); - memmove(&rp[ptr], &rp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*rp)); - rp[ptr - 1] = *recp; - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_bmbt_log_recs(cur, bp, ptr, numrecs); - } - xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); -#ifdef DEBUG - if (ptr < numrecs) { - if (level == 0) - xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1, - rp + ptr); - else - xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1, - kp + ptr); - } -#endif - if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - *bnop = nbno; - if (nbno != NULLFSBLOCK) { - *recp = nrec; - *curp = ncur; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; -} - STATIC int xfs_bmbt_killroot( xfs_btree_cur_t *cur) @@ -1059,67 +867,6 @@ xfs_bmbt_disk_get_startoff( XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; } -/* - * Insert the current record at the point referenced by cur. - * - * A multi-level split of the tree on insert will invalidate the original - * cursor. All callers of this function should assume that the cursor is - * no longer valid and revalidate it. 
- */ -int /* error */ -xfs_bmbt_insert( - xfs_btree_cur_t *cur, - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; - int level; - xfs_fsblock_t nbno; - xfs_btree_cur_t *ncur; - xfs_bmbt_rec_t nrec; - xfs_btree_cur_t *pcur; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - level = 0; - nbno = NULLFSBLOCK; - xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b); - ncur = NULL; - pcur = cur; - do { - if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur, - &i))) { - if (pcur != cur) - xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) { - cur->bc_nlevels = pcur->bc_nlevels; - cur->bc_private.b.allocated += - pcur->bc_private.b.allocated; - pcur->bc_private.b.allocated = 0; - ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || - XFS_IS_REALTIME_INODE(cur->bc_private.b.ip)); - cur->bc_private.b.firstblock = - pcur->bc_private.b.firstblock; - ASSERT(cur->bc_private.b.flist == - pcur->bc_private.b.flist); - xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); - } - if (ncur) { - pcur = ncur; - ncur = NULL; - } - } while (nbno != NULLFSBLOCK); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = i; - return 0; -error0: - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; -} - /* * Log fields from the btree block header. */ @@ -1450,6 +1197,21 @@ xfs_bmbt_dup_cursor( return new; } +STATIC void +xfs_bmbt_update_cursor( + struct xfs_btree_cur *src, + struct xfs_btree_cur *dst) +{ + ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) || + (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME)); + ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist); + + dst->bc_private.b.allocated += src->bc_private.b.allocated; + dst->bc_private.b.firstblock = src->bc_private.b.firstblock; + + src->bc_private.b.allocated = 0; +} + STATIC int xfs_bmbt_alloc_block( struct xfs_btree_cur *cur, @@ -1544,6 +1306,23 @@ xfs_bmbt_get_maxrecs( return XFS_BMAP_BLOCK_IMAXRECS(level, cur); } +/* + * Get the maximum records we could store in the on-disk format. + * + * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but + * for the root node this checks the available space in the dinode fork + * so that we can resize the in-memory buffer to match it. After a + * resize to the maximum size this function returns the same value + * as xfs_bmbt_get_maxrecs for the root node, too. 
+ */ +STATIC int +xfs_bmbt_get_dmaxrecs( + struct xfs_btree_cur *cur, + int level) +{ + return XFS_BMAP_BLOCK_DMAXRECS(level, cur); +} + STATIC void xfs_bmbt_init_key_from_rec( union xfs_btree_key *key, @@ -1553,6 +1332,25 @@ xfs_bmbt_init_key_from_rec( cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt)); } +STATIC void +xfs_bmbt_init_rec_from_key( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + ASSERT(key->bmbt.br_startoff != 0); + + xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff), + 0, 0, XFS_EXT_NORM); +} + +STATIC void +xfs_bmbt_init_rec_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b); +} + STATIC void xfs_bmbt_init_ptr_from_cur( struct xfs_btree_cur *cur, @@ -1660,9 +1458,13 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .key_len = sizeof(xfs_bmbt_key_t), .dup_cursor = xfs_bmbt_dup_cursor, + .update_cursor = xfs_bmbt_update_cursor, .alloc_block = xfs_bmbt_alloc_block, .get_maxrecs = xfs_bmbt_get_maxrecs, + .get_dmaxrecs = xfs_bmbt_get_dmaxrecs, .init_key_from_rec = xfs_bmbt_init_key_from_rec, + .init_rec_from_key = xfs_bmbt_init_rec_from_key, + .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, .key_diff = xfs_bmbt_key_diff, diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 26fd8ace3e7..703fe2e3434 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -250,7 +250,6 @@ extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r); extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r); -extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *); extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 3b6e01dea66..36477aae77d 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -963,6 +963,17 @@ xfs_btree_ptr_is_null( return be32_to_cpu(ptr->s) == NULLAGBLOCK; } +STATIC void +xfs_btree_set_ptr_null( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + ptr->l = cpu_to_be64(NULLFSBLOCK); + else + ptr->s = cpu_to_be32(NULLAGBLOCK); +} + /* * Get/set/init sibling pointers */ @@ -2697,3 +2708,354 @@ out0: *stat = 0; return 0; } + +STATIC int +xfs_btree_make_block_unfull( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* btree level */ + int numrecs,/* # of recs in block */ + int *oindex,/* old tree index */ + int *index, /* new tree index */ + union xfs_btree_ptr *nptr, /* new btree ptr */ + struct xfs_btree_cur **ncur, /* new btree cursor */ + union xfs_btree_rec *nrec, /* new record */ + int *stat) +{ + union xfs_btree_key key; /* new btree key value */ + int error = 0; + + if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + level == cur->bc_nlevels - 1) { + struct xfs_inode *ip = cur->bc_private.b.ip; + + if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { + /* A root block that can be made bigger. */ + + xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork); + } else { + /* A root block that needs replacing */ + int logflags = 0; + + error = xfs_btree_new_iroot(cur, &logflags, stat); + if (error || *stat == 0) + return error; + + xfs_trans_log_inode(cur->bc_tp, ip, logflags); + } + + return 0; + } + + /* First, try shifting an entry to the right neighbor. 
*/ + error = xfs_btree_rshift(cur, level, stat); + if (error || *stat) + return error; + + /* Next, try shifting an entry to the left neighbor. */ + error = xfs_btree_lshift(cur, level, stat); + if (error) + return error; + + if (*stat) { + *oindex = *index = cur->bc_ptrs[level]; + return 0; + } + + /* + * Next, try splitting the current block in half. + * + * If this works we have to re-set our variables because we + * could be in a different block now. + */ + error = xfs_btree_split(cur, level, nptr, &key, ncur, stat); + if (error || *stat == 0) + return error; + + + *index = cur->bc_ptrs[level]; + cur->bc_ops->init_rec_from_key(&key, nrec); + return 0; +} + +/* + * Insert one record/level. Return information to the caller + * allowing the next level up to proceed if necessary. + */ +STATIC int +xfs_btree_insrec( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level to insert record at */ + union xfs_btree_ptr *ptrp, /* i/o: block number inserted */ + union xfs_btree_rec *recp, /* i/o: record data inserted */ + struct xfs_btree_cur **curp, /* output: new cursor replacing cur */ + int *stat) /* success/failure */ +{ + struct xfs_btree_block *block; /* btree block */ + struct xfs_buf *bp; /* buffer for block */ + union xfs_btree_key key; /* btree key */ + union xfs_btree_ptr nptr; /* new block ptr */ + struct xfs_btree_cur *ncur; /* new btree cursor */ + union xfs_btree_rec nrec; /* new record count */ + int optr; /* old key/record index */ + int ptr; /* key/record index */ + int numrecs;/* number of records */ + int error; /* error return value */ +#ifdef DEBUG + int i; +#endif + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp); + + ncur = NULL; + + /* + * If we have an external root pointer, and we've made it to the + * root level, allocate a new root block and we're done. + */ + if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && + (level >= cur->bc_nlevels)) { + error = xfs_btree_new_root(cur, stat); + xfs_btree_set_ptr_null(cur, ptrp); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return error; + } + + /* If we're off the left edge, return failure. */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + + /* Make a key out of the record data to be inserted, and save it. */ + cur->bc_ops->init_key_from_rec(&key, recp); + + optr = ptr; + + XFS_BTREE_STATS_INC(cur, insrec); + + /* Get pointers to the btree buffer and block. */ + block = xfs_btree_get_block(cur, level, &bp); + numrecs = xfs_btree_get_numrecs(block); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) + goto error0; + + /* Check that the new entry is being inserted in the right place. */ + if (ptr <= numrecs) { + if (level == 0) { + xfs_btree_check_rec(cur->bc_btnum, recp, + xfs_btree_rec_addr(cur, ptr, block)); + } else { + xfs_btree_check_key(cur->bc_btnum, &key, + xfs_btree_key_addr(cur, ptr, block)); + } + } +#endif + + /* + * If the block is full, we can't insert the new entry until we + * make the block un-full. + */ + xfs_btree_set_ptr_null(cur, &nptr); + if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) { + error = xfs_btree_make_block_unfull(cur, level, numrecs, + &optr, &ptr, &nptr, &ncur, &nrec, stat); + if (error || *stat == 0) + goto error0; + } + + /* + * The current block may have changed if the block was + * previously full and we have just made space in it. 
+ */ + block = xfs_btree_get_block(cur, level, &bp); + numrecs = xfs_btree_get_numrecs(block); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) + return error; +#endif + + /* + * At this point we know there's room for our new entry in the block + * we're pointing at. + */ + XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1); + + if (level > 0) { + /* It's a nonleaf. make a hole in the keys and ptrs */ + union xfs_btree_key *kp; + union xfs_btree_ptr *pp; + + kp = xfs_btree_key_addr(cur, ptr, block); + pp = xfs_btree_ptr_addr(cur, ptr, block); + +#ifdef DEBUG + for (i = numrecs - ptr; i >= 0; i--) { + error = xfs_btree_check_ptr(cur, pp, i, level); + if (error) + return error; + } +#endif + + xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1); + xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); + +#ifdef DEBUG + error = xfs_btree_check_ptr(cur, ptrp, 0, level); + if (error) + goto error0; +#endif + + /* Now put the new data in, bump numrecs and log it. */ + xfs_btree_copy_keys(cur, kp, &key, 1); + xfs_btree_copy_ptrs(cur, pp, ptrp, 1); + numrecs++; + xfs_btree_set_numrecs(block, numrecs); + xfs_btree_log_ptrs(cur, bp, ptr, numrecs); + xfs_btree_log_keys(cur, bp, ptr, numrecs); +#ifdef DEBUG + if (ptr < numrecs) { + xfs_btree_check_key(cur->bc_btnum, kp, + xfs_btree_key_addr(cur, ptr + 1, block)); + } +#endif + } else { + /* It's a leaf. make a hole in the records */ + union xfs_btree_rec *rp; + + rp = xfs_btree_rec_addr(cur, ptr, block); + + xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1); + + /* Now put the new data in, bump numrecs and log it. */ + xfs_btree_copy_recs(cur, rp, recp, 1); + xfs_btree_set_numrecs(block, ++numrecs); + xfs_btree_log_recs(cur, bp, ptr, numrecs); +#ifdef DEBUG + if (ptr < numrecs) { + xfs_btree_check_rec(cur->bc_btnum, rp, + xfs_btree_rec_addr(cur, ptr + 1, block)); + } +#endif + } + + /* Log the new number of records in the btree header. */ + xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); + + /* If we inserted at the start of a block, update the parents' keys. */ + if (optr == 1) { + error = xfs_btree_updkey(cur, &key, level + 1); + if (error) + goto error0; + } + + /* + * If we are tracking the last record in the tree and + * we are at the far right edge of the tree, update it. + */ + if (xfs_btree_is_lastrec(cur, block, level)) { + cur->bc_ops->update_lastrec(cur, block, recp, + ptr, LASTREC_INSREC); + } + + /* + * Return the new block number, if any. + * If there is one, give back a record value and a cursor too. + */ + *ptrp = nptr; + if (!xfs_btree_ptr_is_null(cur, &nptr)) { + *recp = nrec; + *curp = ncur; + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} + +/* + * Insert the record at the point referenced by cur. + * + * A multi-level split of the tree on insert will invalidate the original + * cursor. All callers of this function should assume that the cursor is + * no longer valid and revalidate it. 
+ */ +int +xfs_btree_insert( + struct xfs_btree_cur *cur, + int *stat) +{ + int error; /* error return value */ + int i; /* result value, 0 for failure */ + int level; /* current level number in btree */ + union xfs_btree_ptr nptr; /* new block number (split result) */ + struct xfs_btree_cur *ncur; /* new cursor (split result) */ + struct xfs_btree_cur *pcur; /* previous level's cursor */ + union xfs_btree_rec rec; /* record to insert */ + + level = 0; + ncur = NULL; + pcur = cur; + + xfs_btree_set_ptr_null(cur, &nptr); + cur->bc_ops->init_rec_from_cur(cur, &rec); + + /* + * Loop going up the tree, starting at the leaf level. + * Stop when we don't get a split block, that must mean that + * the insert is finished with this level. + */ + do { + /* + * Insert nrec/nptr into this level of the tree. + * Note if we fail, nptr will be null. + */ + error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i); + if (error) { + if (pcur != cur) + xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); + goto error0; + } + + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + level++; + + /* + * See if the cursor we just used is trash. + * Can't trash the caller's cursor, but otherwise we should + * if ncur is a new cursor or we're about to be done. + */ + if (pcur != cur && + (ncur || xfs_btree_ptr_is_null(cur, &nptr))) { + /* Save the state from the cursor before we trash it */ + if (cur->bc_ops->update_cursor) + cur->bc_ops->update_cursor(pcur, cur); + cur->bc_nlevels = pcur->bc_nlevels; + xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); + } + /* If we got a new cursor, switch to it. */ + if (ncur) { + pcur = ncur; + ncur = NULL; + } + } while (!xfs_btree_ptr_is_null(cur, &nptr)); + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = i; + return 0; +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 21eec863f00..6f03871f599 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -186,6 +186,8 @@ struct xfs_btree_ops { /* cursor operations */ struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); + void (*update_cursor)(struct xfs_btree_cur *src, + struct xfs_btree_cur *dst); /* update btree root pointer */ void (*set_root)(struct xfs_btree_cur *cur, @@ -206,9 +208,16 @@ struct xfs_btree_ops { /* records in block/level */ int (*get_maxrecs)(struct xfs_btree_cur *cur, int level); + /* records on disk. Matter for the root in inode case. */ + int (*get_dmaxrecs)(struct xfs_btree_cur *cur, int level); + /* init values of btree structures */ void (*init_key_from_rec)(union xfs_btree_key *key, union xfs_btree_rec *rec); + void (*init_rec_from_key)(union xfs_btree_key *key, + union xfs_btree_rec *rec); + void (*init_rec_from_cur)(struct xfs_btree_cur *cur, + union xfs_btree_rec *rec); void (*init_ptr_from_cur)(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr); @@ -240,6 +249,7 @@ struct xfs_btree_ops { * Reasons for the update_lastrec method to be called. */ #define LASTREC_UPDATE 0 +#define LASTREC_INSREC 1 /* @@ -549,6 +559,7 @@ int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, union xfs_btree_key *, struct xfs_btree_cur **, int *); int xfs_btree_new_root(struct xfs_btree_cur *, int *); int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); +int xfs_btree_insert(struct xfs_btree_cur *, int *); /* * Helpers. 
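To illustrate how callers drive the generic insert path after this conversion, here is a minimal sketch. It is not taken from the patches; the function name and its agbno/aglen parameters are invented for the example. It follows the call pattern visible in the xfs_bmap.c and xfs_ialloc.c hunks above: the caller describes the new record in the cursor's in-core bc_rec union and then calls xfs_btree_insert(), relying on the per-tree init_rec_from_cur operation to build the on-disk record.

	/*
	 * Hypothetical caller (illustration only): add a freespace extent of
	 * length 'aglen' starting at 'agbno' to an allocation btree.  Cursor
	 * setup and the lookup that positions it are assumed to have been
	 * done already by the caller.
	 */
	static int				/* error */
	example_insert_free_extent(
		struct xfs_btree_cur	*cur,	/* bno or cnt btree cursor */
		xfs_agblock_t		agbno,	/* start of new extent */
		xfs_extlen_t		aglen)	/* length of new extent */
	{
		int			error;	/* error return value */
		int			i;	/* btree operation status */

		/*
		 * Describe the record in cpu-endian form; the btree type's
		 * init_rec_from_cur method (xfs_allocbt_init_rec_from_cur
		 * here) performs the cpu_to_be32() conversions that the old
		 * xfs_alloc_insert() open-coded.
		 */
		cur->bc_rec.a.ar_startblock = agbno;
		cur->bc_rec.a.ar_blockcount = aglen;

		if ((error = xfs_btree_insert(cur, &i)))
			return error;
		XFS_WANT_CORRUPTED_RETURN(i == 1);
		return 0;
	}

Because xfs_btree_insert() may split interior blocks and work on duplicated cursors internally, a caller should treat the original cursor as repositioned (and after a multi-level split, invalid) once the call returns, as the comment above xfs_btree_insert() states.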
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 138651afd44..b68e73bb17c 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -418,7 +418,7 @@ xfs_ialloc_ag_alloc( return error; } ASSERT(i == 0); - if ((error = xfs_inobt_insert(cur, &i))) { + if ((error = xfs_btree_insert(cur, &i))) { xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); return error; } diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 7ba3c7bb398..8f66e272056 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -514,228 +514,6 @@ error0: return error; } -/* - * Insert one record/level. Return information to the caller - * allowing the next level up to proceed if necessary. - */ -STATIC int /* error */ -xfs_inobt_insrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level to insert record at */ - xfs_agblock_t *bnop, /* i/o: block number inserted */ - xfs_inobt_rec_t *recp, /* i/o: record data inserted */ - xfs_btree_cur_t **curp, /* output: new cursor replacing cur */ - int *stat) /* success/failure */ -{ - xfs_inobt_block_t *block; /* btree block record/key lives in */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop index */ - xfs_inobt_key_t key; /* key value being inserted */ - xfs_inobt_key_t *kp=NULL; /* pointer to btree keys */ - xfs_agblock_t nbno; /* block number of allocated block */ - xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */ - xfs_inobt_key_t nkey; /* new key value, from split */ - xfs_inobt_rec_t nrec; /* new record value, for caller */ - int numrecs; - int optr; /* old ptr value */ - xfs_inobt_ptr_t *pp; /* pointer to btree addresses */ - int ptr; /* index in btree block for this rec */ - xfs_inobt_rec_t *rp=NULL; /* pointer to btree records */ - - /* - * GCC doesn't understand the (arguably complex) control flow in - * this function and complains about uninitialized structure fields - * without this. - */ - memset(&nrec, 0, sizeof(nrec)); - - /* - * If we made it to the root level, allocate a new root block - * and we're done. - */ - if (level >= cur->bc_nlevels) { - error = xfs_btree_new_root(cur, &i); - *bnop = NULLAGBLOCK; - *stat = i; - return error; - } - /* - * Make a key out of the record data to be inserted, and save it. - */ - key.ir_startino = recp->ir_startino; - optr = ptr = cur->bc_ptrs[level]; - /* - * If we're off the left edge, return failure. - */ - if (ptr == 0) { - *stat = 0; - return 0; - } - /* - * Get pointers to the btree buffer and block. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); - numrecs = be16_to_cpu(block->bb_numrecs); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; - /* - * Check that the new entry is being inserted in the right place. - */ - if (ptr <= numrecs) { - if (level == 0) { - rp = XFS_INOBT_REC_ADDR(block, ptr, cur); - xfs_btree_check_rec(cur->bc_btnum, recp, rp); - } else { - kp = XFS_INOBT_KEY_ADDR(block, ptr, cur); - xfs_btree_check_key(cur->bc_btnum, &key, kp); - } - } -#endif - nbno = NULLAGBLOCK; - ncur = NULL; - /* - * If the block is full, we can't insert the new entry until we - * make the block un-full. - */ - if (numrecs == XFS_INOBT_BLOCK_MAXRECS(level, cur)) { - /* - * First, try shifting an entry to the right neighbor. - */ - if ((error = xfs_btree_rshift(cur, level, &i))) - return error; - if (i) { - /* nothing */ - } - /* - * Next, try shifting an entry to the left neighbor. 
- */ - else { - if ((error = xfs_btree_lshift(cur, level, &i))) - return error; - if (i) { - optr = ptr = cur->bc_ptrs[level]; - } else { - union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) }; - /* - * Next, try splitting the current block - * in half. If this works we have to - * re-set our variables because - * we could be in a different block now. - */ - if ((error = xfs_btree_split(cur, level, &bno, - (union xfs_btree_key *)&nkey, - &ncur, &i))) - return error; - nbno = be32_to_cpu(bno.s); - if (i) { - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, - block, level, bp))) - return error; -#endif - ptr = cur->bc_ptrs[level]; - nrec.ir_startino = nkey.ir_startino; - } else { - /* - * Otherwise the insert fails. - */ - *stat = 0; - return 0; - } - } - } - } - /* - * At this point we know there's room for our new entry in the block - * we're pointing at. - */ - numrecs = be16_to_cpu(block->bb_numrecs); - if (level > 0) { - /* - * It's a non-leaf entry. Make a hole for the new data - * in the key and ptr regions of the block. - */ - kp = XFS_INOBT_KEY_ADDR(block, 1, cur); - pp = XFS_INOBT_PTR_ADDR(block, 1, cur); -#ifdef DEBUG - for (i = numrecs; i >= ptr; i--) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level))) - return error; - } -#endif - memmove(&kp[ptr], &kp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*kp)); - memmove(&pp[ptr], &pp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*pp)); - /* - * Now stuff the new data in, bump numrecs and log the new data. - */ -#ifdef DEBUG - if ((error = xfs_btree_check_sptr(cur, *bnop, level))) - return error; -#endif - kp[ptr - 1] = key; - pp[ptr - 1] = cpu_to_be32(*bnop); - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_inobt_log_keys(cur, bp, ptr, numrecs); - xfs_inobt_log_ptrs(cur, bp, ptr, numrecs); - } else { - /* - * It's a leaf entry. Make a hole for the new record. - */ - rp = XFS_INOBT_REC_ADDR(block, 1, cur); - memmove(&rp[ptr], &rp[ptr - 1], - (numrecs - ptr + 1) * sizeof(*rp)); - /* - * Now stuff the new record in, bump numrecs - * and log the new data. - */ - rp[ptr - 1] = *recp; - numrecs++; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_inobt_log_recs(cur, bp, ptr, numrecs); - } - /* - * Log the new number of records in the btree header. - */ - xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); -#ifdef DEBUG - /* - * Check that the key/record is in the right place, now. - */ - if (ptr < numrecs) { - if (level == 0) - xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1, - rp + ptr); - else - xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1, - kp + ptr); - } -#endif - /* - * If we inserted at the start of a block, update the parents' keys. - */ - if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) - return error; - /* - * Return the new block number, if any. - * If there is one, give back a record value and a cursor too. - */ - *bnop = nbno; - if (nbno != NULLAGBLOCK) { - *recp = nrec; - *curp = ncur; - } - *stat = 1; - return 0; -} - /* * Log header fields from a btree block. */ @@ -912,66 +690,6 @@ xfs_inobt_get_rec( return 0; } -/* - * Insert the current record at the point referenced by cur. - * The cursor may be inconsistent on return if splits have been done. 
- */ -int /* error */ -xfs_inobt_insert( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* result value, 0 for failure */ - int level; /* current level number in btree */ - xfs_agblock_t nbno; /* new block number (split result) */ - xfs_btree_cur_t *ncur; /* new cursor (split result) */ - xfs_inobt_rec_t nrec; /* record being inserted this level */ - xfs_btree_cur_t *pcur; /* previous level's cursor */ - - level = 0; - nbno = NULLAGBLOCK; - nrec.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino); - nrec.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount); - nrec.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free); - ncur = NULL; - pcur = cur; - /* - * Loop going up the tree, starting at the leaf level. - * Stop when we don't get a split block, that must mean that - * the insert is finished with this level. - */ - do { - /* - * Insert nrec/nbno into this level of the tree. - * Note if we fail, nbno will be null. - */ - if ((error = xfs_inobt_insrec(pcur, level++, &nbno, &nrec, &ncur, - &i))) { - if (pcur != cur) - xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR); - return error; - } - /* - * See if the cursor we just used is trash. - * Can't trash the caller's cursor, but otherwise we should - * if ncur is a new cursor or we're about to be done. - */ - if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) { - cur->bc_nlevels = pcur->bc_nlevels; - xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR); - } - /* - * If we got a new cursor, switch to it. - */ - if (ncur) { - pcur = ncur; - ncur = NULL; - } - } while (nbno != NULLAGBLOCK); - *stat = i; - return 0; -} STATIC struct xfs_btree_cur * xfs_inobt_dup_cursor( @@ -1053,6 +771,24 @@ xfs_inobt_init_key_from_rec( key->inobt.ir_startino = rec->inobt.ir_startino; } +STATIC void +xfs_inobt_init_rec_from_key( + union xfs_btree_key *key, + union xfs_btree_rec *rec) +{ + rec->inobt.ir_startino = key->inobt.ir_startino; +} + +STATIC void +xfs_inobt_init_rec_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino); + rec->inobt.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount); + rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free); +} + /* * intial value of ptr for lookup */ @@ -1152,6 +888,8 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .alloc_block = xfs_inobt_alloc_block, .get_maxrecs = xfs_inobt_get_maxrecs, .init_key_from_rec = xfs_inobt_init_key_from_rec, + .init_rec_from_key = xfs_inobt_init_rec_from_key, + .init_rec_from_cur = xfs_inobt_init_rec_from_cur, .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, .key_diff = xfs_inobt_key_diff, diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 7f77549e82a..c9cbc4f2168 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -129,12 +129,6 @@ extern int xfs_inobt_delete(struct xfs_btree_cur *cur, int *stat); extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, __int32_t *fcnt, xfs_inofree_t *free, int *stat); -/* - * Insert the current record at the point referenced by cur. - * The cursor may be inconsistent on return if splits have been done. 
- */ -extern int xfs_inobt_insert(struct xfs_btree_cur *cur, int *stat); - extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); -- cgit v1.2.3-70-g09d2 From d4b3a4b7dd62f2e111d4d0afa9ef3f9b6cd955c0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:57:51 +1100 Subject: [XFS] move xfs_bmbt_killroot to common code xfs_bmbt_killroot is a mostly generic implementation of moving from a real block based root to an inode based root. So move it to xfs_btree.c where it can use all the nice infrastructure there and make it pointer size agnostic The new name for it is xfs_btree_kill_iroot, following the old naming but making it clear we're dealing with the root in inode case here, and to avoid confusion with xfs_btree_new_root which is used for the not inode rooted case. I've also added a comment describing what it does and why it's named the way it is. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32203a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 32 +++++++++++++ fs/xfs/xfs_bmap_btree.c | 116 +++++++++------------------------------------- fs/xfs/xfs_btree.c | 112 ++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 2 + fs/xfs/xfs_ialloc_btree.c | 17 +++++++ 5 files changed, 185 insertions(+), 94 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 818adca77fc..f124ddd91c0 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -834,6 +834,37 @@ xfs_allocbt_alloc_block( return 0; } +STATIC int +xfs_allocbt_free_block( + struct xfs_btree_cur *cur, + struct xfs_buf *bp) +{ + struct xfs_buf *agbp = cur->bc_private.a.agbp; + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); + xfs_agblock_t bno; + int error; + + bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp)); + error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); + if (error) + return error; + + /* + * Since blocks move to the free list without the coordination used in + * xfs_bmap_finish, we can't allow block to be available for + * reallocation and non-transaction writing (user data) until we know + * that the transaction that moved it to the free list is permanently + * on disk. We track the blocks by declaring these blocks as "busy"; + * the busy list is maintained on a per-ag basis and each transaction + * records which entries should be removed when the iclog commits to + * disk. If a busy block is allocated, the iclog is pushed up to the + * LSN that freed the block. 
+ */ + xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); + xfs_trans_agbtree_delta(cur->bc_tp, -1); + return 0; +} + /* * Update the longest extent in the AGF */ @@ -1025,6 +1056,7 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .dup_cursor = xfs_allocbt_dup_cursor, .set_root = xfs_allocbt_set_root, .alloc_block = xfs_allocbt_alloc_block, + .free_block = xfs_allocbt_free_block, .update_lastrec = xfs_allocbt_update_lastrec, .get_maxrecs = xfs_allocbt_get_maxrecs, .init_key_from_rec = xfs_allocbt_init_key_from_rec, diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 2b15df32b7d..6b7774ebc26 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -49,7 +49,6 @@ */ -STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); @@ -194,7 +193,7 @@ xfs_bmbt_delrec( if (level == cur->bc_nlevels - 1) { xfs_iroot_realloc(cur->bc_private.b.ip, -1, cur->bc_private.b.whichfork); - if ((error = xfs_bmbt_killroot(cur))) { + if ((error = xfs_btree_kill_iroot(cur))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -228,7 +227,7 @@ xfs_bmbt_delrec( */ if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK && level == cur->bc_nlevels - 2) { - if ((error = xfs_bmbt_killroot(cur))) { + if ((error = xfs_btree_kill_iroot(cur))) { XFS_BMBT_TRACE_CURSOR(cur, ERROR); goto error0; } @@ -456,97 +455,6 @@ error0: return error; } -STATIC int -xfs_bmbt_killroot( - xfs_btree_cur_t *cur) -{ - xfs_bmbt_block_t *block; - xfs_bmbt_block_t *cblock; - xfs_buf_t *cbp; - xfs_bmbt_key_t *ckp; - xfs_bmbt_ptr_t *cpp; -#ifdef DEBUG - int error; -#endif - int i; - xfs_bmbt_key_t *kp; - xfs_inode_t *ip; - xfs_ifork_t *ifp; - int level; - xfs_bmbt_ptr_t *pp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - level = cur->bc_nlevels - 1; - ASSERT(level >= 1); - /* - * Don't deal with the root block needs to be a leaf case. - * We're just going to turn the thing back into extents anyway. - */ - if (level == 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; - } - block = xfs_bmbt_get_block(cur, level, &cbp); - /* - * Give up if the root has multiple children. - */ - if (be16_to_cpu(block->bb_numrecs) != 1) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; - } - /* - * Only do this if the next level will fit. - * Then the data must be copied up to the inode, - * instead of freeing the root you free the next level. 
- */ - cbp = cur->bc_bufs[level - 1]; - cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); - if (be16_to_cpu(cblock->bb_numrecs) > XFS_BMAP_BLOCK_DMAXRECS(level, cur)) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; - } - ASSERT(be64_to_cpu(cblock->bb_leftsib) == NULLDFSBNO); - ASSERT(be64_to_cpu(cblock->bb_rightsib) == NULLDFSBNO); - ip = cur->bc_private.b.ip; - ifp = XFS_IFORK_PTR(ip, cur->bc_private.b.whichfork); - ASSERT(XFS_BMAP_BLOCK_IMAXRECS(level, cur) == - XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes)); - i = (int)(be16_to_cpu(cblock->bb_numrecs) - XFS_BMAP_BLOCK_IMAXRECS(level, cur)); - if (i) { - xfs_iroot_realloc(ip, i, cur->bc_private.b.whichfork); - block = ifp->if_broot; - } - be16_add_cpu(&block->bb_numrecs, i); - ASSERT(block->bb_numrecs == cblock->bb_numrecs); - kp = XFS_BMAP_KEY_IADDR(block, 1, cur); - ckp = XFS_BMAP_KEY_IADDR(cblock, 1, cur); - memcpy(kp, ckp, be16_to_cpu(block->bb_numrecs) * sizeof(*kp)); - pp = XFS_BMAP_PTR_IADDR(block, 1, cur); - cpp = XFS_BMAP_PTR_IADDR(cblock, 1, cur); -#ifdef DEBUG - for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { - if ((error = xfs_btree_check_lptr_disk(cur, cpp[i], level - 1))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } -#endif - memcpy(pp, cpp, be16_to_cpu(block->bb_numrecs) * sizeof(*pp)); - xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1, - cur->bc_private.b.flist, cur->bc_mp); - ip->i_d.di_nblocks--; - XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip, - XFS_TRANS_DQ_BCOUNT, -1L); - xfs_trans_binval(cur->bc_tp, cbp); - cur->bc_bufs[level - 1] = NULL; - be16_add_cpu(&block->bb_level, -1); - xfs_trans_log_inode(cur->bc_tp, ip, - XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); - cur->bc_nlevels--; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - return 0; -} - /* * Log key values from the btree block. */ @@ -1298,6 +1206,25 @@ xfs_bmbt_alloc_block( return error; } +STATIC int +xfs_bmbt_free_block( + struct xfs_btree_cur *cur, + struct xfs_buf *bp) +{ + struct xfs_mount *mp = cur->bc_mp; + struct xfs_inode *ip = cur->bc_private.b.ip; + struct xfs_trans *tp = cur->bc_tp; + xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp)); + + xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp); + ip->i_d.di_nblocks--; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); + xfs_trans_binval(tp, bp); + return 0; +} + STATIC int xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, @@ -1460,6 +1387,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .dup_cursor = xfs_bmbt_dup_cursor, .update_cursor = xfs_bmbt_update_cursor, .alloc_block = xfs_bmbt_alloc_block, + .free_block = xfs_bmbt_free_block, .get_maxrecs = xfs_bmbt_get_maxrecs, .get_dmaxrecs = xfs_bmbt_get_dmaxrecs, .init_key_from_rec = xfs_bmbt_init_key_from_rec, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 36477aae77d..75a8a7b00df 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -3059,3 +3059,115 @@ error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } + +/* + * Try to merge a non-leaf block back into the inode root. + * + * Note: the killroot names comes from the fact that we're effectively + * killing the old root block. But because we can't just delete the + * inode we have to copy the single block it was pointing to into the + * inode. 
+ */ +int +xfs_btree_kill_iroot( + struct xfs_btree_cur *cur) +{ + int whichfork = cur->bc_private.b.whichfork; + struct xfs_inode *ip = cur->bc_private.b.ip; + struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork); + struct xfs_btree_block *block; + struct xfs_btree_block *cblock; + union xfs_btree_key *kp; + union xfs_btree_key *ckp; + union xfs_btree_ptr *pp; + union xfs_btree_ptr *cpp; + struct xfs_buf *cbp; + int level; + int index; + int numrecs; +#ifdef DEBUG + union xfs_btree_ptr ptr; + int i; +#endif + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + + ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE); + ASSERT(cur->bc_nlevels > 1); + + /* + * Don't deal with the root block needs to be a leaf case. + * We're just going to turn the thing back into extents anyway. + */ + level = cur->bc_nlevels - 1; + if (level == 1) + goto out0; + + /* + * Give up if the root has multiple children. + */ + block = xfs_btree_get_iroot(cur); + if (xfs_btree_get_numrecs(block) != 1) + goto out0; + + cblock = xfs_btree_get_block(cur, level - 1, &cbp); + numrecs = xfs_btree_get_numrecs(cblock); + + /* + * Only do this if the next level will fit. + * Then the data must be copied up to the inode, + * instead of freeing the root you free the next level. + */ + if (numrecs > cur->bc_ops->get_dmaxrecs(cur, level)) + goto out0; + + XFS_BTREE_STATS_INC(cur, killroot); + +#ifdef DEBUG + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_LEFTSIB); + ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); + xfs_btree_get_sibling(cur, block, &ptr, XFS_BB_RIGHTSIB); + ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); +#endif + + index = numrecs - cur->bc_ops->get_maxrecs(cur, level); + if (index) { + xfs_iroot_realloc(cur->bc_private.b.ip, index, + cur->bc_private.b.whichfork); + block = (struct xfs_btree_block *)ifp->if_broot; + } + + be16_add_cpu(&block->bb_numrecs, index); + ASSERT(block->bb_numrecs == cblock->bb_numrecs); + + kp = xfs_btree_key_addr(cur, 1, block); + ckp = xfs_btree_key_addr(cur, 1, cblock); + xfs_btree_copy_keys(cur, kp, ckp, numrecs); + + pp = xfs_btree_ptr_addr(cur, 1, block); + cpp = xfs_btree_ptr_addr(cur, 1, cblock); +#ifdef DEBUG + for (i = 0; i < numrecs; i++) { + int error; + + error = xfs_btree_check_ptr(cur, cpp, i, level - 1); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } + } +#endif + xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); + + cur->bc_ops->free_block(cur, cbp); + XFS_BTREE_STATS_INC(cur, free); + + cur->bc_bufs[level - 1] = NULL; + be16_add_cpu(&block->bb_level, -1); + xfs_trans_log_inode(cur->bc_tp, ip, + XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); + cur->bc_nlevels--; +out0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 6f03871f599..ff2552febba 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -198,6 +198,7 @@ struct xfs_btree_ops { union xfs_btree_ptr *start_bno, union xfs_btree_ptr *new_bno, int length, int *stat); + int (*free_block)(struct xfs_btree_cur *cur, struct xfs_buf *bp); /* update last record information */ void (*update_lastrec)(struct xfs_btree_cur *cur, @@ -559,6 +560,7 @@ int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, union xfs_btree_key *, struct xfs_btree_cur **, int *); int xfs_btree_new_root(struct xfs_btree_cur *, int *); int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); +int xfs_btree_kill_iroot(struct xfs_btree_cur *); int xfs_btree_insert(struct xfs_btree_cur *, int *); /* diff --git a/fs/xfs/xfs_ialloc_btree.c 
b/fs/xfs/xfs_ialloc_btree.c index 8f66e272056..90f1d4ee772 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -754,6 +754,22 @@ xfs_inobt_alloc_block( return 0; } +STATIC int +xfs_inobt_free_block( + struct xfs_btree_cur *cur, + struct xfs_buf *bp) +{ + xfs_fsblock_t fsbno; + int error; + + fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)); + error = xfs_free_extent(cur->bc_tp, fsbno, 1); + if (error) + return error; + + xfs_trans_binval(cur->bc_tp, bp); + return error; +} STATIC int xfs_inobt_get_maxrecs( @@ -886,6 +902,7 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .dup_cursor = xfs_inobt_dup_cursor, .set_root = xfs_inobt_set_root, .alloc_block = xfs_inobt_alloc_block, + .free_block = xfs_inobt_free_block, .get_maxrecs = xfs_inobt_get_maxrecs, .init_key_from_rec = xfs_inobt_init_key_from_rec, .init_rec_from_key = xfs_inobt_init_rec_from_key, -- cgit v1.2.3-70-g09d2 From 91cca5df9bc85efdabfa645f51d54259ed09f4bf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:01 +1100 Subject: [XFS] implement generic xfs_btree_delete/delrec Make the btree delete code generic. Based on a patch from David Chinner with lots of changes to follow the original btree implementations more closely. While this loses some of the generic helper routines for inserting/moving/removing records it also solves some of the one off bugs in the original code and makes it easier to verify. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32205a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 14 +- fs/xfs/xfs_alloc_btree.c | 744 ++++------------------------------------------ fs/xfs/xfs_alloc_btree.h | 7 - fs/xfs/xfs_bmap.c | 14 +- fs/xfs/xfs_bmap_btree.c | 525 +------------------------------- fs/xfs/xfs_bmap_btree.h | 3 - fs/xfs/xfs_btree.c | 593 ++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 5 + fs/xfs/xfs_ialloc.c | 4 +- fs/xfs/xfs_ialloc_btree.c | 646 +++------------------------------------- fs/xfs/xfs_ialloc_btree.h | 7 - 11 files changed, 723 insertions(+), 1839 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index a983824c12b..e9c70249d2c 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -398,7 +398,7 @@ xfs_alloc_fixup_trees( /* * Delete the entry from the by-size btree. */ - if ((error = xfs_alloc_delete(cnt_cur, &i))) + if ((error = xfs_btree_delete(cnt_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); /* @@ -427,7 +427,7 @@ xfs_alloc_fixup_trees( /* * No remaining freespace, just delete the by-block tree entry. */ - if ((error = xfs_alloc_delete(bno_cur, &i))) + if ((error = xfs_btree_delete(bno_cur, &i))) return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } else { @@ -1651,7 +1651,7 @@ xfs_free_ag_extent( if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_delete(cnt_cur, &i))) + if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); /* @@ -1660,13 +1660,13 @@ xfs_free_ag_extent( if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_delete(cnt_cur, &i))) + if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); /* * Delete the old by-block entry for the right block. 
*/ - if ((error = xfs_alloc_delete(bno_cur, &i))) + if ((error = xfs_btree_delete(bno_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); /* @@ -1711,7 +1711,7 @@ xfs_free_ag_extent( if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_delete(cnt_cur, &i))) + if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); /* @@ -1737,7 +1737,7 @@ xfs_free_ag_extent( if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_alloc_delete(cnt_cur, &i))) + if ((error = xfs_btree_delete(cnt_cur, &i))) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); /* diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index f124ddd91c0..d256b51f913 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -40,691 +40,6 @@ #include "xfs_alloc.h" #include "xfs_error.h" -/* - * Prototypes for internal functions. - */ - -STATIC void xfs_alloc_log_block(xfs_trans_t *, xfs_buf_t *, int); -STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); - -/* - * Internal functions. - */ - -/* - * Single level of the xfs_alloc_delete record deletion routine. - * Delete record pointed to by cur/level. - * Remove the record from its block then rebalance the tree. - * Return 0 for error, 1 for done, 2 to go on to the next level. - */ -STATIC int /* error */ -xfs_alloc_delrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level removing record from */ - int *stat) /* fail/done/go-on */ -{ - xfs_agf_t *agf; /* allocation group freelist header */ - xfs_alloc_block_t *block; /* btree block record/key lives in */ - xfs_agblock_t bno; /* btree block number */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop index */ - xfs_alloc_key_t key; /* kp points here if block is level 0 */ - xfs_agblock_t lbno; /* left block's block number */ - xfs_buf_t *lbp; /* left block's buffer pointer */ - xfs_alloc_block_t *left; /* left btree block */ - xfs_alloc_key_t *lkp=NULL; /* left block key pointer */ - xfs_alloc_ptr_t *lpp=NULL; /* left block address pointer */ - int lrecs=0; /* number of records in left block */ - xfs_alloc_rec_t *lrp; /* left block record pointer */ - xfs_mount_t *mp; /* mount structure */ - int ptr; /* index in btree block for this rec */ - xfs_agblock_t rbno; /* right block's block number */ - xfs_buf_t *rbp; /* right block's buffer pointer */ - xfs_alloc_block_t *right; /* right btree block */ - xfs_alloc_key_t *rkp; /* right block key pointer */ - xfs_alloc_ptr_t *rpp; /* right block address pointer */ - int rrecs=0; /* number of records in right block */ - int numrecs; - xfs_alloc_rec_t *rrp; /* right block record pointer */ - xfs_btree_cur_t *tcur; /* temporary btree cursor */ - - /* - * Get the index of the entry being deleted, check for nothing there. - */ - ptr = cur->bc_ptrs[level]; - if (ptr == 0) { - *stat = 0; - return 0; - } - /* - * Get the buffer & block containing the record or key/ptr. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_ALLOC_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - /* - * Fail if we're off the end of the block. 
- */ - numrecs = be16_to_cpu(block->bb_numrecs); - if (ptr > numrecs) { - *stat = 0; - return 0; - } - XFS_STATS_INC(xs_abt_delrec); - /* - * It's a nonleaf. Excise the key and ptr being deleted, by - * sliding the entries past them down one. - * Log the changed areas of the block. - */ - if (level > 0) { - lkp = XFS_ALLOC_KEY_ADDR(block, 1, cur); - lpp = XFS_ALLOC_PTR_ADDR(block, 1, cur); -#ifdef DEBUG - for (i = ptr; i < numrecs; i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level))) - return error; - } -#endif - if (ptr < numrecs) { - memmove(&lkp[ptr - 1], &lkp[ptr], - (numrecs - ptr) * sizeof(*lkp)); - memmove(&lpp[ptr - 1], &lpp[ptr], - (numrecs - ptr) * sizeof(*lpp)); - xfs_alloc_log_ptrs(cur, bp, ptr, numrecs - 1); - xfs_alloc_log_keys(cur, bp, ptr, numrecs - 1); - } - } - /* - * It's a leaf. Excise the record being deleted, by sliding the - * entries past it down one. Log the changed areas of the block. - */ - else { - lrp = XFS_ALLOC_REC_ADDR(block, 1, cur); - if (ptr < numrecs) { - memmove(&lrp[ptr - 1], &lrp[ptr], - (numrecs - ptr) * sizeof(*lrp)); - xfs_alloc_log_recs(cur, bp, ptr, numrecs - 1); - } - /* - * If it's the first record in the block, we'll need a key - * structure to pass up to the next level (updkey). - */ - if (ptr == 1) { - key.ar_startblock = lrp->ar_startblock; - key.ar_blockcount = lrp->ar_blockcount; - lkp = &key; - } - } - /* - * Decrement and log the number of entries in the block. - */ - numrecs--; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); - /* - * See if the longest free extent in the allocation group was - * changed by this operation. True if it's the by-size btree, and - * this is the leaf level, and there is no right sibling block, - * and this was the last record. - */ - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - mp = cur->bc_mp; - - if (level == 0 && - cur->bc_btnum == XFS_BTNUM_CNT && - be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK && - ptr > numrecs) { - ASSERT(ptr == numrecs + 1); - /* - * There are still records in the block. Grab the size - * from the last one. - */ - if (numrecs) { - rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur); - agf->agf_longest = rrp->ar_blockcount; - } - /* - * No free extents left. - */ - else - agf->agf_longest = 0; - mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest = - be32_to_cpu(agf->agf_longest); - xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, - XFS_AGF_LONGEST); - } - /* - * Is this the root level? If so, we're almost done. - */ - if (level == cur->bc_nlevels - 1) { - /* - * If this is the root level, - * and there's only one entry left, - * and it's NOT the leaf level, - * then we can get rid of this level. - */ - if (numrecs == 1 && level > 0) { - /* - * lpp is still set to the first pointer in the block. - * Make it the new root of the btree. - */ - bno = be32_to_cpu(agf->agf_roots[cur->bc_btnum]); - agf->agf_roots[cur->bc_btnum] = *lpp; - be32_add_cpu(&agf->agf_levels[cur->bc_btnum], -1); - mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_levels[cur->bc_btnum]--; - /* - * Put this buffer/block on the ag's freelist. 
- */ - error = xfs_alloc_put_freelist(cur->bc_tp, - cur->bc_private.a.agbp, NULL, bno, 1); - if (error) - return error; - /* - * Since blocks move to the free list without the - * coordination used in xfs_bmap_finish, we can't allow - * block to be available for reallocation and - * non-transaction writing (user data) until we know - * that the transaction that moved it to the free list - * is permanently on disk. We track the blocks by - * declaring these blocks as "busy"; the busy list is - * maintained on a per-ag basis and each transaction - * records which entries should be removed when the - * iclog commits to disk. If a busy block is - * allocated, the iclog is pushed up to the LSN - * that freed the block. - */ - xfs_alloc_mark_busy(cur->bc_tp, - be32_to_cpu(agf->agf_seqno), bno, 1); - - xfs_trans_agbtree_delta(cur->bc_tp, -1); - xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, - XFS_AGF_ROOTS | XFS_AGF_LEVELS); - /* - * Update the cursor so there's one fewer level. - */ - xfs_btree_setbuf(cur, level, NULL); - cur->bc_nlevels--; - } else if (level > 0 && - (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * If we deleted the leftmost entry in the block, update the - * key values above us in the tree. - */ - if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)lkp, level + 1))) - return error; - /* - * If the number of records remaining in the block is at least - * the minimum, we're done. - */ - if (numrecs >= XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * Otherwise, we have to move some records around to keep the - * tree balanced. Look at the left and right sibling blocks to - * see if we can re-balance by moving only one record. - */ - rbno = be32_to_cpu(block->bb_rightsib); - lbno = be32_to_cpu(block->bb_leftsib); - bno = NULLAGBLOCK; - ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); - /* - * Duplicate the cursor so our btree manipulations here won't - * disrupt the next level up. - */ - if ((error = xfs_btree_dup_cursor(cur, &tcur))) - return error; - /* - * If there's a right sibling, see if it's ok to shift an entry - * out of it. - */ - if (rbno != NULLAGBLOCK) { - /* - * Move the temp cursor to the last entry in the next block. - * Actually any entry but the first would suffice. - */ - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_increment(tcur, level, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - /* - * Grab a pointer to the block. - */ - rbp = tcur->bc_bufs[level]; - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - goto error0; -#endif - /* - * Grab the current block number, for future use. - */ - bno = be32_to_cpu(right->bb_leftsib); - /* - * If right block is full enough so that removing one entry - * won't make it too empty, and left-shifting an entry out - * of right to us works, we're done. 
- */ - if (be16_to_cpu(right->bb_numrecs) - 1 >= - XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_btree_lshift(tcur, level, &i))) - goto error0; - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_ALLOC_BLOCK_MINRECS(level, cur)); - xfs_btree_del_cursor(tcur, - XFS_BTREE_NOERROR); - if (level > 0 && - (error = xfs_btree_decrement(cur, level, - &i))) - return error; - *stat = 1; - return 0; - } - } - /* - * Otherwise, grab the number of records in right for - * future reference, and fix up the temp cursor to point - * to our block again (last record). - */ - rrecs = be16_to_cpu(right->bb_numrecs); - if (lbno != NULLAGBLOCK) { - i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_decrement(tcur, level, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - } - } - /* - * If there's a left sibling, see if it's ok to shift an entry - * out of it. - */ - if (lbno != NULLAGBLOCK) { - /* - * Move the temp cursor to the first entry in the - * previous block. - */ - i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_decrement(tcur, level, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - xfs_btree_firstrec(tcur, level); - /* - * Grab a pointer to the block. - */ - lbp = tcur->bc_bufs[level]; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - goto error0; -#endif - /* - * Grab the current block number, for future use. - */ - bno = be32_to_cpu(left->bb_rightsib); - /* - * If left block is full enough so that removing one entry - * won't make it too empty, and right-shifting an entry out - * of left to us works, we're done. - */ - if (be16_to_cpu(left->bb_numrecs) - 1 >= - XFS_ALLOC_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_btree_rshift(tcur, level, &i))) - goto error0; - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_ALLOC_BLOCK_MINRECS(level, cur)); - xfs_btree_del_cursor(tcur, - XFS_BTREE_NOERROR); - if (level == 0) - cur->bc_ptrs[0]++; - *stat = 1; - return 0; - } - } - /* - * Otherwise, grab the number of records in right for - * future reference. - */ - lrecs = be16_to_cpu(left->bb_numrecs); - } - /* - * Delete the temp cursor, we're done with it. - */ - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - /* - * If here, we need to do a join to keep the tree balanced. - */ - ASSERT(bno != NULLAGBLOCK); - /* - * See if we can join with the left neighbor block. - */ - if (lbno != NULLAGBLOCK && - lrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { - /* - * Set "right" to be the starting block, - * "left" to be the left neighbor. - */ - rbno = bno; - right = block; - rrecs = be16_to_cpu(right->bb_numrecs); - rbp = bp; - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, lbno, 0, &lbp, - XFS_ALLOC_BTREE_REF))) - return error; - left = XFS_BUF_TO_ALLOC_BLOCK(lbp); - lrecs = be16_to_cpu(left->bb_numrecs); - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; - } - /* - * If that won't work, see if we can join with the right neighbor block. - */ - else if (rbno != NULLAGBLOCK && - rrecs + numrecs <= XFS_ALLOC_BLOCK_MAXRECS(level, cur)) { - /* - * Set "left" to be the starting block, - * "right" to be the right neighbor. 
- */ - lbno = bno; - left = block; - lrecs = be16_to_cpu(left->bb_numrecs); - lbp = bp; - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, rbno, 0, &rbp, - XFS_ALLOC_BTREE_REF))) - return error; - right = XFS_BUF_TO_ALLOC_BLOCK(rbp); - rrecs = be16_to_cpu(right->bb_numrecs); - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; - } - /* - * Otherwise, we can't fix the imbalance. - * Just return. This is probably a logic error, but it's not fatal. - */ - else { - if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * We're now going to join "left" and "right" by moving all the stuff - * in "right" to "left" and deleting "right". - */ - if (level > 0) { - /* - * It's a non-leaf. Move keys and pointers. - */ - lkp = XFS_ALLOC_KEY_ADDR(left, lrecs + 1, cur); - lpp = XFS_ALLOC_PTR_ADDR(left, lrecs + 1, cur); - rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur); - rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < rrecs; i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) - return error; - } -#endif - memcpy(lkp, rkp, rrecs * sizeof(*lkp)); - memcpy(lpp, rpp, rrecs * sizeof(*lpp)); - xfs_alloc_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); - xfs_alloc_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); - } else { - /* - * It's a leaf. Move records. - */ - lrp = XFS_ALLOC_REC_ADDR(left, lrecs + 1, cur); - rrp = XFS_ALLOC_REC_ADDR(right, 1, cur); - memcpy(lrp, rrp, rrecs * sizeof(*lrp)); - xfs_alloc_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); - } - /* - * If we joined with the left neighbor, set the buffer in the - * cursor to the left block, and fix up the index. - */ - if (bp != lbp) { - xfs_btree_setbuf(cur, level, lbp); - cur->bc_ptrs[level] += lrecs; - } - /* - * If we joined with the right neighbor and there's a level above - * us, increment the cursor at that level. - */ - else if (level + 1 < cur->bc_nlevels && - (error = xfs_btree_increment(cur, level + 1, &i))) - return error; - /* - * Fix up the number of records in the surviving block. - */ - lrecs += rrecs; - left->bb_numrecs = cpu_to_be16(lrecs); - /* - * Fix up the right block pointer in the surviving block, and log it. - */ - left->bb_rightsib = right->bb_rightsib; - xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); - /* - * If there is a right sibling now, make it point to the - * remaining block. - */ - if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { - xfs_alloc_block_t *rrblock; - xfs_buf_t *rrbp; - - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, - &rrbp, XFS_ALLOC_BTREE_REF))) - return error; - rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp); - if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) - return error; - rrblock->bb_leftsib = cpu_to_be32(lbno); - xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); - } - /* - * Free the deleting block by putting it on the freelist. - */ - error = xfs_alloc_put_freelist(cur->bc_tp, - cur->bc_private.a.agbp, NULL, rbno, 1); - if (error) - return error; - /* - * Since blocks move to the free list without the coordination - * used in xfs_bmap_finish, we can't allow block to be available - * for reallocation and non-transaction writing (user data) - * until we know that the transaction that moved it to the free - * list is permanently on disk. 
We track the blocks by declaring - * these blocks as "busy"; the busy list is maintained on a - * per-ag basis and each transaction records which entries - * should be removed when the iclog commits to disk. If a - * busy block is allocated, the iclog is pushed up to the - * LSN that freed the block. - */ - xfs_alloc_mark_busy(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); - xfs_trans_agbtree_delta(cur->bc_tp, -1); - - /* - * Adjust the current level's cursor so that we're left referring - * to the right node, after we're done. - * If this leaves the ptr value 0 our caller will fix it up. - */ - if (level > 0) - cur->bc_ptrs[level]--; - /* - * Return value means the next level up has something to do. - */ - *stat = 2; - return 0; - -error0: - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; -} - -/* - * Log header fields from a btree block. - */ -STATIC void -xfs_alloc_log_block( - xfs_trans_t *tp, /* transaction pointer */ - xfs_buf_t *bp, /* buffer containing btree block */ - int fields) /* mask of fields: XFS_BB_... */ -{ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - static const short offsets[] = { /* table of offsets */ - offsetof(xfs_alloc_block_t, bb_magic), - offsetof(xfs_alloc_block_t, bb_level), - offsetof(xfs_alloc_block_t, bb_numrecs), - offsetof(xfs_alloc_block_t, bb_leftsib), - offsetof(xfs_alloc_block_t, bb_rightsib), - sizeof(xfs_alloc_block_t) - }; - - xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); - xfs_trans_log_buf(tp, bp, first, last); -} - -/* - * Log keys from a btree block (nonleaf). - */ -STATIC void -xfs_alloc_log_keys( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int kfirst, /* index of first key to log */ - int klast) /* index of last key to log */ -{ - xfs_alloc_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - xfs_alloc_key_t *kp; /* key pointer in btree block */ - int last; /* last byte offset logged */ - - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - kp = XFS_ALLOC_KEY_ADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - -/* - * Log block pointer fields from a btree block (nonleaf). - */ -STATIC void -xfs_alloc_log_ptrs( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int pfirst, /* index of first pointer to log */ - int plast) /* index of last pointer to log */ -{ - xfs_alloc_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - xfs_alloc_ptr_t *pp; /* block-pointer pointer in btree blk */ - - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - pp = XFS_ALLOC_PTR_ADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - -/* - * Log records from a btree block (leaf). 
- */ -STATIC void -xfs_alloc_log_recs( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int rfirst, /* index of first record to log */ - int rlast) /* index of last record to log */ -{ - xfs_alloc_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - xfs_alloc_rec_t *rp; /* record pointer for btree block */ - - - block = XFS_BUF_TO_ALLOC_BLOCK(bp); - rp = XFS_ALLOC_REC_ADDR(block, 1, cur); -#ifdef DEBUG - { - xfs_agf_t *agf; - xfs_alloc_rec_t *p; - - agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); - for (p = &rp[rfirst - 1]; p <= &rp[rlast - 1]; p++) - ASSERT(be32_to_cpu(p->ar_startblock) + - be32_to_cpu(p->ar_blockcount) <= - be32_to_cpu(agf->agf_length)); - } -#endif - first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - - -/* - * Externally visible routines. - */ - -/* - * Delete the record pointed to by cur. - * The cursor refers to the place where the record was (could be inserted) - * when the operation returns. - */ -int /* error */ -xfs_alloc_delete( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; /* result code */ - int level; /* btree level */ - - /* - * Go up the tree, starting at leaf level. - * If 2 is returned then a join was done; go to the next level. - * Otherwise we are done. - */ - for (level = 0, i = 2; i == 2; level++) { - if ((error = xfs_alloc_delrec(cur, level, &i))) - return error; - } - if (i == 0) { - for (level = 1; level < cur->bc_nlevels; level++) { - if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_btree_decrement(cur, level, &i))) - return error; - break; - } - } - } - *stat = i; - return 0; -} /* * Get the data from the pointed-to record. @@ -879,6 +194,7 @@ xfs_allocbt_update_lastrec( struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp); xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); __be32 len; + int numrecs; ASSERT(cur->bc_btnum == XFS_BTNUM_CNT); @@ -897,6 +213,22 @@ xfs_allocbt_update_lastrec( be32_to_cpu(agf->agf_longest)) return; len = rec->alloc.ar_blockcount; + break; + case LASTREC_DELREC: + numrecs = xfs_btree_get_numrecs(block); + if (ptr <= numrecs) + return; + ASSERT(ptr == numrecs + 1); + + if (numrecs) { + xfs_alloc_rec_t *rrp; + + rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur); + len = rrp->ar_blockcount; + } else { + len = 0; + } + break; default: ASSERT(0); @@ -908,6 +240,14 @@ xfs_allocbt_update_lastrec( xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST); } +STATIC int +xfs_allocbt_get_minrecs( + struct xfs_btree_cur *cur, + int level) +{ + return cur->bc_mp->m_alloc_mnr[level != 0]; +} + STATIC int xfs_allocbt_get_maxrecs( struct xfs_btree_cur *cur, @@ -983,6 +323,38 @@ xfs_allocbt_key_diff( return (__int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock; } +STATIC int +xfs_allocbt_kill_root( + struct xfs_btree_cur *cur, + struct xfs_buf *bp, + int level, + union xfs_btree_ptr *newroot) +{ + int error; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_STATS_INC(cur, killroot); + + /* + * Update the root pointer, decreasing the level by 1 and then + * free the old root. 
+ */ + xfs_allocbt_set_root(cur, newroot, -1); + error = xfs_allocbt_free_block(cur, bp); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } + + XFS_BTREE_STATS_INC(cur, free); + + xfs_btree_setbuf(cur, level, NULL); + cur->bc_nlevels--; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_allocbt_trace_buf; @@ -1055,9 +427,11 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .dup_cursor = xfs_allocbt_dup_cursor, .set_root = xfs_allocbt_set_root, + .kill_root = xfs_allocbt_kill_root, .alloc_block = xfs_allocbt_alloc_block, .free_block = xfs_allocbt_free_block, .update_lastrec = xfs_allocbt_update_lastrec, + .get_minrecs = xfs_allocbt_get_minrecs, .get_maxrecs = xfs_allocbt_get_maxrecs, .init_key_from_rec = xfs_allocbt_init_key_from_rec, .init_rec_from_key = xfs_allocbt_init_rec_from_key, diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 2e340ef8025..8d2e3ec21fd 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -94,13 +94,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur)) -/* - * Delete the record pointed to by cur. - * The cursor refers to the place where the record was (could be inserted) - * when the operation returns. - */ -extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat); - /* * Get the data from the pointed-to record. */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 85e2e8b9cf4..74761ca2c63 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -864,7 +864,7 @@ xfs_bmap_add_extent_delay_real( RIGHT.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) @@ -1425,13 +1425,13 @@ xfs_bmap_add_extent_unwritten_real( RIGHT.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) @@ -1474,7 +1474,7 @@ xfs_bmap_add_extent_unwritten_real( &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) @@ -1517,7 +1517,7 @@ xfs_bmap_add_extent_unwritten_real( RIGHT.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) @@ -2152,7 +2152,7 @@ xfs_bmap_add_extent_hole_real( right.br_blockcount, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); - if ((error = xfs_bmbt_delete(cur, &i))) + if ((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); if ((error = xfs_btree_decrement(cur, 0, &i))) @@ -3216,7 +3216,7 @@ xfs_bmap_del_extent( flags |= XFS_ILOG_FEXT(whichfork); break; } - if ((error = xfs_bmbt_delete(cur, &i))) + if 
((error = xfs_btree_delete(cur, &i))) goto done; XFS_WANT_CORRUPTED_GOTO(i == 1, done); break; diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 6b7774ebc26..5b8030561d7 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -44,14 +44,6 @@ #include "xfs_error.h" #include "xfs_quota.h" -/* - * Prototypes for internal btree functions. - */ - - -STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); - #undef EXIT #define ENTRY XBT_ENTRY @@ -80,453 +72,6 @@ STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); #define XFS_BMBT_TRACE_CURSOR(c,s) \ XFS_BTREE_TRACE_CURSOR(c,s) - -/* - * Internal functions. - */ - -/* - * Delete record pointed to by cur/level. - */ -STATIC int /* error */ -xfs_bmbt_delrec( - xfs_btree_cur_t *cur, - int level, - int *stat) /* success/failure */ -{ - xfs_bmbt_block_t *block; /* bmap btree block */ - xfs_fsblock_t bno; /* fs-relative block number */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop counter */ - int j; /* temp state */ - xfs_bmbt_key_t key; /* bmap btree key */ - xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ - xfs_fsblock_t lbno; /* left sibling block number */ - xfs_buf_t *lbp; /* left buffer pointer */ - xfs_bmbt_block_t *left; /* left btree block */ - xfs_bmbt_key_t *lkp; /* left btree key */ - xfs_bmbt_ptr_t *lpp; /* left address pointer */ - int lrecs=0; /* left record count */ - xfs_bmbt_rec_t *lrp; /* left record pointer */ - xfs_mount_t *mp; /* file system mount point */ - xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */ - int ptr; /* key/record index */ - xfs_fsblock_t rbno; /* right sibling block number */ - xfs_buf_t *rbp; /* right buffer pointer */ - xfs_bmbt_block_t *right; /* right btree block */ - xfs_bmbt_key_t *rkp; /* right btree key */ - xfs_bmbt_rec_t *rp; /* pointer to bmap btree rec */ - xfs_bmbt_ptr_t *rpp; /* right address pointer */ - xfs_bmbt_block_t *rrblock; /* right-right btree block */ - xfs_buf_t *rrbp; /* right-right buffer pointer */ - int rrecs=0; /* right record count */ - xfs_bmbt_rec_t *rrp; /* right record pointer */ - xfs_btree_cur_t *tcur; /* temporary btree cursor */ - int numrecs; /* temporary numrec count */ - int numlrecs, numrrecs; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGI(cur, level); - ptr = cur->bc_ptrs[level]; - tcur = NULL; - if (ptr == 0) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - block = xfs_bmbt_get_block(cur, level, &bp); - numrecs = be16_to_cpu(block->bb_numrecs); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, block, level, bp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } -#endif - if (ptr > numrecs) { - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 0; - return 0; - } - XFS_STATS_INC(xs_bmbt_delrec); - if (level > 0) { - kp = XFS_BMAP_KEY_IADDR(block, 1, cur); - pp = XFS_BMAP_PTR_IADDR(block, 1, cur); -#ifdef DEBUG - for (i = ptr; i < numrecs; i++) { - if ((error = xfs_btree_check_lptr_disk(cur, pp[i], level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - } -#endif - if (ptr < numrecs) { - memmove(&kp[ptr - 1], &kp[ptr], - (numrecs - ptr) * sizeof(*kp)); - memmove(&pp[ptr - 1], &pp[ptr], - (numrecs - ptr) * sizeof(*pp)); - xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs - 1); - xfs_bmbt_log_keys(cur, bp, ptr, numrecs - 1); - } - } else { - rp = XFS_BMAP_REC_IADDR(block, 1, cur); - if (ptr < numrecs) { - memmove(&rp[ptr - 
1], &rp[ptr], - (numrecs - ptr) * sizeof(*rp)); - xfs_bmbt_log_recs(cur, bp, ptr, numrecs - 1); - } - if (ptr == 1) { - key.br_startoff = - cpu_to_be64(xfs_bmbt_disk_get_startoff(rp)); - kp = &key; - } - } - numrecs--; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS); - /* - * We're at the root level. - * First, shrink the root block in-memory. - * Try to get rid of the next level down. - * If we can't then there's nothing left to do. - */ - if (level == cur->bc_nlevels - 1) { - xfs_iroot_realloc(cur->bc_private.b.ip, -1, - cur->bc_private.b.whichfork); - if ((error = xfs_btree_kill_iroot(cur))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (numrecs >= XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if (level > 0 && (error = xfs_btree_decrement(cur, level, &j))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - rbno = be64_to_cpu(block->bb_rightsib); - lbno = be64_to_cpu(block->bb_leftsib); - /* - * One child of root, need to get a chance to copy its contents - * into the root and delete it. Can't go up to next level, - * there's nothing to delete there. - */ - if (lbno == NULLFSBLOCK && rbno == NULLFSBLOCK && - level == cur->bc_nlevels - 2) { - if ((error = xfs_btree_kill_iroot(cur))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - ASSERT(rbno != NULLFSBLOCK || lbno != NULLFSBLOCK); - if ((error = xfs_btree_dup_cursor(cur, &tcur))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - bno = NULLFSBLOCK; - if (rbno != NULLFSBLOCK) { - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_increment(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - rbp = tcur->bc_bufs[level]; - right = XFS_BUF_TO_BMBT_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } -#endif - bno = be64_to_cpu(right->bb_leftsib); - if (be16_to_cpu(right->bb_numrecs) - 1 >= - XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if ((error = xfs_btree_lshift(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_BMAP_BLOCK_IMINRECS(level, tcur)); - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - tcur = NULL; - if (level > 0) { - if ((error = xfs_btree_decrement(cur, - level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, - ERROR); - goto error0; - } - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - } - rrecs = be16_to_cpu(right->bb_numrecs); - if (lbno != NULLFSBLOCK) { - i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_decrement(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_WANT_CORRUPTED_GOTO(i == 1, 
error0); - } - } - if (lbno != NULLFSBLOCK) { - i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - /* - * decrement to last in block - */ - if ((error = xfs_btree_decrement(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - i = xfs_btree_firstrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - lbp = tcur->bc_bufs[level]; - left = XFS_BUF_TO_BMBT_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } -#endif - bno = be64_to_cpu(left->bb_rightsib); - if (be16_to_cpu(left->bb_numrecs) - 1 >= - XFS_BMAP_BLOCK_IMINRECS(level, cur)) { - if ((error = xfs_btree_rshift(tcur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_BMAP_BLOCK_IMINRECS(level, tcur)); - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - tcur = NULL; - if (level == 0) - cur->bc_ptrs[0]++; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - } - lrecs = be16_to_cpu(left->bb_numrecs); - } - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - tcur = NULL; - mp = cur->bc_mp; - ASSERT(bno != NULLFSBLOCK); - if (lbno != NULLFSBLOCK && - lrecs + be16_to_cpu(block->bb_numrecs) <= XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { - rbno = bno; - right = block; - rbp = bp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, lbno, 0, &lbp, - XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - left = XFS_BUF_TO_BMBT_BLOCK(lbp); - if ((error = xfs_btree_check_lblock(cur, left, level, lbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - } else if (rbno != NULLFSBLOCK && - rrecs + be16_to_cpu(block->bb_numrecs) <= - XFS_BMAP_BLOCK_IMAXRECS(level, cur)) { - lbno = bno; - left = block; - lbp = bp; - if ((error = xfs_btree_read_bufl(mp, cur->bc_tp, rbno, 0, &rbp, - XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - right = XFS_BUF_TO_BMBT_BLOCK(rbp); - if ((error = xfs_btree_check_lblock(cur, right, level, rbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - lrecs = be16_to_cpu(left->bb_numrecs); - } else { - if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 1; - return 0; - } - numlrecs = be16_to_cpu(left->bb_numrecs); - numrrecs = be16_to_cpu(right->bb_numrecs); - if (level > 0) { - lkp = XFS_BMAP_KEY_IADDR(left, numlrecs + 1, cur); - lpp = XFS_BMAP_PTR_IADDR(left, numlrecs + 1, cur); - rkp = XFS_BMAP_KEY_IADDR(right, 1, cur); - rpp = XFS_BMAP_PTR_IADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < numrrecs; i++) { - if ((error = xfs_btree_check_lptr_disk(cur, rpp[i], level))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - } -#endif - memcpy(lkp, rkp, numrrecs * sizeof(*lkp)); - memcpy(lpp, rpp, numrrecs * sizeof(*lpp)); - xfs_bmbt_log_keys(cur, lbp, numlrecs + 1, numlrecs + numrrecs); - xfs_bmbt_log_ptrs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); - } else { - lrp = XFS_BMAP_REC_IADDR(left, numlrecs + 1, cur); - rrp = XFS_BMAP_REC_IADDR(right, 1, cur); - memcpy(lrp, rrp, numrrecs * sizeof(*lrp)); - xfs_bmbt_log_recs(cur, lbp, numlrecs + 1, numlrecs + numrrecs); - } - be16_add_cpu(&left->bb_numrecs, numrrecs); - left->bb_rightsib = right->bb_rightsib; - xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS); - if (be64_to_cpu(left->bb_rightsib) != NULLDFSBNO) { - if ((error = 
xfs_btree_read_bufl(mp, cur->bc_tp, - be64_to_cpu(left->bb_rightsib), - 0, &rrbp, XFS_BMAP_BTREE_REF))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp); - if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - rrblock->bb_leftsib = cpu_to_be64(lbno); - xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB); - } - xfs_bmap_add_free(XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(rbp)), 1, - cur->bc_private.b.flist, mp); - cur->bc_private.b.ip->i_d.di_nblocks--; - xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE); - XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip, - XFS_TRANS_DQ_BCOUNT, -1L); - xfs_trans_binval(cur->bc_tp, rbp); - if (bp != lbp) { - cur->bc_bufs[level] = lbp; - cur->bc_ptrs[level] += lrecs; - cur->bc_ra[level] = 0; - } else if ((error = xfs_btree_increment(cur, level + 1, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - goto error0; - } - if (level > 0) - cur->bc_ptrs[level]--; - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = 2; - return 0; - -error0: - if (tcur) - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; -} - -/* - * Log key values from the btree block. - */ -STATIC void -xfs_bmbt_log_keys( - xfs_btree_cur_t *cur, - xfs_buf_t *bp, - int kfirst, - int klast) -{ - xfs_trans_t *tp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGBII(cur, bp, kfirst, klast); - tp = cur->bc_tp; - if (bp) { - xfs_bmbt_block_t *block; - int first; - xfs_bmbt_key_t *kp; - int last; - - block = XFS_BUF_TO_BMBT_BLOCK(bp); - kp = XFS_BMAP_KEY_DADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(tp, bp, first, last); - } else { - xfs_inode_t *ip; - - ip = cur->bc_private.b.ip; - xfs_trans_log_inode(tp, ip, - XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); -} - -/* - * Log pointer values from the btree block. - */ -STATIC void -xfs_bmbt_log_ptrs( - xfs_btree_cur_t *cur, - xfs_buf_t *bp, - int pfirst, - int plast) -{ - xfs_trans_t *tp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGBII(cur, bp, pfirst, plast); - tp = cur->bc_tp; - if (bp) { - xfs_bmbt_block_t *block; - int first; - int last; - xfs_bmbt_ptr_t *pp; - - block = XFS_BUF_TO_BMBT_BLOCK(bp); - pp = XFS_BMAP_PTR_DADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(tp, bp, first, last); - } else { - xfs_inode_t *ip; - - ip = cur->bc_private.b.ip; - xfs_trans_log_inode(tp, ip, - XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); -} - /* * Determine the extent state. */ @@ -575,42 +120,6 @@ xfs_bmdr_to_bmbt( memcpy(tpp, fpp, sizeof(*fpp) * dmxr); } -/* - * Delete the record pointed to by cur. 
- */ -int /* error */ -xfs_bmbt_delete( - xfs_btree_cur_t *cur, - int *stat) /* success/failure */ -{ - int error; /* error return value */ - int i; - int level; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - for (level = 0, i = 2; i == 2; level++) { - if ((error = xfs_bmbt_delrec(cur, level, &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - } - if (i == 0) { - for (level = 1; level < cur->bc_nlevels; level++) { - if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_btree_decrement(cur, level, - &i))) { - XFS_BMBT_TRACE_CURSOR(cur, ERROR); - return error; - } - break; - } - } - } - XFS_BMBT_TRACE_CURSOR(cur, EXIT); - *stat = i; - return 0; -} - /* * Convert a compressed bmap extent record to an uncompressed form. * This code must be in sync with the routines xfs_bmbt_get_startoff, @@ -664,31 +173,6 @@ xfs_bmbt_get_all( __xfs_bmbt_get_all(r->l0, r->l1, s); } -/* - * Get the block pointer for the given level of the cursor. - * Fill in the buffer pointer, if applicable. - */ -xfs_bmbt_block_t * -xfs_bmbt_get_block( - xfs_btree_cur_t *cur, - int level, - xfs_buf_t **bpp) -{ - xfs_ifork_t *ifp; - xfs_bmbt_block_t *rval; - - if (level < cur->bc_nlevels - 1) { - *bpp = cur->bc_bufs[level]; - rval = XFS_BUF_TO_BMBT_BLOCK(*bpp); - } else { - *bpp = NULL; - ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, - cur->bc_private.b.whichfork); - rval = ifp->if_broot; - } - return rval; -} - /* * Extract the blockcount field from an in memory bmap extent record. */ @@ -1225,6 +709,14 @@ xfs_bmbt_free_block( return 0; } +STATIC int +xfs_bmbt_get_minrecs( + struct xfs_btree_cur *cur, + int level) +{ + return XFS_BMAP_BLOCK_IMINRECS(level, cur); +} + STATIC int xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, @@ -1389,6 +881,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .alloc_block = xfs_bmbt_alloc_block, .free_block = xfs_bmbt_free_block, .get_maxrecs = xfs_bmbt_get_maxrecs, + .get_minrecs = xfs_bmbt_get_minrecs, .get_dmaxrecs = xfs_bmbt_get_dmaxrecs, .init_key_from_rec = xfs_bmbt_init_key_from_rec, .init_rec_from_key = xfs_bmbt_init_rec_from_key, diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 703fe2e3434..952ab395f79 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -237,10 +237,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; * Prototypes for xfs_bmap.c to call. */ extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int); -extern int xfs_bmbt_delete(struct xfs_btree_cur *, int *); extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); -extern xfs_bmbt_block_t *xfs_bmbt_get_block(struct xfs_btree_cur *cur, - int, struct xfs_buf **bpp); extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r); extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r); extern xfs_fileoff_t xfs_bmbt_get_startoff(xfs_bmbt_rec_host_t *r); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 75a8a7b00df..28cc7681834 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -3171,3 +3171,596 @@ out0: XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); return 0; } + +STATIC int +xfs_btree_dec_cursor( + struct xfs_btree_cur *cur, + int level, + int *stat) +{ + int error; + int i; + + if (level > 0) { + error = xfs_btree_decrement(cur, level, &i); + if (error) + return error; + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; +} + +/* + * Single level of the btree record deletion routine. + * Delete record pointed to by cur/level. + * Remove the record from its block then rebalance the tree. 
+ * Return 0 for error, 1 for done, 2 to go on to the next level. + */ +STATIC int /* error */ +xfs_btree_delrec( + struct xfs_btree_cur *cur, /* btree cursor */ + int level, /* level removing record from */ + int *stat) /* fail/done/go-on */ +{ + struct xfs_btree_block *block; /* btree block */ + union xfs_btree_ptr cptr; /* current block ptr */ + struct xfs_buf *bp; /* buffer for block */ + int error; /* error return value */ + int i; /* loop counter */ + union xfs_btree_key key; /* storage for keyp */ + union xfs_btree_key *keyp = &key; /* passed to the next level */ + union xfs_btree_ptr lptr; /* left sibling block ptr */ + struct xfs_buf *lbp; /* left buffer pointer */ + struct xfs_btree_block *left; /* left btree block */ + int lrecs = 0; /* left record count */ + int ptr; /* key/record index */ + union xfs_btree_ptr rptr; /* right sibling block ptr */ + struct xfs_buf *rbp; /* right buffer pointer */ + struct xfs_btree_block *right; /* right btree block */ + struct xfs_btree_block *rrblock; /* right-right btree block */ + struct xfs_buf *rrbp; /* right-right buffer pointer */ + int rrecs = 0; /* right record count */ + struct xfs_btree_cur *tcur; /* temporary btree cursor */ + int numrecs; /* temporary numrec count */ + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_TRACE_ARGI(cur, level); + + tcur = NULL; + + /* Get the index of the entry being deleted, check for nothing there. */ + ptr = cur->bc_ptrs[level]; + if (ptr == 0) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + + /* Get the buffer & block containing the record or key/ptr. */ + block = xfs_btree_get_block(cur, level, &bp); + numrecs = xfs_btree_get_numrecs(block); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, level, bp); + if (error) + goto error0; +#endif + + /* Fail if we're off the end of the block. */ + if (ptr > numrecs) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 0; + return 0; + } + + XFS_BTREE_STATS_INC(cur, delrec); + XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr); + + /* Excise the entries being deleted. */ + if (level > 0) { + /* It's a nonleaf. operate on keys and ptrs */ + union xfs_btree_key *lkp; + union xfs_btree_ptr *lpp; + + lkp = xfs_btree_key_addr(cur, ptr + 1, block); + lpp = xfs_btree_ptr_addr(cur, ptr + 1, block); + +#ifdef DEBUG + for (i = 0; i < numrecs - ptr; i++) { + error = xfs_btree_check_ptr(cur, lpp, i, level); + if (error) + goto error0; + } +#endif + + if (ptr < numrecs) { + xfs_btree_shift_keys(cur, lkp, -1, numrecs - ptr); + xfs_btree_shift_ptrs(cur, lpp, -1, numrecs - ptr); + xfs_btree_log_keys(cur, bp, ptr, numrecs - 1); + xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1); + } + + /* + * If it's the first record in the block, we'll need to pass a + * key up to the next level (updkey). + */ + if (ptr == 1) + keyp = xfs_btree_key_addr(cur, 1, block); + } else { + /* It's a leaf. operate on records */ + if (ptr < numrecs) { + xfs_btree_shift_recs(cur, + xfs_btree_rec_addr(cur, ptr + 1, block), + -1, numrecs - ptr); + xfs_btree_log_recs(cur, bp, ptr, numrecs - 1); + } + + /* + * If it's the first record in the block, we'll need a key + * structure to pass up to the next level (updkey). + */ + if (ptr == 1) { + cur->bc_ops->init_key_from_rec(&key, + xfs_btree_rec_addr(cur, 1, block)); + keyp = &key; + } + } + + /* + * Decrement and log the number of entries in the block. 
+ */ + xfs_btree_set_numrecs(block, --numrecs); + xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); + + /* + * If we are tracking the last record in the tree and + * we are at the far right edge of the tree, update it. + */ + if (xfs_btree_is_lastrec(cur, block, level)) { + cur->bc_ops->update_lastrec(cur, block, NULL, + ptr, LASTREC_DELREC); + } + + /* + * We're at the root level. First, shrink the root block in-memory. + * Try to get rid of the next level down. If we can't then there's + * nothing left to do. + */ + if (level == cur->bc_nlevels - 1) { + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { + xfs_iroot_realloc(cur->bc_private.b.ip, -1, + cur->bc_private.b.whichfork); + + error = xfs_btree_kill_iroot(cur); + if (error) + goto error0; + + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + *stat = 1; + return 0; + } + + /* + * If this is the root level, and there's only one entry left, + * and it's NOT the leaf level, then we can get rid of this + * level. + */ + if (numrecs == 1 && level > 0) { + union xfs_btree_ptr *pp; + /* + * pp is still set to the first pointer in the block. + * Make it the new root of the btree. + */ + pp = xfs_btree_ptr_addr(cur, 1, block); + error = cur->bc_ops->kill_root(cur, bp, level, pp); + if (error) + goto error0; + } else if (level > 0) { + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + } + *stat = 1; + return 0; + } + + /* + * If we deleted the leftmost entry in the block, update the + * key values above us in the tree. + */ + if (ptr == 1) { + error = xfs_btree_updkey(cur, keyp, level + 1); + if (error) + goto error0; + } + + /* + * If the number of records remaining in the block is at least + * the minimum, we're done. + */ + if (numrecs >= cur->bc_ops->get_minrecs(cur, level)) { + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + return 0; + } + + /* + * Otherwise, we have to move some records around to keep the + * tree balanced. Look at the left and right sibling blocks to + * see if we can re-balance by moving only one record. + */ + xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); + xfs_btree_get_sibling(cur, block, &lptr, XFS_BB_LEFTSIB); + + if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) { + /* + * One child of root, need to get a chance to copy its contents + * into the root and delete it. Can't go up to next level, + * there's nothing to delete there. + */ + if (xfs_btree_ptr_is_null(cur, &rptr) && + xfs_btree_ptr_is_null(cur, &lptr) && + level == cur->bc_nlevels - 2) { + error = xfs_btree_kill_iroot(cur); + if (!error) + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + return 0; + } + } + + ASSERT(!xfs_btree_ptr_is_null(cur, &rptr) || + !xfs_btree_ptr_is_null(cur, &lptr)); + + /* + * Duplicate the cursor so our btree manipulations here won't + * disrupt the next level up. + */ + error = xfs_btree_dup_cursor(cur, &tcur); + if (error) + goto error0; + + /* + * If there's a right sibling, see if it's ok to shift an entry + * out of it. + */ + if (!xfs_btree_ptr_is_null(cur, &rptr)) { + /* + * Move the temp cursor to the last entry in the next block. + * Actually any entry but the first would suffice. + */ + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + error = xfs_btree_increment(tcur, level, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + i = xfs_btree_lastrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + /* Grab a pointer to the block. 
*/ + right = xfs_btree_get_block(tcur, level, &rbp); +#ifdef DEBUG + error = xfs_btree_check_block(tcur, right, level, rbp); + if (error) + goto error0; +#endif + /* Grab the current block number, for future use. */ + xfs_btree_get_sibling(tcur, right, &cptr, XFS_BB_LEFTSIB); + + /* + * If right block is full enough so that removing one entry + * won't make it too empty, and left-shifting an entry out + * of right to us works, we're done. + */ + if (xfs_btree_get_numrecs(right) - 1 >= + cur->bc_ops->get_minrecs(tcur, level)) { + error = xfs_btree_lshift(tcur, level, &i); + if (error) + goto error0; + if (i) { + ASSERT(xfs_btree_get_numrecs(block) >= + cur->bc_ops->get_minrecs(tcur, level)); + + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + return 0; + } + } + + /* + * Otherwise, grab the number of records in right for + * future reference, and fix up the temp cursor to point + * to our block again (last record). + */ + rrecs = xfs_btree_get_numrecs(right); + if (!xfs_btree_ptr_is_null(cur, &lptr)) { + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + error = xfs_btree_decrement(tcur, level, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + } + } + + /* + * If there's a left sibling, see if it's ok to shift an entry + * out of it. + */ + if (!xfs_btree_ptr_is_null(cur, &lptr)) { + /* + * Move the temp cursor to the first entry in the + * previous block. + */ + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + error = xfs_btree_decrement(tcur, level, &i); + if (error) + goto error0; + i = xfs_btree_firstrec(tcur, level); + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + + /* Grab a pointer to the block. */ + left = xfs_btree_get_block(tcur, level, &lbp); +#ifdef DEBUG + error = xfs_btree_check_block(cur, left, level, lbp); + if (error) + goto error0; +#endif + /* Grab the current block number, for future use. */ + xfs_btree_get_sibling(tcur, left, &cptr, XFS_BB_RIGHTSIB); + + /* + * If left block is full enough so that removing one entry + * won't make it too empty, and right-shifting an entry out + * of left to us works, we're done. + */ + if (xfs_btree_get_numrecs(left) - 1 >= + cur->bc_ops->get_minrecs(tcur, level)) { + error = xfs_btree_rshift(tcur, level, &i); + if (error) + goto error0; + if (i) { + ASSERT(xfs_btree_get_numrecs(block) >= + cur->bc_ops->get_minrecs(tcur, level)); + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + if (level == 0) + cur->bc_ptrs[0]++; + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = 1; + return 0; + } + } + + /* + * Otherwise, grab the number of records in right for + * future reference. + */ + lrecs = xfs_btree_get_numrecs(left); + } + + /* Delete the temp cursor, we're done with it. */ + xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); + tcur = NULL; + + /* If here, we need to do a join to keep the tree balanced. */ + ASSERT(!xfs_btree_ptr_is_null(cur, &cptr)); + + if (!xfs_btree_ptr_is_null(cur, &lptr) && + lrecs + xfs_btree_get_numrecs(block) <= + cur->bc_ops->get_maxrecs(cur, level)) { + /* + * Set "right" to be the starting block, + * "left" to be the left neighbor. + */ + rptr = cptr; + right = block; + rbp = bp; + error = xfs_btree_read_buf_block(cur, &lptr, level, + 0, &left, &lbp); + if (error) + goto error0; + + /* + * If that won't work, see if we can join with the right neighbor block. 
+ */ + } else if (!xfs_btree_ptr_is_null(cur, &rptr) && + rrecs + xfs_btree_get_numrecs(block) <= + cur->bc_ops->get_maxrecs(cur, level)) { + /* + * Set "left" to be the starting block, + * "right" to be the right neighbor. + */ + lptr = cptr; + left = block; + lbp = bp; + error = xfs_btree_read_buf_block(cur, &rptr, level, + 0, &right, &rbp); + if (error) + goto error0; + + /* + * Otherwise, we can't fix the imbalance. + * Just return. This is probably a logic error, but it's not fatal. + */ + } else { + error = xfs_btree_dec_cursor(cur, level, stat); + if (error) + goto error0; + return 0; + } + + rrecs = xfs_btree_get_numrecs(right); + lrecs = xfs_btree_get_numrecs(left); + + /* + * We're now going to join "left" and "right" by moving all the stuff + * in "right" to "left" and deleting "right". + */ + XFS_BTREE_STATS_ADD(cur, moves, rrecs); + if (level > 0) { + /* It's a non-leaf. Move keys and pointers. */ + union xfs_btree_key *lkp; /* left btree key */ + union xfs_btree_ptr *lpp; /* left address pointer */ + union xfs_btree_key *rkp; /* right btree key */ + union xfs_btree_ptr *rpp; /* right address pointer */ + + lkp = xfs_btree_key_addr(cur, lrecs + 1, left); + lpp = xfs_btree_ptr_addr(cur, lrecs + 1, left); + rkp = xfs_btree_key_addr(cur, 1, right); + rpp = xfs_btree_ptr_addr(cur, 1, right); +#ifdef DEBUG + for (i = 1; i < rrecs; i++) { + error = xfs_btree_check_ptr(cur, rpp, i, level); + if (error) + goto error0; + } +#endif + xfs_btree_copy_keys(cur, lkp, rkp, rrecs); + xfs_btree_copy_ptrs(cur, lpp, rpp, rrecs); + + xfs_btree_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); + xfs_btree_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); + } else { + /* It's a leaf. Move records. */ + union xfs_btree_rec *lrp; /* left record pointer */ + union xfs_btree_rec *rrp; /* right record pointer */ + + lrp = xfs_btree_rec_addr(cur, lrecs + 1, left); + rrp = xfs_btree_rec_addr(cur, 1, right); + + xfs_btree_copy_recs(cur, lrp, rrp, rrecs); + xfs_btree_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); + } + + XFS_BTREE_STATS_INC(cur, join); + + /* + * Fix up the the number of records and right block pointer in the + * surviving block, and log it. + */ + xfs_btree_set_numrecs(left, lrecs + rrecs); + xfs_btree_get_sibling(cur, right, &cptr, XFS_BB_RIGHTSIB), + xfs_btree_set_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); + xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); + + /* If there is a right sibling, point it to the remaining block. */ + xfs_btree_get_sibling(cur, left, &cptr, XFS_BB_RIGHTSIB); + if (!xfs_btree_ptr_is_null(cur, &cptr)) { + error = xfs_btree_read_buf_block(cur, &cptr, level, + 0, &rrblock, &rrbp); + if (error) + goto error0; + xfs_btree_set_sibling(cur, rrblock, &lptr, XFS_BB_LEFTSIB); + xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB); + } + + /* Free the deleted block. */ + error = cur->bc_ops->free_block(cur, rbp); + if (error) + goto error0; + XFS_BTREE_STATS_INC(cur, free); + + /* + * If we joined with the left neighbor, set the buffer in the + * cursor to the left block, and fix up the index. + */ + if (bp != lbp) { + cur->bc_bufs[level] = lbp; + cur->bc_ptrs[level] += lrecs; + cur->bc_ra[level] = 0; + } + /* + * If we joined with the right neighbor and there's a level above + * us, increment the cursor at that level. 
+ */ + else if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || + (level + 1 < cur->bc_nlevels)) { + error = xfs_btree_increment(cur, level + 1, &i); + if (error) + goto error0; + } + + /* + * Readjust the ptr at this level if it's not a leaf, since it's + * still pointing at the deletion point, which makes the cursor + * inconsistent. If this makes the ptr 0, the caller fixes it up. + * We can't use decrement because it would change the next level up. + */ + if (level > 0) + cur->bc_ptrs[level]--; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + /* Return value means the next level up has something to do. */ + *stat = 2; + return 0; + +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + if (tcur) + xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); + return error; +} + +/* + * Delete the record pointed to by cur. + * The cursor refers to the place where the record was (could be inserted) + * when the operation returns. + */ +int /* error */ +xfs_btree_delete( + struct xfs_btree_cur *cur, + int *stat) /* success/failure */ +{ + int error; /* error return value */ + int level; + int i; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + + /* + * Go up the tree, starting at leaf level. + * + * If 2 is returned then a join was done; go to the next level. + * Otherwise we are done. + */ + for (level = 0, i = 2; i == 2; level++) { + error = xfs_btree_delrec(cur, level, &i); + if (error) + goto error0; + } + + if (i == 0) { + for (level = 1; level < cur->bc_nlevels; level++) { + if (cur->bc_ptrs[level] == 0) { + error = xfs_btree_decrement(cur, level, &i); + if (error) + goto error0; + break; + } + } + } + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + *stat = i; + return 0; +error0: + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index ff2552febba..06ef792e0aa 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -192,6 +192,8 @@ struct xfs_btree_ops { /* update btree root pointer */ void (*set_root)(struct xfs_btree_cur *cur, union xfs_btree_ptr *nptr, int level_change); + int (*kill_root)(struct xfs_btree_cur *cur, struct xfs_buf *bp, + int level, union xfs_btree_ptr *newroot); /* block allocation / freeing */ int (*alloc_block)(struct xfs_btree_cur *cur, @@ -207,6 +209,7 @@ struct xfs_btree_ops { int ptr, int reason); /* records in block/level */ + int (*get_minrecs)(struct xfs_btree_cur *cur, int level); int (*get_maxrecs)(struct xfs_btree_cur *cur, int level); /* records on disk. Matter for the root in inode case. */ @@ -251,6 +254,7 @@ struct xfs_btree_ops { */ #define LASTREC_UPDATE 0 #define LASTREC_INSREC 1 +#define LASTREC_DELREC 2 /* @@ -562,6 +566,7 @@ int xfs_btree_new_root(struct xfs_btree_cur *, int *); int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); int xfs_btree_kill_iroot(struct xfs_btree_cur *); int xfs_btree_insert(struct xfs_btree_cur *, int *); +int xfs_btree_delete(struct xfs_btree_cur *, int *); /* * Helpers. 
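
[Editor's note: the hunks above move the level-by-level delete/rebalance loop into xfs_btree.c once, while each btree type now only supplies small callbacks (get_minrecs, get_maxrecs, kill_root, plus the LASTREC_DELREC hook). The freestanding C fragment below is an illustrative sketch of that callback pattern only, not kernel code: the names demo_btree_ops, after_delete, demo_minrecs and demo_maxrecs are made up for this note, and the real xfs_btree_delrec() additionally handles buffers, logging, cursors and sibling I/O that are omitted here.]

#include <stdio.h>

/* Hypothetical stand-in for the per-btree callback table (cf. xfs_btree_ops). */
struct demo_btree_ops {
	int (*get_minrecs)(int level);	/* fewest records a block may hold */
	int (*get_maxrecs)(int level);	/* most records a block may hold   */
};

enum rebalance_action { DONE, BORROW_FROM_SIBLING, MERGE_WITH_SIBLING };

/*
 * Decide what a generic delete path has to do after removing one record:
 * nothing if the block still holds at least the per-tree minimum, otherwise
 * borrow a record from a sibling that can spare one, otherwise merge (join)
 * the two blocks.  This mirrors the decision structure of the new
 * xfs_btree_delrec(), with all buffer and log handling stripped out.
 */
static enum rebalance_action
after_delete(const struct demo_btree_ops *ops, int level,
	     int numrecs, int sib_recs)
{
	if (numrecs >= ops->get_minrecs(level))
		return DONE;
	if (sib_recs - 1 >= ops->get_minrecs(level))
		return BORROW_FROM_SIBLING;
	if (numrecs + sib_recs <= ops->get_maxrecs(level))
		return MERGE_WITH_SIBLING;
	return DONE;	/* can't fix the imbalance; treated as non-fatal */
}

/* One possible per-tree implementation, in the spirit of the allocbt/bmbt callbacks. */
static int demo_minrecs(int level) { return level ? 2 : 3; }
static int demo_maxrecs(int level) { return level ? 4 : 7; }

int main(void)
{
	const struct demo_btree_ops ops = {
		.get_minrecs = demo_minrecs,
		.get_maxrecs = demo_maxrecs,
	};
	/* Leaf dropped to 2 records, right sibling holds 5: borrow one. */
	printf("action = %d\n", after_delete(&ops, 0, 2, 5));
	return 0;
}

[End of editor's note; the commit's remaining hunks follow.]
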
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index b68e73bb17c..f13f59b13cc 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1168,8 +1168,8 @@ xfs_difree( xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); - if ((error = xfs_inobt_delete(cur, &i))) { - cmn_err(CE_WARN, "xfs_difree: xfs_inobt_delete returned an error %d on %s.\n", + if ((error = xfs_btree_delete(cur, &i))) { + cmn_err(CE_WARN, "xfs_difree: xfs_btree_delete returned an error %d on %s.\n", error, mp->m_fsname); goto error0; } diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 90f1d4ee772..6c0a07d1fed 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -40,611 +40,6 @@ #include "xfs_alloc.h" #include "xfs_error.h" -STATIC void xfs_inobt_log_block(xfs_trans_t *, xfs_buf_t *, int); -STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); -STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); - -/* - * Single level of the xfs_inobt_delete record deletion routine. - * Delete record pointed to by cur/level. - * Remove the record from its block then rebalance the tree. - * Return 0 for error, 1 for done, 2 to go on to the next level. - */ -STATIC int /* error */ -xfs_inobt_delrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level, /* level removing record from */ - int *stat) /* fail/done/go-on */ -{ - xfs_buf_t *agbp; /* buffer for a.g. inode header */ - xfs_mount_t *mp; /* mount structure */ - xfs_agi_t *agi; /* allocation group inode header */ - xfs_inobt_block_t *block; /* btree block record/key lives in */ - xfs_agblock_t bno; /* btree block number */ - xfs_buf_t *bp; /* buffer for block */ - int error; /* error return value */ - int i; /* loop index */ - xfs_inobt_key_t key; /* kp points here if block is level 0 */ - xfs_inobt_key_t *kp = NULL; /* pointer to btree keys */ - xfs_agblock_t lbno; /* left block's block number */ - xfs_buf_t *lbp; /* left block's buffer pointer */ - xfs_inobt_block_t *left; /* left btree block */ - xfs_inobt_key_t *lkp; /* left block key pointer */ - xfs_inobt_ptr_t *lpp; /* left block address pointer */ - int lrecs = 0; /* number of records in left block */ - xfs_inobt_rec_t *lrp; /* left block record pointer */ - xfs_inobt_ptr_t *pp = NULL; /* pointer to btree addresses */ - int ptr; /* index in btree block for this rec */ - xfs_agblock_t rbno; /* right block's block number */ - xfs_buf_t *rbp; /* right block's buffer pointer */ - xfs_inobt_block_t *right; /* right btree block */ - xfs_inobt_key_t *rkp; /* right block key pointer */ - xfs_inobt_rec_t *rp; /* pointer to btree records */ - xfs_inobt_ptr_t *rpp; /* right block address pointer */ - int rrecs = 0; /* number of records in right block */ - int numrecs; - xfs_inobt_rec_t *rrp; /* right block record pointer */ - xfs_btree_cur_t *tcur; /* temporary btree cursor */ - - mp = cur->bc_mp; - - /* - * Get the index of the entry being deleted, check for nothing there. - */ - ptr = cur->bc_ptrs[level]; - if (ptr == 0) { - *stat = 0; - return 0; - } - - /* - * Get the buffer & block containing the record or key/ptr. - */ - bp = cur->bc_bufs[level]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, level, bp))) - return error; -#endif - /* - * Fail if we're off the end of the block. 
- */ - - numrecs = be16_to_cpu(block->bb_numrecs); - if (ptr > numrecs) { - *stat = 0; - return 0; - } - /* - * It's a nonleaf. Excise the key and ptr being deleted, by - * sliding the entries past them down one. - * Log the changed areas of the block. - */ - if (level > 0) { - kp = XFS_INOBT_KEY_ADDR(block, 1, cur); - pp = XFS_INOBT_PTR_ADDR(block, 1, cur); -#ifdef DEBUG - for (i = ptr; i < numrecs; i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i]), level))) - return error; - } -#endif - if (ptr < numrecs) { - memmove(&kp[ptr - 1], &kp[ptr], - (numrecs - ptr) * sizeof(*kp)); - memmove(&pp[ptr - 1], &pp[ptr], - (numrecs - ptr) * sizeof(*kp)); - xfs_inobt_log_keys(cur, bp, ptr, numrecs - 1); - xfs_inobt_log_ptrs(cur, bp, ptr, numrecs - 1); - } - } - /* - * It's a leaf. Excise the record being deleted, by sliding the - * entries past it down one. Log the changed areas of the block. - */ - else { - rp = XFS_INOBT_REC_ADDR(block, 1, cur); - if (ptr < numrecs) { - memmove(&rp[ptr - 1], &rp[ptr], - (numrecs - ptr) * sizeof(*rp)); - xfs_inobt_log_recs(cur, bp, ptr, numrecs - 1); - } - /* - * If it's the first record in the block, we'll need a key - * structure to pass up to the next level (updkey). - */ - if (ptr == 1) { - key.ir_startino = rp->ir_startino; - kp = &key; - } - } - /* - * Decrement and log the number of entries in the block. - */ - numrecs--; - block->bb_numrecs = cpu_to_be16(numrecs); - xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS); - /* - * Is this the root level? If so, we're almost done. - */ - if (level == cur->bc_nlevels - 1) { - /* - * If this is the root level, - * and there's only one entry left, - * and it's NOT the leaf level, - * then we can get rid of this level. - */ - if (numrecs == 1 && level > 0) { - agbp = cur->bc_private.a.agbp; - agi = XFS_BUF_TO_AGI(agbp); - /* - * pp is still set to the first pointer in the block. - * Make it the new root of the btree. - */ - bno = be32_to_cpu(agi->agi_root); - agi->agi_root = *pp; - be32_add_cpu(&agi->agi_level, -1); - /* - * Free the block. - */ - if ((error = xfs_free_extent(cur->bc_tp, - XFS_AGB_TO_FSB(mp, cur->bc_private.a.agno, bno), 1))) - return error; - xfs_trans_binval(cur->bc_tp, bp); - xfs_ialloc_log_agi(cur->bc_tp, agbp, - XFS_AGI_ROOT | XFS_AGI_LEVEL); - /* - * Update the cursor so there's one fewer level. - */ - cur->bc_bufs[level] = NULL; - cur->bc_nlevels--; - } else if (level > 0 && - (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * If we deleted the leftmost entry in the block, update the - * key values above us in the tree. - */ - if (ptr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)kp, level + 1))) - return error; - /* - * If the number of records remaining in the block is at least - * the minimum, we're done. - */ - if (numrecs >= XFS_INOBT_BLOCK_MINRECS(level, cur)) { - if (level > 0 && - (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * Otherwise, we have to move some records around to keep the - * tree balanced. Look at the left and right sibling blocks to - * see if we can re-balance by moving only one record. - */ - rbno = be32_to_cpu(block->bb_rightsib); - lbno = be32_to_cpu(block->bb_leftsib); - bno = NULLAGBLOCK; - ASSERT(rbno != NULLAGBLOCK || lbno != NULLAGBLOCK); - /* - * Duplicate the cursor so our btree manipulations here won't - * disrupt the next level up. 
- */ - if ((error = xfs_btree_dup_cursor(cur, &tcur))) - return error; - /* - * If there's a right sibling, see if it's ok to shift an entry - * out of it. - */ - if (rbno != NULLAGBLOCK) { - /* - * Move the temp cursor to the last entry in the next block. - * Actually any entry but the first would suffice. - */ - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - if ((error = xfs_btree_increment(tcur, level, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - i = xfs_btree_lastrec(tcur, level); - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - /* - * Grab a pointer to the block. - */ - rbp = tcur->bc_bufs[level]; - right = XFS_BUF_TO_INOBT_BLOCK(rbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - goto error0; -#endif - /* - * Grab the current block number, for future use. - */ - bno = be32_to_cpu(right->bb_leftsib); - /* - * If right block is full enough so that removing one entry - * won't make it too empty, and left-shifting an entry out - * of right to us works, we're done. - */ - if (be16_to_cpu(right->bb_numrecs) - 1 >= - XFS_INOBT_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_btree_lshift(tcur, level, &i))) - goto error0; - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_INOBT_BLOCK_MINRECS(level, cur)); - xfs_btree_del_cursor(tcur, - XFS_BTREE_NOERROR); - if (level > 0 && - (error = xfs_btree_decrement(cur, level, - &i))) - return error; - *stat = 1; - return 0; - } - } - /* - * Otherwise, grab the number of records in right for - * future reference, and fix up the temp cursor to point - * to our block again (last record). - */ - rrecs = be16_to_cpu(right->bb_numrecs); - if (lbno != NULLAGBLOCK) { - xfs_btree_firstrec(tcur, level); - if ((error = xfs_btree_decrement(tcur, level, &i))) - goto error0; - } - } - /* - * If there's a left sibling, see if it's ok to shift an entry - * out of it. - */ - if (lbno != NULLAGBLOCK) { - /* - * Move the temp cursor to the first entry in the - * previous block. - */ - xfs_btree_firstrec(tcur, level); - if ((error = xfs_btree_decrement(tcur, level, &i))) - goto error0; - xfs_btree_firstrec(tcur, level); - /* - * Grab a pointer to the block. - */ - lbp = tcur->bc_bufs[level]; - left = XFS_BUF_TO_INOBT_BLOCK(lbp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - goto error0; -#endif - /* - * Grab the current block number, for future use. - */ - bno = be32_to_cpu(left->bb_rightsib); - /* - * If left block is full enough so that removing one entry - * won't make it too empty, and right-shifting an entry out - * of left to us works, we're done. - */ - if (be16_to_cpu(left->bb_numrecs) - 1 >= - XFS_INOBT_BLOCK_MINRECS(level, cur)) { - if ((error = xfs_btree_rshift(tcur, level, &i))) - goto error0; - if (i) { - ASSERT(be16_to_cpu(block->bb_numrecs) >= - XFS_INOBT_BLOCK_MINRECS(level, cur)); - xfs_btree_del_cursor(tcur, - XFS_BTREE_NOERROR); - if (level == 0) - cur->bc_ptrs[0]++; - *stat = 1; - return 0; - } - } - /* - * Otherwise, grab the number of records in right for - * future reference. - */ - lrecs = be16_to_cpu(left->bb_numrecs); - } - /* - * Delete the temp cursor, we're done with it. - */ - xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR); - /* - * If here, we need to do a join to keep the tree balanced. - */ - ASSERT(bno != NULLAGBLOCK); - /* - * See if we can join with the left neighbor block. 
- */ - if (lbno != NULLAGBLOCK && - lrecs + numrecs <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) { - /* - * Set "right" to be the starting block, - * "left" to be the left neighbor. - */ - rbno = bno; - right = block; - rrecs = be16_to_cpu(right->bb_numrecs); - rbp = bp; - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, lbno, 0, &lbp, - XFS_INO_BTREE_REF))) - return error; - left = XFS_BUF_TO_INOBT_BLOCK(lbp); - lrecs = be16_to_cpu(left->bb_numrecs); - if ((error = xfs_btree_check_sblock(cur, left, level, lbp))) - return error; - } - /* - * If that won't work, see if we can join with the right neighbor block. - */ - else if (rbno != NULLAGBLOCK && - rrecs + numrecs <= XFS_INOBT_BLOCK_MAXRECS(level, cur)) { - /* - * Set "left" to be the starting block, - * "right" to be the right neighbor. - */ - lbno = bno; - left = block; - lrecs = be16_to_cpu(left->bb_numrecs); - lbp = bp; - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, rbno, 0, &rbp, - XFS_INO_BTREE_REF))) - return error; - right = XFS_BUF_TO_INOBT_BLOCK(rbp); - rrecs = be16_to_cpu(right->bb_numrecs); - if ((error = xfs_btree_check_sblock(cur, right, level, rbp))) - return error; - } - /* - * Otherwise, we can't fix the imbalance. - * Just return. This is probably a logic error, but it's not fatal. - */ - else { - if (level > 0 && (error = xfs_btree_decrement(cur, level, &i))) - return error; - *stat = 1; - return 0; - } - /* - * We're now going to join "left" and "right" by moving all the stuff - * in "right" to "left" and deleting "right". - */ - if (level > 0) { - /* - * It's a non-leaf. Move keys and pointers. - */ - lkp = XFS_INOBT_KEY_ADDR(left, lrecs + 1, cur); - lpp = XFS_INOBT_PTR_ADDR(left, lrecs + 1, cur); - rkp = XFS_INOBT_KEY_ADDR(right, 1, cur); - rpp = XFS_INOBT_PTR_ADDR(right, 1, cur); -#ifdef DEBUG - for (i = 0; i < rrecs; i++) { - if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(rpp[i]), level))) - return error; - } -#endif - memcpy(lkp, rkp, rrecs * sizeof(*lkp)); - memcpy(lpp, rpp, rrecs * sizeof(*lpp)); - xfs_inobt_log_keys(cur, lbp, lrecs + 1, lrecs + rrecs); - xfs_inobt_log_ptrs(cur, lbp, lrecs + 1, lrecs + rrecs); - } else { - /* - * It's a leaf. Move records. - */ - lrp = XFS_INOBT_REC_ADDR(left, lrecs + 1, cur); - rrp = XFS_INOBT_REC_ADDR(right, 1, cur); - memcpy(lrp, rrp, rrecs * sizeof(*lrp)); - xfs_inobt_log_recs(cur, lbp, lrecs + 1, lrecs + rrecs); - } - /* - * If we joined with the left neighbor, set the buffer in the - * cursor to the left block, and fix up the index. - */ - if (bp != lbp) { - xfs_btree_setbuf(cur, level, lbp); - cur->bc_ptrs[level] += lrecs; - } - /* - * If we joined with the right neighbor and there's a level above - * us, increment the cursor at that level. - */ - else if (level + 1 < cur->bc_nlevels && - (error = xfs_btree_increment(cur, level + 1, &i))) - return error; - /* - * Fix up the number of records in the surviving block. - */ - lrecs += rrecs; - left->bb_numrecs = cpu_to_be16(lrecs); - /* - * Fix up the right block pointer in the surviving block, and log it. - */ - left->bb_rightsib = right->bb_rightsib; - xfs_inobt_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB); - /* - * If there is a right sibling now, make it point to the - * remaining block. 
- */ - if (be32_to_cpu(left->bb_rightsib) != NULLAGBLOCK) { - xfs_inobt_block_t *rrblock; - xfs_buf_t *rrbp; - - if ((error = xfs_btree_read_bufs(mp, cur->bc_tp, - cur->bc_private.a.agno, be32_to_cpu(left->bb_rightsib), 0, - &rrbp, XFS_INO_BTREE_REF))) - return error; - rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp); - if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp))) - return error; - rrblock->bb_leftsib = cpu_to_be32(lbno); - xfs_inobt_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB); - } - /* - * Free the deleting block. - */ - if ((error = xfs_free_extent(cur->bc_tp, XFS_AGB_TO_FSB(mp, - cur->bc_private.a.agno, rbno), 1))) - return error; - xfs_trans_binval(cur->bc_tp, rbp); - /* - * Readjust the ptr at this level if it's not a leaf, since it's - * still pointing at the deletion point, which makes the cursor - * inconsistent. If this makes the ptr 0, the caller fixes it up. - * We can't use decrement because it would change the next level up. - */ - if (level > 0) - cur->bc_ptrs[level]--; - /* - * Return value means the next level up has something to do. - */ - *stat = 2; - return 0; - -error0: - xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); - return error; -} - -/* - * Log header fields from a btree block. - */ -STATIC void -xfs_inobt_log_block( - xfs_trans_t *tp, /* transaction pointer */ - xfs_buf_t *bp, /* buffer containing btree block */ - int fields) /* mask of fields: XFS_BB_... */ -{ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - static const short offsets[] = { /* table of offsets */ - offsetof(xfs_inobt_block_t, bb_magic), - offsetof(xfs_inobt_block_t, bb_level), - offsetof(xfs_inobt_block_t, bb_numrecs), - offsetof(xfs_inobt_block_t, bb_leftsib), - offsetof(xfs_inobt_block_t, bb_rightsib), - sizeof(xfs_inobt_block_t) - }; - - xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, &last); - xfs_trans_log_buf(tp, bp, first, last); -} - -/* - * Log keys from a btree block (nonleaf). - */ -STATIC void -xfs_inobt_log_keys( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int kfirst, /* index of first key to log */ - int klast) /* index of last key to log */ -{ - xfs_inobt_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - xfs_inobt_key_t *kp; /* key pointer in btree block */ - int last; /* last byte offset logged */ - - block = XFS_BUF_TO_INOBT_BLOCK(bp); - kp = XFS_INOBT_KEY_ADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&kp[kfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&kp[klast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - -/* - * Log block pointer fields from a btree block (nonleaf). - */ -STATIC void -xfs_inobt_log_ptrs( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int pfirst, /* index of first pointer to log */ - int plast) /* index of last pointer to log */ -{ - xfs_inobt_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - xfs_inobt_ptr_t *pp; /* block-pointer pointer in btree blk */ - - block = XFS_BUF_TO_INOBT_BLOCK(bp); - pp = XFS_INOBT_PTR_ADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&pp[pfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&pp[plast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - -/* - * Log records from a btree block (leaf). 
- */ -STATIC void -xfs_inobt_log_recs( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_buf_t *bp, /* buffer containing btree block */ - int rfirst, /* index of first record to log */ - int rlast) /* index of last record to log */ -{ - xfs_inobt_block_t *block; /* btree block to log from */ - int first; /* first byte offset logged */ - int last; /* last byte offset logged */ - xfs_inobt_rec_t *rp; /* record pointer for btree block */ - - block = XFS_BUF_TO_INOBT_BLOCK(bp); - rp = XFS_INOBT_REC_ADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(cur->bc_tp, bp, first, last); -} - - -/* - * Externally visible routines. - */ - -/* - * Delete the record pointed to by cur. - * The cursor refers to the place where the record was (could be inserted) - * when the operation returns. - */ -int /* error */ -xfs_inobt_delete( - xfs_btree_cur_t *cur, /* btree cursor */ - int *stat) /* success/failure */ -{ - int error; - int i; /* result code */ - int level; /* btree level */ - - /* - * Go up the tree, starting at leaf level. - * If 2 is returned then a join was done; go to the next level. - * Otherwise we are done. - */ - for (level = 0, i = 2; i == 2; level++) { - if ((error = xfs_inobt_delrec(cur, level, &i))) - return error; - } - if (i == 0) { - for (level = 1; level < cur->bc_nlevels; level++) { - if (cur->bc_ptrs[level] == 0) { - if ((error = xfs_btree_decrement(cur, level, &i))) - return error; - break; - } - } - } - *stat = i; - return 0; -} - /* * Get the data from the pointed-to record. @@ -690,6 +85,13 @@ xfs_inobt_get_rec( return 0; } +STATIC int +xfs_inobt_get_minrecs( + struct xfs_btree_cur *cur, + int level) +{ + return cur->bc_mp->m_inobt_mnr[level != 0]; +} STATIC struct xfs_btree_cur * xfs_inobt_dup_cursor( @@ -829,6 +231,38 @@ xfs_inobt_key_diff( cur->bc_rec.i.ir_startino; } +STATIC int +xfs_inobt_kill_root( + struct xfs_btree_cur *cur, + struct xfs_buf *bp, + int level, + union xfs_btree_ptr *newroot) +{ + int error; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); + XFS_BTREE_STATS_INC(cur, killroot); + + /* + * Update the root pointer, decreasing the level by 1 and then + * free the old root. + */ + xfs_inobt_set_root(cur, newroot, -1); + error = xfs_inobt_free_block(cur, bp); + if (error) { + XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); + return error; + } + + XFS_BTREE_STATS_INC(cur, free); + + cur->bc_bufs[level] = NULL; + cur->bc_nlevels--; + + XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); + return 0; +} + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_inobt_trace_buf; @@ -901,8 +335,10 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .dup_cursor = xfs_inobt_dup_cursor, .set_root = xfs_inobt_set_root, + .kill_root = xfs_inobt_kill_root, .alloc_block = xfs_inobt_alloc_block, .free_block = xfs_inobt_free_block, + .get_minrecs = xfs_inobt_get_minrecs, .get_maxrecs = xfs_inobt_get_maxrecs, .init_key_from_rec = xfs_inobt_init_key_from_rec, .init_rec_from_key = xfs_inobt_init_rec_from_key, diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index c9cbc4f2168..3eff3b6e5fa 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -116,13 +116,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; (XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \ i, XFS_INOBT_BLOCK_MAXRECS(1, cur))) -/* - * Delete the record pointed to by cur. - * The cursor refers to the place where the record was (could be inserted) - * when the operation returns. 
- */ -extern int xfs_inobt_delete(struct xfs_btree_cur *cur, int *stat); - /* * Get the data from the pointed-to record. */ -- cgit v1.2.3-70-g09d2 From 8cc938fe4237e50bea4aa557ed53b06de2319d49 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:11 +1100 Subject: [XFS] implement generic xfs_btree_get_rec Not really much reason to make it generic given that it's so small, but this is the last non-method in xfs_alloc_btree.c and xfs_ialloc_btree.c, so it makes the whole btree implementation more structured. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32206a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc.c | 21 +++++++++++++++++++++ fs/xfs/xfs_alloc_btree.c | 44 -------------------------------------------- fs/xfs/xfs_alloc_btree.h | 6 ------ fs/xfs/xfs_btree.c | 41 +++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree.h | 1 + fs/xfs/xfs_ialloc.c | 23 +++++++++++++++++++++++ fs/xfs/xfs_ialloc.h | 5 +++++ fs/xfs/xfs_ialloc_btree.c | 44 -------------------------------------------- fs/xfs/xfs_ialloc_btree.h | 7 ------- 9 files changed, 91 insertions(+), 101 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index e9c70249d2c..54fa69e2776 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -154,6 +154,27 @@ xfs_alloc_update( return xfs_btree_update(cur, &rec); } +/* + * Get the data from the pointed-to record. + */ +STATIC int /* error */ +xfs_alloc_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat) /* output: success/failure */ +{ + union xfs_btree_rec *rec; + int error; + + error = xfs_btree_get_rec(cur, &rec, stat); + if (!error && *stat == 1) { + *bno = be32_to_cpu(rec->alloc.ar_startblock); + *len = be32_to_cpu(rec->alloc.ar_blockcount); + } + return error; +} + /* * Compute aligned version of the found extent. * Takes alignment and min length into account. diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index d256b51f913..4d44f03858b 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -41,50 +41,6 @@ #include "xfs_error.h" -/* - * Get the data from the pointed-to record. - */ -int /* error */ -xfs_alloc_get_rec( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agblock_t *bno, /* output: starting block of extent */ - xfs_extlen_t *len, /* output: length of extent */ - int *stat) /* output: success/failure */ -{ - xfs_alloc_block_t *block; /* btree block */ -#ifdef DEBUG - int error; /* error return value */ -#endif - int ptr; /* record number */ - - ptr = cur->bc_ptrs[0]; - block = XFS_BUF_TO_ALLOC_BLOCK(cur->bc_bufs[0]); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, 0, cur->bc_bufs[0]))) - return error; -#endif - /* - * Off the right end or left end, return failure. - */ - if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { - *stat = 0; - return 0; - } - /* - * Point to the record and extract its data. 
- */ - { - xfs_alloc_rec_t *rec; /* record data */ - - rec = XFS_ALLOC_REC_ADDR(block, ptr, cur); - *bno = be32_to_cpu(rec->ar_startblock); - *len = be32_to_cpu(rec->ar_blockcount); - } - *stat = 1; - return 0; -} - - STATIC struct xfs_btree_cur * xfs_allocbt_dup_cursor( struct xfs_btree_cur *cur) diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 8d2e3ec21fd..22f1d709af7 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -94,12 +94,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur)) -/* - * Get the data from the pointed-to record. - */ -extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno, - xfs_extlen_t *len, int *stat); - extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 28cc7681834..8503ed5d10a 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -3764,3 +3764,44 @@ error0: XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR); return error; } + +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_btree_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + union xfs_btree_rec **recp, /* output: btree record */ + int *stat) /* output: success/failure */ +{ + struct xfs_btree_block *block; /* btree block */ + struct xfs_buf *bp; /* buffer pointer */ + int ptr; /* record number */ +#ifdef DEBUG + int error; /* error return value */ +#endif + + ptr = cur->bc_ptrs[0]; + block = xfs_btree_get_block(cur, 0, &bp); + +#ifdef DEBUG + error = xfs_btree_check_block(cur, block, 0, bp); + if (error) + return error; +#endif + + /* + * Off the right end or left end, return failure. + */ + if (ptr > xfs_btree_get_numrecs(block) || ptr <= 0) { + *stat = 0; + return 0; + } + + /* + * Point to the record and extract its data. + */ + *recp = xfs_btree_rec_addr(cur, ptr, block); + *stat = 1; + return 0; +} diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 06ef792e0aa..cee3684d871 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -567,6 +567,7 @@ int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); int xfs_btree_kill_iroot(struct xfs_btree_cur *); int xfs_btree_insert(struct xfs_btree_cur *, int *); int xfs_btree_delete(struct xfs_btree_cur *, int *); +int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *); /* * Helpers. diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index f13f59b13cc..c8a56c52964 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -191,6 +191,29 @@ xfs_inobt_update( return xfs_btree_update(cur, &rec); } +/* + * Get the data from the pointed-to record. + */ +int /* error */ +xfs_inobt_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agino_t *ino, /* output: starting inode of chunk */ + __int32_t *fcnt, /* output: number of free inodes */ + xfs_inofree_t *free, /* output: free inode mask */ + int *stat) /* output: success/failure */ +{ + union xfs_btree_rec *rec; + int error; + + error = xfs_btree_get_rec(cur, &rec, stat); + if (!error && *stat == 1) { + *ino = be32_to_cpu(rec->inobt.ir_startino); + *fcnt = be32_to_cpu(rec->inobt.ir_freecount); + *free = be64_to_cpu(rec->inobt.ir_free); + } + return error; +} + /* * Allocate new inodes in the allocation group specified by agbp. * Return 0 for success, else error code. 
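The xfs_inobt_get_rec() added above is a thin typed wrapper: the generic layer hands back the raw on-disk record, and the per-tree helper converts the big-endian fields into the types its callers expect. Here is a small standalone sketch of that wrapper pattern; the structures are illustrative and ntohl() merely stands in for be32_to_cpu().

/*
 * "Generic record + typed wrapper" pattern: a generic lookup returns a
 * pointer to the raw big-endian record, and a typed helper unpacks it,
 * mirroring the shape of xfs_inobt_get_rec() above.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl() as be32 helpers */

struct disk_rec {		/* illustrative on-disk (big-endian) record */
	uint32_t	startino_be;
	uint32_t	freecount_be;
};

/* Generic layer: return the raw record, stat says whether one was found. */
static int generic_get_rec(struct disk_rec *recs, int nrecs, int ptr,
			   struct disk_rec **recp, int *stat)
{
	if (ptr <= 0 || ptr > nrecs) {
		*stat = 0;
		return 0;
	}
	*recp = &recs[ptr - 1];
	*stat = 1;
	return 0;
}

/* Typed wrapper: unpack the endian-swapped fields for the caller. */
static int typed_get_rec(struct disk_rec *recs, int nrecs, int ptr,
			 uint32_t *ino, uint32_t *fcnt, int *stat)
{
	struct disk_rec	*rec;
	int		error;

	error = generic_get_rec(recs, nrecs, ptr, &rec, stat);
	if (!error && *stat == 1) {
		*ino = ntohl(rec->startino_be);
		*fcnt = ntohl(rec->freecount_be);
	}
	return error;
}

int main(void)
{
	struct disk_rec	recs[2] = {
		{ htonl(64), htonl(60) },
		{ htonl(128), htonl(64) },
	};
	uint32_t	ino, fcnt;
	int		stat;

	typed_get_rec(recs, 2, 2, &ino, &fcnt, &stat);
	if (stat)
		printf("chunk starts at inode %u, %u free\n", ino, fcnt);
	return 0;
}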
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index 4026578bc26..c5745f6d94e 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h @@ -168,6 +168,11 @@ int xfs_inobt_lookup_ge(struct xfs_btree_cur *cur, xfs_agino_t ino, int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, __int32_t fcnt, xfs_inofree_t free, int *stat); +/* + * Get the data from the pointed-to record. + */ +extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, + __int32_t *fcnt, xfs_inofree_t *free, int *stat); #endif /* __KERNEL__ */ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 6c0a07d1fed..9f4e33c945c 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -41,50 +41,6 @@ #include "xfs_error.h" -/* - * Get the data from the pointed-to record. - */ -int /* error */ -xfs_inobt_get_rec( - xfs_btree_cur_t *cur, /* btree cursor */ - xfs_agino_t *ino, /* output: starting inode of chunk */ - __int32_t *fcnt, /* output: number of free inodes */ - xfs_inofree_t *free, /* output: free inode mask */ - int *stat) /* output: success/failure */ -{ - xfs_inobt_block_t *block; /* btree block */ - xfs_buf_t *bp; /* buffer containing btree block */ -#ifdef DEBUG - int error; /* error return value */ -#endif - int ptr; /* record number */ - xfs_inobt_rec_t *rec; /* record data */ - - bp = cur->bc_bufs[0]; - ptr = cur->bc_ptrs[0]; - block = XFS_BUF_TO_INOBT_BLOCK(bp); -#ifdef DEBUG - if ((error = xfs_btree_check_sblock(cur, block, 0, bp))) - return error; -#endif - /* - * Off the right end or left end, return failure. - */ - if (ptr > be16_to_cpu(block->bb_numrecs) || ptr <= 0) { - *stat = 0; - return 0; - } - /* - * Point to the record and extract its data. - */ - rec = XFS_INOBT_REC_ADDR(block, ptr, cur); - *ino = be32_to_cpu(rec->ir_startino); - *fcnt = be32_to_cpu(rec->ir_freecount); - *free = be64_to_cpu(rec->ir_free); - *stat = 1; - return 0; -} - STATIC int xfs_inobt_get_minrecs( struct xfs_btree_cur *cur, diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index 3eff3b6e5fa..ff7406b4bac 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -116,13 +116,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; (XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \ i, XFS_INOBT_BLOCK_MAXRECS(1, cur))) -/* - * Get the data from the pointed-to record. - */ -extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, - __int32_t *fcnt, xfs_inofree_t *free, int *stat); - - extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); -- cgit v1.2.3-70-g09d2 From fd6bcc5b63051392ba709a8fd33173b263669e0a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:21 +1100 Subject: [XFS] kill xfs_bmbt_log_block and xfs_bmbt_log_recs These are equivalent to the xfs_btree_* versions, and the only remaining caller can be switched to the generic one after they are exported. Also remove some now dead infrastructure in xfs_bmap_btree.c. 
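The per-tree helpers removed by this patch all duplicated the same arithmetic: map a 1-based range of record indices to a byte range inside the buffer and hand it to xfs_trans_log_buf(). A standalone sketch of that offset computation follows; the block layout is illustrative, not the on-disk format.

/*
 * Offset arithmetic shared by the per-tree log_recs helpers: the logged
 * byte range runs from the first byte of record rfirst to the last byte
 * of record rlast, measured from the start of the block.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_rec {
	uint64_t	key;
	uint64_t	val;
};

struct toy_block {			/* illustrative header + records */
	uint16_t	numrecs;
	struct toy_rec	recs[16];
};

static void log_range(struct toy_block *block, int rfirst, int rlast,
		      int *firstp, int *lastp)
{
	struct toy_rec	*rp = block->recs;

	/* Same shape as the removed xfs_bmbt_log_recs()/xfs_inobt_log_recs(). */
	*firstp = (int)((char *)&rp[rfirst - 1] - (char *)block);
	*lastp = (int)(((char *)&rp[rlast] - 1) - (char *)block);
}

int main(void)
{
	struct toy_block	b = { .numrecs = 4 };
	int			first, last;

	log_range(&b, 2, 3, &first, &last);
	printf("would log bytes %d..%d of the block\n", first, last);
	return 0;
}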
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32207a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_bmap.c | 4 +-- fs/xfs/xfs_bmap_btree.c | 88 ------------------------------------------------- fs/xfs/xfs_bmap_btree.h | 4 --- fs/xfs/xfs_btree.c | 4 +-- fs/xfs/xfs_btree.h | 6 ++++ 5 files changed, 10 insertions(+), 96 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 74761ca2c63..5cceb8d3c16 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -3563,8 +3563,8 @@ xfs_bmap_extents_to_btree( * Do all this logging at the end so that * the root is at the right level. */ - xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS); - xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); + xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS); + xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs)); ASSERT(*curp == NULL); *curp = cur; *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork); diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 5b8030561d7..7a02d391afe 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -44,33 +44,6 @@ #include "xfs_error.h" #include "xfs_quota.h" -#undef EXIT - -#define ENTRY XBT_ENTRY -#define ERROR XBT_ERROR -#define EXIT XBT_EXIT - -/* - * Keep the XFS_BMBT_TRACE_ names around for now until all code using them - * is converted to be generic and thus switches to the XFS_BTREE_TRACE_ names. - */ -#define XFS_BMBT_TRACE_ARGBI(c,b,i) \ - XFS_BTREE_TRACE_ARGBI(c,b,i) -#define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ - XFS_BTREE_TRACE_ARGBII(c,b,i,j) -#define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ - XFS_BTREE_TRACE_ARGFFFI(c,o,b,i,j) -#define XFS_BMBT_TRACE_ARGI(c,i) \ - XFS_BTREE_TRACE_ARGI(c,i) -#define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ - XFS_BTREE_TRACE_ARGIPK(c,i,(union xfs_btree_ptr)f,s) -#define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ - XFS_BTREE_TRACE_ARGIPR(c,i, \ - (union xfs_btree_ptr)f, (union xfs_btree_rec *)r) -#define XFS_BMBT_TRACE_ARGIK(c,i,k) \ - XFS_BTREE_TRACE_ARGIK(c,i,(union xfs_btree_key *)k) -#define XFS_BMBT_TRACE_CURSOR(c,s) \ - XFS_BTREE_TRACE_CURSOR(c,s) /* * Determine the extent state. @@ -259,67 +232,6 @@ xfs_bmbt_disk_get_startoff( XFS_MASK64LO(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; } -/* - * Log fields from the btree block header. - */ -void -xfs_bmbt_log_block( - xfs_btree_cur_t *cur, - xfs_buf_t *bp, - int fields) -{ - int first; - int last; - xfs_trans_t *tp; - static const short offsets[] = { - offsetof(xfs_bmbt_block_t, bb_magic), - offsetof(xfs_bmbt_block_t, bb_level), - offsetof(xfs_bmbt_block_t, bb_numrecs), - offsetof(xfs_bmbt_block_t, bb_leftsib), - offsetof(xfs_bmbt_block_t, bb_rightsib), - sizeof(xfs_bmbt_block_t) - }; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGBI(cur, bp, fields); - tp = cur->bc_tp; - if (bp) { - xfs_btree_offsets(fields, offsets, XFS_BB_NUM_BITS, &first, - &last); - xfs_trans_log_buf(tp, bp, first, last); - } else - xfs_trans_log_inode(tp, cur->bc_private.b.ip, - XFS_ILOG_FBROOT(cur->bc_private.b.whichfork)); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); -} - -/* - * Log record values from the btree block. 
- */ -void -xfs_bmbt_log_recs( - xfs_btree_cur_t *cur, - xfs_buf_t *bp, - int rfirst, - int rlast) -{ - xfs_bmbt_block_t *block; - int first; - int last; - xfs_bmbt_rec_t *rp; - xfs_trans_t *tp; - - XFS_BMBT_TRACE_CURSOR(cur, ENTRY); - XFS_BMBT_TRACE_ARGBII(cur, bp, rfirst, rlast); - ASSERT(bp); - tp = cur->bc_tp; - block = XFS_BUF_TO_BMBT_BLOCK(bp); - rp = XFS_BMAP_REC_DADDR(block, 1, cur); - first = (int)((xfs_caddr_t)&rp[rfirst - 1] - (xfs_caddr_t)block); - last = (int)(((xfs_caddr_t)&rp[rlast] - 1) - (xfs_caddr_t)block); - xfs_trans_log_buf(tp, bp, first, last); - XFS_BMBT_TRACE_CURSOR(cur, EXIT); -} /* * Set all the fields in a bmap extent record from the arguments. diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 952ab395f79..6f38e250570 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -247,10 +247,6 @@ extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r); extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r); -extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); -extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, - int); - extern void xfs_bmbt_set_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); extern void xfs_bmbt_set_allf(xfs_bmbt_rec_host_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 8503ed5d10a..88eb00bdeb9 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -1309,7 +1309,7 @@ xfs_btree_log_keys( /* * Log record values from the btree block. */ -STATIC void +void xfs_btree_log_recs( struct xfs_btree_cur *cur, struct xfs_buf *bp, @@ -1357,7 +1357,7 @@ xfs_btree_log_ptrs( /* * Log fields from a btree block header. */ -STATIC void +void xfs_btree_log_block( struct xfs_btree_cur *cur, /* btree cursor */ struct xfs_buf *bp, /* buffer containing btree block */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index cee3684d871..d6cc71e31de 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -569,6 +569,12 @@ int xfs_btree_insert(struct xfs_btree_cur *, int *); int xfs_btree_delete(struct xfs_btree_cur *, int *); int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *); +/* + * Internal btree helpers also used by xfs_bmap.c. + */ +void xfs_btree_log_block(struct xfs_btree_cur *, struct xfs_buf *, int); +void xfs_btree_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int, int); + /* * Helpers. */ -- cgit v1.2.3-70-g09d2 From 4a26e66e7728112f0e1cd7eca3bcc430b3a221c9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:32 +1100 Subject: [XFS] add keys_inorder and recs_inorder btree methods Add methods to check whether two keys/records are in the righ order. This replaces the xfs_btree_check_key and xfs_btree_check_rec methods. For the callers from xfs_bmap.c just opencode the bmbt-specific asserts. 
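These methods replace a single switch over btree types with per-tree callbacks in the ops vector, so the generic code can assert key and record ordering through a function pointer. A standalone sketch of that dispatch pattern follows; the toy_* types are illustrative, not the kernel's.

/*
 * Comparator-callback pattern: generic code checks ordering through a
 * per-tree ops function instead of switching on the btree type, as the
 * ASSERT(cur->bc_ops->keys_inorder(...)) calls in the diff below now do.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

union toy_key {
	uint32_t	startblock;	/* bno-tree style key */
	uint64_t	startoff;	/* bmap-tree style key */
};

struct toy_ops {
	/* return nonzero if k1 sorts strictly before k2 */
	int (*keys_inorder)(const union toy_key *k1, const union toy_key *k2);
};

static int bno_keys_inorder(const union toy_key *k1, const union toy_key *k2)
{
	return k1->startblock < k2->startblock;
}

/* Generic code only ever sees the ops vector. */
static void check_order(const struct toy_ops *ops,
			const union toy_key *k1, const union toy_key *k2)
{
	assert(ops->keys_inorder(k1, k2));
}

int main(void)
{
	static const struct toy_ops	bno_ops = {
		.keys_inorder = bno_keys_inorder,
	};
	union toy_key			a = { .startblock = 10 };
	union toy_key			b = { .startblock = 20 };

	check_order(&bno_ops, &a, &b);
	printf("keys are in order\n");
	return 0;
}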
SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32208a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_alloc_btree.c | 44 ++++++++++++++ fs/xfs/xfs_bmap.c | 11 +++- fs/xfs/xfs_bmap_btree.c | 28 +++++++++ fs/xfs/xfs_btree.c | 150 +++++----------------------------------------- fs/xfs/xfs_btree.h | 36 ++++------- fs/xfs/xfs_ialloc_btree.c | 27 +++++++++ 6 files changed, 135 insertions(+), 161 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 4d44f03858b..9e63f8c180d 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -311,6 +311,45 @@ xfs_allocbt_kill_root( return 0; } +#ifdef DEBUG +STATIC int +xfs_allocbt_keys_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_key *k1, + union xfs_btree_key *k2) +{ + if (cur->bc_btnum == XFS_BTNUM_BNO) { + return be32_to_cpu(k1->alloc.ar_startblock) < + be32_to_cpu(k2->alloc.ar_startblock); + } else { + return be32_to_cpu(k1->alloc.ar_blockcount) < + be32_to_cpu(k2->alloc.ar_blockcount) || + (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount && + be32_to_cpu(k1->alloc.ar_startblock) < + be32_to_cpu(k2->alloc.ar_startblock)); + } +} + +STATIC int +xfs_allocbt_recs_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_rec *r1, + union xfs_btree_rec *r2) +{ + if (cur->bc_btnum == XFS_BTNUM_BNO) { + return be32_to_cpu(r1->alloc.ar_startblock) + + be32_to_cpu(r1->alloc.ar_blockcount) <= + be32_to_cpu(r2->alloc.ar_startblock); + } else { + return be32_to_cpu(r1->alloc.ar_blockcount) < + be32_to_cpu(r2->alloc.ar_blockcount) || + (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount && + be32_to_cpu(r1->alloc.ar_startblock) < + be32_to_cpu(r2->alloc.ar_startblock)); + } +} +#endif /* DEBUG */ + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_allocbt_trace_buf; @@ -395,6 +434,11 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, .key_diff = xfs_allocbt_key_diff, +#ifdef DEBUG + .keys_inorder = xfs_allocbt_keys_inorder, + .recs_inorder = xfs_allocbt_recs_inorder, +#endif + #ifdef XFS_BTREE_TRACE .trace_enter = xfs_allocbt_trace_enter, .trace_cursor = xfs_allocbt_trace_cursor, diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 5cceb8d3c16..b7f99d7576d 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -6195,7 +6195,8 @@ xfs_check_block( } if (prevp) { - xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp); + ASSERT(be64_to_cpu(prevp->br_startoff) < + be64_to_cpu(keyp->br_startoff)); } prevp = keyp; @@ -6338,11 +6339,15 @@ xfs_bmap_check_leaf_extents( ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1); if (i) { - xfs_btree_check_rec(XFS_BTNUM_BMAP, &last, ep); + ASSERT(xfs_bmbt_disk_get_startoff(&last) + + xfs_bmbt_disk_get_blockcount(&last) <= + xfs_bmbt_disk_get_startoff(ep)); } for (j = 1; j < num_recs; j++) { nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1); - xfs_btree_check_rec(XFS_BTNUM_BMAP, ep, nextp); + ASSERT(xfs_bmbt_disk_get_startoff(ep) + + xfs_bmbt_disk_get_blockcount(ep) <= + xfs_bmbt_disk_get_startoff(nextp)); ep = nextp; } diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 7a02d391afe..c5eeb3241e2 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -699,6 +699,29 @@ xfs_bmbt_key_diff( cur->bc_rec.b.br_startoff; } +#ifdef DEBUG +STATIC int +xfs_bmbt_keys_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_key *k1, + union xfs_btree_key *k2) +{ + return be64_to_cpu(k1->bmbt.br_startoff) < + 
be64_to_cpu(k2->bmbt.br_startoff); +} + +STATIC int +xfs_bmbt_recs_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_rec *r1, + union xfs_btree_rec *r2) +{ + return xfs_bmbt_disk_get_startoff(&r1->bmbt) + + xfs_bmbt_disk_get_blockcount(&r1->bmbt) <= + xfs_bmbt_disk_get_startoff(&r2->bmbt); +} +#endif /* DEBUG */ + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_bmbt_trace_buf; @@ -801,6 +824,11 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, .key_diff = xfs_bmbt_key_diff, +#ifdef DEBUG + .keys_inorder = xfs_bmbt_keys_inorder, + .recs_inorder = xfs_bmbt_recs_inorder, +#endif + #ifdef XFS_BTREE_TRACE .trace_enter = xfs_bmbt_trace_enter, .trace_cursor = xfs_bmbt_trace_cursor, diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 88eb00bdeb9..d667d30210a 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -52,122 +52,6 @@ const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC }; -/* - * External routines. - */ - -#ifdef DEBUG -/* - * Debug routine: check that keys are in the right order. - */ -void -xfs_btree_check_key( - xfs_btnum_t btnum, /* btree identifier */ - void *ak1, /* pointer to left (lower) key */ - void *ak2) /* pointer to right (higher) key */ -{ - switch (btnum) { - case XFS_BTNUM_BNO: { - xfs_alloc_key_t *k1; - xfs_alloc_key_t *k2; - - k1 = ak1; - k2 = ak2; - ASSERT(be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock)); - break; - } - case XFS_BTNUM_CNT: { - xfs_alloc_key_t *k1; - xfs_alloc_key_t *k2; - - k1 = ak1; - k2 = ak2; - ASSERT(be32_to_cpu(k1->ar_blockcount) < be32_to_cpu(k2->ar_blockcount) || - (k1->ar_blockcount == k2->ar_blockcount && - be32_to_cpu(k1->ar_startblock) < be32_to_cpu(k2->ar_startblock))); - break; - } - case XFS_BTNUM_BMAP: { - xfs_bmbt_key_t *k1; - xfs_bmbt_key_t *k2; - - k1 = ak1; - k2 = ak2; - ASSERT(be64_to_cpu(k1->br_startoff) < be64_to_cpu(k2->br_startoff)); - break; - } - case XFS_BTNUM_INO: { - xfs_inobt_key_t *k1; - xfs_inobt_key_t *k2; - - k1 = ak1; - k2 = ak2; - ASSERT(be32_to_cpu(k1->ir_startino) < be32_to_cpu(k2->ir_startino)); - break; - } - default: - ASSERT(0); - } -} - -/* - * Debug routine: check that records are in the right order. 
- */ -void -xfs_btree_check_rec( - xfs_btnum_t btnum, /* btree identifier */ - void *ar1, /* pointer to left (lower) record */ - void *ar2) /* pointer to right (higher) record */ -{ - switch (btnum) { - case XFS_BTNUM_BNO: { - xfs_alloc_rec_t *r1; - xfs_alloc_rec_t *r2; - - r1 = ar1; - r2 = ar2; - ASSERT(be32_to_cpu(r1->ar_startblock) + - be32_to_cpu(r1->ar_blockcount) <= - be32_to_cpu(r2->ar_startblock)); - break; - } - case XFS_BTNUM_CNT: { - xfs_alloc_rec_t *r1; - xfs_alloc_rec_t *r2; - - r1 = ar1; - r2 = ar2; - ASSERT(be32_to_cpu(r1->ar_blockcount) < be32_to_cpu(r2->ar_blockcount) || - (r1->ar_blockcount == r2->ar_blockcount && - be32_to_cpu(r1->ar_startblock) < be32_to_cpu(r2->ar_startblock))); - break; - } - case XFS_BTNUM_BMAP: { - xfs_bmbt_rec_t *r1; - xfs_bmbt_rec_t *r2; - - r1 = ar1; - r2 = ar2; - ASSERT(xfs_bmbt_disk_get_startoff(r1) + - xfs_bmbt_disk_get_blockcount(r1) <= - xfs_bmbt_disk_get_startoff(r2)); - break; - } - case XFS_BTNUM_INO: { - xfs_inobt_rec_t *r1; - xfs_inobt_rec_t *r2; - - r1 = ar1; - r2 = ar2; - ASSERT(be32_to_cpu(r1->ir_startino) + XFS_INODES_PER_CHUNK <= - be32_to_cpu(r2->ir_startino)); - break; - } - default: - ASSERT(0); - } -} -#endif /* DEBUG */ int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_lblock( @@ -2032,9 +1916,8 @@ xfs_btree_lshift( xfs_btree_log_keys(cur, lbp, lrecs, lrecs); xfs_btree_log_ptrs(cur, lbp, lrecs, lrecs); - xfs_btree_check_key(cur->bc_btnum, - xfs_btree_key_addr(cur, lrecs - 1, left), - lkp); + ASSERT(cur->bc_ops->keys_inorder(cur, + xfs_btree_key_addr(cur, lrecs - 1, left), lkp)); } else { /* It's a leaf. Move records. */ union xfs_btree_rec *lrp; /* left record pointer */ @@ -2045,9 +1928,8 @@ xfs_btree_lshift( xfs_btree_copy_recs(cur, lrp, rrp, 1); xfs_btree_log_recs(cur, lbp, lrecs, lrecs); - xfs_btree_check_rec(cur->bc_btnum, - xfs_btree_rec_addr(cur, lrecs - 1, left), - lrp); + ASSERT(cur->bc_ops->recs_inorder(cur, + xfs_btree_rec_addr(cur, lrecs - 1, left), lrp)); } xfs_btree_set_numrecs(left, lrecs); @@ -2222,8 +2104,8 @@ xfs_btree_rshift( xfs_btree_log_keys(cur, rbp, 1, rrecs + 1); xfs_btree_log_ptrs(cur, rbp, 1, rrecs + 1); - xfs_btree_check_key(cur->bc_btnum, rkp, - xfs_btree_key_addr(cur, 2, right)); + ASSERT(cur->bc_ops->keys_inorder(cur, rkp, + xfs_btree_key_addr(cur, 2, right))); } else { /* It's a leaf. make a hole in the records */ union xfs_btree_rec *lrp; @@ -2241,8 +2123,8 @@ xfs_btree_rshift( cur->bc_ops->init_key_from_rec(&key, rrp); rkp = &key; - xfs_btree_check_rec(cur->bc_btnum, rrp, - xfs_btree_rec_addr(cur, 2, right)); + ASSERT(cur->bc_ops->recs_inorder(cur, rrp, + xfs_btree_rec_addr(cur, 2, right))); } /* @@ -2849,11 +2731,11 @@ xfs_btree_insrec( /* Check that the new entry is being inserted in the right place. 
*/ if (ptr <= numrecs) { if (level == 0) { - xfs_btree_check_rec(cur->bc_btnum, recp, - xfs_btree_rec_addr(cur, ptr, block)); + ASSERT(cur->bc_ops->recs_inorder(cur, recp, + xfs_btree_rec_addr(cur, ptr, block))); } else { - xfs_btree_check_key(cur->bc_btnum, &key, - xfs_btree_key_addr(cur, ptr, block)); + ASSERT(cur->bc_ops->keys_inorder(cur, &key, + xfs_btree_key_addr(cur, ptr, block))); } } #endif @@ -2923,8 +2805,8 @@ xfs_btree_insrec( xfs_btree_log_keys(cur, bp, ptr, numrecs); #ifdef DEBUG if (ptr < numrecs) { - xfs_btree_check_key(cur->bc_btnum, kp, - xfs_btree_key_addr(cur, ptr + 1, block)); + ASSERT(cur->bc_ops->keys_inorder(cur, kp, + xfs_btree_key_addr(cur, ptr + 1, block))); } #endif } else { @@ -2941,8 +2823,8 @@ xfs_btree_insrec( xfs_btree_log_recs(cur, bp, ptr, numrecs); #ifdef DEBUG if (ptr < numrecs) { - xfs_btree_check_rec(cur->bc_btnum, rp, - xfs_btree_rec_addr(cur, ptr + 1, block)); + ASSERT(cur->bc_ops->recs_inorder(cur, rp, + xfs_btree_rec_addr(cur, ptr + 1, block))); } #endif } diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index d6cc71e31de..25a488d4da1 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -229,6 +229,18 @@ struct xfs_btree_ops { __int64_t (*key_diff)(struct xfs_btree_cur *cur, union xfs_btree_key *key); +#ifdef DEBUG + /* check that k1 is lower than k2 */ + int (*keys_inorder)(struct xfs_btree_cur *cur, + union xfs_btree_key *k1, + union xfs_btree_key *k2); + + /* check that r1 is lower than r2 */ + int (*recs_inorder)(struct xfs_btree_cur *cur, + union xfs_btree_rec *r1, + union xfs_btree_rec *r2); +#endif + /* btree tracing */ #ifdef XFS_BTREE_TRACE void (*trace_enter)(struct xfs_btree_cur *, const char *, @@ -379,30 +391,6 @@ xfs_btree_check_ptr( int index, /* offset from ptr to check */ int level); /* btree block level */ -#ifdef DEBUG - -/* - * Debug routine: check that keys are in the right order. - */ -void -xfs_btree_check_key( - xfs_btnum_t btnum, /* btree identifier */ - void *ak1, /* pointer to left (lower) key */ - void *ak2); /* pointer to right (higher) key */ - -/* - * Debug routine: check that records are in the right order. - */ -void -xfs_btree_check_rec( - xfs_btnum_t btnum, /* btree identifier */ - void *ar1, /* pointer to left (lower) record */ - void *ar2); /* pointer to right (higher) record */ -#else -#define xfs_btree_check_key(a, b, c) -#define xfs_btree_check_rec(a, b, c) -#endif /* DEBUG */ - /* * Delete the btree cursor. 
*/ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 9f4e33c945c..dcd4a956e73 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -219,6 +219,28 @@ xfs_inobt_kill_root( return 0; } +#ifdef DEBUG +STATIC int +xfs_inobt_keys_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_key *k1, + union xfs_btree_key *k2) +{ + return be32_to_cpu(k1->inobt.ir_startino) < + be32_to_cpu(k2->inobt.ir_startino); +} + +STATIC int +xfs_inobt_recs_inorder( + struct xfs_btree_cur *cur, + union xfs_btree_rec *r1, + union xfs_btree_rec *r2) +{ + return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <= + be32_to_cpu(r2->inobt.ir_startino); +} +#endif /* DEBUG */ + #ifdef XFS_BTREE_TRACE ktrace_t *xfs_inobt_trace_buf; @@ -302,6 +324,11 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, .key_diff = xfs_inobt_key_diff, +#ifdef DEBUG + .keys_inorder = xfs_inobt_keys_inorder, + .recs_inorder = xfs_inobt_recs_inorder, +#endif + #ifdef XFS_BTREE_TRACE .trace_enter = xfs_inobt_trace_enter, .trace_cursor = xfs_inobt_trace_cursor, -- cgit v1.2.3-70-g09d2 From 3cc7524c8445e6244b055f3fa338529188c7c260 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:41 +1100 Subject: [XFS] mark various functions in xfs_btree.c static Lots of functionality in xfs_btree.c isn't needed by callers outside of this file anymore, so mark these functions static. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32209a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_btree.c | 22 +++++++++--------- fs/xfs/xfs_btree.h | 67 ------------------------------------------------------ 2 files changed, 11 insertions(+), 78 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index d667d30210a..b735c7299a8 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -87,7 +87,7 @@ xfs_btree_check_lblock( return 0; } -int /* error (0 or EFSCORRUPTED) */ +STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sblock( struct xfs_btree_cur *cur, /* btree cursor */ struct xfs_btree_sblock *block, /* btree short form block pointer */ @@ -163,7 +163,7 @@ xfs_btree_check_lptr( /* * Check that (short) pointer is ok. */ -int /* error (0 or EFSCORRUPTED) */ +STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sptr( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* btree block disk address */ @@ -182,7 +182,7 @@ xfs_btree_check_sptr( /* * Check that block ptr is ok. */ -int /* error (0 or EFSCORRUPTED) */ +STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_ptr( struct xfs_btree_cur *cur, /* btree cursor */ union xfs_btree_ptr *ptr, /* btree block disk address */ @@ -523,7 +523,7 @@ xfs_btree_islastblock( * Change the cursor to point to the first record at the given level. * Other levels are unaffected. */ -int /* success=1, failure=0 */ +STATIC int /* success=1, failure=0 */ xfs_btree_firstrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ @@ -552,7 +552,7 @@ xfs_btree_firstrec( * Change the cursor to point to the last record in the current block * at the given level. Other levels are unaffected. */ -int /* success=1, failure=0 */ +STATIC int /* success=1, failure=0 */ xfs_btree_lastrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ @@ -775,7 +775,7 @@ xfs_btree_readahead_sblock( * Read-ahead btree blocks, at the given level. 
* Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. */ -int +STATIC int xfs_btree_readahead( struct xfs_btree_cur *cur, /* btree cursor */ int lev, /* level in btree */ @@ -1711,7 +1711,7 @@ error0: /* * Update keys at all levels from here to the root along the cursor's path. */ -int +STATIC int xfs_btree_updkey( struct xfs_btree_cur *cur, union xfs_btree_key *keyp, @@ -1821,7 +1821,7 @@ error0: * Move 1 record left from cur/level if possible. * Update cur to reflect the new path. */ -int /* error */ +STATIC int /* error */ xfs_btree_lshift( struct xfs_btree_cur *cur, int level, @@ -2004,7 +2004,7 @@ error0: * Move 1 record right from cur/level if possible. * Update cur to reflect the new path. */ -int /* error */ +STATIC int /* error */ xfs_btree_rshift( struct xfs_btree_cur *cur, int level, @@ -2180,7 +2180,7 @@ error1: * Return new block number and the key to its first * record (to be inserted into parent). */ -int /* error */ +STATIC int /* error */ xfs_btree_split( struct xfs_btree_cur *cur, int level, @@ -2465,7 +2465,7 @@ error0: /* * Allocate a new root block, fill it in. */ -int /* error */ +STATIC int /* error */ xfs_btree_new_root( struct xfs_btree_cur *cur, /* btree cursor */ int *stat) /* success/failure */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 25a488d4da1..2a22c587f1a 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -339,16 +339,6 @@ xfs_btree_check_lblock( int level, /* level of the btree block */ struct xfs_buf *bp); /* buffer containing block, if any */ -/* - * Check that short form block header is ok. - */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_sblock( - struct xfs_btree_cur *cur, /* btree cursor */ - struct xfs_btree_sblock *block, /* btree short form block pointer */ - int level, /* level of the btree block */ - struct xfs_buf *bp); /* buffer containing block */ - /* * Check that block header is ok. */ @@ -368,29 +358,6 @@ xfs_btree_check_lptr( xfs_dfsbno_t ptr, /* btree block disk address */ int level); /* btree block level */ -#define xfs_btree_check_lptr_disk(cur, ptr, level) \ - xfs_btree_check_lptr(cur, be64_to_cpu(ptr), level) - - -/* - * Check that (short) pointer is ok. - */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_sptr( - struct xfs_btree_cur *cur, /* btree cursor */ - xfs_agblock_t ptr, /* btree block disk address */ - int level); /* btree block level */ - -/* - * Check that (short) pointer is ok. - */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_ptr( - struct xfs_btree_cur *cur, /* btree cursor */ - union xfs_btree_ptr *ptr, /* btree block disk address */ - int index, /* offset from ptr to check */ - int level); /* btree block level */ - /* * Delete the btree cursor. */ @@ -408,15 +375,6 @@ xfs_btree_dup_cursor( xfs_btree_cur_t *cur, /* input cursor */ xfs_btree_cur_t **ncur);/* output cursor */ -/* - * Change the cursor to point to the first record in the current block - * at the given level. Other levels are unaffected. - */ -int /* success=1, failure=0 */ -xfs_btree_firstrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level); /* level to change */ - /* * Get a buffer for the block, return it with no data read. * Long-form addressing. @@ -448,15 +406,6 @@ xfs_btree_islastblock( xfs_btree_cur_t *cur, /* btree cursor */ int level); /* level to check */ -/* - * Change the cursor to point to the last record in the current block - * at the given level. Other levels are unaffected. 
- */ -int /* success=1, failure=0 */ -xfs_btree_lastrec( - xfs_btree_cur_t *cur, /* btree cursor */ - int level); /* level to change */ - /* * Compute first and last byte offsets for the fields given. * Interprets the offsets table, which contains struct field offsets. @@ -517,16 +466,6 @@ xfs_btree_reada_bufs( xfs_agblock_t agbno, /* allocation group block number */ xfs_extlen_t count); /* count of filesystem blocks */ -/* - * Read-ahead btree blocks, at the given level. - * Bits in lr are set from XFS_BTCUR_{LEFT,RIGHT}RA. - */ -int /* readahead block count */ -xfs_btree_readahead( - xfs_btree_cur_t *cur, /* btree cursor */ - int lev, /* level in btree */ - int lr); /* left/right bits */ - /* * Set the buffer for level "lev" in the cursor to bp, releasing * any previous buffer. @@ -544,13 +483,7 @@ xfs_btree_setbuf( int xfs_btree_increment(struct xfs_btree_cur *, int, int *); int xfs_btree_decrement(struct xfs_btree_cur *, int, int *); int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *); -int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int); int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); -int xfs_btree_lshift(struct xfs_btree_cur *, int, int *); -int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); -int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *, - union xfs_btree_key *, struct xfs_btree_cur **, int *); -int xfs_btree_new_root(struct xfs_btree_cur *, int *); int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *); int xfs_btree_kill_iroot(struct xfs_btree_cur *); int xfs_btree_insert(struct xfs_btree_cur *, int *); -- cgit v1.2.3-70-g09d2 From 7f7c39ccb6045cf1fd5e7684a484c445291b44d4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 16:58:50 +1100 Subject: [XFS] make btree tracing generic Make the existing bmap btree tracing generic so that it applies to all btree types. Some fragments lifted from a patch by Dave Chinner. This adds two files that were missed from the previous btree tracing checkin. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32210a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: Bill O'Donnell Signed-off-by: David Chinner --- fs/xfs/xfs_btree_trace.c | 249 +++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_btree_trace.h | 116 ++++++++++++++++++++++ 2 files changed, 365 insertions(+) create mode 100644 fs/xfs/xfs_btree_trace.c create mode 100644 fs/xfs/xfs_btree_trace.h (limited to 'fs') diff --git a/fs/xfs/xfs_btree_trace.c b/fs/xfs/xfs_btree_trace.c new file mode 100644 index 00000000000..44ff942a0fd --- /dev/null +++ b/fs/xfs/xfs_btree_trace.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_types.h" +#include "xfs_inum.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_inode.h" +#include "xfs_btree.h" +#include "xfs_btree_trace.h" + +STATIC void +xfs_btree_trace_ptr( + struct xfs_btree_cur *cur, + union xfs_btree_ptr ptr, + __psunsigned_t *high, + __psunsigned_t *low) +{ + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { + __u64 val = be64_to_cpu(ptr.l); + *high = val >> 32; + *low = (int)val; + } else { + *high = 0; + *low = be32_to_cpu(ptr.s); + } +} + +/* + * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. + */ +void +xfs_btree_trace_argbi( + const char *func, + struct xfs_btree_cur *cur, + struct xfs_buf *b, + int i, + int line) +{ + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBI, + line, (__psunsigned_t)b, i, 0, 0, 0, 0, 0, + 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for a buffer & 2 integer args. + */ +void +xfs_btree_trace_argbii( + const char *func, + struct xfs_btree_cur *cur, + struct xfs_buf *b, + int i0, + int i1, + int line) +{ + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBII, + line, (__psunsigned_t)b, i0, i1, 0, 0, 0, 0, + 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for 3 block-length args + * and an integer arg. + */ +void +xfs_btree_trace_argfffi( + const char *func, + struct xfs_btree_cur *cur, + xfs_dfiloff_t o, + xfs_dfsbno_t b, + xfs_dfilblks_t i, + int j, + int line) +{ + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGFFFI, + line, + o >> 32, (int)o, + b >> 32, (int)b, + i >> 32, (int)i, + (int)j, 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for one integer arg. + */ +void +xfs_btree_trace_argi( + const char *func, + struct xfs_btree_cur *cur, + int i, + int line) +{ + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGI, + line, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, key. + */ +void +xfs_btree_trace_argipk( + const char *func, + struct xfs_btree_cur *cur, + int i, + union xfs_btree_ptr ptr, + union xfs_btree_key *key, + int line) +{ + __psunsigned_t high, low; + __uint64_t l0, l1; + + xfs_btree_trace_ptr(cur, ptr, &high, &low); + cur->bc_ops->trace_key(cur, key, &l0, &l1); + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPK, + line, i, high, low, + l0 >> 32, (int)l0, + l1 >> 32, (int)l1, + 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, fsblock, rec. + */ +void +xfs_btree_trace_argipr( + const char *func, + struct xfs_btree_cur *cur, + int i, + union xfs_btree_ptr ptr, + union xfs_btree_rec *rec, + int line) +{ + __psunsigned_t high, low; + __uint64_t l0, l1, l2; + + xfs_btree_trace_ptr(cur, ptr, &high, &low); + cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPR, + line, i, + high, low, + l0 >> 32, (int)l0, + l1 >> 32, (int)l1, + l2 >> 32, (int)l2, + 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for int, key. 
+ */ +void +xfs_btree_trace_argik( + const char *func, + struct xfs_btree_cur *cur, + int i, + union xfs_btree_key *key, + int line) +{ + __uint64_t l0, l1; + + cur->bc_ops->trace_key(cur, key, &l0, &l1); + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIK, + line, i, + l0 >> 32, (int)l0, + l1 >> 32, (int)l1, + 0, 0, 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for arguments, for record. + */ +void +xfs_btree_trace_argr( + const char *func, + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec, + int line) +{ + __uint64_t l0, l1, l2; + + cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); + cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGR, + line, + l0 >> 32, (int)l0, + l1 >> 32, (int)l1, + l2 >> 32, (int)l2, + 0, 0, 0, 0, 0); +} + +/* + * Add a trace buffer entry for the cursor/operation. + */ +void +xfs_btree_trace_cursor( + const char *func, + struct xfs_btree_cur *cur, + int type, + int line) +{ + __uint32_t s0; + __uint64_t l0, l1; + char *s; + + switch (type) { + case XBT_ARGS: + s = "args"; + break; + case XBT_ENTRY: + s = "entry"; + break; + case XBT_ERROR: + s = "error"; + break; + case XBT_EXIT: + s = "exit"; + break; + default: + s = "unknown"; + break; + } + + cur->bc_ops->trace_cursor(cur, &s0, &l0, &l1); + cur->bc_ops->trace_enter(cur, func, s, XFS_BTREE_KTRACE_CUR, line, + s0, + l0 >> 32, (int)l0, + l1 >> 32, (int)l1, + (__psunsigned_t)cur->bc_bufs[0], + (__psunsigned_t)cur->bc_bufs[1], + (__psunsigned_t)cur->bc_bufs[2], + (__psunsigned_t)cur->bc_bufs[3], + (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], + (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); +} diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h new file mode 100644 index 00000000000..b3f5eb3c3c6 --- /dev/null +++ b/fs/xfs/xfs_btree_trace.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_BTREE_TRACE_H__ +#define __XFS_BTREE_TRACE_H__ + +struct xfs_btree_cur; +struct xfs_buf; + + +/* + * Trace hooks. + * i,j = integer (32 bit) + * b = btree block buffer (xfs_buf_t) + * p = btree ptr + * r = btree record + * k = btree key + */ + +#ifdef XFS_BTREE_TRACE + +/* + * Trace buffer entry types. + */ +#define XFS_BTREE_KTRACE_ARGBI 1 +#define XFS_BTREE_KTRACE_ARGBII 2 +#define XFS_BTREE_KTRACE_ARGFFFI 3 +#define XFS_BTREE_KTRACE_ARGI 4 +#define XFS_BTREE_KTRACE_ARGIPK 5 +#define XFS_BTREE_KTRACE_ARGIPR 6 +#define XFS_BTREE_KTRACE_ARGIK 7 +#define XFS_BTREE_KTRACE_ARGR 8 +#define XFS_BTREE_KTRACE_CUR 9 + +/* + * Sub-types for cursor traces. 
+ */ +#define XBT_ARGS 0 +#define XBT_ENTRY 1 +#define XBT_ERROR 2 +#define XBT_EXIT 3 + +void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *, + struct xfs_buf *, int, int); +void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, + struct xfs_buf *, int, int, int); +void xfs_btree_trace_argfffi(const char *, struct xfs_btree_cur *, + xfs_dfiloff_t, xfs_dfsbno_t, xfs_dfilblks_t, int, int); +void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); +void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, + union xfs_btree_ptr, union xfs_btree_key *, int); +void xfs_btree_trace_argipr(const char *, struct xfs_btree_cur *, int, + union xfs_btree_ptr, union xfs_btree_rec *, int); +void xfs_btree_trace_argik(const char *, struct xfs_btree_cur *, int, + union xfs_btree_key *, int); +void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *, + union xfs_btree_rec *, int); +void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); + + +#define XFS_ALLOCBT_TRACE_SIZE 4096 /* size of global trace buffer */ +extern ktrace_t *xfs_allocbt_trace_buf; + +#define XFS_INOBT_TRACE_SIZE 4096 /* size of global trace buffer */ +extern ktrace_t *xfs_inobt_trace_buf; + +#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */ +#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */ +extern ktrace_t *xfs_bmbt_trace_buf; + + +#define XFS_BTREE_TRACE_ARGBI(c, b, i) \ + xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) +#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ + xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) +#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) \ + xfs_btree_trace_argfffi(__func__, c, o, b, i, j, __LINE__) +#define XFS_BTREE_TRACE_ARGI(c, i) \ + xfs_btree_trace_argi(__func__, c, i, __LINE__) +#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ + xfs_btree_trace_argipk(__func__, c, i, p, k, __LINE__) +#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) \ + xfs_btree_trace_argipr(__func__, c, i, p, r, __LINE__) +#define XFS_BTREE_TRACE_ARGIK(c, i, k) \ + xfs_btree_trace_argik(__func__, c, i, k, __LINE__) +#define XFS_BTREE_TRACE_ARGR(c, r) \ + xfs_btree_trace_argr(__func__, c, r, __LINE__) +#define XFS_BTREE_TRACE_CURSOR(c, t) \ + xfs_btree_trace_cursor(__func__, c, t, __LINE__) +#else +#define XFS_BTREE_TRACE_ARGBI(c, b, i) +#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) +#define XFS_BTREE_TRACE_ARGFFFI(c, o, b, i, j) +#define XFS_BTREE_TRACE_ARGI(c, i) +#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) +#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) +#define XFS_BTREE_TRACE_ARGIK(c, i, k) +#define XFS_BTREE_TRACE_ARGR(c, r) +#define XFS_BTREE_TRACE_CURSOR(c, t) +#endif /* XFS_BTREE_TRACE */ + +#endif /* __XFS_BTREE_TRACE_H__ */ -- cgit v1.2.3-70-g09d2 From d112f2984592acb774187b3adddc107fb0825500 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 16:59:06 +1100 Subject: [XFS] Wait for all I/O on truncate to zero file size It's possible to have outstanding xfs_ioend_t's queued when the file size is zero. This can happen in the direct I/O path when a direct I/O write fails due to ENOSPC. In this case the xfs_ioend_t will still be queued (ie xfs_end_io_direct() does not know that the I/O failed so can't force the xfs_ioend_t to be flushed synchronously). When we truncate a file on unlink we don't know to wait for these xfs_ioend_ts and we can have a use-after-free situation if the inode is reclaimed before the xfs_ioend_t is finally processed. 
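(An aside on the btree tracing patch above: with XFS_BTREE_TRACE defined, the XFS_BTREE_TRACE_* macros in xfs_btree_trace.h expand to calls that record __func__ and __LINE__ into the per-btree ktrace buffer, and they compile away to empty statements otherwise. A minimal sketch of how a btree routine would use them follows; the function is invented purely for illustration, assumes xfs_btree.h and xfs_btree_trace.h are included, and is not part of the patch.)

static int				/* error */
xfs_btree_example_op(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			level,	/* level to operate on */
	int			*stat)	/* success/failure */
{
	int			error = 0;

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);	/* snapshot cursor state on entry */
	XFS_BTREE_TRACE_ARGI(cur, level);	/* record the integer argument */

	/* ... the real work would happen here ... */

	if (error) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
		return error;
	}
	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;
}

Because the no-trace variants of these macros expand to nothing, the instrumentation costs nothing in builds without XFS_BTREE_TRACE.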
As was suggested by Dave Chinner lets wait for all I/Os to complete when truncating the file size to zero. SGI-PV: 981668 SGI-Modid: xfs-linux-melb:xfs-kern:32216a Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index cc0474ddd2d..2b1294b8ad7 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1450,7 +1450,7 @@ xfs_itruncate_start( mp = ip->i_mount; /* wait for the completion of any pending DIOs */ - if (new_size < ip->i_size) + if (new_size == 0 || new_size < ip->i_size) vn_iowait(ip); /* -- cgit v1.2.3-70-g09d2 From bc3048e3cd3806ccfd5b16b1a30e5d6013abbd3d Mon Sep 17 00:00:00 2001 From: Peter Leckie Date: Thu, 30 Oct 2008 17:05:04 +1100 Subject: [XFS] Clean up dquot pincount code. This is a code cleanup and optimization that removes a per mount point spinlock from the quota code and cleans up the code. The patch changes the pincount from being an int protected by a spinlock to an atomic_t allowing the pincount to be manipulated without holding the spinlock. This cleanup also protects against random wakup's of both the aild and xfssyncd by reevaluating the pincount after been woken. Two latter patches will address the Spurious wakeups. SGI-PV: 986789 SGI-Modid: xfs-linux-melb:xfs-kern:32215a Signed-off-by: Peter Leckie Signed-off-by: Christoph Hellwig Signed-off-by: David Chinner Signed-off-by: Donald Douwsma Signed-off-by: Lachlan McIlroy --- fs/xfs/quota/xfs_dquot.c | 6 +++--- fs/xfs/quota/xfs_dquot.h | 4 ++-- fs/xfs/quota/xfs_dquot_item.c | 37 +++++++++++-------------------------- fs/xfs/quota/xfs_qm.c | 2 -- fs/xfs/quota/xfs_qm.h | 1 - 5 files changed, 16 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index f2705f2fd43..d3f4fbbe248 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -101,7 +101,7 @@ xfs_qm_dqinit( if (brandnewdquot) { dqp->dq_flnext = dqp->dq_flprev = dqp; mutex_init(&dqp->q_qlock); - sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq"); + init_waitqueue_head(&dqp->q_pinwait); /* * Because we want to use a counting completion, complete @@ -131,7 +131,7 @@ xfs_qm_dqinit( dqp->q_res_bcount = 0; dqp->q_res_icount = 0; dqp->q_res_rtbcount = 0; - dqp->q_pincount = 0; + atomic_set(&dqp->q_pincount, 0); dqp->q_hash = NULL; ASSERT(dqp->dq_flnext == dqp->dq_flprev); @@ -1489,7 +1489,7 @@ xfs_qm_dqpurge( "xfs_qm_dqpurge: dquot %p flush failed", dqp); xfs_dqflock(dqp); } - ASSERT(dqp->q_pincount == 0); + ASSERT(atomic_read(&dqp->q_pincount) == 0); ASSERT(XFS_FORCED_SHUTDOWN(mp) || !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL)); diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index 8958d0faf8d..7e455337e2b 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h @@ -83,8 +83,8 @@ typedef struct xfs_dquot { xfs_qcnt_t q_res_rtbcount;/* total realtime blks used+reserved */ mutex_t q_qlock; /* quota lock */ struct completion q_flush; /* flush completion queue */ - uint q_pincount; /* pin count for this dquot */ - sv_t q_pinwait; /* sync var for pinning */ + atomic_t q_pincount; /* dquot pin count */ + wait_queue_head_t q_pinwait; /* dquot pinning wait queue */ #ifdef XFS_DQUOT_TRACE struct ktrace *q_trace; /* trace header structure */ #endif diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index f028644caa5..e33f8646418 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ 
b/fs/xfs/quota/xfs_dquot_item.c @@ -88,25 +88,22 @@ xfs_qm_dquot_logitem_format( /* * Increment the pin count of the given dquot. - * This value is protected by pinlock spinlock in the xQM structure. */ STATIC void xfs_qm_dquot_logitem_pin( xfs_dq_logitem_t *logitem) { - xfs_dquot_t *dqp; + xfs_dquot_t *dqp = logitem->qli_dquot; - dqp = logitem->qli_dquot; ASSERT(XFS_DQ_IS_LOCKED(dqp)); - spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); - dqp->q_pincount++; - spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); + atomic_inc(dqp->q_pincount); } /* * Decrement the pin count of the given dquot, and wake up * anyone in xfs_dqwait_unpin() if the count goes to 0. The - * dquot must have been previously pinned with a call to xfs_dqpin(). + * dquot must have been previously pinned with a call to + * xfs_qm_dquot_logitem_pin(). */ /* ARGSUSED */ STATIC void @@ -114,16 +111,11 @@ xfs_qm_dquot_logitem_unpin( xfs_dq_logitem_t *logitem, int stale) { - xfs_dquot_t *dqp; + xfs_dquot_t *dqp = logitem->qli_dquot; - dqp = logitem->qli_dquot; - ASSERT(dqp->q_pincount > 0); - spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); - dqp->q_pincount--; - if (dqp->q_pincount == 0) { - sv_broadcast(&dqp->q_pinwait); - } - spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); + ASSERT(atomic_read(&dqp->q_pincount) > 0); + if (atomic_dec_and_test(&dqp->q_pincount)) + wake_up(&dqp->q_pinwait); } /* ARGSUSED */ @@ -193,21 +185,14 @@ xfs_qm_dqunpin_wait( xfs_dquot_t *dqp) { ASSERT(XFS_DQ_IS_LOCKED(dqp)); - if (dqp->q_pincount == 0) { + if (atomic_read(&dqp->q_pincount) == 0) return; - } /* * Give the log a push so we don't wait here too long. */ xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); - spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); - if (dqp->q_pincount == 0) { - spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); - return; - } - sv_wait(&(dqp->q_pinwait), PINOD, - &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s); + wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); } /* @@ -310,7 +295,7 @@ xfs_qm_dquot_logitem_trylock( uint retval; dqp = qip->qli_dquot; - if (dqp->q_pincount > 0) + if (atomic_read(&dqp->q_pincount) > 0) return (XFS_ITEM_PINNED); if (! 
xfs_qm_dqlock_nowait(dqp)) diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index df0ffef9775..270f775974e 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -1137,7 +1137,6 @@ xfs_qm_init_quotainfo( return error; } - spin_lock_init(&qinf->qi_pinlock); xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); qinf->qi_dqreclaims = 0; @@ -1234,7 +1233,6 @@ xfs_qm_destroy_quotainfo( */ xfs_qm_rele_quotafs_ref(mp); - spinlock_destroy(&qi->qi_pinlock); xfs_qm_list_destroy(&qi->qi_dqlist); if (qi->qi_uquotaip) { diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index 44f25349e47..4f2de977172 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h @@ -106,7 +106,6 @@ typedef struct xfs_qm { typedef struct xfs_quotainfo { xfs_inode_t *qi_uquotaip; /* user quota inode */ xfs_inode_t *qi_gquotaip; /* group quota inode */ - spinlock_t qi_pinlock; /* dquot pinning lock */ xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ int qi_dqreclaims; /* a change here indicates a removal in the dqlist */ -- cgit v1.2.3-70-g09d2 From d1de802155341ab63e64d37c58c61d6f358bb3ad Mon Sep 17 00:00:00 2001 From: Peter Leckie Date: Thu, 30 Oct 2008 17:05:18 +1100 Subject: [XFS] Fix build brakage from patch "Clean up dquot pincount code" This is a fix for patch " Clean up dquot pincount code" which introduced a build breakage due to a missing & in xfs_qm_dquot_logitem_pin. SGI-PV: 986789 SGI-Modid: xfs-linux-melb:xfs-kern:32221a Signed-off-by: Peter Leckie Signed-off-by: Donald Douwsma Signed-off-by: Lachlan McIlroy --- fs/xfs/quota/xfs_dquot_item.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index e33f8646418..48f08109621 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -96,7 +96,7 @@ xfs_qm_dquot_logitem_pin( xfs_dquot_t *dqp = logitem->qli_dquot; ASSERT(XFS_DQ_IS_LOCKED(dqp)); - atomic_inc(dqp->q_pincount); + atomic_inc(&dqp->q_pincount); } /* -- cgit v1.2.3-70-g09d2 From 24ee0e49c9cce23acb1758728cb09e8d2b53bd33 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 17:05:26 +1100 Subject: [XFS] Make xfs_btree_check_ptr() debug-only code. SGI-PV: 985583 SGI-Modid: xfs-linux-melb:xfs-kern:32224a Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_btree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index b735c7299a8..72a26bb7643 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -160,6 +160,7 @@ xfs_btree_check_lptr( return 0; } +#ifdef DEBUG /* * Check that (short) pointer is ok. */ @@ -197,6 +198,7 @@ xfs_btree_check_ptr( be32_to_cpu((&ptr->s)[index]), level); } } +#endif /* * Delete the btree cursor. 
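(An aside on the dquot pincount patches above: the conversion from an int guarded by qi_pinlock to an atomic_t plus a wait queue follows a common kernel pattern. A minimal, generic sketch of that pattern, with invented names and nothing taken from the XFS source beyond the idea, assuming only <linux/atomic.h> and <linux/wait.h>:)

#include <linux/atomic.h>
#include <linux/wait.h>

struct pinned_obj {
	atomic_t		pincount;	/* number of pins currently held */
	wait_queue_head_t	pinwait;	/* woken when pincount drops to zero */
};

static void obj_pin_init(struct pinned_obj *obj)
{
	atomic_set(&obj->pincount, 0);
	init_waitqueue_head(&obj->pinwait);
}

static void obj_pin(struct pinned_obj *obj)
{
	atomic_inc(&obj->pincount);	/* note the '&', cf. the build fix above */
}

static void obj_unpin(struct pinned_obj *obj)
{
	/* the final unpin wakes anyone waiting for the object to be unpinned */
	if (atomic_dec_and_test(&obj->pincount))
		wake_up(&obj->pinwait);
}

static void obj_wait_unpinned(struct pinned_obj *obj)
{
	/* wait_event() rechecks the condition after every wakeup,
	 * so spurious wakeups of the waiter are harmless */
	wait_event(obj->pinwait, atomic_read(&obj->pincount) == 0);
}

atomic_dec_and_test() gives the last-unpin-wakes-waiters behaviour without holding any lock, which is what allows the patches to drop qi_pinlock entirely.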
-- cgit v1.2.3-70-g09d2 From 847fff5ca881670ca8ec617afeb943950f0c804b Mon Sep 17 00:00:00 2001 From: Barry Naujok Date: Thu, 30 Oct 2008 17:05:38 +1100 Subject: [XFS] Sync up kernel and user-space headers SGI-PV: 986558 SGI-Modid: xfs-linux-melb:xfs-kern:32231a Signed-off-by: Barry Naujok Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_ag.h | 5 +- fs/xfs/xfs_alloc.h | 27 ++--- fs/xfs/xfs_arch.h | 39 ++++-- fs/xfs/xfs_bit.h | 3 +- fs/xfs/xfs_bmap.h | 61 +++++----- fs/xfs/xfs_bmap_btree.h | 3 - fs/xfs/xfs_btree.h | 4 - fs/xfs/xfs_da_btree.h | 4 +- fs/xfs/xfs_ialloc.h | 3 - fs/xfs/xfs_imap.h | 2 - fs/xfs/xfs_inode.h | 246 ++++++++++++++++++------------------- fs/xfs/xfs_inode_item.h | 41 +++---- fs/xfs/xfs_mount.h | 17 +-- fs/xfs/xfs_trans.h | 317 ++++++++++++++++++++++++------------------------ 14 files changed, 383 insertions(+), 389 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 61b292a9fb4..729ee3eb39a 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -192,15 +192,16 @@ typedef struct xfs_perag xfs_agino_t pagi_freecount; /* number of free inodes */ xfs_agino_t pagi_count; /* number of allocated inodes */ int pagb_count; /* pagb slots in use */ + xfs_perag_busy_t *pagb_list; /* unstable blocks */ #ifdef __KERNEL__ spinlock_t pagb_lock; /* lock for pagb_list */ -#endif - xfs_perag_busy_t *pagb_list; /* unstable blocks */ + atomic_t pagf_fstrms; /* # of filestreams active in this AG */ int pag_ici_init; /* incore inode cache initialised */ rwlock_t pag_ici_lock; /* incore inode lock */ struct radix_tree_root pag_ici_root; /* incore inode cache root */ +#endif } xfs_perag_t; #define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 5aec15d0651..588172796f7 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -121,6 +121,19 @@ extern ktrace_t *xfs_alloc_trace_buf; #define XFS_ALLOC_KTRACE_BUSYSEARCH 6 #endif +void +xfs_alloc_mark_busy(xfs_trans_t *tp, + xfs_agnumber_t agno, + xfs_agblock_t bno, + xfs_extlen_t len); + +void +xfs_alloc_clear_busy(xfs_trans_t *tp, + xfs_agnumber_t ag, + int idx); + +#endif /* __KERNEL__ */ + /* * Compute and fill in value of m_ag_maxlevels. 
*/ @@ -196,18 +209,4 @@ xfs_free_extent( xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len); /* length of extent */ -void -xfs_alloc_mark_busy(xfs_trans_t *tp, - xfs_agnumber_t agno, - xfs_agblock_t bno, - xfs_extlen_t len); - -void -xfs_alloc_clear_busy(xfs_trans_t *tp, - xfs_agnumber_t ag, - int idx); - - -#endif /* __KERNEL__ */ - #endif /* __XFS_ALLOC_H__ */ diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h index 0b3b5efe848..53d5e70d136 100644 --- a/fs/xfs/xfs_arch.h +++ b/fs/xfs/xfs_arch.h @@ -41,21 +41,36 @@ #endif #ifdef XFS_NATIVE_HOST -#define cpu_to_be16(val) ((__be16)(val)) -#define cpu_to_be32(val) ((__be32)(val)) -#define cpu_to_be64(val) ((__be64)(val)) -#define be16_to_cpu(val) ((__uint16_t)(val)) -#define be32_to_cpu(val) ((__uint32_t)(val)) -#define be64_to_cpu(val) ((__uint64_t)(val)) +#define cpu_to_be16(val) ((__force __be16)(__u16)(val)) +#define cpu_to_be32(val) ((__force __be32)(__u32)(val)) +#define cpu_to_be64(val) ((__force __be64)(__u64)(val)) +#define be16_to_cpu(val) ((__force __u16)(__be16)(val)) +#define be32_to_cpu(val) ((__force __u32)(__be32)(val)) +#define be64_to_cpu(val) ((__force __u64)(__be64)(val)) #else -#define cpu_to_be16(val) (__swab16((__uint16_t)(val))) -#define cpu_to_be32(val) (__swab32((__uint32_t)(val))) -#define cpu_to_be64(val) (__swab64((__uint64_t)(val))) -#define be16_to_cpu(val) (__swab16((__be16)(val))) -#define be32_to_cpu(val) (__swab32((__be32)(val))) -#define be64_to_cpu(val) (__swab64((__be64)(val))) +#define cpu_to_be16(val) ((__force __be16)__swab16((__u16)(val))) +#define cpu_to_be32(val) ((__force __be32)__swab32((__u32)(val))) +#define cpu_to_be64(val) ((__force __be64)__swab64((__u64)(val))) +#define be16_to_cpu(val) (__swab16((__force __u16)(__be16)(val))) +#define be32_to_cpu(val) (__swab32((__force __u32)(__be32)(val))) +#define be64_to_cpu(val) (__swab64((__force __u64)(__be64)(val))) #endif +static inline void be16_add_cpu(__be16 *a, __s16 b) +{ + *a = cpu_to_be16(be16_to_cpu(*a) + b); +} + +static inline void be32_add_cpu(__be32 *a, __s32 b) +{ + *a = cpu_to_be32(be32_to_cpu(*a) + b); +} + +static inline void be64_add_cpu(__be64 *a, __s64 b) +{ + *a = cpu_to_be64(be64_to_cpu(*a) + b); +} + #endif /* __KERNEL__ */ /* do we need conversion? */ diff --git a/fs/xfs/xfs_bit.h b/fs/xfs/xfs_bit.h index 8e0e463dae2..bca7b243c31 100644 --- a/fs/xfs/xfs_bit.h +++ b/fs/xfs/xfs_bit.h @@ -61,8 +61,7 @@ static inline int xfs_highbit64(__uint64_t v) /* Get low bit set out of 32-bit argument, -1 if none set */ static inline int xfs_lowbit32(__uint32_t v) { - unsigned long t = v; - return (v) ? find_first_bit(&t, 32) : -1; + return ffs(v) - 1; } /* Get low bit set out of 64-bit argument, -1 if none set */ diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 9f3e3a836d1..7c9d12cd7a4 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -137,9 +137,7 @@ typedef struct xfs_bmalloca { char conv; /* overwriting unwritten extents */ } xfs_bmalloca_t; -#ifdef __KERNEL__ - -#if defined(XFS_BMAP_TRACE) +#if defined(__KERNEL__) && defined(XFS_BMAP_TRACE) /* * Trace operations for bmap extent tracing */ @@ -163,9 +161,12 @@ xfs_bmap_trace_exlist( int whichfork); /* data or attr fork */ #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ xfs_bmap_trace_exlist(__func__,ip,c,w) -#else + +#else /* __KERNEL__ && XFS_BMAP_TRACE */ + #define XFS_BMAP_TRACE_EXLIST(ip,c,w) -#endif + +#endif /* __KERNEL__ && XFS_BMAP_TRACE */ /* * Convert inode from non-attributed to attributed. 
@@ -205,20 +206,6 @@ xfs_bmap_compute_maxlevels( struct xfs_mount *mp, /* file system mount structure */ int whichfork); /* data or attr fork */ -/* - * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi - * caller. Frees all the extents that need freeing, which must be done - * last due to locking considerations. - * - * Return 1 if the given transaction was committed and a new one allocated, - * and 0 otherwise. - */ -int /* error */ -xfs_bmap_finish( - struct xfs_trans **tp, /* transaction pointer addr */ - xfs_bmap_free_t *flist, /* i/o: list extents to free */ - int *committed); /* xact committed or not */ - /* * Returns the file-relative block number of the first unused block in the file. * This is the lowest-address hole if the file has holes, else the first block @@ -343,6 +330,32 @@ xfs_bunmapi( extents */ int *done); /* set if not done yet */ +/* + * Check an extent list, which has just been read, for + * any bit in the extent flag field. + */ +int +xfs_check_nostate_extents( + struct xfs_ifork *ifp, + xfs_extnum_t idx, + xfs_extnum_t num); + +#ifdef __KERNEL__ + +/* + * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi + * caller. Frees all the extents that need freeing, which must be done + * last due to locking considerations. + * + * Return 1 if the given transaction was committed and a new one allocated, + * and 0 otherwise. + */ +int /* error */ +xfs_bmap_finish( + struct xfs_trans **tp, /* transaction pointer addr */ + xfs_bmap_free_t *flist, /* i/o: list extents to free */ + int *committed); /* xact committed or not */ + /* * Fcntl interface to xfs_bmapi. */ @@ -374,16 +387,6 @@ xfs_bmap_count_blocks( int whichfork, int *count); -/* - * Check an extent list, which has just been read, for - * any bit in the extent flag field. - */ -int -xfs_check_nostate_extents( - struct xfs_ifork *ifp, - xfs_extnum_t idx, - xfs_extnum_t num); - /* * Search the extent records for the entry containing block bno. * If bno lies in a hole, point to the next entry. If bno lies diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 6f38e250570..5669242b52d 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -231,8 +231,6 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0]) -#ifdef __KERNEL__ - /* * Prototypes for xfs_bmap.c to call. */ @@ -264,6 +262,5 @@ extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int); extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, int); -#endif /* __KERNEL__ */ #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 2a22c587f1a..7425b2b4a25 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -327,8 +327,6 @@ typedef struct xfs_btree_cur #define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)XFS_BUF_PTR(bp)) -#ifdef __KERNEL__ - /* * Check that long form block header is ok. */ @@ -515,8 +513,6 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block) return be16_to_cpu(block->bb_level); } -#endif /* __KERNEL__ */ - /* * Min and max functions for extlen, agblock, fileoff, and filblks types. 
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 599e270e695..70b710c1792 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -206,9 +206,8 @@ struct xfs_nameops { }; -#ifdef __KERNEL__ /*======================================================================== - * Function prototypes for the kernel. + * Function prototypes. *========================================================================*/ /* @@ -269,6 +268,5 @@ xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); extern struct kmem_zone *xfs_da_state_zone; extern struct kmem_zone *xfs_dabuf_zone; -#endif /* __KERNEL__ */ #endif /* __XFS_DA_BTREE_H__ */ diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index c5745f6d94e..ccf554a6e0a 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h @@ -56,7 +56,6 @@ static inline int xfs_ialloc_find_free(xfs_inofree_t *fp) } -#ifdef __KERNEL__ /* * Allocate an inode on disk. * Mode is used to tell whether the new inode will need space, and whether @@ -174,6 +173,4 @@ int xfs_inobt_lookup_le(struct xfs_btree_cur *cur, xfs_agino_t ino, extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino, __int32_t *fcnt, xfs_inofree_t *free, int *stat); -#endif /* __KERNEL__ */ - #endif /* __XFS_IALLOC_H__ */ diff --git a/fs/xfs/xfs_imap.h b/fs/xfs/xfs_imap.h index d3645000398..f9ce62890ea 100644 --- a/fs/xfs/xfs_imap.h +++ b/fs/xfs/xfs_imap.h @@ -30,11 +30,9 @@ typedef struct xfs_imap { ushort im_boffset; /* inode offset in block in bytes */ } xfs_imap_t; -#ifdef __KERNEL__ struct xfs_mount; struct xfs_trans; int xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, xfs_imap_t *, uint); -#endif #endif /* __XFS_IMAP_H__ */ diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 2a69a7dee22..a8f1e6833aa 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -20,7 +20,7 @@ struct xfs_dinode; struct xfs_dinode_core; - +struct xfs_inode; /* * Fork identifiers. @@ -83,54 +83,6 @@ typedef struct xfs_ifork { } if_u2; } xfs_ifork_t; -/* - * Flags for xfs_ichgtime(). - */ -#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ -#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */ - -/* - * Per-fork incore inode flags. - */ -#define XFS_IFINLINE 0x01 /* Inline data is read in */ -#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */ -#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */ -#define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ - -/* - * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate(). - */ -#define XFS_IMAP_LOOKUP 0x1 -#define XFS_IMAP_BULKSTAT 0x2 - -#ifdef __KERNEL__ -struct bhv_desc; -struct cred; -struct ktrace; -struct xfs_buf; -struct xfs_bmap_free; -struct xfs_bmbt_irec; -struct xfs_bmbt_block; -struct xfs_inode; -struct xfs_inode_log_item; -struct xfs_mount; -struct xfs_trans; -struct xfs_dquot; - -#if defined(XFS_ILOCK_TRACE) -#define XFS_ILOCK_KTRACE_SIZE 32 -extern ktrace_t *xfs_ilock_trace_buf; -extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *); -#else -#define xfs_ilock_trace(i,n,f,ra) -#endif - -typedef struct dm_attrs_s { - __uint32_t da_dmevmask; /* DMIG event mask */ - __uint16_t da_dmstate; /* DMIG state info */ - __uint16_t da_pad; /* DMIG extra padding */ -} dm_attrs_t; - /* * This is the xfs in-core inode structure. * Most of the on-disk inode is embedded in the i_d field. @@ -191,6 +143,96 @@ typedef struct xfs_icdinode { __uint32_t di_gen; /* generation number */ } xfs_icdinode_t; +/* + * Flags for xfs_ichgtime(). 
+ */ +#define XFS_ICHGTIME_MOD 0x1 /* data fork modification timestamp */ +#define XFS_ICHGTIME_CHG 0x2 /* inode field change timestamp */ + +/* + * Per-fork incore inode flags. + */ +#define XFS_IFINLINE 0x01 /* Inline data is read in */ +#define XFS_IFEXTENTS 0x02 /* All extent pointers are read in */ +#define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */ +#define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ + +/* + * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate(). + */ +#define XFS_IMAP_LOOKUP 0x1 +#define XFS_IMAP_BULKSTAT 0x2 + +/* + * Fork handling. + */ + +#define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0) +#define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3)) + +#define XFS_IFORK_PTR(ip,w) \ + ((w) == XFS_DATA_FORK ? \ + &(ip)->i_df : \ + (ip)->i_afp) +#define XFS_IFORK_DSIZE(ip) \ + (XFS_IFORK_Q(ip) ? \ + XFS_IFORK_BOFF(ip) : \ + XFS_LITINO((ip)->i_mount)) +#define XFS_IFORK_ASIZE(ip) \ + (XFS_IFORK_Q(ip) ? \ + XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : \ + 0) +#define XFS_IFORK_SIZE(ip,w) \ + ((w) == XFS_DATA_FORK ? \ + XFS_IFORK_DSIZE(ip) : \ + XFS_IFORK_ASIZE(ip)) +#define XFS_IFORK_FORMAT(ip,w) \ + ((w) == XFS_DATA_FORK ? \ + (ip)->i_d.di_format : \ + (ip)->i_d.di_aformat) +#define XFS_IFORK_FMT_SET(ip,w,n) \ + ((w) == XFS_DATA_FORK ? \ + ((ip)->i_d.di_format = (n)) : \ + ((ip)->i_d.di_aformat = (n))) +#define XFS_IFORK_NEXTENTS(ip,w) \ + ((w) == XFS_DATA_FORK ? \ + (ip)->i_d.di_nextents : \ + (ip)->i_d.di_anextents) +#define XFS_IFORK_NEXT_SET(ip,w,n) \ + ((w) == XFS_DATA_FORK ? \ + ((ip)->i_d.di_nextents = (n)) : \ + ((ip)->i_d.di_anextents = (n))) + + + +#ifdef __KERNEL__ + +struct bhv_desc; +struct cred; +struct ktrace; +struct xfs_buf; +struct xfs_bmap_free; +struct xfs_bmbt_irec; +struct xfs_bmbt_block; +struct xfs_inode_log_item; +struct xfs_mount; +struct xfs_trans; +struct xfs_dquot; + +#if defined(XFS_ILOCK_TRACE) +#define XFS_ILOCK_KTRACE_SIZE 32 +extern ktrace_t *xfs_ilock_trace_buf; +extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *); +#else +#define xfs_ilock_trace(i,n,f,ra) +#endif + +typedef struct dm_attrs_s { + __uint32_t da_dmevmask; /* DMIG event mask */ + __uint16_t da_dmstate; /* DMIG state info */ + __uint16_t da_pad; /* DMIG extra padding */ +} dm_attrs_t; + typedef struct { struct xfs_inode *ip_mnext; /* next inode in mount list */ struct xfs_inode *ip_mprev; /* ptr to prev inode */ @@ -327,50 +369,26 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) spin_unlock(&ip->i_flags_lock); return ret; } -#endif /* __KERNEL__ */ - /* - * Fork handling. + * Manage the i_flush queue embedded in the inode. This completion + * queue synchronizes processes attempting to flush the in-core + * inode back to disk. */ +static inline void xfs_iflock(xfs_inode_t *ip) +{ + wait_for_completion(&ip->i_flush); +} -#define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0) -#define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3)) - -#define XFS_IFORK_PTR(ip,w) \ - ((w) == XFS_DATA_FORK ? \ - &(ip)->i_df : \ - (ip)->i_afp) -#define XFS_IFORK_DSIZE(ip) \ - (XFS_IFORK_Q(ip) ? \ - XFS_IFORK_BOFF(ip) : \ - XFS_LITINO((ip)->i_mount)) -#define XFS_IFORK_ASIZE(ip) \ - (XFS_IFORK_Q(ip) ? \ - XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : \ - 0) -#define XFS_IFORK_SIZE(ip,w) \ - ((w) == XFS_DATA_FORK ? \ - XFS_IFORK_DSIZE(ip) : \ - XFS_IFORK_ASIZE(ip)) -#define XFS_IFORK_FORMAT(ip,w) \ - ((w) == XFS_DATA_FORK ? 
\ - (ip)->i_d.di_format : \ - (ip)->i_d.di_aformat) -#define XFS_IFORK_FMT_SET(ip,w,n) \ - ((w) == XFS_DATA_FORK ? \ - ((ip)->i_d.di_format = (n)) : \ - ((ip)->i_d.di_aformat = (n))) -#define XFS_IFORK_NEXTENTS(ip,w) \ - ((w) == XFS_DATA_FORK ? \ - (ip)->i_d.di_nextents : \ - (ip)->i_d.di_anextents) -#define XFS_IFORK_NEXT_SET(ip,w,n) \ - ((w) == XFS_DATA_FORK ? \ - ((ip)->i_d.di_nextents = (n)) : \ - ((ip)->i_d.di_anextents = (n))) +static inline int xfs_iflock_nowait(xfs_inode_t *ip) +{ + return try_wait_for_completion(&ip->i_flush); +} -#ifdef __KERNEL__ +static inline void xfs_ifunlock(xfs_inode_t *ip) +{ + complete(&ip->i_flush); +} /* * In-core inode flags. @@ -490,19 +508,11 @@ int xfs_finish_reclaim_all(struct xfs_mount *, int); /* * xfs_inode.c prototypes. */ -int xfs_itobp(struct xfs_mount *, struct xfs_trans *, - xfs_inode_t *, struct xfs_dinode **, struct xfs_buf **, - xfs_daddr_t, uint, uint); int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, xfs_inode_t **, xfs_daddr_t, uint); -int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t, int, struct xfs_buf **, boolean_t *, xfs_inode_t **); -void xfs_dinode_from_disk(struct xfs_icdinode *, - struct xfs_dinode_core *); -void xfs_dinode_to_disk(struct xfs_dinode_core *, - struct xfs_icdinode *); uint xfs_ip2xflags(struct xfs_inode *); uint xfs_dic2xflags(struct xfs_dinode *); @@ -514,15 +524,11 @@ int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); struct xfs_inode * xfs_inode_alloc(struct xfs_mount *, xfs_ino_t); -void xfs_idestroy_fork(xfs_inode_t *, int); void xfs_idestroy(xfs_inode_t *); -void xfs_idata_realloc(xfs_inode_t *, int, int); void xfs_iextract(xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); -void xfs_iroot_realloc(xfs_inode_t *, int, int); void xfs_ipin(xfs_inode_t *); void xfs_iunpin(xfs_inode_t *); -int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); int xfs_iflush(xfs_inode_t *, uint); void xfs_iflush_all(struct xfs_mount *); void xfs_ichgtime(xfs_inode_t *, int); @@ -533,6 +539,21 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); void xfs_synchronize_atime(xfs_inode_t *); void xfs_mark_inode_dirty_sync(xfs_inode_t *); +#endif /* __KERNEL__ */ + +int xfs_itobp(struct xfs_mount *, struct xfs_trans *, + struct xfs_inode *, struct xfs_dinode **, + struct xfs_buf **, xfs_daddr_t, uint, uint); +void xfs_dinode_from_disk(struct xfs_icdinode *, + struct xfs_dinode_core *); +void xfs_dinode_to_disk(struct xfs_dinode_core *, + struct xfs_icdinode *); +void xfs_idestroy_fork(struct xfs_inode *, int); +void xfs_idata_realloc(struct xfs_inode *, int, int); +void xfs_iroot_realloc(struct xfs_inode *, int, int); +int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int); +int xfs_iextents_copy(struct xfs_inode *, xfs_bmbt_rec_t *, int); + xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t, xfs_bmbt_irec_t *); @@ -562,7 +583,8 @@ void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int); #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) #ifdef DEBUG -void xfs_isize_check(struct xfs_mount *, xfs_inode_t *, xfs_fsize_t); +void xfs_isize_check(struct xfs_mount *, struct xfs_inode *, + xfs_fsize_t); #else /* DEBUG */ #define xfs_isize_check(mp, ip, isize) #endif /* DEBUG */ @@ -577,26 +599,4 @@ 
extern struct kmem_zone *xfs_ifork_zone; extern struct kmem_zone *xfs_inode_zone; extern struct kmem_zone *xfs_ili_zone; -/* - * Manage the i_flush queue embedded in the inode. This completion - * queue synchronizes processes attempting to flush the in-core - * inode back to disk. - */ -static inline void xfs_iflock(xfs_inode_t *ip) -{ - wait_for_completion(&ip->i_flush); -} - -static inline int xfs_iflock_nowait(xfs_inode_t *ip) -{ - return try_wait_for_completion(&ip->i_flush); -} - -static inline void xfs_ifunlock(xfs_inode_t *ip) -{ - complete(&ip->i_flush); -} - -#endif /* __KERNEL__ */ - #endif /* __XFS_INODE_H__ */ diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 40513077ab3..1ff04cc323a 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -112,6 +112,24 @@ typedef struct xfs_inode_log_format_64 { #define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED) +#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) +static inline int xfs_ilog_fbroot(int w) +{ + return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); +} + +#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) +static inline int xfs_ilog_fext(int w) +{ + return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT); +} + +#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) +static inline int xfs_ilog_fdata(int w) +{ + return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA); +} + #ifdef __KERNEL__ struct xfs_buf; @@ -148,26 +166,6 @@ typedef struct xfs_inode_log_item { } xfs_inode_log_item_t; -#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w) -static inline int xfs_ilog_fdata(int w) -{ - return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA); -} - -#endif /* __KERNEL__ */ - -#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w) -static inline int xfs_ilog_fbroot(int w) -{ - return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT); -} - -#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w) -static inline int xfs_ilog_fext(int w) -{ - return (w == XFS_DATA_FORK ? 
XFS_ILOG_DEXT : XFS_ILOG_AEXT); -} - static inline int xfs_inode_clean(xfs_inode_t *ip) { return (!ip->i_itemp || @@ -175,9 +173,6 @@ static inline int xfs_inode_clean(xfs_inode_t *ip) !ip->i_update_core; } - -#ifdef __KERNEL__ - extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_destroy(struct xfs_inode *); extern void xfs_iflush_done(struct xfs_buf *, xfs_inode_log_item_t *); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 49d647e730e..56c3b122e53 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -44,14 +44,14 @@ typedef struct xfs_trans_reservations { } xfs_trans_reservations_t; #ifndef __KERNEL__ -/* - * Moved here from xfs_ag.h to avoid reordering header files - */ + #define XFS_DADDR_TO_AGNO(mp,d) \ ((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks)) #define XFS_DADDR_TO_AGBNO(mp,d) \ ((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks)) -#else + +#else /* __KERNEL__ */ + struct cred; struct log; struct xfs_mount_args; @@ -507,7 +507,6 @@ typedef struct xfs_mod_sb { #define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock)) #define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) -extern void xfs_mod_sb(xfs_trans_t *, __int64_t); extern int xfs_log_sbcount(xfs_mount_t *, uint); extern int xfs_mountfs(xfs_mount_t *mp); extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); @@ -526,9 +525,6 @@ extern void xfs_freesb(xfs_mount_t *); extern int xfs_fs_writable(xfs_mount_t *); extern int xfs_syncsub(xfs_mount_t *, int, int *); extern int xfs_sync_inodes(xfs_mount_t *, int, int *); -extern xfs_agnumber_t xfs_initialize_perag(xfs_mount_t *, xfs_agnumber_t); -extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); -extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); extern int xfs_dmops_get(struct xfs_mount *, struct xfs_mount_args *); @@ -540,4 +536,9 @@ extern struct xfs_dmops xfs_dmcore_xfs; #endif /* __KERNEL__ */ +extern void xfs_mod_sb(struct xfs_trans *, __int64_t); +extern xfs_agnumber_t xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t); +extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *); +extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t); + #endif /* __XFS_MOUNT_H__ */ diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 74c80bd2b0e..1d89d50a5b9 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -18,6 +18,8 @@ #ifndef __XFS_TRANS_H__ #define __XFS_TRANS_H__ +struct xfs_log_item; + /* * This is the structure written in the log at the head of * every transaction. 
It identifies the type and id of the @@ -98,76 +100,6 @@ typedef struct xfs_trans_header { #define XFS_TRANS_TYPE_MAX 41 /* new transaction types need to be reflected in xfs_logprint(8) */ - -#ifdef __KERNEL__ -struct xfs_buf; -struct xfs_buftarg; -struct xfs_efd_log_item; -struct xfs_efi_log_item; -struct xfs_inode; -struct xfs_item_ops; -struct xfs_log_iovec; -struct xfs_log_item; -struct xfs_log_item_desc; -struct xfs_mount; -struct xfs_trans; -struct xfs_dquot_acct; - -typedef struct xfs_log_item { - struct list_head li_ail; /* AIL pointers */ - xfs_lsn_t li_lsn; /* last on-disk lsn */ - struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ - struct xfs_mount *li_mountp; /* ptr to fs mount */ - uint li_type; /* item type */ - uint li_flags; /* misc flags */ - struct xfs_log_item *li_bio_list; /* buffer item list */ - void (*li_cb)(struct xfs_buf *, - struct xfs_log_item *); - /* buffer item iodone */ - /* callback func */ - struct xfs_item_ops *li_ops; /* function list */ -} xfs_log_item_t; - -#define XFS_LI_IN_AIL 0x1 -#define XFS_LI_ABORTED 0x2 - -typedef struct xfs_item_ops { - uint (*iop_size)(xfs_log_item_t *); - void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); - void (*iop_pin)(xfs_log_item_t *); - void (*iop_unpin)(xfs_log_item_t *, int); - void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *); - uint (*iop_trylock)(xfs_log_item_t *); - void (*iop_unlock)(xfs_log_item_t *); - xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); - void (*iop_push)(xfs_log_item_t *); - void (*iop_pushbuf)(xfs_log_item_t *); - void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); -} xfs_item_ops_t; - -#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) -#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) -#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) -#define IOP_UNPIN(ip, flags) (*(ip)->li_ops->iop_unpin)(ip, flags) -#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp) -#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) -#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) -#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) -#define IOP_PUSH(ip) (*(ip)->li_ops->iop_push)(ip) -#define IOP_PUSHBUF(ip) (*(ip)->li_ops->iop_pushbuf)(ip) -#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn) - -/* - * Return values for the IOP_TRYLOCK() routines. - */ -#define XFS_ITEM_SUCCESS 0 -#define XFS_ITEM_PINNED 1 -#define XFS_ITEM_LOCKED 2 -#define XFS_ITEM_FLUSHING 3 -#define XFS_ITEM_PUSHBUF 4 - -#endif /* __KERNEL__ */ - /* * This structure is used to track log items associated with * a transaction. It points to the log item and keeps some @@ -176,7 +108,7 @@ typedef struct xfs_item_ops { * once we get to commit processing (see xfs_trans_commit()). */ typedef struct xfs_log_item_desc { - xfs_log_item_t *lid_item; + struct xfs_log_item *lid_item; ushort lid_size; unsigned char lid_flags; unsigned char lid_index; @@ -276,94 +208,6 @@ xfs_lic_desc_to_chunk(xfs_log_item_desc_t *dp) (xfs_caddr_t)(((xfs_log_item_chunk_t*)0)->lic_descs)); } -#ifdef __KERNEL__ -/* - * This structure is used to maintain a list of block ranges that have been - * freed in the transaction. The ranges are listed in the perag[] busy list - * between when they're freed and the transaction is committed to disk. 
- */ - -typedef struct xfs_log_busy_slot { - xfs_agnumber_t lbc_ag; - ushort lbc_idx; /* index in perag.busy[] */ -} xfs_log_busy_slot_t; - -#define XFS_LBC_NUM_SLOTS 31 -typedef struct xfs_log_busy_chunk { - struct xfs_log_busy_chunk *lbc_next; - uint lbc_free; /* free slots bitmask */ - ushort lbc_unused; /* first unused */ - xfs_log_busy_slot_t lbc_busy[XFS_LBC_NUM_SLOTS]; -} xfs_log_busy_chunk_t; - -#define XFS_LBC_MAX_SLOT (XFS_LBC_NUM_SLOTS - 1) -#define XFS_LBC_FREEMASK ((1U << XFS_LBC_NUM_SLOTS) - 1) - -#define XFS_LBC_INIT(cp) ((cp)->lbc_free = XFS_LBC_FREEMASK) -#define XFS_LBC_CLAIM(cp, slot) ((cp)->lbc_free &= ~(1 << (slot))) -#define XFS_LBC_SLOT(cp, slot) (&((cp)->lbc_busy[(slot)])) -#define XFS_LBC_VACANCY(cp) (((cp)->lbc_free) & XFS_LBC_FREEMASK) -#define XFS_LBC_ISFREE(cp, slot) ((cp)->lbc_free & (1 << (slot))) - -/* - * This is the type of function which can be given to xfs_trans_callback() - * to be called upon the transaction's commit to disk. - */ -typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *); - -/* - * This is the structure maintained for every active transaction. - */ -typedef struct xfs_trans { - unsigned int t_magic; /* magic number */ - xfs_log_callback_t t_logcb; /* log callback struct */ - unsigned int t_type; /* transaction type */ - unsigned int t_log_res; /* amt of log space resvd */ - unsigned int t_log_count; /* count for perm log res */ - unsigned int t_blk_res; /* # of blocks resvd */ - unsigned int t_blk_res_used; /* # of resvd blocks used */ - unsigned int t_rtx_res; /* # of rt extents resvd */ - unsigned int t_rtx_res_used; /* # of resvd rt extents used */ - xfs_log_ticket_t t_ticket; /* log mgr ticket */ - xfs_lsn_t t_lsn; /* log seq num of start of - * transaction. */ - xfs_lsn_t t_commit_lsn; /* log seq num of end of - * transaction. */ - struct xfs_mount *t_mountp; /* ptr to fs mount struct */ - struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ - xfs_trans_callback_t t_callback; /* transaction callback */ - void *t_callarg; /* callback arg */ - unsigned int t_flags; /* misc flags */ - int64_t t_icount_delta; /* superblock icount change */ - int64_t t_ifree_delta; /* superblock ifree change */ - int64_t t_fdblocks_delta; /* superblock fdblocks chg */ - int64_t t_res_fdblocks_delta; /* on-disk only chg */ - int64_t t_frextents_delta;/* superblock freextents chg*/ - int64_t t_res_frextents_delta; /* on-disk only chg */ -#ifdef DEBUG - int64_t t_ag_freeblks_delta; /* debugging counter */ - int64_t t_ag_flist_delta; /* debugging counter */ - int64_t t_ag_btree_delta; /* debugging counter */ -#endif - int64_t t_dblocks_delta;/* superblock dblocks change */ - int64_t t_agcount_delta;/* superblock agcount change */ - int64_t t_imaxpct_delta;/* superblock imaxpct change */ - int64_t t_rextsize_delta;/* superblock rextsize chg */ - int64_t t_rbmblocks_delta;/* superblock rbmblocks chg */ - int64_t t_rblocks_delta;/* superblock rblocks change */ - int64_t t_rextents_delta;/* superblocks rextents chg */ - int64_t t_rextslog_delta;/* superblocks rextslog chg */ - unsigned int t_items_free; /* log item descs free */ - xfs_log_item_chunk_t t_items; /* first log item desc chunk */ - xfs_trans_header_t t_header; /* header for in-log trans */ - unsigned int t_busy_free; /* busy descs free */ - xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ - unsigned long t_pflags; /* saved process flags state */ -} xfs_trans_t; - -#endif /* __KERNEL__ */ - - #define XFS_TRANS_MAGIC 0x5452414E /* 'TRAN' */ /* * Values for t_flags. 
@@ -906,6 +750,156 @@ typedef struct xfs_trans { #define XFS_DQUOT_REF 1 #ifdef __KERNEL__ + +struct xfs_buf; +struct xfs_buftarg; +struct xfs_efd_log_item; +struct xfs_efi_log_item; +struct xfs_inode; +struct xfs_item_ops; +struct xfs_log_iovec; +struct xfs_log_item_desc; +struct xfs_mount; +struct xfs_trans; +struct xfs_dquot_acct; + +typedef struct xfs_log_item { + struct list_head li_ail; /* AIL pointers */ + xfs_lsn_t li_lsn; /* last on-disk lsn */ + struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ + struct xfs_mount *li_mountp; /* ptr to fs mount */ + uint li_type; /* item type */ + uint li_flags; /* misc flags */ + struct xfs_log_item *li_bio_list; /* buffer item list */ + void (*li_cb)(struct xfs_buf *, + struct xfs_log_item *); + /* buffer item iodone */ + /* callback func */ + struct xfs_item_ops *li_ops; /* function list */ +} xfs_log_item_t; + +#define XFS_LI_IN_AIL 0x1 +#define XFS_LI_ABORTED 0x2 + +typedef struct xfs_item_ops { + uint (*iop_size)(xfs_log_item_t *); + void (*iop_format)(xfs_log_item_t *, struct xfs_log_iovec *); + void (*iop_pin)(xfs_log_item_t *); + void (*iop_unpin)(xfs_log_item_t *, int); + void (*iop_unpin_remove)(xfs_log_item_t *, struct xfs_trans *); + uint (*iop_trylock)(xfs_log_item_t *); + void (*iop_unlock)(xfs_log_item_t *); + xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); + void (*iop_push)(xfs_log_item_t *); + void (*iop_pushbuf)(xfs_log_item_t *); + void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); +} xfs_item_ops_t; + +#define IOP_SIZE(ip) (*(ip)->li_ops->iop_size)(ip) +#define IOP_FORMAT(ip,vp) (*(ip)->li_ops->iop_format)(ip, vp) +#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip) +#define IOP_UNPIN(ip, flags) (*(ip)->li_ops->iop_unpin)(ip, flags) +#define IOP_UNPIN_REMOVE(ip,tp) (*(ip)->li_ops->iop_unpin_remove)(ip, tp) +#define IOP_TRYLOCK(ip) (*(ip)->li_ops->iop_trylock)(ip) +#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) +#define IOP_COMMITTED(ip, lsn) (*(ip)->li_ops->iop_committed)(ip, lsn) +#define IOP_PUSH(ip) (*(ip)->li_ops->iop_push)(ip) +#define IOP_PUSHBUF(ip) (*(ip)->li_ops->iop_pushbuf)(ip) +#define IOP_COMMITTING(ip, lsn) (*(ip)->li_ops->iop_committing)(ip, lsn) + +/* + * Return values for the IOP_TRYLOCK() routines. + */ +#define XFS_ITEM_SUCCESS 0 +#define XFS_ITEM_PINNED 1 +#define XFS_ITEM_LOCKED 2 +#define XFS_ITEM_FLUSHING 3 +#define XFS_ITEM_PUSHBUF 4 + +/* + * This structure is used to maintain a list of block ranges that have been + * freed in the transaction. The ranges are listed in the perag[] busy list + * between when they're freed and the transaction is committed to disk. 
+ */ + +typedef struct xfs_log_busy_slot { + xfs_agnumber_t lbc_ag; + ushort lbc_idx; /* index in perag.busy[] */ +} xfs_log_busy_slot_t; + +#define XFS_LBC_NUM_SLOTS 31 +typedef struct xfs_log_busy_chunk { + struct xfs_log_busy_chunk *lbc_next; + uint lbc_free; /* free slots bitmask */ + ushort lbc_unused; /* first unused */ + xfs_log_busy_slot_t lbc_busy[XFS_LBC_NUM_SLOTS]; +} xfs_log_busy_chunk_t; + +#define XFS_LBC_MAX_SLOT (XFS_LBC_NUM_SLOTS - 1) +#define XFS_LBC_FREEMASK ((1U << XFS_LBC_NUM_SLOTS) - 1) + +#define XFS_LBC_INIT(cp) ((cp)->lbc_free = XFS_LBC_FREEMASK) +#define XFS_LBC_CLAIM(cp, slot) ((cp)->lbc_free &= ~(1 << (slot))) +#define XFS_LBC_SLOT(cp, slot) (&((cp)->lbc_busy[(slot)])) +#define XFS_LBC_VACANCY(cp) (((cp)->lbc_free) & XFS_LBC_FREEMASK) +#define XFS_LBC_ISFREE(cp, slot) ((cp)->lbc_free & (1 << (slot))) + +/* + * This is the type of function which can be given to xfs_trans_callback() + * to be called upon the transaction's commit to disk. + */ +typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *); + +/* + * This is the structure maintained for every active transaction. + */ +typedef struct xfs_trans { + unsigned int t_magic; /* magic number */ + xfs_log_callback_t t_logcb; /* log callback struct */ + unsigned int t_type; /* transaction type */ + unsigned int t_log_res; /* amt of log space resvd */ + unsigned int t_log_count; /* count for perm log res */ + unsigned int t_blk_res; /* # of blocks resvd */ + unsigned int t_blk_res_used; /* # of resvd blocks used */ + unsigned int t_rtx_res; /* # of rt extents resvd */ + unsigned int t_rtx_res_used; /* # of resvd rt extents used */ + xfs_log_ticket_t t_ticket; /* log mgr ticket */ + xfs_lsn_t t_lsn; /* log seq num of start of + * transaction. */ + xfs_lsn_t t_commit_lsn; /* log seq num of end of + * transaction. */ + struct xfs_mount *t_mountp; /* ptr to fs mount struct */ + struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ + xfs_trans_callback_t t_callback; /* transaction callback */ + void *t_callarg; /* callback arg */ + unsigned int t_flags; /* misc flags */ + int64_t t_icount_delta; /* superblock icount change */ + int64_t t_ifree_delta; /* superblock ifree change */ + int64_t t_fdblocks_delta; /* superblock fdblocks chg */ + int64_t t_res_fdblocks_delta; /* on-disk only chg */ + int64_t t_frextents_delta;/* superblock freextents chg*/ + int64_t t_res_frextents_delta; /* on-disk only chg */ +#ifdef DEBUG + int64_t t_ag_freeblks_delta; /* debugging counter */ + int64_t t_ag_flist_delta; /* debugging counter */ + int64_t t_ag_btree_delta; /* debugging counter */ +#endif + int64_t t_dblocks_delta;/* superblock dblocks change */ + int64_t t_agcount_delta;/* superblock agcount change */ + int64_t t_imaxpct_delta;/* superblock imaxpct change */ + int64_t t_rextsize_delta;/* superblock rextsize chg */ + int64_t t_rbmblocks_delta;/* superblock rbmblocks chg */ + int64_t t_rblocks_delta;/* superblock rblocks change */ + int64_t t_rextents_delta;/* superblocks rextents chg */ + int64_t t_rextslog_delta;/* superblocks rextslog chg */ + unsigned int t_items_free; /* log item descs free */ + xfs_log_item_chunk_t t_items; /* first log item desc chunk */ + xfs_trans_header_t t_header; /* header for in-log trans */ + unsigned int t_busy_free; /* busy descs free */ + xfs_log_busy_chunk_t t_busy; /* busy/async free blocks */ + unsigned long t_pflags; /* saved process flags state */ +} xfs_trans_t; + /* * XFS transaction mechanism exported interfaces that are * actually macros. 
@@ -928,7 +922,6 @@ typedef struct xfs_trans { /* * XFS transaction mechanism exported interfaces. */ -void xfs_trans_init(struct xfs_mount *); xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint); xfs_trans_t *xfs_trans_dup(xfs_trans_t *); @@ -975,7 +968,6 @@ int _xfs_trans_commit(xfs_trans_t *, int *); #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) void xfs_trans_cancel(xfs_trans_t *, int); -int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *); int xfs_trans_ail_init(struct xfs_mount *); void xfs_trans_ail_destroy(struct xfs_mount *); void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); @@ -990,4 +982,7 @@ extern kmem_zone_t *xfs_trans_zone; #endif /* __KERNEL__ */ +void xfs_trans_init(struct xfs_mount *); +int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *); + #endif /* __XFS_TRANS_H__ */ -- cgit v1.2.3-70-g09d2 From 89b2839319cb0c0364d55dc6fd6d3838e864ab54 Mon Sep 17 00:00:00 2001 From: Barry Naujok Date: Thu, 30 Oct 2008 17:05:49 +1100 Subject: [XFS] Check agf_btreeblks is valid when reading in the AGF SGI-PV: 987683 SGI-Modid: xfs-linux-melb:xfs-kern:32232a Signed-off-by: Barry Naujok Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_alloc.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 54fa69e2776..0a2a87208b1 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -2272,6 +2272,9 @@ xfs_alloc_read_agf( be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp); + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) + agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <= + be32_to_cpu(agf->agf_length); if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF, XFS_RANDOM_ALLOC_READ_AGF))) { XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", @@ -2297,6 +2300,7 @@ xfs_alloc_read_agf( #ifdef DEBUG else if (!XFS_FORCED_SHUTDOWN(mp)) { ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks)); + ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks)); ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount)); ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest)); ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] == -- cgit v1.2.3-70-g09d2 From 34519daae6778d129d56688f75ade27f6011bac9 Mon Sep 17 00:00:00 2001 From: Barry Naujok Date: Thu, 30 Oct 2008 17:05:58 +1100 Subject: [XFS] Show buffer address with debug hexdump on corruption SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32233a Signed-off-by: Barry Naujok Signed-off-by: Eric Sandeen Signed-off-by: Lachlan McIlroy --- fs/xfs/support/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index c27abef7b84..636104254cf 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -84,5 +84,5 @@ assfail(char *expr, char *file, int line) void xfs_hex_dump(void *p, int length) { - print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1); + print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); } -- cgit v1.2.3-70-g09d2 From fe4fa4b8e463fa5848ef9e86ed75d27501d0da1e Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:06:08 +1100 Subject: [XFS] move sync code to its own file The sync code in XFS is spread around several files. 
While it used to make sense to have such a distribution, the code is about to be cleaned up and so centralising it in one spot as the first step makes sense. SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32282a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/Makefile | 1 + fs/xfs/linux-2.6/xfs_super.c | 1 + fs/xfs/linux-2.6/xfs_sync.c | 605 +++++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_sync.h | 7 + fs/xfs/xfs_vfsops.c | 560 +-------------------------------------- fs/xfs/xfs_vfsops.h | 1 - 6 files changed, 615 insertions(+), 560 deletions(-) create mode 100644 fs/xfs/linux-2.6/xfs_sync.c create mode 100644 fs/xfs/linux-2.6/xfs_sync.h (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 75b2be72c39..51b87de97f8 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -107,6 +107,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ xfs_iops.o \ xfs_lrw.o \ xfs_super.o \ + xfs_sync.o \ xfs_vnode.o \ xfs_xattr.o) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 0c71c215853..727d0e47e80 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -59,6 +59,7 @@ #include "xfs_extfree_item.h" #include "xfs_mru_cache.h" #include "xfs_inode_item.h" +#include "xfs_sync.h" #include #include diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c new file mode 100644 index 00000000000..c765eb2a8dc --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -0,0 +1,605 @@ +/* + * Copyright (c) 2000-2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_types.h" +#include "xfs_bit.h" +#include "xfs_log.h" +#include "xfs_inum.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" +#include "xfs_bmap_btree.h" +#include "xfs_alloc_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_dir2_sf.h" +#include "xfs_attr_sf.h" +#include "xfs_inode.h" +#include "xfs_dinode.h" +#include "xfs_error.h" +#include "xfs_mru_cache.h" +#include "xfs_filestream.h" +#include "xfs_vnodeops.h" +#include "xfs_utils.h" +#include "xfs_buf_item.h" +#include "xfs_inode_item.h" +#include "xfs_rw.h" + +/* + * xfs_sync flushes any pending I/O to file system vfsp. + * + * This routine is called by vfs_sync() to make sure that things make it + * out to disk eventually, on sync() system calls to flush out everything, + * and when the file system is unmounted. For the vfs_sync() case, all + * we really need to do is sync out the log to make all of our meta-data + * updates permanent (except for timestamps). For calls from pflushd(), + * dirty pages are kept moving by calling pdflush() on the inodes + * containing them. 
We also flush the inodes that we can lock without + * sleeping and the superblock if we can lock it without sleeping from + * vfs_sync() so that items at the tail of the log are always moving out. + * + * Flags: + * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want + * to sleep if we can help it. All we really need + * to do is ensure that the log is synced at least + * periodically. We also push the inodes and + * superblock if we can lock them without sleeping + * and they are not pinned. + * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not + * set, then we really want to lock each inode and flush + * it. + * SYNC_WAIT - All the flushes that take place in this call should + * be synchronous. + * SYNC_DELWRI - This tells us to push dirty pages associated with + * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to + * determine if they should be flushed sync, async, or + * delwri. + * SYNC_CLOSE - This flag is passed when the system is being + * unmounted. We should sync and invalidate everything. + * SYNC_FSDATA - This indicates that the caller would like to make + * sure the superblock is safe on disk. We can ensure + * this by simply making sure the log gets flushed + * if SYNC_BDFLUSH is set, and by actually writing it + * out otherwise. + * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete + * before we return (including direct I/O). Forms the drain + * side of the write barrier needed to safely quiesce the + * filesystem. + * + */ +int +xfs_sync( + xfs_mount_t *mp, + int flags) +{ + int error; + + /* + * Get the Quota Manager to flush the dquots. + * + * If XFS quota support is not enabled or this filesystem + * instance does not use quotas XFS_QM_DQSYNC will always + * return zero. + */ + error = XFS_QM_DQSYNC(mp, flags); + if (error) { + /* + * If we got an IO error, we will be shutting down. + * So, there's nothing more for us to do here. + */ + ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(error); + } + + if (flags & SYNC_IOWAIT) + xfs_filestream_flush(mp); + + return xfs_syncsub(mp, flags, NULL); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic vfs_sync + * interface as explained above under xfs_sync. + * + */ +int +xfs_sync_inodes( + xfs_mount_t *mp, + int flags, + int *bypassed) +{ + xfs_inode_t *ip = NULL; + struct inode *vp = NULL; + int error; + int last_error; + uint64_t fflag; + uint lock_flags; + uint base_lock_flags; + boolean_t mount_locked; + boolean_t vnode_refed; + int preempt; + xfs_iptr_t *ipointer; +#ifdef DEBUG + boolean_t ipointer_in = B_FALSE; + +#define IPOINTER_SET ipointer_in = B_TRUE +#define IPOINTER_CLR ipointer_in = B_FALSE +#else +#define IPOINTER_SET +#define IPOINTER_CLR +#endif + + +/* Insert a marker record into the inode list after inode ip. The list + * must be locked when this is called. After the call the list will no + * longer be locked. + */ +#define IPOINTER_INSERT(ip, mp) { \ + ASSERT(ipointer_in == B_FALSE); \ + ipointer->ip_mnext = ip->i_mnext; \ + ipointer->ip_mprev = ip; \ + ip->i_mnext = (xfs_inode_t *)ipointer; \ + ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \ + preempt = 0; \ + XFS_MOUNT_IUNLOCK(mp); \ + mount_locked = B_FALSE; \ + IPOINTER_SET; \ + } + +/* Remove the marker from the inode list. If the marker was the only item + * in the list then there are no remaining inodes and we should zero out + * the whole list. 
If we are the current head of the list then move the head + * past us. + */ +#define IPOINTER_REMOVE(ip, mp) { \ + ASSERT(ipointer_in == B_TRUE); \ + if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \ + ip = ipointer->ip_mnext; \ + ip->i_mprev = ipointer->ip_mprev; \ + ipointer->ip_mprev->i_mnext = ip; \ + if (mp->m_inodes == (xfs_inode_t *)ipointer) { \ + mp->m_inodes = ip; \ + } \ + } else { \ + ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \ + mp->m_inodes = NULL; \ + ip = NULL; \ + } \ + IPOINTER_CLR; \ + } + +#define XFS_PREEMPT_MASK 0x7f + + ASSERT(!(flags & SYNC_BDFLUSH)); + + if (bypassed) + *bypassed = 0; + if (mp->m_flags & XFS_MOUNT_RDONLY) + return 0; + error = 0; + last_error = 0; + preempt = 0; + + /* Allocate a reference marker */ + ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); + + fflag = XFS_B_ASYNC; /* default is don't wait */ + if (flags & SYNC_DELWRI) + fflag = XFS_B_DELWRI; + if (flags & SYNC_WAIT) + fflag = 0; /* synchronous overrides all */ + + base_lock_flags = XFS_ILOCK_SHARED; + if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { + /* + * We need the I/O lock if we're going to call any of + * the flush/inval routines. + */ + base_lock_flags |= XFS_IOLOCK_SHARED; + } + + XFS_MOUNT_ILOCK(mp); + + ip = mp->m_inodes; + + mount_locked = B_TRUE; + vnode_refed = B_FALSE; + + IPOINTER_CLR; + + do { + ASSERT(ipointer_in == B_FALSE); + ASSERT(vnode_refed == B_FALSE); + + lock_flags = base_lock_flags; + + /* + * There were no inodes in the list, just break out + * of the loop. + */ + if (ip == NULL) { + break; + } + + /* + * We found another sync thread marker - skip it + */ + if (ip->i_mount == NULL) { + ip = ip->i_mnext; + continue; + } + + vp = VFS_I(ip); + + /* + * If the vnode is gone then this is being torn down, + * call reclaim if it is flushed, else let regular flush + * code deal with it later in the loop. + */ + + if (vp == NULL) { + /* Skip ones already in reclaim */ + if (ip->i_flags & XFS_IRECLAIM) { + ip = ip->i_mnext; + continue; + } + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { + ip = ip->i_mnext; + } else if ((xfs_ipincount(ip) == 0) && + xfs_iflock_nowait(ip)) { + IPOINTER_INSERT(ip, mp); + + xfs_finish_reclaim(ip, 1, + XFS_IFLUSH_DELWRI_ELSE_ASYNC); + + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + } else { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + ip = ip->i_mnext; + } + continue; + } + + if (VN_BAD(vp)) { + ip = ip->i_mnext; + continue; + } + + if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { + XFS_MOUNT_IUNLOCK(mp); + kmem_free(ipointer); + return 0; + } + + /* + * Try to lock without sleeping. We're out of order with + * the inode list lock here, so if we fail we need to drop + * the mount lock and try again. If we're called from + * bdflush() here, then don't bother. + * + * The inode lock here actually coordinates with the + * almost spurious inode lock in xfs_ireclaim() to prevent + * the vnode we handle here without a reference from + * being freed while we reference it. If we lock the inode + * while it's on the mount list here, then the spurious inode + * lock in xfs_ireclaim() after the inode is pulled from + * the mount list will sleep until we release it here. + * This keeps the vnode from being freed while we reference + * it. 
+ */ + if (xfs_ilock_nowait(ip, lock_flags) == 0) { + if (vp == NULL) { + ip = ip->i_mnext; + continue; + } + + vp = vn_grab(vp); + if (vp == NULL) { + ip = ip->i_mnext; + continue; + } + + IPOINTER_INSERT(ip, mp); + xfs_ilock(ip, lock_flags); + + ASSERT(vp == VFS_I(ip)); + ASSERT(ip->i_mount == mp); + + vnode_refed = B_TRUE; + } + + /* From here on in the loop we may have a marker record + * in the inode list. + */ + + /* + * If we have to flush data or wait for I/O completion + * we need to drop the ilock that we currently hold. + * If we need to drop the lock, insert a marker if we + * have not already done so. + */ + if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) || + ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + if (flags & SYNC_CLOSE) { + /* Shutdown case. Flush and invalidate. */ + if (XFS_FORCED_SHUTDOWN(mp)) + xfs_tosspages(ip, 0, -1, + FI_REMAPF); + else + error = xfs_flushinval_pages(ip, + 0, -1, FI_REMAPF); + } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { + error = xfs_flush_pages(ip, 0, + -1, fflag, FI_NONE); + } + + /* + * When freezing, we need to wait ensure all I/O (including direct + * I/O) is complete to ensure no further data modification can take + * place after this point + */ + if (flags & SYNC_IOWAIT) + vn_iowait(ip); + + xfs_ilock(ip, XFS_ILOCK_SHARED); + } + + if ((flags & SYNC_ATTR) && + (ip->i_update_core || + (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) { + if (mount_locked) + IPOINTER_INSERT(ip, mp); + + if (flags & SYNC_WAIT) { + xfs_iflock(ip); + error = xfs_iflush(ip, XFS_IFLUSH_SYNC); + + /* + * If we can't acquire the flush lock, then the inode + * is already being flushed so don't bother waiting. + * + * If we can lock it then do a delwri flush so we can + * combine multiple inode flushes in each disk write. + */ + } else if (xfs_iflock_nowait(ip)) { + error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); + } else if (bypassed) { + (*bypassed)++; + } + } + + if (lock_flags != 0) { + xfs_iunlock(ip, lock_flags); + } + + if (vnode_refed) { + /* + * If we had to take a reference on the vnode + * above, then wait until after we've unlocked + * the inode to release the reference. This is + * because we can be already holding the inode + * lock when IRELE() calls xfs_inactive(). + * + * Make sure to drop the mount lock before calling + * IRELE() so that we don't trip over ourselves if + * we have to go for the mount lock again in the + * inactive code. + */ + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + + IRELE(ip); + + vnode_refed = B_FALSE; + } + + if (error) { + last_error = error; + } + + /* + * bail out if the filesystem is corrupted. + */ + if (error == EFSCORRUPTED) { + if (!mount_locked) { + XFS_MOUNT_ILOCK(mp); + IPOINTER_REMOVE(ip, mp); + } + XFS_MOUNT_IUNLOCK(mp); + ASSERT(ipointer_in == B_FALSE); + kmem_free(ipointer); + return XFS_ERROR(error); + } + + /* Let other threads have a chance at the mount lock + * if we have looped many times without dropping the + * lock. 
+ */ + if ((++preempt & XFS_PREEMPT_MASK) == 0) { + if (mount_locked) { + IPOINTER_INSERT(ip, mp); + } + } + + if (mount_locked == B_FALSE) { + XFS_MOUNT_ILOCK(mp); + mount_locked = B_TRUE; + IPOINTER_REMOVE(ip, mp); + continue; + } + + ASSERT(ipointer_in == B_FALSE); + ip = ip->i_mnext; + + } while (ip != mp->m_inodes); + + XFS_MOUNT_IUNLOCK(mp); + + ASSERT(ipointer_in == B_FALSE); + + kmem_free(ipointer); + return XFS_ERROR(last_error); +} + +/* + * xfs sync routine for internal use + * + * This routine supports all of the flags defined for the generic vfs_sync + * interface as explained above under xfs_sync. + * + */ +int +xfs_syncsub( + xfs_mount_t *mp, + int flags, + int *bypassed) +{ + int error = 0; + int last_error = 0; + uint log_flags = XFS_LOG_FORCE; + xfs_buf_t *bp; + xfs_buf_log_item_t *bip; + + /* + * Sync out the log. This ensures that the log is periodically + * flushed even if there is not enough activity to fill it up. + */ + if (flags & SYNC_WAIT) + log_flags |= XFS_LOG_SYNC; + + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + + if (flags & (SYNC_ATTR|SYNC_DELWRI)) { + if (flags & SYNC_BDFLUSH) + xfs_finish_reclaim_all(mp, 1); + else + error = xfs_sync_inodes(mp, flags, bypassed); + } + + /* + * Flushing out dirty data above probably generated more + * log activity, so if this isn't vfs_sync() then flush + * the log again. + */ + if (flags & SYNC_DELWRI) { + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + } + + if (flags & SYNC_FSDATA) { + /* + * If this is vfs_sync() then only sync the superblock + * if we can lock it without sleeping and it is not pinned. + */ + if (flags & SYNC_BDFLUSH) { + bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); + if (bp != NULL) { + bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); + if ((bip != NULL) && + xfs_buf_item_dirty(bip)) { + if (!(XFS_BUF_ISPINNED(bp))) { + XFS_BUF_ASYNC(bp); + error = xfs_bwrite(mp, bp); + } else { + xfs_buf_relse(bp); + } + } else { + xfs_buf_relse(bp); + } + } + } else { + bp = xfs_getsb(mp, 0); + /* + * If the buffer is pinned then push on the log so + * we won't get stuck waiting in the write for + * someone, maybe ourselves, to flush the log. + * Even though we just pushed the log above, we + * did not have the superblock buffer locked at + * that point so it can become pinned in between + * there and here. + */ + if (XFS_BUF_ISPINNED(bp)) + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + if (flags & SYNC_WAIT) + XFS_BUF_UNASYNC(bp); + else + XFS_BUF_ASYNC(bp); + error = xfs_bwrite(mp, bp); + } + if (error) { + last_error = error; + } + } + + /* + * Now check to see if the log needs a "dummy" transaction. + */ + if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { + xfs_trans_t *tp; + xfs_inode_t *ip; + + /* + * Put a dummy transaction in the log to tell + * recovery that all others are OK. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + if ((error = xfs_trans_reserve(tp, 0, + XFS_ICHANGE_LOG_RES(mp), + 0, 0, 0))) { + xfs_trans_cancel(tp, 0); + return error; + } + + ip = mp->m_rootip; + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + error = xfs_trans_commit(tp, 0); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_log_force(mp, (xfs_lsn_t)0, log_flags); + } + + /* + * When shutting down, we need to insure that the AIL is pushed + * to disk or the filesystem can appear corrupt from the PROM. 
+ */ + if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) { + XFS_bflush(mp->m_ddev_targp); + if (mp->m_rtdev_targp) { + XFS_bflush(mp->m_rtdev_targp); + } + } + + return XFS_ERROR(last_error); +} diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h new file mode 100644 index 00000000000..f4c3b1ea64c --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -0,0 +1,7 @@ +#ifndef XFS_SYNC_H +#define XFS_SYNC_H 1 + +int xfs_sync(struct xfs_mount *mp, int flags); +int xfs_syncsub(struct xfs_mount *mp, int flags, int *bypassed); + +#endif diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 439dd3939dd..01e274b902c 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -56,6 +56,7 @@ #include "xfs_vnodeops.h" #include "xfs_vfsops.h" #include "xfs_utils.h" +#include "xfs_sync.h" STATIC void @@ -196,562 +197,3 @@ fscorrupt_out2: return XFS_ERROR(EFSCORRUPTED); } -/* - * xfs_sync flushes any pending I/O to file system vfsp. - * - * This routine is called by vfs_sync() to make sure that things make it - * out to disk eventually, on sync() system calls to flush out everything, - * and when the file system is unmounted. For the vfs_sync() case, all - * we really need to do is sync out the log to make all of our meta-data - * updates permanent (except for timestamps). For calls from pflushd(), - * dirty pages are kept moving by calling pdflush() on the inodes - * containing them. We also flush the inodes that we can lock without - * sleeping and the superblock if we can lock it without sleeping from - * vfs_sync() so that items at the tail of the log are always moving out. - * - * Flags: - * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want - * to sleep if we can help it. All we really need - * to do is ensure that the log is synced at least - * periodically. We also push the inodes and - * superblock if we can lock them without sleeping - * and they are not pinned. - * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not - * set, then we really want to lock each inode and flush - * it. - * SYNC_WAIT - All the flushes that take place in this call should - * be synchronous. - * SYNC_DELWRI - This tells us to push dirty pages associated with - * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to - * determine if they should be flushed sync, async, or - * delwri. - * SYNC_CLOSE - This flag is passed when the system is being - * unmounted. We should sync and invalidate everything. - * SYNC_FSDATA - This indicates that the caller would like to make - * sure the superblock is safe on disk. We can ensure - * this by simply making sure the log gets flushed - * if SYNC_BDFLUSH is set, and by actually writing it - * out otherwise. - * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete - * before we return (including direct I/O). Forms the drain - * side of the write barrier needed to safely quiesce the - * filesystem. - * - */ -int -xfs_sync( - xfs_mount_t *mp, - int flags) -{ - int error; - - /* - * Get the Quota Manager to flush the dquots. - * - * If XFS quota support is not enabled or this filesystem - * instance does not use quotas XFS_QM_DQSYNC will always - * return zero. - */ - error = XFS_QM_DQSYNC(mp, flags); - if (error) { - /* - * If we got an IO error, we will be shutting down. - * So, there's nothing more for us to do here. 
- */ - ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(error); - } - - if (flags & SYNC_IOWAIT) - xfs_filestream_flush(mp); - - return xfs_syncsub(mp, flags, NULL); -} - -/* - * xfs sync routine for internal use - * - * This routine supports all of the flags defined for the generic vfs_sync - * interface as explained above under xfs_sync. - * - */ -int -xfs_sync_inodes( - xfs_mount_t *mp, - int flags, - int *bypassed) -{ - xfs_inode_t *ip = NULL; - struct inode *vp = NULL; - int error; - int last_error; - uint64_t fflag; - uint lock_flags; - uint base_lock_flags; - boolean_t mount_locked; - boolean_t vnode_refed; - int preempt; - xfs_iptr_t *ipointer; -#ifdef DEBUG - boolean_t ipointer_in = B_FALSE; - -#define IPOINTER_SET ipointer_in = B_TRUE -#define IPOINTER_CLR ipointer_in = B_FALSE -#else -#define IPOINTER_SET -#define IPOINTER_CLR -#endif - - -/* Insert a marker record into the inode list after inode ip. The list - * must be locked when this is called. After the call the list will no - * longer be locked. - */ -#define IPOINTER_INSERT(ip, mp) { \ - ASSERT(ipointer_in == B_FALSE); \ - ipointer->ip_mnext = ip->i_mnext; \ - ipointer->ip_mprev = ip; \ - ip->i_mnext = (xfs_inode_t *)ipointer; \ - ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \ - preempt = 0; \ - XFS_MOUNT_IUNLOCK(mp); \ - mount_locked = B_FALSE; \ - IPOINTER_SET; \ - } - -/* Remove the marker from the inode list. If the marker was the only item - * in the list then there are no remaining inodes and we should zero out - * the whole list. If we are the current head of the list then move the head - * past us. - */ -#define IPOINTER_REMOVE(ip, mp) { \ - ASSERT(ipointer_in == B_TRUE); \ - if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \ - ip = ipointer->ip_mnext; \ - ip->i_mprev = ipointer->ip_mprev; \ - ipointer->ip_mprev->i_mnext = ip; \ - if (mp->m_inodes == (xfs_inode_t *)ipointer) { \ - mp->m_inodes = ip; \ - } \ - } else { \ - ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \ - mp->m_inodes = NULL; \ - ip = NULL; \ - } \ - IPOINTER_CLR; \ - } - -#define XFS_PREEMPT_MASK 0x7f - - ASSERT(!(flags & SYNC_BDFLUSH)); - - if (bypassed) - *bypassed = 0; - if (mp->m_flags & XFS_MOUNT_RDONLY) - return 0; - error = 0; - last_error = 0; - preempt = 0; - - /* Allocate a reference marker */ - ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); - - fflag = XFS_B_ASYNC; /* default is don't wait */ - if (flags & SYNC_DELWRI) - fflag = XFS_B_DELWRI; - if (flags & SYNC_WAIT) - fflag = 0; /* synchronous overrides all */ - - base_lock_flags = XFS_ILOCK_SHARED; - if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { - /* - * We need the I/O lock if we're going to call any of - * the flush/inval routines. - */ - base_lock_flags |= XFS_IOLOCK_SHARED; - } - - XFS_MOUNT_ILOCK(mp); - - ip = mp->m_inodes; - - mount_locked = B_TRUE; - vnode_refed = B_FALSE; - - IPOINTER_CLR; - - do { - ASSERT(ipointer_in == B_FALSE); - ASSERT(vnode_refed == B_FALSE); - - lock_flags = base_lock_flags; - - /* - * There were no inodes in the list, just break out - * of the loop. - */ - if (ip == NULL) { - break; - } - - /* - * We found another sync thread marker - skip it - */ - if (ip->i_mount == NULL) { - ip = ip->i_mnext; - continue; - } - - vp = VFS_I(ip); - - /* - * If the vnode is gone then this is being torn down, - * call reclaim if it is flushed, else let regular flush - * code deal with it later in the loop. 
- */ - - if (vp == NULL) { - /* Skip ones already in reclaim */ - if (ip->i_flags & XFS_IRECLAIM) { - ip = ip->i_mnext; - continue; - } - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { - ip = ip->i_mnext; - } else if ((xfs_ipincount(ip) == 0) && - xfs_iflock_nowait(ip)) { - IPOINTER_INSERT(ip, mp); - - xfs_finish_reclaim(ip, 1, - XFS_IFLUSH_DELWRI_ELSE_ASYNC); - - XFS_MOUNT_ILOCK(mp); - mount_locked = B_TRUE; - IPOINTER_REMOVE(ip, mp); - } else { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - ip = ip->i_mnext; - } - continue; - } - - if (VN_BAD(vp)) { - ip = ip->i_mnext; - continue; - } - - if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { - XFS_MOUNT_IUNLOCK(mp); - kmem_free(ipointer); - return 0; - } - - /* - * Try to lock without sleeping. We're out of order with - * the inode list lock here, so if we fail we need to drop - * the mount lock and try again. If we're called from - * bdflush() here, then don't bother. - * - * The inode lock here actually coordinates with the - * almost spurious inode lock in xfs_ireclaim() to prevent - * the vnode we handle here without a reference from - * being freed while we reference it. If we lock the inode - * while it's on the mount list here, then the spurious inode - * lock in xfs_ireclaim() after the inode is pulled from - * the mount list will sleep until we release it here. - * This keeps the vnode from being freed while we reference - * it. - */ - if (xfs_ilock_nowait(ip, lock_flags) == 0) { - if (vp == NULL) { - ip = ip->i_mnext; - continue; - } - - vp = vn_grab(vp); - if (vp == NULL) { - ip = ip->i_mnext; - continue; - } - - IPOINTER_INSERT(ip, mp); - xfs_ilock(ip, lock_flags); - - ASSERT(vp == VFS_I(ip)); - ASSERT(ip->i_mount == mp); - - vnode_refed = B_TRUE; - } - - /* From here on in the loop we may have a marker record - * in the inode list. - */ - - /* - * If we have to flush data or wait for I/O completion - * we need to drop the ilock that we currently hold. - * If we need to drop the lock, insert a marker if we - * have not already done so. - */ - if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) || - ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) { - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } - xfs_iunlock(ip, XFS_ILOCK_SHARED); - - if (flags & SYNC_CLOSE) { - /* Shutdown case. Flush and invalidate. */ - if (XFS_FORCED_SHUTDOWN(mp)) - xfs_tosspages(ip, 0, -1, - FI_REMAPF); - else - error = xfs_flushinval_pages(ip, - 0, -1, FI_REMAPF); - } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { - error = xfs_flush_pages(ip, 0, - -1, fflag, FI_NONE); - } - - /* - * When freezing, we need to wait ensure all I/O (including direct - * I/O) is complete to ensure no further data modification can take - * place after this point - */ - if (flags & SYNC_IOWAIT) - vn_iowait(ip); - - xfs_ilock(ip, XFS_ILOCK_SHARED); - } - - if ((flags & SYNC_ATTR) && - (ip->i_update_core || - (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) { - if (mount_locked) - IPOINTER_INSERT(ip, mp); - - if (flags & SYNC_WAIT) { - xfs_iflock(ip); - error = xfs_iflush(ip, XFS_IFLUSH_SYNC); - - /* - * If we can't acquire the flush lock, then the inode - * is already being flushed so don't bother waiting. - * - * If we can lock it then do a delwri flush so we can - * combine multiple inode flushes in each disk write. 
- */ - } else if (xfs_iflock_nowait(ip)) { - error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); - } else if (bypassed) { - (*bypassed)++; - } - } - - if (lock_flags != 0) { - xfs_iunlock(ip, lock_flags); - } - - if (vnode_refed) { - /* - * If we had to take a reference on the vnode - * above, then wait until after we've unlocked - * the inode to release the reference. This is - * because we can be already holding the inode - * lock when IRELE() calls xfs_inactive(). - * - * Make sure to drop the mount lock before calling - * IRELE() so that we don't trip over ourselves if - * we have to go for the mount lock again in the - * inactive code. - */ - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } - - IRELE(ip); - - vnode_refed = B_FALSE; - } - - if (error) { - last_error = error; - } - - /* - * bail out if the filesystem is corrupted. - */ - if (error == EFSCORRUPTED) { - if (!mount_locked) { - XFS_MOUNT_ILOCK(mp); - IPOINTER_REMOVE(ip, mp); - } - XFS_MOUNT_IUNLOCK(mp); - ASSERT(ipointer_in == B_FALSE); - kmem_free(ipointer); - return XFS_ERROR(error); - } - - /* Let other threads have a chance at the mount lock - * if we have looped many times without dropping the - * lock. - */ - if ((++preempt & XFS_PREEMPT_MASK) == 0) { - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } - } - - if (mount_locked == B_FALSE) { - XFS_MOUNT_ILOCK(mp); - mount_locked = B_TRUE; - IPOINTER_REMOVE(ip, mp); - continue; - } - - ASSERT(ipointer_in == B_FALSE); - ip = ip->i_mnext; - - } while (ip != mp->m_inodes); - - XFS_MOUNT_IUNLOCK(mp); - - ASSERT(ipointer_in == B_FALSE); - - kmem_free(ipointer); - return XFS_ERROR(last_error); -} - -/* - * xfs sync routine for internal use - * - * This routine supports all of the flags defined for the generic vfs_sync - * interface as explained above under xfs_sync. - * - */ -int -xfs_syncsub( - xfs_mount_t *mp, - int flags, - int *bypassed) -{ - int error = 0; - int last_error = 0; - uint log_flags = XFS_LOG_FORCE; - xfs_buf_t *bp; - xfs_buf_log_item_t *bip; - - /* - * Sync out the log. This ensures that the log is periodically - * flushed even if there is not enough activity to fill it up. - */ - if (flags & SYNC_WAIT) - log_flags |= XFS_LOG_SYNC; - - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - - if (flags & (SYNC_ATTR|SYNC_DELWRI)) { - if (flags & SYNC_BDFLUSH) - xfs_finish_reclaim_all(mp, 1); - else - error = xfs_sync_inodes(mp, flags, bypassed); - } - - /* - * Flushing out dirty data above probably generated more - * log activity, so if this isn't vfs_sync() then flush - * the log again. - */ - if (flags & SYNC_DELWRI) { - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - } - - if (flags & SYNC_FSDATA) { - /* - * If this is vfs_sync() then only sync the superblock - * if we can lock it without sleeping and it is not pinned. - */ - if (flags & SYNC_BDFLUSH) { - bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); - if (bp != NULL) { - bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); - if ((bip != NULL) && - xfs_buf_item_dirty(bip)) { - if (!(XFS_BUF_ISPINNED(bp))) { - XFS_BUF_ASYNC(bp); - error = xfs_bwrite(mp, bp); - } else { - xfs_buf_relse(bp); - } - } else { - xfs_buf_relse(bp); - } - } - } else { - bp = xfs_getsb(mp, 0); - /* - * If the buffer is pinned then push on the log so - * we won't get stuck waiting in the write for - * someone, maybe ourselves, to flush the log. - * Even though we just pushed the log above, we - * did not have the superblock buffer locked at - * that point so it can become pinned in between - * there and here. 
- */ - if (XFS_BUF_ISPINNED(bp)) - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); - if (flags & SYNC_WAIT) - XFS_BUF_UNASYNC(bp); - else - XFS_BUF_ASYNC(bp); - error = xfs_bwrite(mp, bp); - } - if (error) { - last_error = error; - } - } - - /* - * Now check to see if the log needs a "dummy" transaction. - */ - if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { - xfs_trans_t *tp; - xfs_inode_t *ip; - - /* - * Put a dummy transaction in the log to tell - * recovery that all others are OK. - */ - tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); - if ((error = xfs_trans_reserve(tp, 0, - XFS_ICHANGE_LOG_RES(mp), - 0, 0, 0))) { - xfs_trans_cancel(tp, 0); - return error; - } - - ip = mp->m_rootip; - xfs_ilock(ip, XFS_ILOCK_EXCL); - - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_ihold(tp, ip); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_trans_commit(tp, 0); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - } - - /* - * When shutting down, we need to insure that the AIL is pushed - * to disk or the filesystem can appear corrupt from the PROM. - */ - if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) { - XFS_bflush(mp->m_ddev_targp); - if (mp->m_rtdev_targp) { - XFS_bflush(mp->m_rtdev_targp); - } - } - - return XFS_ERROR(last_error); -} diff --git a/fs/xfs/xfs_vfsops.h b/fs/xfs/xfs_vfsops.h index a74b05087da..6701d0ed8ad 100644 --- a/fs/xfs/xfs_vfsops.h +++ b/fs/xfs/xfs_vfsops.h @@ -8,7 +8,6 @@ struct kstatfs; struct xfs_mount; struct xfs_mount_args; -int xfs_sync(struct xfs_mount *mp, int flags); void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, int lnnum); void xfs_attr_quiesce(struct xfs_mount *mp); -- cgit v1.2.3-70-g09d2 From a167b17e899a930758506bbc18748078d6fd8c89 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:06:18 +1100 Subject: [XFS] move xfssyncd code to xfs_sync.c Move all the xfssyncd code to the new xfs_sync.c file. This places it closer to the actual code that it interacts with, rather than just being associated with high level VFS code. SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32283a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 151 +-------------------------------------- fs/xfs/linux-2.6/xfs_super.h | 3 - fs/xfs/linux-2.6/xfs_sync.c | 163 +++++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_sync.h | 56 +++++++++++++++ fs/xfs/linux-2.6/xfs_vfs.h | 31 -------- fs/xfs/xfs_mount.h | 1 + 6 files changed, 223 insertions(+), 182 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 727d0e47e80..767f38e0e0b 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -979,146 +979,6 @@ xfs_fs_clear_inode( ASSERT(XFS_I(inode) == NULL); } -/* - * Enqueue a work item to be picked up by the vfs xfssyncd thread. - * Doing this has two advantages: - * - It saves on stack space, which is tight in certain situations - * - It can be used (with care) as a mechanism to avoid deadlocks. - * Flushing while allocating in a full filesystem requires both. 
- */ -STATIC void -xfs_syncd_queue_work( - struct xfs_mount *mp, - void *data, - void (*syncer)(struct xfs_mount *, void *)) -{ - struct bhv_vfs_sync_work *work; - - work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP); - INIT_LIST_HEAD(&work->w_list); - work->w_syncer = syncer; - work->w_data = data; - work->w_mount = mp; - spin_lock(&mp->m_sync_lock); - list_add_tail(&work->w_list, &mp->m_sync_list); - spin_unlock(&mp->m_sync_lock); - wake_up_process(mp->m_sync_task); -} - -/* - * Flush delayed allocate data, attempting to free up reserved space - * from existing allocations. At this point a new allocation attempt - * has failed with ENOSPC and we are in the process of scratching our - * heads, looking about for more room... - */ -STATIC void -xfs_flush_inode_work( - struct xfs_mount *mp, - void *arg) -{ - struct inode *inode = arg; - filemap_flush(inode->i_mapping); - iput(inode); -} - -void -xfs_flush_inode( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - igrab(inode); - xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); - delay(msecs_to_jiffies(500)); -} - -/* - * This is the "bigger hammer" version of xfs_flush_inode_work... - * (IOW, "If at first you don't succeed, use a Bigger Hammer"). - */ -STATIC void -xfs_flush_device_work( - struct xfs_mount *mp, - void *arg) -{ - struct inode *inode = arg; - sync_blockdev(mp->m_super->s_bdev); - iput(inode); -} - -void -xfs_flush_device( - xfs_inode_t *ip) -{ - struct inode *inode = VFS_I(ip); - - igrab(inode); - xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); - delay(msecs_to_jiffies(500)); - xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); -} - -STATIC void -xfs_sync_worker( - struct xfs_mount *mp, - void *unused) -{ - int error; - - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) - error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR); - mp->m_sync_seq++; - wake_up(&mp->m_wait_single_sync_task); -} - -STATIC int -xfssyncd( - void *arg) -{ - struct xfs_mount *mp = arg; - long timeleft; - bhv_vfs_sync_work_t *work, *n; - LIST_HEAD (tmp); - - set_freezable(); - timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); - for (;;) { - timeleft = schedule_timeout_interruptible(timeleft); - /* swsusp */ - try_to_freeze(); - if (kthread_should_stop() && list_empty(&mp->m_sync_list)) - break; - - spin_lock(&mp->m_sync_lock); - /* - * We can get woken by laptop mode, to do a sync - - * that's the (only!) case where the list would be - * empty with time remaining. 
- */ - if (!timeleft || list_empty(&mp->m_sync_list)) { - if (!timeleft) - timeleft = xfs_syncd_centisecs * - msecs_to_jiffies(10); - INIT_LIST_HEAD(&mp->m_sync_work.w_list); - list_add_tail(&mp->m_sync_work.w_list, - &mp->m_sync_list); - } - list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list) - list_move(&work->w_list, &tmp); - spin_unlock(&mp->m_sync_lock); - - list_for_each_entry_safe(work, n, &tmp, w_list) { - (*work->w_syncer)(mp, work->w_data); - list_del(&work->w_list); - if (work == &mp->m_sync_work) - continue; - kmem_free(work); - } - } - - return 0; -} - STATIC void xfs_free_fsname( struct xfs_mount *mp) @@ -1137,8 +997,7 @@ xfs_fs_put_super( int unmount_event_flags = 0; int error; - kthread_stop(mp->m_sync_task); - + xfs_syncd_stop(mp); xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI); #ifdef HAVE_DMAPI @@ -1808,13 +1667,9 @@ xfs_fs_fill_super( goto fail_vnrele; } - mp->m_sync_work.w_syncer = xfs_sync_worker; - mp->m_sync_work.w_mount = mp; - mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd"); - if (IS_ERR(mp->m_sync_task)) { - error = -PTR_ERR(mp->m_sync_task); + error = xfs_syncd_init(mp); + if (error) goto fail_vnrele; - } xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index fe2ef4e6a0f..56dc48a76fa 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -101,9 +101,6 @@ struct block_device; extern __uint64_t xfs_max_file_offset(unsigned int); -extern void xfs_flush_inode(struct xfs_inode *); -extern void xfs_flush_device(struct xfs_inode *); - extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); extern const struct export_operations xfs_export_operations; diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index c765eb2a8dc..a51534c71b3 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -44,6 +44,9 @@ #include "xfs_inode_item.h" #include "xfs_rw.h" +#include +#include + /* * xfs_sync flushes any pending I/O to file system vfsp. * @@ -603,3 +606,163 @@ xfs_syncsub( return XFS_ERROR(last_error); } + +/* + * Enqueue a work item to be picked up by the vfs xfssyncd thread. + * Doing this has two advantages: + * - It saves on stack space, which is tight in certain situations + * - It can be used (with care) as a mechanism to avoid deadlocks. + * Flushing while allocating in a full filesystem requires both. + */ +STATIC void +xfs_syncd_queue_work( + struct xfs_mount *mp, + void *data, + void (*syncer)(struct xfs_mount *, void *)) +{ + struct bhv_vfs_sync_work *work; + + work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP); + INIT_LIST_HEAD(&work->w_list); + work->w_syncer = syncer; + work->w_data = data; + work->w_mount = mp; + spin_lock(&mp->m_sync_lock); + list_add_tail(&work->w_list, &mp->m_sync_list); + spin_unlock(&mp->m_sync_lock); + wake_up_process(mp->m_sync_task); +} + +/* + * Flush delayed allocate data, attempting to free up reserved space + * from existing allocations. At this point a new allocation attempt + * has failed with ENOSPC and we are in the process of scratching our + * heads, looking about for more room... 
+ */ +STATIC void +xfs_flush_inode_work( + struct xfs_mount *mp, + void *arg) +{ + struct inode *inode = arg; + filemap_flush(inode->i_mapping); + iput(inode); +} + +void +xfs_flush_inode( + xfs_inode_t *ip) +{ + struct inode *inode = VFS_I(ip); + + igrab(inode); + xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work); + delay(msecs_to_jiffies(500)); +} + +/* + * This is the "bigger hammer" version of xfs_flush_inode_work... + * (IOW, "If at first you don't succeed, use a Bigger Hammer"). + */ +STATIC void +xfs_flush_device_work( + struct xfs_mount *mp, + void *arg) +{ + struct inode *inode = arg; + sync_blockdev(mp->m_super->s_bdev); + iput(inode); +} + +void +xfs_flush_device( + xfs_inode_t *ip) +{ + struct inode *inode = VFS_I(ip); + + igrab(inode); + xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work); + delay(msecs_to_jiffies(500)); + xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); +} + +STATIC void +xfs_sync_worker( + struct xfs_mount *mp, + void *unused) +{ + int error; + + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) + error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR); + mp->m_sync_seq++; + wake_up(&mp->m_wait_single_sync_task); +} + +STATIC int +xfssyncd( + void *arg) +{ + struct xfs_mount *mp = arg; + long timeleft; + bhv_vfs_sync_work_t *work, *n; + LIST_HEAD (tmp); + + set_freezable(); + timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10); + for (;;) { + timeleft = schedule_timeout_interruptible(timeleft); + /* swsusp */ + try_to_freeze(); + if (kthread_should_stop() && list_empty(&mp->m_sync_list)) + break; + + spin_lock(&mp->m_sync_lock); + /* + * We can get woken by laptop mode, to do a sync - + * that's the (only!) case where the list would be + * empty with time remaining. + */ + if (!timeleft || list_empty(&mp->m_sync_list)) { + if (!timeleft) + timeleft = xfs_syncd_centisecs * + msecs_to_jiffies(10); + INIT_LIST_HEAD(&mp->m_sync_work.w_list); + list_add_tail(&mp->m_sync_work.w_list, + &mp->m_sync_list); + } + list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list) + list_move(&work->w_list, &tmp); + spin_unlock(&mp->m_sync_lock); + + list_for_each_entry_safe(work, n, &tmp, w_list) { + (*work->w_syncer)(mp, work->w_data); + list_del(&work->w_list); + if (work == &mp->m_sync_work) + continue; + kmem_free(work); + } + } + + return 0; +} + +int +xfs_syncd_init( + struct xfs_mount *mp) +{ + mp->m_sync_work.w_syncer = xfs_sync_worker; + mp->m_sync_work.w_mount = mp; + mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd"); + if (IS_ERR(mp->m_sync_task)) + return -PTR_ERR(mp->m_sync_task); + return 0; +} + +void +xfs_syncd_stop( + struct xfs_mount *mp) +{ + kthread_stop(mp->m_sync_task); +} + diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index f4c3b1ea64c..3746d153ec8 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -1,7 +1,63 @@ +/* + * Copyright (c) 2000-2006 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ #ifndef XFS_SYNC_H #define XFS_SYNC_H 1 +struct xfs_mount; + +typedef struct bhv_vfs_sync_work { + struct list_head w_list; + struct xfs_mount *w_mount; + void *w_data; /* syncer routine argument */ + void (*w_syncer)(struct xfs_mount *, void *); +} bhv_vfs_sync_work_t; + +#define SYNC_ATTR 0x0001 /* sync attributes */ +#define SYNC_CLOSE 0x0002 /* close file system down */ +#define SYNC_DELWRI 0x0004 /* look at delayed writes */ +#define SYNC_WAIT 0x0008 /* wait for i/o to complete */ +#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */ +#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ +#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ +#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ +#define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */ + +/* + * When remounting a filesystem read-only or freezing the filesystem, + * we have two phases to execute. This first phase is syncing the data + * before we quiesce the fielsystem, and the second is flushing all the + * inodes out after we've waited for all the transactions created by + * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE + * to ensure that the inodes are written to their location on disk + * rather than just existing in transactions in the log. This means + * after a quiesce there is no log replay required to write the inodes + * to disk (this is the main difference between a sync and a quiesce). + */ +#define SYNC_DATA_QUIESCE (SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT) +#define SYNC_INODE_QUIESCE (SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT) + +int xfs_syncd_init(struct xfs_mount *mp); +void xfs_syncd_stop(struct xfs_mount *mp); + int xfs_sync(struct xfs_mount *mp, int flags); int xfs_syncsub(struct xfs_mount *mp, int flags, int *bypassed); +void xfs_flush_inode(struct xfs_inode *ip); +void xfs_flush_device(struct xfs_inode *ip); + #endif diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 7e60c7776b1..0ab60bc2e76 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h @@ -33,37 +33,6 @@ struct xfs_mount_args; typedef struct kstatfs bhv_statvfs_t; -typedef struct bhv_vfs_sync_work { - struct list_head w_list; - struct xfs_mount *w_mount; - void *w_data; /* syncer routine argument */ - void (*w_syncer)(struct xfs_mount *, void *); -} bhv_vfs_sync_work_t; - -#define SYNC_ATTR 0x0001 /* sync attributes */ -#define SYNC_CLOSE 0x0002 /* close file system down */ -#define SYNC_DELWRI 0x0004 /* look at delayed writes */ -#define SYNC_WAIT 0x0008 /* wait for i/o to complete */ -#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */ -#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ -#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ -#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ -#define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */ - -/* - * When remounting a filesystem read-only or freezing the filesystem, - * we have two phases to execute. This first phase is syncing the data - * before we quiesce the fielsystem, and the second is flushing all the - * inodes out after we've waited for all the transactions created by - * the first phase to complete. 
The second phase uses SYNC_INODE_QUIESCE - * to ensure that the inodes are written to their location on disk - * rather than just existing in transactions in the log. This means - * after a quiesce there is no log replay required to write the inodes - * to disk (this is the main difference between a sync and a quiesce). - */ -#define SYNC_DATA_QUIESCE (SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT) -#define SYNC_INODE_QUIESCE (SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT) - #define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */ #define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ #define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 56c3b122e53..c6846810936 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -18,6 +18,7 @@ #ifndef __XFS_MOUNT_H__ #define __XFS_MOUNT_H__ +#include "xfs_sync.h" typedef struct xfs_trans_reservations { uint tr_write; /* extent alloc trans */ -- cgit v1.2.3-70-g09d2 From 75c68f411b1242c8fdaf731078fdd4e77b14981d Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:06:28 +1100 Subject: [XFS] Remove xfs_iflush_all and clean up xfs_finish_reclaim_all() xfs_iflush_all() walks the m_inodes list to find inodes that need reclaiming. We already have such a list - the m_del_inodes list. Replace xfs_iflush_all() with a call to xfs_finish_reclaim_all() and clean up xfs_finish_reclaim_all() to handle the different flush modes now needed. Originally based on a patch from Christoph Hellwig. Version 3 o rediff against new linux-2.6/xfs_sync.c code Version 2 o revert xfs_syncsub() inode reclaim behaviour back to original code o xfs_quiesce_fs() should use XFS_IFLUSH_DELWRI_ELSE_ASYNC, not XFS_IFLUSH_ASYNC, to prevent change of behaviour. SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32284a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 2 +- fs/xfs/xfs_inode.c | 35 ----------------------------------- fs/xfs/xfs_inode.h | 3 +-- fs/xfs/xfs_mount.c | 2 +- fs/xfs/xfs_vfsops.c | 2 +- fs/xfs/xfs_vnodeops.c | 42 ++++++++++++++++++------------------------ 6 files changed, 22 insertions(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index a51534c71b3..cd82ba523dc 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -504,7 +504,7 @@ xfs_syncsub( if (flags & (SYNC_ATTR|SYNC_DELWRI)) { if (flags & SYNC_BDFLUSH) - xfs_finish_reclaim_all(mp, 1); + xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); else error = xfs_sync_inodes(mp, flags, bypassed); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 2b1294b8ad7..0c65ba2faa4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3499,41 +3499,6 @@ corrupt_out: } -/* - * Flush all inactive inodes in mp. 
- */ -void -xfs_iflush_all( - xfs_mount_t *mp) -{ - xfs_inode_t *ip; - - again: - XFS_MOUNT_ILOCK(mp); - ip = mp->m_inodes; - if (ip == NULL) - goto out; - - do { - /* Make sure we skip markers inserted by sync */ - if (ip->i_mount == NULL) { - ip = ip->i_mnext; - continue; - } - - if (!VFS_I(ip)) { - XFS_MOUNT_IUNLOCK(mp); - xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC); - goto again; - } - - ASSERT(vn_count(VFS_I(ip)) == 0); - - ip = ip->i_mnext; - } while (ip != mp->m_inodes); - out: - XFS_MOUNT_IUNLOCK(mp); -} #ifdef XFS_ILOCK_TRACE ktrace_t *xfs_ilock_trace_buf; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index a8f1e6833aa..104623b7ec6 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -503,7 +503,7 @@ uint xfs_ilock_map_shared(xfs_inode_t *); void xfs_iunlock_map_shared(xfs_inode_t *, uint); void xfs_ireclaim(xfs_inode_t *); int xfs_finish_reclaim(xfs_inode_t *, int, int); -int xfs_finish_reclaim_all(struct xfs_mount *, int); +int xfs_finish_reclaim_all(struct xfs_mount *, int, int); /* * xfs_inode.c prototypes. @@ -530,7 +530,6 @@ void xfs_iext_realloc(xfs_inode_t *, int, int); void xfs_ipin(xfs_inode_t *); void xfs_iunpin(xfs_inode_t *); int xfs_iflush(xfs_inode_t *, uint); -void xfs_iflush_all(struct xfs_mount *); void xfs_ichgtime(xfs_inode_t *, int); xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); void xfs_lock_inodes(xfs_inode_t **, int, uint); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 15f5dd22fbb..5ec6032d230 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1241,7 +1241,7 @@ xfs_unmountfs( * need to force the log first. */ xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); - xfs_iflush_all(mp); + xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_ASYNC); XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 01e274b902c..0c5ee5ec7ee 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -66,7 +66,7 @@ xfs_quiesce_fs( int count = 0, pincount; xfs_flush_buftarg(mp->m_ddev_targp, 0); - xfs_finish_reclaim_all(mp, 0); + xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); /* This loop must run at least twice. 
* The first instance of the loop will flush diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 8b6812f66a1..a6714579a41 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2918,36 +2918,30 @@ xfs_finish_reclaim( } int -xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock) +xfs_finish_reclaim_all( + xfs_mount_t *mp, + int noblock, + int mode) { - int purged; xfs_inode_t *ip, *n; - int done = 0; - while (!done) { - purged = 0; - XFS_MOUNT_ILOCK(mp); - list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { - if (noblock) { - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) - continue; - if (xfs_ipincount(ip) || - !xfs_iflock_nowait(ip)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - continue; - } +restart: + XFS_MOUNT_ILOCK(mp); + list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { + if (noblock) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) + continue; + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + continue; } - XFS_MOUNT_IUNLOCK(mp); - if (xfs_finish_reclaim(ip, noblock, - XFS_IFLUSH_DELWRI_ELSE_ASYNC)) - delay(1); - purged = 1; - break; } - - done = !purged; + XFS_MOUNT_IUNLOCK(mp); + if (xfs_finish_reclaim(ip, noblock, mode)) + delay(1); + goto restart; } - XFS_MOUNT_IUNLOCK(mp); return 0; } -- cgit v1.2.3-70-g09d2 From 2f8a3ce1c20f20e6494cdb77fed76bc474ca3ca5 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:07:20 +1100 Subject: [XFS] don't block in xfs_qm_dqflush() during async writeback. Normally dquots are written back via delayed write mechanisms. They are flushed to their backing buffer by xfssyncd, which is then pushed out by either AIL or xfsbufd flushing. The flush from the xfssyncd is supposed to be non-blocking, but xfs_qm_dqflush() always waits for pinned duots, which means that it will block for the length of time it takes to do a synchronous log force. This causes unnecessary extra log I/O to be issued whenever we try to flush a busy dquot. Avoid the log forces and blocking xfssyncd by making xfs_qm_dqflush() pay attention to what type of sync it is doing when it sees a pinned dquot and not waiting when doing non-blocking flushes. SGI-PV: 988147 SGI-Modid: xfs-linux-melb:xfs-kern:32287a Signed-off-by: David Chinner Signed-off-by: Peter Leckie Signed-off-by: Lachlan McIlroy --- fs/xfs/quota/xfs_dquot.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index d3f4fbbe248..1e6bf392564 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1221,16 +1221,14 @@ xfs_qm_dqflush( xfs_dqtrace_entry(dqp, "DQFLUSH"); /* - * If not dirty, nada. + * If not dirty, or it's pinned and we are not supposed to + * block, nada. */ - if (!XFS_DQ_IS_DIRTY(dqp)) { + if (!XFS_DQ_IS_DIRTY(dqp) || + (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) { xfs_dqfunlock(dqp); - return (0); + return 0; } - - /* - * Cant flush a pinned dquot. Wait for it. - */ xfs_qm_dqunpin_wait(dqp); /* -- cgit v1.2.3-70-g09d2 From 683a897080a053733778b36398186cb1b22c377f Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:07:29 +1100 Subject: [XFS] Use the inode tree for finding dirty inodes Update xfs_sync_inodes to walk the inode radix tree cache to find dirty inodes. This removes a huge bunch of nasty, messy code for traversing the mount inode list safely and removes another user of the mount inode list. 
Version 3 o rediff against new linux-2.6/xfs_sync.c code Version 2 o add comment explaining use of gang lookups for a single inode o use IRELE, not VN_RELE o move check for ag initialisation to caller. SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32290a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 361 +++++++++++++------------------------------- 1 file changed, 101 insertions(+), 260 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index cd82ba523dc..53d85ecb1d5 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -121,356 +121,197 @@ xfs_sync( } /* - * xfs sync routine for internal use - * - * This routine supports all of the flags defined for the generic vfs_sync - * interface as explained above under xfs_sync. - * + * Sync all the inodes in the given AG according to the + * direction given by the flags. */ -int -xfs_sync_inodes( +STATIC int +xfs_sync_inodes_ag( xfs_mount_t *mp, + int ag, int flags, - int *bypassed) + int *bypassed) { xfs_inode_t *ip = NULL; struct inode *vp = NULL; - int error; - int last_error; - uint64_t fflag; - uint lock_flags; - uint base_lock_flags; - boolean_t mount_locked; - boolean_t vnode_refed; - int preempt; - xfs_iptr_t *ipointer; -#ifdef DEBUG - boolean_t ipointer_in = B_FALSE; - -#define IPOINTER_SET ipointer_in = B_TRUE -#define IPOINTER_CLR ipointer_in = B_FALSE -#else -#define IPOINTER_SET -#define IPOINTER_CLR -#endif - - -/* Insert a marker record into the inode list after inode ip. The list - * must be locked when this is called. After the call the list will no - * longer be locked. - */ -#define IPOINTER_INSERT(ip, mp) { \ - ASSERT(ipointer_in == B_FALSE); \ - ipointer->ip_mnext = ip->i_mnext; \ - ipointer->ip_mprev = ip; \ - ip->i_mnext = (xfs_inode_t *)ipointer; \ - ipointer->ip_mnext->i_mprev = (xfs_inode_t *)ipointer; \ - preempt = 0; \ - XFS_MOUNT_IUNLOCK(mp); \ - mount_locked = B_FALSE; \ - IPOINTER_SET; \ - } - -/* Remove the marker from the inode list. If the marker was the only item - * in the list then there are no remaining inodes and we should zero out - * the whole list. If we are the current head of the list then move the head - * past us. 
- */ -#define IPOINTER_REMOVE(ip, mp) { \ - ASSERT(ipointer_in == B_TRUE); \ - if (ipointer->ip_mnext != (xfs_inode_t *)ipointer) { \ - ip = ipointer->ip_mnext; \ - ip->i_mprev = ipointer->ip_mprev; \ - ipointer->ip_mprev->i_mnext = ip; \ - if (mp->m_inodes == (xfs_inode_t *)ipointer) { \ - mp->m_inodes = ip; \ - } \ - } else { \ - ASSERT(mp->m_inodes == (xfs_inode_t *)ipointer); \ - mp->m_inodes = NULL; \ - ip = NULL; \ - } \ - IPOINTER_CLR; \ - } - -#define XFS_PREEMPT_MASK 0x7f - - ASSERT(!(flags & SYNC_BDFLUSH)); - - if (bypassed) - *bypassed = 0; - if (mp->m_flags & XFS_MOUNT_RDONLY) - return 0; - error = 0; - last_error = 0; - preempt = 0; - - /* Allocate a reference marker */ - ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); + xfs_perag_t *pag = &mp->m_perag[ag]; + boolean_t vnode_refed = B_FALSE; + int nr_found; + int first_index = 0; + int error = 0; + int last_error = 0; + int fflag = XFS_B_ASYNC; + int lock_flags = XFS_ILOCK_SHARED; - fflag = XFS_B_ASYNC; /* default is don't wait */ if (flags & SYNC_DELWRI) fflag = XFS_B_DELWRI; if (flags & SYNC_WAIT) fflag = 0; /* synchronous overrides all */ - base_lock_flags = XFS_ILOCK_SHARED; if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { /* * We need the I/O lock if we're going to call any of * the flush/inval routines. */ - base_lock_flags |= XFS_IOLOCK_SHARED; + lock_flags |= XFS_IOLOCK_SHARED; } - XFS_MOUNT_ILOCK(mp); - - ip = mp->m_inodes; - - mount_locked = B_TRUE; - vnode_refed = B_FALSE; - - IPOINTER_CLR; - do { - ASSERT(ipointer_in == B_FALSE); - ASSERT(vnode_refed == B_FALSE); - - lock_flags = base_lock_flags; - /* - * There were no inodes in the list, just break out - * of the loop. + * use a gang lookup to find the next inode in the tree + * as the tree is sparse and a gang lookup walks to find + * the number of objects requested. */ - if (ip == NULL) { - break; - } + read_lock(&pag->pag_ici_lock); + nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, + (void**)&ip, first_index, 1); - /* - * We found another sync thread marker - skip it - */ - if (ip->i_mount == NULL) { - ip = ip->i_mnext; - continue; + if (!nr_found) { + read_unlock(&pag->pag_ici_lock); + break; } - vp = VFS_I(ip); + /* update the index for the next lookup */ + first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); /* - * If the vnode is gone then this is being torn down, - * call reclaim if it is flushed, else let regular flush - * code deal with it later in the loop. + * skip inodes in reclaim. Let xfs_syncsub do that for + * us so we don't need to worry. */ - - if (vp == NULL) { - /* Skip ones already in reclaim */ - if (ip->i_flags & XFS_IRECLAIM) { - ip = ip->i_mnext; - continue; - } - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { - ip = ip->i_mnext; - } else if ((xfs_ipincount(ip) == 0) && - xfs_iflock_nowait(ip)) { - IPOINTER_INSERT(ip, mp); - - xfs_finish_reclaim(ip, 1, - XFS_IFLUSH_DELWRI_ELSE_ASYNC); - - XFS_MOUNT_ILOCK(mp); - mount_locked = B_TRUE; - IPOINTER_REMOVE(ip, mp); - } else { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - ip = ip->i_mnext; - } + vp = VFS_I(ip); + if (!vp) { + read_unlock(&pag->pag_ici_lock); continue; } + /* bad inodes are dealt with elsewhere */ if (VN_BAD(vp)) { - ip = ip->i_mnext; + read_unlock(&pag->pag_ici_lock); continue; } + /* nothing to sync during shutdown */ if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { - XFS_MOUNT_IUNLOCK(mp); - kmem_free(ipointer); + read_unlock(&pag->pag_ici_lock); return 0; } /* - * Try to lock without sleeping. 
We're out of order with - * the inode list lock here, so if we fail we need to drop - * the mount lock and try again. If we're called from - * bdflush() here, then don't bother. - * - * The inode lock here actually coordinates with the - * almost spurious inode lock in xfs_ireclaim() to prevent - * the vnode we handle here without a reference from - * being freed while we reference it. If we lock the inode - * while it's on the mount list here, then the spurious inode - * lock in xfs_ireclaim() after the inode is pulled from - * the mount list will sleep until we release it here. - * This keeps the vnode from being freed while we reference - * it. + * The inode lock here actually coordinates with the almost + * spurious inode lock in xfs_ireclaim() to prevent the vnode + * we handle here without a reference from being freed while we + * reference it. If we lock the inode while it's on the mount + * list here, then the spurious inode lock in xfs_ireclaim() + * after the inode is pulled from the mount list will sleep + * until we release it here. This keeps the vnode from being + * freed while we reference it. */ if (xfs_ilock_nowait(ip, lock_flags) == 0) { - if (vp == NULL) { - ip = ip->i_mnext; - continue; - } - vp = vn_grab(vp); - if (vp == NULL) { - ip = ip->i_mnext; + read_unlock(&pag->pag_ici_lock); + if (!vp) continue; - } - - IPOINTER_INSERT(ip, mp); xfs_ilock(ip, lock_flags); ASSERT(vp == VFS_I(ip)); ASSERT(ip->i_mount == mp); vnode_refed = B_TRUE; + } else { + /* safe to unlock here as we have a reference */ + read_unlock(&pag->pag_ici_lock); } - - /* From here on in the loop we may have a marker record - * in the inode list. - */ - /* * If we have to flush data or wait for I/O completion * we need to drop the ilock that we currently hold. * If we need to drop the lock, insert a marker if we * have not already done so. */ - if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) || - ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) { - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } + if (flags & SYNC_CLOSE) { xfs_iunlock(ip, XFS_ILOCK_SHARED); - - if (flags & SYNC_CLOSE) { - /* Shutdown case. Flush and invalidate. */ - if (XFS_FORCED_SHUTDOWN(mp)) - xfs_tosspages(ip, 0, -1, - FI_REMAPF); - else - error = xfs_flushinval_pages(ip, - 0, -1, FI_REMAPF); - } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { - error = xfs_flush_pages(ip, 0, - -1, fflag, FI_NONE); - } - - /* - * When freezing, we need to wait ensure all I/O (including direct - * I/O) is complete to ensure no further data modification can take - * place after this point - */ + if (XFS_FORCED_SHUTDOWN(mp)) + xfs_tosspages(ip, 0, -1, FI_REMAPF); + else + error = xfs_flushinval_pages(ip, 0, -1, + FI_REMAPF); + /* wait for I/O on freeze */ if (flags & SYNC_IOWAIT) vn_iowait(ip); xfs_ilock(ip, XFS_ILOCK_SHARED); } - if ((flags & SYNC_ATTR) && - (ip->i_update_core || - (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) { - if (mount_locked) - IPOINTER_INSERT(ip, mp); + if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); + if (flags & SYNC_IOWAIT) + vn_iowait(ip); + xfs_ilock(ip, XFS_ILOCK_SHARED); + } + if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) { if (flags & SYNC_WAIT) { xfs_iflock(ip); - error = xfs_iflush(ip, XFS_IFLUSH_SYNC); - - /* - * If we can't acquire the flush lock, then the inode - * is already being flushed so don't bother waiting. - * - * If we can lock it then do a delwri flush so we can - * combine multiple inode flushes in each disk write. 
- */ + if (!xfs_inode_clean(ip)) + error = xfs_iflush(ip, XFS_IFLUSH_SYNC); + else + xfs_ifunlock(ip); } else if (xfs_iflock_nowait(ip)) { - error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); + if (!xfs_inode_clean(ip)) + error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); + else + xfs_ifunlock(ip); } else if (bypassed) { (*bypassed)++; } } - if (lock_flags != 0) { + if (lock_flags) xfs_iunlock(ip, lock_flags); - } if (vnode_refed) { - /* - * If we had to take a reference on the vnode - * above, then wait until after we've unlocked - * the inode to release the reference. This is - * because we can be already holding the inode - * lock when IRELE() calls xfs_inactive(). - * - * Make sure to drop the mount lock before calling - * IRELE() so that we don't trip over ourselves if - * we have to go for the mount lock again in the - * inactive code. - */ - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } - IRELE(ip); - vnode_refed = B_FALSE; } - if (error) { + if (error) last_error = error; - } - /* * bail out if the filesystem is corrupted. */ - if (error == EFSCORRUPTED) { - if (!mount_locked) { - XFS_MOUNT_ILOCK(mp); - IPOINTER_REMOVE(ip, mp); - } - XFS_MOUNT_IUNLOCK(mp); - ASSERT(ipointer_in == B_FALSE); - kmem_free(ipointer); + if (error == EFSCORRUPTED) return XFS_ERROR(error); - } - - /* Let other threads have a chance at the mount lock - * if we have looped many times without dropping the - * lock. - */ - if ((++preempt & XFS_PREEMPT_MASK) == 0) { - if (mount_locked) { - IPOINTER_INSERT(ip, mp); - } - } - - if (mount_locked == B_FALSE) { - XFS_MOUNT_ILOCK(mp); - mount_locked = B_TRUE; - IPOINTER_REMOVE(ip, mp); - continue; - } - ASSERT(ipointer_in == B_FALSE); - ip = ip->i_mnext; + } while (nr_found); - } while (ip != mp->m_inodes); + return last_error; +} - XFS_MOUNT_IUNLOCK(mp); +int +xfs_sync_inodes( + xfs_mount_t *mp, + int flags, + int *bypassed) +{ + int error; + int last_error; + int i; - ASSERT(ipointer_in == B_FALSE); + if (bypassed) + *bypassed = 0; + if (mp->m_flags & XFS_MOUNT_RDONLY) + return 0; + error = 0; + last_error = 0; - kmem_free(ipointer); + for (i = 0; i < mp->m_sb.sb_agcount; i++) { + if (!mp->m_perag[i].pag_ici_init) + continue; + error = xfs_sync_inodes_ag(mp, i, flags, bypassed); + if (error) + last_error = error; + if (error == EFSCORRUPTED) + break; + } return XFS_ERROR(last_error); } -- cgit v1.2.3-70-g09d2 From 5b4d89ae0f5ae45c7fa1dfc616fd2bb8634bb7b7 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:08:03 +1100 Subject: [XFS] Traverse inode trees when releasing dquots Make releasing all inode dquots traverse the per-ag inode radix trees rather than the mount inode list. This removes another user of the mount inode list. Version 3 o fix comment relating to avoiding trying to release the quota inodes and those in reclaim. Version 2 o add comment explaining use of gang lookups for a single inode o use IRELE, not VN_RELE o move check for ag initialisation to caller. 
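Both this patch and the preceding xfs_sync_inodes_ag() rewrite walk an allocation group's inode cache the same way: a radix tree gang lookup that asks for a single object, followed by bumping the start index past the inode just found. What follows is a minimal, illustrative sketch of that iteration pattern only, assuming the stock Linux radix tree API; the names demo_item and demo_walk are invented here, and the real XFS code additionally takes inode references, respects lock ordering and skips inodes in reclaim.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

struct demo_item {
	unsigned long	index;	/* key the object was inserted under */
	/* ... payload ... */
};

/*
 * Visit every populated slot in a sparse radix tree, one object per
 * lookup.  The gang lookup walks forward from first_index to the next
 * populated entry, so advancing first_index past each hit is enough to
 * make progress even when the keys have large gaps between them.
 */
static void demo_walk(struct radix_tree_root *root, rwlock_t *lock)
{
	struct demo_item	*item;
	unsigned long		first_index = 0;
	unsigned int		nr_found;

	do {
		read_lock(lock);
		nr_found = radix_tree_gang_lookup(root, (void **)&item,
						  first_index, 1);
		if (!nr_found) {
			read_unlock(lock);
			break;
		}
		/* resume just past this object on the next pass */
		first_index = item->index + 1;
		read_unlock(lock);

		/* ... operate on "item", as the per-AG walkers above do ... */
	} while (nr_found);
}

Asking for one object per lookup keeps the read-lock hold time short and lets the caller drop the lock around anything that may block, which is exactly why the per-AG walkers retake pag_ici_lock on every pass.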
SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32291a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/quota/xfs_qm_syscalls.c | 127 +++++++++++++++++++---------------------- 1 file changed, 59 insertions(+), 68 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 1a3b803dfa5..26152b9ccc6 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -1022,101 +1022,92 @@ xfs_qm_export_flags( /* - * Go thru all the inodes in the file system, releasing their dquots. - * Note that the mount structure gets modified to indicate that quotas are off - * AFTER this, in the case of quotaoff. This also gets called from - * xfs_rootumount. + * Release all the dquots on the inodes in an AG. */ -void -xfs_qm_dqrele_all_inodes( - struct xfs_mount *mp, - uint flags) +STATIC void +xfs_qm_dqrele_inodes_ag( + xfs_mount_t *mp, + int ag, + uint flags) { - xfs_inode_t *ip, *topino; - uint ireclaims; - struct inode *vp; - boolean_t vnode_refd; + xfs_inode_t *ip = NULL; + struct inode *vp = NULL; + xfs_perag_t *pag = &mp->m_perag[ag]; + int first_index = 0; + int nr_found; - ASSERT(mp->m_quotainfo); - - XFS_MOUNT_ILOCK(mp); -again: - ip = mp->m_inodes; - if (ip == NULL) { - XFS_MOUNT_IUNLOCK(mp); - return; - } do { - /* Skip markers inserted by xfs_sync */ - if (ip->i_mount == NULL) { - ip = ip->i_mnext; - continue; - } - /* Root inode, rbmip and rsumip have associated blocks */ - if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { - ASSERT(ip->i_udquot == NULL); - ASSERT(ip->i_gdquot == NULL); - ip = ip->i_mnext; - continue; + boolean_t vnode_refd = B_FALSE; + + /* + * use a gang lookup to find the next inode in the tree + * as the tree is sparse and a gang lookup walks to find + * the number of objects requested. + */ + read_lock(&pag->pag_ici_lock); + nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, + (void**)&ip, first_index, 1); + + if (!nr_found) { + read_unlock(&pag->pag_ici_lock); + break; } + + /* update the index for the next lookup */ + first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); + + /* skip quota inodes and those in reclaim */ vp = VFS_I(ip); - if (!vp) { + if (!vp || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); - ip = ip->i_mnext; + read_unlock(&pag->pag_ici_lock); continue; } - vnode_refd = B_FALSE; if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { - ireclaims = mp->m_ireclaims; - topino = mp->m_inodes; vp = vn_grab(vp); + read_unlock(&pag->pag_ici_lock); if (!vp) - goto again; - - XFS_MOUNT_IUNLOCK(mp); - /* XXX restart limit ? */ - xfs_ilock(ip, XFS_ILOCK_EXCL); + continue; vnode_refd = B_TRUE; + xfs_ilock(ip, XFS_ILOCK_EXCL); } else { - ireclaims = mp->m_ireclaims; - topino = mp->m_inodes; - XFS_MOUNT_IUNLOCK(mp); + read_unlock(&pag->pag_ici_lock); } - - /* - * We don't keep the mountlock across the dqrele() call, - * since it can take a while.. - */ if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } - if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { + if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && + ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* - * Wait until we've dropped the ilock and mountlock to - * do the vn_rele. Or be condemned to an eternity in the - * inactive code in hell. 
- */ if (vnode_refd) IRELE(ip); - XFS_MOUNT_ILOCK(mp); - /* - * If an inode was inserted or removed, we gotta - * start over again. - */ - if (topino != mp->m_inodes || mp->m_ireclaims != ireclaims) { - /* XXX use a sentinel */ - goto again; - } - ip = ip->i_mnext; - } while (ip != mp->m_inodes); + } while (nr_found); +} - XFS_MOUNT_IUNLOCK(mp); +/* + * Go thru all the inodes in the file system, releasing their dquots. + * Note that the mount structure gets modified to indicate that quotas are off + * AFTER this, in the case of quotaoff. This also gets called from + * xfs_rootumount. + */ +void +xfs_qm_dqrele_all_inodes( + struct xfs_mount *mp, + uint flags) +{ + int i; + + ASSERT(mp->m_quotainfo); + for (i = 0; i < mp->m_sb.sb_agcount; i++) { + if (!mp->m_perag[i].pag_ici_init) + continue; + xfs_qm_dqrele_inodes_ag(mp, i, flags); + } } /*------------------------------------------------------------------------*/ -- cgit v1.2.3-70-g09d2 From 60197e8df364df326dcbb987519f367ad0ee1a11 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:11:19 +1100 Subject: [XFS] Cleanup maxrecs calculation. Clean up the way the maximum and minimum records for the btree blocks are calculated. For the alloc and inobt btrees all the values are pre-calculated in xfs_mount_common, and we switch the current loop around the ugly generic macros that use cpp token pasting to generate type names to two small helpers in normal C code. For the bmbt and bmdr trees these helpers also exist, but can be called during runtime, too. Here we also kill various macros dealing with them and inline the logic into the get_minrecs / get_maxrecs / get_dmaxrecs methods in xfs_bmap_btree.c. Note that all these new helpers take an xfs_mount * argument which will be needed to determine the size of a btree block once we add support for extended btree blocks with CRCs and other RAS information. SGI-PV: 988146 SGI-Modid: xfs-linux-melb:xfs-kern:32292a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_alloc_btree.c | 16 ++++++++++ fs/xfs/xfs_alloc_btree.h | 7 +---- fs/xfs/xfs_bmap.c | 18 ++++++------ fs/xfs/xfs_bmap_btree.c | 74 ++++++++++++++++++++++++++++++++++++++++++----- fs/xfs/xfs_bmap_btree.h | 48 ++++++++---------------------- fs/xfs/xfs_btree.h | 13 --------- fs/xfs/xfs_dinode.h | 3 +- fs/xfs/xfs_ialloc_btree.c | 16 ++++++++++ fs/xfs/xfs_ialloc_btree.h | 9 +----- fs/xfs/xfs_inode.c | 26 +++++++++-------- fs/xfs/xfs_log_recover.c | 4 +-- fs/xfs/xfs_mount.c | 34 +++++++++------------- fs/xfs/xfs_mount.h | 12 ++++---- 13 files changed, 158 insertions(+), 122 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 9e63f8c180d..6ff27b75b93 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -480,3 +480,19 @@ xfs_allocbt_init_cursor( return cur; } + +/* + * Calculate number of records in an alloc btree block. 
+ */ +int +xfs_allocbt_maxrecs( + struct xfs_mount *mp, + int blocklen, + int leaf) +{ + blocklen -= sizeof(struct xfs_btree_sblock); + + if (leaf) + return blocklen / sizeof(xfs_alloc_rec_t); + return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t)); +} diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 22f1d709af7..ff1f71d069c 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -55,12 +55,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp)) -/* - * Real block structures have a size equal to the disk block size. - */ -#define XFS_ALLOC_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_alloc_mxr[lev != 0]) -#define XFS_ALLOC_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_alloc_mnr[lev != 0]) - /* * Minimum and maximum blocksize and sectorsize. * The blocksize upper limit is pretty much arbitrary. @@ -98,5 +92,6 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t, xfs_btnum_t); +extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int); #endif /* __XFS_ALLOC_BTREE_H__ */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index b7f99d7576d..09e4de4ed50 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -3051,15 +3051,15 @@ xfs_bmap_btree_to_extents( __be64 *pp; /* ptr to block address */ xfs_bmbt_block_t *rblock;/* root btree block */ + mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); ASSERT(ifp->if_flags & XFS_IFEXTENTS); ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE); rblock = ifp->if_broot; ASSERT(be16_to_cpu(rblock->bb_level) == 1); ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); - ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1); - mp = ip->i_mount; - pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes); + ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1); + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); cbno = be64_to_cpu(*pp); *logflagsp = 0; #ifdef DEBUG @@ -4221,7 +4221,7 @@ xfs_bmap_compute_maxlevels( maxleafents = MAXAEXTNUM; sz = XFS_BMDR_SPACE_CALC(MINABTPTRS); } - maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0); + maxrootrecs = xfs_bmdr_maxrecs(mp, sz, 0); minleafrecs = mp->m_bmap_dmnr[0]; minnoderecs = mp->m_bmap_dmnr[1]; maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs; @@ -4555,7 +4555,7 @@ xfs_bmap_read_extents( */ level = be16_to_cpu(block->bb_level); ASSERT(level > 0); - pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); bno = be64_to_cpu(*pp); ASSERT(bno != NULLDFSBNO); ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); @@ -6205,13 +6205,13 @@ xfs_check_block( */ if (root) { - pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz); + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz); } else { pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr); } for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) { if (root) { - thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz); + thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz); } else { thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j, dmxr); @@ -6266,7 +6266,7 @@ xfs_bmap_check_leaf_extents( level = be16_to_cpu(block->bb_level); ASSERT(level > 0); xfs_check_block(block, mp, 1, ifp->if_broot_bytes); - pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); bno = 
be64_to_cpu(*pp); ASSERT(bno != NULLDFSBNO); @@ -6426,7 +6426,7 @@ xfs_bmap_count_blocks( block = ifp->if_broot; level = be16_to_cpu(block->bb_level); ASSERT(level > 0); - pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes); + pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); bno = be64_to_cpu(*pp); ASSERT(bno != NULLDFSBNO); ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index c5eeb3241e2..853828c6b45 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -66,6 +66,7 @@ xfs_extent_state( */ void xfs_bmdr_to_bmbt( + struct xfs_mount *mp, xfs_bmdr_block_t *dblock, int dblocklen, xfs_bmbt_block_t *rblock, @@ -83,11 +84,11 @@ xfs_bmdr_to_bmbt( rblock->bb_numrecs = dblock->bb_numrecs; rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO); rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO); - dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1); tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr); - tpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); @@ -428,6 +429,7 @@ xfs_bmbt_set_state( */ void xfs_bmbt_to_bmdr( + struct xfs_mount *mp, xfs_bmbt_block_t *rblock, int rblocklen, xfs_bmdr_block_t *dblock, @@ -445,10 +447,10 @@ xfs_bmbt_to_bmdr( ASSERT(be16_to_cpu(rblock->bb_level) > 0); dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; - dmxr = (int)XFS_BTREE_BLOCK_MAXRECS(dblocklen, xfs_bmdr, 0); + dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1); - fpp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, rblocklen); + fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); @@ -626,15 +628,36 @@ xfs_bmbt_get_minrecs( struct xfs_btree_cur *cur, int level) { - return XFS_BMAP_BLOCK_IMINRECS(level, cur); + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp; + + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + + return xfs_bmbt_maxrecs(cur->bc_mp, + ifp->if_broot_bytes, level == 0) / 2; + } + + return cur->bc_mp->m_bmap_dmnr[level != 0]; } -STATIC int +int xfs_bmbt_get_maxrecs( struct xfs_btree_cur *cur, int level) { - return XFS_BMAP_BLOCK_IMAXRECS(level, cur); + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp; + + ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, + cur->bc_private.b.whichfork); + + return xfs_bmbt_maxrecs(cur->bc_mp, + ifp->if_broot_bytes, level == 0); + } + + return cur->bc_mp->m_bmap_dmxr[level != 0]; + } /* @@ -651,7 +674,10 @@ xfs_bmbt_get_dmaxrecs( struct xfs_btree_cur *cur, int level) { - return XFS_BMAP_BLOCK_DMAXRECS(level, cur); + if (level != cur->bc_nlevels - 1) + return cur->bc_mp->m_bmap_dmxr[level != 0]; + return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize, + level == 0); } STATIC void @@ -871,3 +897,35 @@ xfs_bmbt_init_cursor( return cur; } + +/* + * Calculate number of records in a bmap btree block. 
+ */ +int +xfs_bmbt_maxrecs( + struct xfs_mount *mp, + int blocklen, + int leaf) +{ + blocklen -= sizeof(struct xfs_btree_lblock); + + if (leaf) + return blocklen / sizeof(xfs_bmbt_rec_t); + return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)); +} + +/* + * Calculate number of records in a bmap btree inode root. + */ +int +xfs_bmdr_maxrecs( + struct xfs_mount *mp, + int blocklen, + int leaf) +{ + blocklen -= sizeof(xfs_bmdr_block_t); + + if (leaf) + return blocklen / sizeof(xfs_bmdr_rec_t); + return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t)); +} diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 5669242b52d..835be2a84ca 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -151,33 +151,6 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp)) -#define XFS_BMAP_RBLOCK_DSIZE(lev,cur) ((cur)->bc_private.b.forksize) -#define XFS_BMAP_RBLOCK_ISIZE(lev,cur) \ - ((int)XFS_IFORK_PTR((cur)->bc_private.b.ip, \ - (cur)->bc_private.b.whichfork)->if_broot_bytes) - -#define XFS_BMAP_BLOCK_DMAXRECS(lev,cur) \ - (((lev) == (cur)->bc_nlevels - 1 ? \ - XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur), \ - xfs_bmdr, (lev) == 0) : \ - ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0]))) -#define XFS_BMAP_BLOCK_IMAXRECS(lev,cur) \ - (((lev) == (cur)->bc_nlevels - 1 ? \ - XFS_BTREE_BLOCK_MAXRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\ - xfs_bmbt, (lev) == 0) : \ - ((cur)->bc_mp->m_bmap_dmxr[(lev) != 0]))) - -#define XFS_BMAP_BLOCK_DMINRECS(lev,cur) \ - (((lev) == (cur)->bc_nlevels - 1 ? \ - XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_DSIZE(lev,cur),\ - xfs_bmdr, (lev) == 0) : \ - ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0]))) -#define XFS_BMAP_BLOCK_IMINRECS(lev,cur) \ - (((lev) == (cur)->bc_nlevels - 1 ? \ - XFS_BTREE_BLOCK_MINRECS(XFS_BMAP_RBLOCK_ISIZE(lev,cur),\ - xfs_bmbt, (lev) == 0) : \ - ((cur)->bc_mp->m_bmap_dmnr[(lev) != 0]))) - #define XFS_BMAP_REC_DADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i)) #define XFS_BMAP_REC_IADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i)) @@ -192,8 +165,8 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ be16_to_cpu((bb)->bb_level), cur))) #define XFS_BMAP_PTR_IADDR(bb,i,cur) \ - (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_IMAXRECS( \ - be16_to_cpu((bb)->bb_level), cur))) + (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, xfs_bmbt_get_maxrecs(cur, \ + be16_to_cpu((bb)->bb_level)))) /* * These are to be used when we know the size of the block and @@ -203,11 +176,8 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; (XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i)) #define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \ (XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i)) -#define XFS_BMAP_BROOT_PTR_ADDR(bb,i,sz) \ - (XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,XFS_BMAP_BROOT_MAXRECS(sz))) - -#define XFS_BMAP_BROOT_NUMRECS(bb) be16_to_cpu((bb)->bb_numrecs) -#define XFS_BMAP_BROOT_MAXRECS(sz) XFS_BTREE_BLOCK_MAXRECS(sz,xfs_bmbt,0) +#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb,i,sz) \ + (XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,xfs_bmbt_maxrecs(mp, sz, 0))) #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ (int)(sizeof(xfs_bmbt_block_t) + \ @@ -234,7 +204,8 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; /* * Prototypes for xfs_bmap.c to call. 
*/ -extern void xfs_bmdr_to_bmbt(xfs_bmdr_block_t *, int, xfs_bmbt_block_t *, int); +extern void xfs_bmdr_to_bmbt(struct xfs_mount *, xfs_bmdr_block_t *, int, + xfs_bmbt_block_t *, int); extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r); extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r); @@ -257,7 +228,12 @@ extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); -extern void xfs_bmbt_to_bmdr(xfs_bmbt_block_t *, int, xfs_bmdr_block_t *, int); +extern void xfs_bmbt_to_bmdr(struct xfs_mount *, xfs_bmbt_block_t *, int, + xfs_bmdr_block_t *, int); + +extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level); +extern int xfs_bmdr_maxrecs(struct xfs_mount *, int blocklen, int leaf); +extern int xfs_bmbt_maxrecs(struct xfs_mount *, int blocklen, int leaf); extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, int); diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 7425b2b4a25..795a124cee6 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -148,19 +148,6 @@ do { \ case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \ } \ } while (0) -/* - * Maximum and minimum records in a btree block. - * Given block size, type prefix, and leaf flag (0 or 1). - * The divisor below is equivalent to lf ? (e1) : (e2) but that produces - * compiler warnings. - */ -#define XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) \ - ((int)(((bsz) - (uint)sizeof(t ## _block_t)) / \ - (((lf) * (uint)sizeof(t ## _rec_t)) + \ - ((1 - (lf)) * \ - ((uint)sizeof(t ## _key_t) + (uint)sizeof(t ## _ptr_t)))))) -#define XFS_BTREE_BLOCK_MINRECS(bsz,t,lf) \ - (XFS_BTREE_BLOCK_MAXRECS(bsz,t,lf) / 2) /* * Record, key, and pointer address calculation macros. diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index c9065eaf2a4..2a00fcc36d8 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -78,8 +78,7 @@ typedef struct xfs_dinode xfs_dinode_core_t di_core; /* * In adding anything between the core and the union, be - * sure to update the macros like XFS_LITINO below and - * XFS_BMAP_RBLOCK_DSIZE in xfs_bmap_btree.h. + * sure to update the macros like XFS_LITINO below. */ __be32 di_next_unlinked;/* agi unlinked list ptr */ union { diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index dcd4a956e73..46aabb3fcbf 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -365,3 +365,19 @@ xfs_inobt_init_cursor( return cur; } + +/* + * Calculate number of records in an inobt btree block. + */ +int +xfs_inobt_maxrecs( + struct xfs_mount *mp, + int blocklen, + int leaf) +{ + blocklen -= sizeof(struct xfs_btree_sblock); + + if (leaf) + return blocklen / sizeof(xfs_inobt_rec_t); + return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t)); +} diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index ff7406b4bac..f0fc1e46e62 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -84,14 +84,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; #define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i)) #define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i)) -/* - * Real block structures have a size equal to the disk block size. 
- */ -#define XFS_INOBT_BLOCK_MAXRECS(lev,cur) ((cur)->bc_mp->m_inobt_mxr[lev != 0]) -#define XFS_INOBT_BLOCK_MINRECS(lev,cur) ((cur)->bc_mp->m_inobt_mnr[lev != 0]) -#define XFS_INOBT_IS_LAST_REC(cur) \ - ((cur)->bc_ptrs[0] == be16_to_cpu(XFS_BUF_TO_INOBT_BLOCK((cur)->bc_bufs[0])->bb_numrecs)) - /* * Maximum number of inode btree levels. */ @@ -118,5 +110,6 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); +extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int); #endif /* __XFS_IALLOC_BTREE_H__ */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 0c65ba2faa4..73b604e15dc 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -622,7 +622,7 @@ xfs_iformat_btree( ifp = XFS_IFORK_PTR(ip, whichfork); dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork); size = XFS_BMAP_BROOT_SPACE(dfp); - nrecs = XFS_BMAP_BROOT_NUMRECS(dfp); + nrecs = be16_to_cpu(dfp->bb_numrecs); /* * blow out if -- fork has less extents than can fit in @@ -650,8 +650,9 @@ xfs_iformat_btree( * Copy and convert from the on-disk structure * to the in-memory structure. */ - xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), - ifp->if_broot, size); + xfs_bmdr_to_bmbt(ip->i_mount, dfp, + XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), + ifp->if_broot, size); ifp->if_flags &= ~XFS_IFEXTENTS; ifp->if_flags |= XFS_IFBROOT; @@ -2348,6 +2349,7 @@ xfs_iroot_realloc( int rec_diff, int whichfork) { + struct xfs_mount *mp = ip->i_mount; int cur_max; xfs_ifork_t *ifp; xfs_bmbt_block_t *new_broot; @@ -2383,7 +2385,7 @@ xfs_iroot_realloc( * location. The records don't change location because * they are kept butted up against the btree block header. */ - cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); new_max = cur_max + rec_diff; new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); ifp->if_broot = (xfs_bmbt_block_t *) @@ -2391,10 +2393,10 @@ xfs_iroot_realloc( new_size, (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ KM_SLEEP); - op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, - ifp->if_broot_bytes); - np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, - (int)new_size); + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, + ifp->if_broot_bytes); + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, + (int)new_size); ifp->if_broot_bytes = (int)new_size; ASSERT(ifp->if_broot_bytes <= XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ); @@ -2408,7 +2410,7 @@ xfs_iroot_realloc( * records, just get rid of the root and clear the status bit. */ ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); - cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes); + cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); new_max = cur_max + rec_diff; ASSERT(new_max >= 0); if (new_max > 0) @@ -2442,9 +2444,9 @@ xfs_iroot_realloc( /* * Then copy the pointers. 
*/ - op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1, + op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, ifp->if_broot_bytes); - np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1, + np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, (int)new_size); memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); } @@ -2920,7 +2922,7 @@ xfs_iflush_fork( ASSERT(ifp->if_broot_bytes <= (XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ)); - xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes, + xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, (xfs_bmdr_block_t *)cp, XFS_DFORK_SIZE(dip, mp, whichfork)); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 82d46ce69d5..23c3a782a9e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2452,7 +2452,7 @@ xlog_recover_do_inode_trans( break; case XFS_ILOG_DBROOT: - xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + xfs_bmbt_to_bmdr(mp, (xfs_bmbt_block_t *)src, len, &(dip->di_u.di_bmbt), XFS_DFORK_DSIZE(dip, mp)); break; @@ -2490,7 +2490,7 @@ xlog_recover_do_inode_trans( case XFS_ILOG_ABROOT: dest = XFS_DFORK_APTR(dip); - xfs_bmbt_to_bmdr((xfs_bmbt_block_t *)src, len, + xfs_bmbt_to_bmdr(mp, (xfs_bmbt_block_t *)src, len, (xfs_bmdr_block_t*)dest, XFS_DFORK_ASIZE(dip, mp)); break; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 5ec6032d230..40338ff8fdd 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -567,8 +567,6 @@ xfs_readsb(xfs_mount_t *mp, int flags) STATIC void xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) { - int i; - mp->m_agfrotor = mp->m_agirotor = 0; spin_lock_init(&mp->m_agirotor_lock); mp->m_maxagi = mp->m_sb.sb_agcount; @@ -605,24 +603,20 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) } ASSERT(mp->m_attroffset < XFS_LITINO(mp)); - for (i = 0; i < 2; i++) { - mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, - xfs_alloc, i == 0); - mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, - xfs_alloc, i == 0); - } - for (i = 0; i < 2; i++) { - mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, - xfs_bmbt, i == 0); - mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, - xfs_bmbt, i == 0); - } - for (i = 0; i < 2; i++) { - mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize, - xfs_inobt, i == 0); - mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize, - xfs_inobt, i == 0); - } + mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1); + mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0); + mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2; + mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2; + + mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1); + mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0); + mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2; + mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2; + + mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1); + mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0); + mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2; + mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2; mp->m_bsize = XFS_FSB_TO_BB(mp, 1); mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK, diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index c6846810936..f4644d71548 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -276,12 +276,12 @@ typedef struct xfs_mount { uint m_blockmask; /* sb_blocksize-1 */ uint m_blockwsize; /* sb_blocksize in words */ uint m_blockwmask; /* blockwsize-1 */ - uint m_alloc_mxr[2]; /* 
XFS_ALLOC_BLOCK_MAXRECS */ - uint m_alloc_mnr[2]; /* XFS_ALLOC_BLOCK_MINRECS */ - uint m_bmap_dmxr[2]; /* XFS_BMAP_BLOCK_DMAXRECS */ - uint m_bmap_dmnr[2]; /* XFS_BMAP_BLOCK_DMINRECS */ - uint m_inobt_mxr[2]; /* XFS_INOBT_BLOCK_MAXRECS */ - uint m_inobt_mnr[2]; /* XFS_INOBT_BLOCK_MINRECS */ + uint m_alloc_mxr[2]; /* max alloc btree records */ + uint m_alloc_mnr[2]; /* min alloc btree records */ + uint m_bmap_dmxr[2]; /* max bmap btree records */ + uint m_bmap_dmnr[2]; /* min bmap btree records */ + uint m_inobt_mxr[2]; /* max inobt btree records */ + uint m_inobt_mnr[2]; /* min inobt btree records */ uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */ uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */ uint m_in_maxlevels; /* XFS_IN_MAXLEVELS */ -- cgit v1.2.3-70-g09d2 From 6c7699c047c50403149ad91331dd39de47dea070 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:11:29 +1100 Subject: [XFS] remove the mount inode list Now we've removed all users of the mount inode list, we can kill it. This reduces the size of the xfs_inode by 2 pointers. SGI-PV: 988139 SGI-Modid: xfs-linux-melb:xfs-kern:32293a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_iget.c | 42 +----------------------------------------- fs/xfs/xfs_inode.h | 8 -------- fs/xfs/xfs_mount.c | 5 ----- fs/xfs/xfs_mount.h | 1 - 4 files changed, 1 insertion(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 4c92d190b3b..1256746b249 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -76,7 +76,6 @@ xfs_iget_core( { struct inode *old_inode; xfs_inode_t *ip; - xfs_inode_t *iq; int error; unsigned long first_index, mask; xfs_perag_t *pag; @@ -255,24 +254,6 @@ finish_inode: write_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); - - /* - * Link ip to its mount and thread it on the mount's inode list. - */ - XFS_MOUNT_ILOCK(mp); - if ((iq = mp->m_inodes)) { - ASSERT(iq->i_mprev->i_mnext == iq); - ip->i_mprev = iq->i_mprev; - iq->i_mprev->i_mnext = ip; - iq->i_mprev = ip; - ip->i_mnext = iq; - } else { - ip->i_mnext = ip; - ip->i_mprev = ip; - } - mp->m_inodes = ip; - - XFS_MOUNT_IUNLOCK(mp); xfs_put_perag(mp, pag); return_ip: @@ -493,36 +474,15 @@ xfs_iextract( { xfs_mount_t *mp = ip->i_mount; xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); - xfs_inode_t *iq; write_lock(&pag->pag_ici_lock); radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino)); write_unlock(&pag->pag_ici_lock); xfs_put_perag(mp, pag); - /* - * Remove from mount's inode list. - */ - XFS_MOUNT_ILOCK(mp); - ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL)); - iq = ip->i_mnext; - iq->i_mprev = ip->i_mprev; - ip->i_mprev->i_mnext = iq; - - /* - * Fix up the head pointer if it points to the inode being deleted. 
- */ - if (mp->m_inodes == ip) { - if (ip == iq) { - mp->m_inodes = NULL; - } else { - mp->m_inodes = iq; - } - } - /* Deal with the deleted inodes list */ + XFS_MOUNT_ILOCK(mp); list_del_init(&ip->i_reclaim); - mp->m_ireclaims++; XFS_MOUNT_IUNLOCK(mp); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 104623b7ec6..55d50b888b6 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -233,16 +233,8 @@ typedef struct dm_attrs_s { __uint16_t da_pad; /* DMIG extra padding */ } dm_attrs_t; -typedef struct { - struct xfs_inode *ip_mnext; /* next inode in mount list */ - struct xfs_inode *ip_mprev; /* ptr to prev inode */ - struct xfs_mount *ip_mount; /* fs mount struct ptr */ -} xfs_iptr_t; - typedef struct xfs_inode { /* Inode linking and identification information. */ - struct xfs_inode *i_mnext; /* next inode in mount list */ - struct xfs_inode *i_mprev; /* ptr to prev inode */ struct xfs_mount *i_mount; /* fs mount struct ptr */ struct list_head i_reclaim; /* reclaim list */ struct inode *i_vnode; /* vnode backpointer */ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 40338ff8fdd..43e5917465a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1282,11 +1282,6 @@ xfs_unmountfs( xfs_unmountfs_wait(mp); /* wait for async bufs */ xfs_log_unmount(mp); /* Done! No more fs ops. */ - /* - * All inodes from this mount point should be freed. - */ - ASSERT(mp->m_inodes == NULL); - if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0) uuid_table_remove(&mp->m_sb.sb_uuid); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index f4644d71548..0ba05269112 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -248,7 +248,6 @@ typedef struct xfs_mount { xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ spinlock_t m_agirotor_lock;/* .. and lock protecting it */ xfs_agnumber_t m_maxagi; /* highest inode alloc group */ - struct xfs_inode *m_inodes; /* active inode list */ struct list_head m_del_inodes; /* inodes to reclaim */ mutex_t m_ilock; /* inode list mutex */ uint m_ireclaims; /* count of calls to reclaim*/ -- cgit v1.2.3-70-g09d2 From 136341b41ad4883bd668120f727a52c42331fe8a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:11:40 +1100 Subject: [XFS] cleanup btree record / key / ptr addressing macros. Replace the generic record / key / ptr addressing macros that use cpp token pasting with simpler macros that do the job for just one given btree type. The new macros lose the cur argument and thus can be used outside the core btree code, but also gain an xfs_mount * argument to allow for checking the CRC flag in the near future. Note that many of these macros aren't actually used in the kernel code, but only in userspace (mostly in xfs_repair). 
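The new per-btree macros spell out the arithmetic that the old token-pasting macros hid: entries are 1-based, records (or keys followed by pointers) are laid out back to back after the block header, and the maximum record count is simply the space left after the header divided by the entry size, with the minimum record count being half of that. The userspace C sketch below mirrors that layout math under assumed sizes (a 4096-byte block, a 16-byte short-form header, 8-byte records and keys, 4-byte pointers); the demo_* names are stand-ins for illustration, not the real XFS types or macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_HDR_LEN	16	/* assumed short-form btree block header size */

struct demo_rec { uint32_t startblock, blockcount; };	/* 8 bytes */
struct demo_key { uint32_t startblock, blockcount; };	/* 8 bytes */
typedef uint32_t demo_ptr_t;				/* 4 bytes */

/* address of record "index" (1-based) in a leaf block */
static struct demo_rec *demo_rec_addr(void *block, int index)
{
	return (struct demo_rec *)((char *)block + BLOCK_HDR_LEN +
				   (index - 1) * sizeof(struct demo_rec));
}

/* address of pointer "index" (1-based) in a node block holding "maxrecs" keys */
static demo_ptr_t *demo_ptr_addr(void *block, int index, int maxrecs)
{
	return (demo_ptr_t *)((char *)block + BLOCK_HDR_LEN +
			      maxrecs * sizeof(struct demo_key) +
			      (index - 1) * sizeof(demo_ptr_t));
}

int main(void)
{
	char	block[4096];
	int	leaf_maxrecs = (int)((sizeof(block) - BLOCK_HDR_LEN) /
				     sizeof(struct demo_rec));
	int	node_maxrecs = (int)((sizeof(block) - BLOCK_HDR_LEN) /
				     (sizeof(struct demo_key) + sizeof(demo_ptr_t)));

	printf("leaf maxrecs %d, node maxrecs %d\n", leaf_maxrecs, node_maxrecs);
	printf("rec 1 at offset %td, ptr 1 at offset %td\n",
	       (char *)demo_rec_addr(block, 1) - block,
	       (char *)demo_ptr_addr(block, 1, node_maxrecs) - block);
	return 0;
}

Under those assumed sizes the leaf and node maxrecs come out to 510 and 340, and the first pointer of a node block lands immediately after the array of 340 keys, which is the same shape of calculation the mount-time m_*_mxr/m_*_mnr precomputation above performs.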
SGI-PV: 988146 SGI-Modid: xfs-linux-melb:xfs-kern:32295a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_alloc_btree.c | 2 +- fs/xfs/xfs_alloc_btree.h | 25 +++++++++++++------ fs/xfs/xfs_bmap.c | 54 ++++++++++++++++++----------------------- fs/xfs/xfs_bmap_btree.c | 13 +++++----- fs/xfs/xfs_bmap_btree.h | 62 ++++++++++++++++++++++++++++++----------------- fs/xfs/xfs_btree.h | 15 ------------ fs/xfs/xfs_fsops.c | 4 +-- fs/xfs/xfs_ialloc_btree.h | 29 +++++++++++++++------- fs/xfs/xfs_inode.c | 6 ++--- 9 files changed, 113 insertions(+), 97 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 6ff27b75b93..72c083f62a9 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -179,7 +179,7 @@ xfs_allocbt_update_lastrec( if (numrecs) { xfs_alloc_rec_t *rrp; - rrp = XFS_ALLOC_REC_ADDR(block, numrecs, cur); + rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs); len = rrp->ar_blockcount; } else { len = 0; diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index ff1f71d069c..579f9c7e087 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -78,16 +78,27 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; /* * Record, key, and pointer address macros for btree blocks. + * + * (note that some of these may appear unused, but they are used in userspace) */ -#define XFS_ALLOC_REC_ADDR(bb,i,cur) \ - XFS_BTREE_REC_ADDR(xfs_alloc, bb, i) - -#define XFS_ALLOC_KEY_ADDR(bb,i,cur) \ - XFS_BTREE_KEY_ADDR(xfs_alloc, bb, i) +#define XFS_ALLOC_REC_ADDR(mp, block, index) \ + ((xfs_alloc_rec_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + (((index) - 1) * sizeof(xfs_alloc_rec_t)))) -#define XFS_ALLOC_PTR_ADDR(bb,i,cur) \ - XFS_BTREE_PTR_ADDR(xfs_alloc, bb, i, XFS_ALLOC_BLOCK_MAXRECS(1, cur)) +#define XFS_ALLOC_KEY_ADDR(mp, block, index) \ + ((xfs_alloc_key_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + ((index) - 1) * sizeof(xfs_alloc_key_t))) +#define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \ + ((xfs_alloc_ptr_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + (maxrecs) * sizeof(xfs_alloc_key_t) + \ + ((index) - 1) * sizeof(xfs_alloc_ptr_t))) extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 09e4de4ed50..3dab937d4b8 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -393,7 +393,7 @@ xfs_bmap_count_leaves( STATIC void xfs_bmap_disk_count_leaves( - xfs_extnum_t idx, + struct xfs_mount *mp, xfs_bmbt_block_t *block, int numrecs, int *count); @@ -3539,7 +3539,7 @@ xfs_bmap_extents_to_btree( ablock->bb_level = 0; ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO); ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); - arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (cnt = i = 0; i < nextents; i++) { ep = xfs_iext_get_ext(ifp, i); @@ -3554,11 +3554,13 @@ xfs_bmap_extents_to_btree( /* * Fill in the root key and pointer. 
*/ - kp = XFS_BMAP_KEY_IADDR(block, 1, cur); - arp = XFS_BMAP_REC_IADDR(ablock, 1, cur); + kp = XFS_BMBT_KEY_ADDR(mp, block, 1); + arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp)); - pp = XFS_BMAP_PTR_IADDR(block, 1, cur); + pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, + be16_to_cpu(block->bb_level))); *pp = cpu_to_be64(args.fsbno); + /* * Do all this logging at the end so that * the root is at the right level. @@ -4574,7 +4576,7 @@ xfs_bmap_read_extents( error0); if (level == 0) break; - pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); + pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0); xfs_trans_brelse(tp, bp); @@ -4617,7 +4619,7 @@ xfs_bmap_read_extents( /* * Copy records into the extent records. */ - frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1); + frp = XFS_BMBT_REC_ADDR(mp, block, 1); start = i; for (j = 0; j < num_recs; j++, i++, frp++) { xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i); @@ -6187,12 +6189,7 @@ xfs_check_block( prevp = NULL; for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) { dmxr = mp->m_bmap_dmxr[0]; - - if (root) { - keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz); - } else { - keyp = XFS_BTREE_KEY_ADDR(xfs_bmbt, block, i); - } + keyp = XFS_BMBT_KEY_ADDR(mp, block, i); if (prevp) { ASSERT(be64_to_cpu(prevp->br_startoff) < @@ -6203,19 +6200,16 @@ xfs_check_block( /* * Compare the block numbers to see if there are dups. */ - - if (root) { + if (root) pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz); - } else { - pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, i, dmxr); - } + else + pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr); + for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) { - if (root) { + if (root) thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz); - } else { - thispa = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, j, - dmxr); - } + else + thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr); if (*thispa == *pp) { cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", __func__, j, i, @@ -6301,7 +6295,7 @@ xfs_bmap_check_leaf_extents( */ xfs_check_block(block, mp, 0, 0); - pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); + pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0); if (bp_release) { @@ -6337,14 +6331,14 @@ xfs_bmap_check_leaf_extents( * conform with the first entry in this one. 
*/ - ep = XFS_BTREE_REC_ADDR(xfs_bmbt, block, 1); + ep = XFS_BMBT_REC_ADDR(mp, block, 1); if (i) { ASSERT(xfs_bmbt_disk_get_startoff(&last) + xfs_bmbt_disk_get_blockcount(&last) <= xfs_bmbt_disk_get_startoff(ep)); } for (j = 1; j < num_recs; j++) { - nextp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, j + 1); + nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1); ASSERT(xfs_bmbt_disk_get_startoff(ep) + xfs_bmbt_disk_get_blockcount(ep) <= xfs_bmbt_disk_get_startoff(nextp)); @@ -6482,7 +6476,7 @@ xfs_bmap_count_tree( } /* Dive to the next level */ - pp = XFS_BTREE_PTR_ADDR(xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]); + pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); bno = be64_to_cpu(*pp); if (unlikely((error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) { @@ -6497,7 +6491,7 @@ xfs_bmap_count_tree( for (;;) { nextbno = be64_to_cpu(block->bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); - xfs_bmap_disk_count_leaves(0, block, numrecs, count); + xfs_bmap_disk_count_leaves(mp, block, numrecs, count); xfs_trans_brelse(tp, bp); if (nextbno == NULLFSBLOCK) break; @@ -6536,7 +6530,7 @@ xfs_bmap_count_leaves( */ STATIC void xfs_bmap_disk_count_leaves( - xfs_extnum_t idx, + struct xfs_mount *mp, xfs_bmbt_block_t *block, int numrecs, int *count) @@ -6545,7 +6539,7 @@ xfs_bmap_disk_count_leaves( xfs_bmbt_rec_t *frp; for (b = 1; b <= numrecs; b++) { - frp = XFS_BTREE_REC_ADDR(xfs_bmbt, block, idx + b); + frp = XFS_BMBT_REC_ADDR(mp, block, b); *count += xfs_bmbt_disk_get_blockcount(frp); } } diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 853828c6b45..11137c042c9 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -44,7 +44,6 @@ #include "xfs_error.h" #include "xfs_quota.h" - /* * Determine the extent state. */ @@ -85,9 +84,9 @@ xfs_bmdr_to_bmbt( rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO); rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO); dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); - fkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1); - tkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); - fpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr); + fkp = XFS_BMDR_KEY_ADDR(dblock, 1); + tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1); + fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr); tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); @@ -448,10 +447,10 @@ xfs_bmbt_to_bmdr( dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); - fkp = XFS_BMAP_BROOT_KEY_ADDR(rblock, 1, rblocklen); - tkp = XFS_BTREE_KEY_ADDR(xfs_bmdr, dblock, 1); + fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1); + tkp = XFS_BMDR_KEY_ADDR(dblock, 1); fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen); - tpp = XFS_BTREE_PTR_ADDR(xfs_bmdr, dblock, 1, dmxr); + tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr); dmxr = be16_to_cpu(dblock->bb_numrecs); memcpy(tkp, fkp, sizeof(*fkp) * dmxr); memcpy(tpp, fpp, sizeof(*fpp) * dmxr); diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 835be2a84ca..7f001072db4 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -21,6 +21,7 @@ #define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */ struct xfs_btree_cur; +struct xfs_btree_block; struct xfs_btree_lblock; struct xfs_mount; struct xfs_inode; @@ -151,33 +152,50 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; #define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp)) -#define XFS_BMAP_REC_DADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i)) - -#define 
XFS_BMAP_REC_IADDR(bb,i,cur) (XFS_BTREE_REC_ADDR(xfs_bmbt, bb, i)) - -#define XFS_BMAP_KEY_DADDR(bb,i,cur) \ - (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i)) - -#define XFS_BMAP_KEY_IADDR(bb,i,cur) \ - (XFS_BTREE_KEY_ADDR(xfs_bmbt, bb, i)) - -#define XFS_BMAP_PTR_DADDR(bb,i,cur) \ - (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, XFS_BMAP_BLOCK_DMAXRECS( \ - be16_to_cpu((bb)->bb_level), cur))) -#define XFS_BMAP_PTR_IADDR(bb,i,cur) \ - (XFS_BTREE_PTR_ADDR(xfs_bmbt, bb, i, xfs_bmbt_get_maxrecs(cur, \ - be16_to_cpu((bb)->bb_level)))) +#define XFS_BMBT_REC_ADDR(mp, block, index) \ + ((xfs_bmbt_rec_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_lblock) + \ + ((index) - 1) * sizeof(xfs_bmbt_rec_t))) + +#define XFS_BMBT_KEY_ADDR(mp, block, index) \ + ((xfs_bmbt_key_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_lblock) + \ + ((index) - 1) * sizeof(xfs_bmbt_key_t))) + +#define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \ + ((xfs_bmbt_ptr_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_lblock) + \ + (maxrecs) * sizeof(xfs_bmbt_key_t) + \ + ((index) - 1) * sizeof(xfs_bmbt_ptr_t))) + +#define XFS_BMDR_REC_ADDR(block, index) \ + ((xfs_bmdr_rec_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_bmdr_block) + \ + ((index) - 1) * sizeof(xfs_bmdr_rec_t))) + +#define XFS_BMDR_KEY_ADDR(block, index) \ + ((xfs_bmdr_key_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_bmdr_block) + \ + ((index) - 1) * sizeof(xfs_bmdr_key_t))) + +#define XFS_BMDR_PTR_ADDR(block, index, maxrecs) \ + ((xfs_bmdr_ptr_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_bmdr_block) + \ + (maxrecs) * sizeof(xfs_bmdr_key_t) + \ + ((index) - 1) * sizeof(xfs_bmdr_ptr_t))) /* * These are to be used when we know the size of the block and * we don't have a cursor. */ -#define XFS_BMAP_BROOT_REC_ADDR(bb,i,sz) \ - (XFS_BTREE_REC_ADDR(xfs_bmbt,bb,i)) -#define XFS_BMAP_BROOT_KEY_ADDR(bb,i,sz) \ - (XFS_BTREE_KEY_ADDR(xfs_bmbt,bb,i)) -#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb,i,sz) \ - (XFS_BTREE_PTR_ADDR(xfs_bmbt,bb,i,xfs_bmbt_maxrecs(mp, sz, 0))) +#define XFS_BMAP_BROOT_PTR_ADDR(mp, bb, i, sz) \ + XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0)) #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ (int)(sizeof(xfs_bmbt_block_t) + \ diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 795a124cee6..d6120a74906 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -149,21 +149,6 @@ do { \ } \ } while (0) -/* - * Record, key, and pointer address calculation macros. - * Given block size, type prefix, block pointer, and index of requested entry - * (first entry numbered 1). 
- */ -#define XFS_BTREE_REC_ADDR(t,bb,i) \ - ((t ## _rec_t *)((char *)(bb) + sizeof(t ## _block_t) + \ - ((i) - 1) * sizeof(t ## _rec_t))) -#define XFS_BTREE_KEY_ADDR(t,bb,i) \ - ((t ## _key_t *)((char *)(bb) + sizeof(t ## _block_t) + \ - ((i) - 1) * sizeof(t ## _key_t))) -#define XFS_BTREE_PTR_ADDR(t,bb,i,mxr) \ - ((t ## _ptr_t *)((char *)(bb) + sizeof(t ## _block_t) + \ - (mxr) * sizeof(t ## _key_t) + ((i) - 1) * sizeof(t ## _ptr_t))) - #define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */ struct xfs_btree_ops { diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 84583cf73db..8ce72aba027 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -258,7 +258,7 @@ xfs_growfs_data_private( block->bb_numrecs = cpu_to_be16(1); block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); - arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1); + arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( agsize - be32_to_cpu(arec->ar_startblock)); @@ -279,7 +279,7 @@ xfs_growfs_data_private( block->bb_numrecs = cpu_to_be16(1); block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); - arec = XFS_BTREE_REC_ADDR(xfs_alloc, block, 1); + arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( agsize - be32_to_cpu(arec->ar_startblock)); diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index f0fc1e46e62..fa12c85db34 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -97,16 +97,27 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; /* * Record, key, and pointer address macros for btree blocks. + * + * (note that some of these may appear unused, but they are used in userspace) */ -#define XFS_INOBT_REC_ADDR(bb,i,cur) \ - (XFS_BTREE_REC_ADDR(xfs_inobt, bb, i)) - -#define XFS_INOBT_KEY_ADDR(bb,i,cur) \ - (XFS_BTREE_KEY_ADDR(xfs_inobt, bb, i)) - -#define XFS_INOBT_PTR_ADDR(bb,i,cur) \ - (XFS_BTREE_PTR_ADDR(xfs_inobt, bb, \ - i, XFS_INOBT_BLOCK_MAXRECS(1, cur))) +#define XFS_INOBT_REC_ADDR(mp, block, index) \ + ((xfs_inobt_rec_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + (((index) - 1) * sizeof(xfs_inobt_rec_t)))) + +#define XFS_INOBT_KEY_ADDR(mp, block, index) \ + ((xfs_inobt_key_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + ((index) - 1) * sizeof(xfs_inobt_key_t))) + +#define XFS_INOBT_PTR_ADDR(mp, block, index, maxrecs) \ + ((xfs_inobt_ptr_t *) \ + ((char *)(block) + \ + sizeof(struct xfs_btree_sblock) + \ + (maxrecs) * sizeof(xfs_inobt_key_t) + \ + ((index) - 1) * sizeof(xfs_inobt_ptr_t))) extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *, struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 73b604e15dc..7b4f13c710d 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2435,10 +2435,8 @@ xfs_iroot_realloc( /* * First copy the records. 
*/ - op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1, - ifp->if_broot_bytes); - np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1, - (int)new_size); + op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1); + np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1); memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); /* -- cgit v1.2.3-70-g09d2 From 7cc95a821df8f09a5d37a923cf8c3a7c3ee00c29 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:14:34 +1100 Subject: [XFS] Always use struct xfs_btree_block instead of short / longform structures. Always use the generic xfs_btree_block type instead of the short / long structures. Add XFS_BTREE_SBLOCK_LEN / XFS_BTREE_LBLOCK_LEN defines for the length of a short / long form block. The rationale for this is that we will grow more btree block header variants to support CRCs and other RAS information, and always accessing them through the same datatype with unions for the short / long form pointers makes implementing this much easier. SGI-PV: 988146 SGI-Modid: xfs-linux-melb:xfs-kern:32300a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_alloc.c | 23 +++++++------- fs/xfs/xfs_alloc_btree.c | 2 +- fs/xfs/xfs_alloc_btree.h | 18 ++++++----- fs/xfs/xfs_bmap.c | 71 +++++++++++++++++++++-------------------- fs/xfs/xfs_bmap_btree.c | 14 ++++---- fs/xfs/xfs_bmap_btree.h | 23 +++++++------- fs/xfs/xfs_btree.c | 81 +++++++++++++++++++++++------------------------ fs/xfs/xfs_btree.h | 53 +++++++++---------------------- fs/xfs/xfs_dinode.h | 2 +- fs/xfs/xfs_fsops.c | 20 ++++++------ fs/xfs/xfs_ialloc_btree.c | 2 +- fs/xfs/xfs_ialloc_btree.h | 19 +++++------ fs/xfs/xfs_inode.c | 13 +++----- fs/xfs/xfs_inode.h | 3 +- fs/xfs/xfs_log_recover.c | 8 ++--- 15 files changed, 165 insertions(+), 187 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 0a2a87208b1..c47ce907572 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -380,21 +380,20 @@ xfs_alloc_fixup_trees( return error; XFS_WANT_CORRUPTED_RETURN(i == 1); } + #ifdef DEBUG - { - xfs_alloc_block_t *bnoblock; - xfs_alloc_block_t *cntblock; - - if (bno_cur->bc_nlevels == 1 && - cnt_cur->bc_nlevels == 1) { - bnoblock = XFS_BUF_TO_ALLOC_BLOCK(bno_cur->bc_bufs[0]); - cntblock = XFS_BUF_TO_ALLOC_BLOCK(cnt_cur->bc_bufs[0]); - XFS_WANT_CORRUPTED_RETURN( - be16_to_cpu(bnoblock->bb_numrecs) == - be16_to_cpu(cntblock->bb_numrecs)); - } + if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) { + struct xfs_btree_block *bnoblock; + struct xfs_btree_block *cntblock; + + bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]); + cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]); + + XFS_WANT_CORRUPTED_RETURN( + bnoblock->bb_numrecs == cntblock->bb_numrecs); } #endif + /* * Deal with all four cases: the allocated record is contained * within the freespace record, so we can have new freespace diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 72c083f62a9..733cb75a8c5 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -490,7 +490,7 @@ xfs_allocbt_maxrecs( int blocklen, int leaf) { - blocklen -= sizeof(struct xfs_btree_sblock); + blocklen -= XFS_ALLOC_BLOCK_LEN(mp); if (leaf) return blocklen / sizeof(xfs_alloc_rec_t); diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h index 579f9c7e087..a6caa0022c9 100644 --- a/fs/xfs/xfs_alloc_btree.h +++ b/fs/xfs/xfs_alloc_btree.h @@ -24,7 +24,6 @@ struct xfs_buf; struct xfs_btree_cur; -struct 
xfs_btree_sblock; struct xfs_mount; /* @@ -50,10 +49,6 @@ typedef struct xfs_alloc_rec_incore { /* btree pointer type */ typedef __be32 xfs_alloc_ptr_t; -/* btree block header type */ -typedef struct xfs_btree_sblock xfs_alloc_block_t; - -#define XFS_BUF_TO_ALLOC_BLOCK(bp) ((xfs_alloc_block_t *)XFS_BUF_PTR(bp)) /* * Minimum and maximum blocksize and sectorsize. @@ -76,6 +71,13 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1)) #define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1)) +/* + * Btree block header size depends on a superblock flag. + * + * (not quite yet, but soon) + */ +#define XFS_ALLOC_BLOCK_LEN(mp) XFS_BTREE_SBLOCK_LEN + /* * Record, key, and pointer address macros for btree blocks. * @@ -84,19 +86,19 @@ typedef struct xfs_btree_sblock xfs_alloc_block_t; #define XFS_ALLOC_REC_ADDR(mp, block, index) \ ((xfs_alloc_rec_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_ALLOC_BLOCK_LEN(mp) + \ (((index) - 1) * sizeof(xfs_alloc_rec_t)))) #define XFS_ALLOC_KEY_ADDR(mp, block, index) \ ((xfs_alloc_key_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_ALLOC_BLOCK_LEN(mp) + \ ((index) - 1) * sizeof(xfs_alloc_key_t))) #define XFS_ALLOC_PTR_ADDR(mp, block, index, maxrecs) \ ((xfs_alloc_ptr_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_ALLOC_BLOCK_LEN(mp) + \ (maxrecs) * sizeof(xfs_alloc_key_t) + \ ((index) - 1) * sizeof(xfs_alloc_ptr_t))) diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3dab937d4b8..7796a0c140e 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -394,7 +394,7 @@ xfs_bmap_count_leaves( STATIC void xfs_bmap_disk_count_leaves( struct xfs_mount *mp, - xfs_bmbt_block_t *block, + struct xfs_btree_block *block, int numrecs, int *count); @@ -3042,14 +3042,14 @@ xfs_bmap_btree_to_extents( int whichfork) /* data or attr fork */ { /* REFERENCED */ - xfs_bmbt_block_t *cblock;/* child btree block */ + struct xfs_btree_block *cblock;/* child btree block */ xfs_fsblock_t cbno; /* child block number */ xfs_buf_t *cbp; /* child block's buffer */ int error; /* error return value */ xfs_ifork_t *ifp; /* inode fork data */ xfs_mount_t *mp; /* mount point structure */ __be64 *pp; /* ptr to block address */ - xfs_bmbt_block_t *rblock;/* root btree block */ + struct xfs_btree_block *rblock;/* root btree block */ mp = ip->i_mount; ifp = XFS_IFORK_PTR(ip, whichfork); @@ -3069,8 +3069,8 @@ xfs_bmap_btree_to_extents( if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF))) return error; - cblock = XFS_BUF_TO_BMBT_BLOCK(cbp); - if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp))) + cblock = XFS_BUF_TO_BLOCK(cbp); + if ((error = xfs_btree_check_block(cur, cblock, 0, cbp))) return error; xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp); ip->i_d.di_nblocks--; @@ -3450,11 +3450,11 @@ xfs_bmap_extents_to_btree( int *logflagsp, /* inode logging flags */ int whichfork) /* data or attr fork */ { - xfs_bmbt_block_t *ablock; /* allocated (child) bt block */ + struct xfs_btree_block *ablock; /* allocated (child) bt block */ xfs_buf_t *abp; /* buffer for ablock */ xfs_alloc_arg_t args; /* allocation arguments */ xfs_bmbt_rec_t *arp; /* child record pointer */ - xfs_bmbt_block_t *block; /* btree root block */ + struct xfs_btree_block *block; /* btree root block */ xfs_btree_cur_t *cur; /* bmap btree cursor */ xfs_bmbt_rec_host_t *ep; /* extent record pointer */ int error; /* error return value */ @@ -3474,6 
+3474,7 @@ xfs_bmap_extents_to_btree( */ xfs_iroot_realloc(ip, 1, whichfork); ifp->if_flags |= XFS_IFBROOT; + /* * Fill in the root. */ @@ -3481,8 +3482,9 @@ xfs_bmap_extents_to_btree( block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); block->bb_level = cpu_to_be16(1); block->bb_numrecs = cpu_to_be16(1); - block->bb_leftsib = cpu_to_be64(NULLDFSBNO); - block->bb_rightsib = cpu_to_be64(NULLDFSBNO); + block->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); + block->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); + /* * Need a cursor. Can't allocate until bb_level is filled in. */ @@ -3534,11 +3536,11 @@ xfs_bmap_extents_to_btree( /* * Fill in the child block. */ - ablock = XFS_BUF_TO_BMBT_BLOCK(abp); + ablock = XFS_BUF_TO_BLOCK(abp); ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC); ablock->bb_level = 0; - ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO); - ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO); + ablock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); + ablock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); arp = XFS_BMBT_REC_ADDR(mp, ablock, 1); nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); for (cnt = i = 0; i < nextents; i++) { @@ -3550,7 +3552,8 @@ xfs_bmap_extents_to_btree( } } ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork)); - ablock->bb_numrecs = cpu_to_be16(cnt); + xfs_btree_set_numrecs(ablock, cnt); + /* * Fill in the root key and pointer. */ @@ -4533,7 +4536,7 @@ xfs_bmap_read_extents( xfs_inode_t *ip, /* incore inode */ int whichfork) /* data or attr fork */ { - xfs_bmbt_block_t *block; /* current btree block */ + struct xfs_btree_block *block; /* current btree block */ xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ @@ -4570,7 +4573,7 @@ xfs_bmap_read_extents( if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); XFS_WANT_CORRUPTED_GOTO( XFS_BMAP_SANITY_CHECK(mp, block, level), error0); @@ -4596,7 +4599,7 @@ xfs_bmap_read_extents( xfs_extnum_t start; - num_recs = be16_to_cpu(block->bb_numrecs); + num_recs = xfs_btree_get_numrecs(block); if (unlikely(i + num_recs > room)) { ASSERT(i + num_recs <= room); xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, @@ -4613,7 +4616,7 @@ xfs_bmap_read_extents( /* * Read-ahead the next leaf block, if any. 
*/ - nextbno = be64_to_cpu(block->bb_rightsib); + nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); if (nextbno != NULLFSBLOCK) xfs_btree_reada_bufl(mp, nextbno, 1); /* @@ -4650,7 +4653,7 @@ xfs_bmap_read_extents( if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); } ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); @@ -6175,7 +6178,7 @@ xfs_bmap_get_bp( void xfs_check_block( - xfs_bmbt_block_t *block, + struct xfs_btree_block *block, xfs_mount_t *mp, int root, short sz) @@ -6187,7 +6190,7 @@ xfs_check_block( ASSERT(be16_to_cpu(block->bb_level) > 0); prevp = NULL; - for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) { + for( i = 1; i <= xfs_btree_get_numrecs(block); i++) { dmxr = mp->m_bmap_dmxr[0]; keyp = XFS_BMBT_KEY_ADDR(mp, block, i); @@ -6232,7 +6235,7 @@ xfs_bmap_check_leaf_extents( xfs_inode_t *ip, /* incore inode pointer */ int whichfork) /* data or attr fork */ { - xfs_bmbt_block_t *block; /* current btree block */ + struct xfs_btree_block *block; /* current btree block */ xfs_fsblock_t bno; /* block # of "block" */ xfs_buf_t *bp; /* buffer for "block" */ int error; /* error return value */ @@ -6282,7 +6285,7 @@ xfs_bmap_check_leaf_extents( if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, XFS_BMAP_BTREE_REF))) goto error_norelse; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); XFS_WANT_CORRUPTED_GOTO( XFS_BMAP_SANITY_CHECK(mp, block, level), error0); @@ -6317,13 +6320,13 @@ xfs_bmap_check_leaf_extents( xfs_extnum_t num_recs; - num_recs = be16_to_cpu(block->bb_numrecs); + num_recs = xfs_btree_get_numrecs(block); /* * Read-ahead the next leaf block, if any. */ - nextbno = be64_to_cpu(block->bb_rightsib); + nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); /* * Check all the extents to make sure they are OK. 
@@ -6367,7 +6370,7 @@ xfs_bmap_check_leaf_extents( if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp, XFS_BMAP_BTREE_REF))) goto error_norelse; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); } if (bp_release) { bp_release = 0; @@ -6397,7 +6400,7 @@ xfs_bmap_count_blocks( int whichfork, /* data or attr fork */ int *count) /* out: count of blocks */ { - xfs_bmbt_block_t *block; /* current btree block */ + struct xfs_btree_block *block; /* current btree block */ xfs_fsblock_t bno; /* block # of "block" */ xfs_ifork_t *ifp; /* fork structure */ int level; /* btree level, for checking */ @@ -6454,24 +6457,24 @@ xfs_bmap_count_tree( __be64 *pp; xfs_fsblock_t bno = blockno; xfs_fsblock_t nextbno; - xfs_bmbt_block_t *block, *nextblock; + struct xfs_btree_block *block, *nextblock; int numrecs; if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF))) return error; *count += 1; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); if (--level) { /* Not at node above leafs, count this level of nodes */ - nextbno = be64_to_cpu(block->bb_rightsib); + nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); while (nextbno != NULLFSBLOCK) { if ((error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp, XFS_BMAP_BTREE_REF))) return error; *count += 1; - nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp); - nextbno = be64_to_cpu(nextblock->bb_rightsib); + nextblock = XFS_BUF_TO_BLOCK(nbp); + nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib); xfs_trans_brelse(tp, nbp); } @@ -6489,7 +6492,7 @@ xfs_bmap_count_tree( } else { /* count all level 1 nodes and their leaves */ for (;;) { - nextbno = be64_to_cpu(block->bb_rightsib); + nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib); numrecs = be16_to_cpu(block->bb_numrecs); xfs_bmap_disk_count_leaves(mp, block, numrecs, count); xfs_trans_brelse(tp, bp); @@ -6500,7 +6503,7 @@ xfs_bmap_count_tree( XFS_BMAP_BTREE_REF))) return error; *count += 1; - block = XFS_BUF_TO_BMBT_BLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); } } return 0; @@ -6531,7 +6534,7 @@ xfs_bmap_count_leaves( STATIC void xfs_bmap_disk_count_leaves( struct xfs_mount *mp, - xfs_bmbt_block_t *block, + struct xfs_btree_block *block, int numrecs, int *count) { diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 11137c042c9..e46e02b8e27 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -68,7 +68,7 @@ xfs_bmdr_to_bmbt( struct xfs_mount *mp, xfs_bmdr_block_t *dblock, int dblocklen, - xfs_bmbt_block_t *rblock, + struct xfs_btree_block *rblock, int rblocklen) { int dmxr; @@ -81,8 +81,8 @@ xfs_bmdr_to_bmbt( rblock->bb_level = dblock->bb_level; ASSERT(be16_to_cpu(rblock->bb_level) > 0); rblock->bb_numrecs = dblock->bb_numrecs; - rblock->bb_leftsib = cpu_to_be64(NULLDFSBNO); - rblock->bb_rightsib = cpu_to_be64(NULLDFSBNO); + rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); + rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); fkp = XFS_BMDR_KEY_ADDR(dblock, 1); tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1); @@ -429,7 +429,7 @@ xfs_bmbt_set_state( void xfs_bmbt_to_bmdr( struct xfs_mount *mp, - xfs_bmbt_block_t *rblock, + struct xfs_btree_block *rblock, int rblocklen, xfs_bmdr_block_t *dblock, int dblocklen) @@ -441,8 +441,8 @@ xfs_bmbt_to_bmdr( __be64 *tpp; ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); - ASSERT(be64_to_cpu(rblock->bb_leftsib) == NULLDFSBNO); - ASSERT(be64_to_cpu(rblock->bb_rightsib) == NULLDFSBNO); + ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO); + 
ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO); ASSERT(be16_to_cpu(rblock->bb_level) > 0); dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; @@ -906,7 +906,7 @@ xfs_bmbt_maxrecs( int blocklen, int leaf) { - blocklen -= sizeof(struct xfs_btree_lblock); + blocklen -= XFS_BMBT_BLOCK_LEN(mp); if (leaf) return blocklen / sizeof(xfs_bmbt_rec_t); diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 7f001072db4..735a42418c9 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -22,7 +22,6 @@ struct xfs_btree_cur; struct xfs_btree_block; -struct xfs_btree_lblock; struct xfs_mount; struct xfs_inode; struct xfs_trans; @@ -147,27 +146,29 @@ typedef struct xfs_bmbt_key { /* btree pointer type */ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; -/* btree block header type */ -typedef struct xfs_btree_lblock xfs_bmbt_block_t; - -#define XFS_BUF_TO_BMBT_BLOCK(bp) ((xfs_bmbt_block_t *)XFS_BUF_PTR(bp)) +/* + * Btree block header size depends on a superblock flag. + * + * (not quite yet, but soon) + */ +#define XFS_BMBT_BLOCK_LEN(mp) XFS_BTREE_LBLOCK_LEN #define XFS_BMBT_REC_ADDR(mp, block, index) \ ((xfs_bmbt_rec_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_lblock) + \ + XFS_BMBT_BLOCK_LEN(mp) + \ ((index) - 1) * sizeof(xfs_bmbt_rec_t))) #define XFS_BMBT_KEY_ADDR(mp, block, index) \ ((xfs_bmbt_key_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_lblock) + \ + XFS_BMBT_BLOCK_LEN(mp) + \ ((index) - 1) * sizeof(xfs_bmbt_key_t))) #define XFS_BMBT_PTR_ADDR(mp, block, index, maxrecs) \ ((xfs_bmbt_ptr_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_lblock) + \ + XFS_BMBT_BLOCK_LEN(mp) + \ (maxrecs) * sizeof(xfs_bmbt_key_t) + \ ((index) - 1) * sizeof(xfs_bmbt_ptr_t))) @@ -198,7 +199,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; XFS_BMBT_PTR_ADDR(mp, bb, i, xfs_bmbt_maxrecs(mp, sz, 0)) #define XFS_BMAP_BROOT_SPACE_CALC(nrecs) \ - (int)(sizeof(xfs_bmbt_block_t) + \ + (int)(XFS_BTREE_LBLOCK_LEN + \ ((nrecs) * (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t)))) #define XFS_BMAP_BROOT_SPACE(bb) \ @@ -223,7 +224,7 @@ typedef struct xfs_btree_lblock xfs_bmbt_block_t; * Prototypes for xfs_bmap.c to call. 
*/ extern void xfs_bmdr_to_bmbt(struct xfs_mount *, xfs_bmdr_block_t *, int, - xfs_bmbt_block_t *, int); + struct xfs_btree_block *, int); extern void xfs_bmbt_get_all(xfs_bmbt_rec_host_t *r, xfs_bmbt_irec_t *s); extern xfs_filblks_t xfs_bmbt_get_blockcount(xfs_bmbt_rec_host_t *r); extern xfs_fsblock_t xfs_bmbt_get_startblock(xfs_bmbt_rec_host_t *r); @@ -246,7 +247,7 @@ extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s); extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o, xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v); -extern void xfs_bmbt_to_bmdr(struct xfs_mount *, xfs_bmbt_block_t *, int, +extern void xfs_bmbt_to_bmdr(struct xfs_mount *, struct xfs_btree_block *, int, xfs_bmdr_block_t *, int); extern int xfs_bmbt_get_maxrecs(struct xfs_btree_cur *, int level); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 72a26bb7643..7ed59267420 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -53,10 +53,10 @@ const __uint32_t xfs_magics[XFS_BTNUM_MAX] = { }; -int /* error (0 or EFSCORRUPTED) */ +STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_lblock( struct xfs_btree_cur *cur, /* btree cursor */ - struct xfs_btree_lblock *block, /* btree long form block pointer */ + struct xfs_btree_block *block, /* btree long form block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp) /* buffer for block, if any */ { @@ -69,12 +69,14 @@ xfs_btree_check_lblock( be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && - block->bb_leftsib && - (be64_to_cpu(block->bb_leftsib) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_leftsib))) && - block->bb_rightsib && - (be64_to_cpu(block->bb_rightsib) == NULLDFSBNO || - XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_rightsib))); + block->bb_u.l.bb_leftsib && + (be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, + be64_to_cpu(block->bb_u.l.bb_leftsib))) && + block->bb_u.l.bb_rightsib && + (be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO || + XFS_FSB_SANITY_CHECK(mp, + be64_to_cpu(block->bb_u.l.bb_rightsib))); if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, XFS_ERRTAG_BTREE_CHECK_LBLOCK, XFS_RANDOM_BTREE_CHECK_LBLOCK))) { @@ -90,7 +92,7 @@ xfs_btree_check_lblock( STATIC int /* error (0 or EFSCORRUPTED) */ xfs_btree_check_sblock( struct xfs_btree_cur *cur, /* btree cursor */ - struct xfs_btree_sblock *block, /* btree short form block pointer */ + struct xfs_btree_block *block, /* btree short form block pointer */ int level, /* level of the btree block */ struct xfs_buf *bp) /* buffer containing block */ { @@ -107,12 +109,12 @@ xfs_btree_check_sblock( be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && - (be32_to_cpu(block->bb_leftsib) == NULLAGBLOCK || - be32_to_cpu(block->bb_leftsib) < agflen) && - block->bb_leftsib && - (be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK || - be32_to_cpu(block->bb_rightsib) < agflen) && - block->bb_rightsib; + (be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK || + be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) && + block->bb_u.s.bb_leftsib && + (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK || + be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) && + block->bb_u.s.bb_rightsib; if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, XFS_ERRTAG_BTREE_CHECK_SBLOCK, XFS_RANDOM_BTREE_CHECK_SBLOCK))) { @@ -135,13 +137,10 @@ xfs_btree_check_block( int level, /* level of the btree 
block */ struct xfs_buf *bp) /* buffer containing block, if any */ { - if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - return xfs_btree_check_lblock(cur, - (struct xfs_btree_lblock *)block, level, bp); - } else { - return xfs_btree_check_sblock(cur, - (struct xfs_btree_sblock *)block, level, bp); - } + if (cur->bc_flags & XFS_BTREE_LONG_PTRS) + return xfs_btree_check_lblock(cur, block, level, bp); + else + return xfs_btree_check_sblock(cur, block, level, bp); } /* @@ -326,8 +325,8 @@ xfs_btree_dup_cursor( static inline size_t xfs_btree_block_len(struct xfs_btree_cur *cur) { return (cur->bc_flags & XFS_BTREE_LONG_PTRS) ? - sizeof(struct xfs_btree_lblock) : - sizeof(struct xfs_btree_sblock); + XFS_BTREE_LBLOCK_LEN : + XFS_BTREE_SBLOCK_LEN; } /* @@ -510,7 +509,7 @@ xfs_btree_islastblock( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to check */ { - xfs_btree_block_t *block; /* generic btree block pointer */ + struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ block = xfs_btree_get_block(cur, level, &bp); @@ -530,7 +529,7 @@ xfs_btree_firstrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ { - xfs_btree_block_t *block; /* generic btree block pointer */ + struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ /* @@ -559,7 +558,7 @@ xfs_btree_lastrec( xfs_btree_cur_t *cur, /* btree cursor */ int level) /* level to change */ { - xfs_btree_block_t *block; /* generic btree block pointer */ + struct xfs_btree_block *block; /* generic btree block pointer */ xfs_buf_t *bp; /* buffer containing block */ /* @@ -814,7 +813,7 @@ xfs_btree_setbuf( int lev, /* level in btree */ xfs_buf_t *bp) /* new buffer to set */ { - xfs_btree_block_t *b; /* btree block */ + struct xfs_btree_block *b; /* btree block */ xfs_buf_t *obp; /* old buffer pointer */ obp = cur->bc_bufs[lev]; @@ -1252,20 +1251,20 @@ xfs_btree_log_block( int first; /* first byte offset logged */ int last; /* last byte offset logged */ static const short soffsets[] = { /* table of offsets (short) */ - offsetof(struct xfs_btree_sblock, bb_magic), - offsetof(struct xfs_btree_sblock, bb_level), - offsetof(struct xfs_btree_sblock, bb_numrecs), - offsetof(struct xfs_btree_sblock, bb_leftsib), - offsetof(struct xfs_btree_sblock, bb_rightsib), - sizeof(struct xfs_btree_sblock) + offsetof(struct xfs_btree_block, bb_magic), + offsetof(struct xfs_btree_block, bb_level), + offsetof(struct xfs_btree_block, bb_numrecs), + offsetof(struct xfs_btree_block, bb_u.s.bb_leftsib), + offsetof(struct xfs_btree_block, bb_u.s.bb_rightsib), + XFS_BTREE_SBLOCK_LEN }; static const short loffsets[] = { /* table of offsets (long) */ - offsetof(struct xfs_btree_lblock, bb_magic), - offsetof(struct xfs_btree_lblock, bb_level), - offsetof(struct xfs_btree_lblock, bb_numrecs), - offsetof(struct xfs_btree_lblock, bb_leftsib), - offsetof(struct xfs_btree_lblock, bb_rightsib), - sizeof(struct xfs_btree_lblock) + offsetof(struct xfs_btree_block, bb_magic), + offsetof(struct xfs_btree_block, bb_level), + offsetof(struct xfs_btree_block, bb_numrecs), + offsetof(struct xfs_btree_block, bb_u.l.bb_leftsib), + offsetof(struct xfs_btree_block, bb_u.l.bb_rightsib), + XFS_BTREE_LBLOCK_LEN }; XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY); @@ -3018,7 +3017,7 @@ xfs_btree_kill_iroot( if (index) { xfs_iroot_realloc(cur->bc_private.b.ip, index, cur->bc_private.b.whichfork); - block = (struct xfs_btree_block *)ifp->if_broot; + block = ifp->if_broot; } 
be16_add_cpu(&block->bb_numrecs, index); diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index d6120a74906..789fffdf8b2 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -39,31 +39,16 @@ extern kmem_zone_t *xfs_btree_cur_zone; #define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi) /* - * Short form header: space allocation btrees. - */ -typedef struct xfs_btree_sblock { - __be32 bb_magic; /* magic number for block type */ - __be16 bb_level; /* 0 is a leaf */ - __be16 bb_numrecs; /* current # of data records */ - __be32 bb_leftsib; /* left sibling block or NULLAGBLOCK */ - __be32 bb_rightsib; /* right sibling block or NULLAGBLOCK */ -} xfs_btree_sblock_t; - -/* - * Long form header: bmap btrees. - */ -typedef struct xfs_btree_lblock { - __be32 bb_magic; /* magic number for block type */ - __be16 bb_level; /* 0 is a leaf */ - __be16 bb_numrecs; /* current # of data records */ - __be64 bb_leftsib; /* left sibling block or NULLDFSBNO */ - __be64 bb_rightsib; /* right sibling block or NULLDFSBNO */ -} xfs_btree_lblock_t; - -/* - * Combined header and structure, used by common code. + * Generic btree header. + * + * This is a comination of the actual format used on disk for short and long + * format btrees. The first three fields are shared by both format, but + * the pointers are different and should be used with care. + * + * To get the size of the actual short or long form headers please use + * the size macros below. Never use sizeof(xfs_btree_block). */ -typedef struct xfs_btree_block { +struct xfs_btree_block { __be32 bb_magic; /* magic number for block type */ __be16 bb_level; /* 0 is a leaf */ __be16 bb_numrecs; /* current # of data records */ @@ -77,7 +62,11 @@ typedef struct xfs_btree_block { __be64 bb_rightsib; } l; /* long form pointers */ } bb_u; /* rest */ -} xfs_btree_block_t; +}; + +#define XFS_BTREE_SBLOCK_LEN 16 /* size of a short form block */ +#define XFS_BTREE_LBLOCK_LEN 24 /* size of a long form block */ + /* * Generic key, ptr and record wrapper structures. @@ -294,20 +283,8 @@ typedef struct xfs_btree_cur /* * Convert from buffer to btree block header. */ -#define XFS_BUF_TO_BLOCK(bp) ((xfs_btree_block_t *)XFS_BUF_PTR(bp)) -#define XFS_BUF_TO_LBLOCK(bp) ((xfs_btree_lblock_t *)XFS_BUF_PTR(bp)) -#define XFS_BUF_TO_SBLOCK(bp) ((xfs_btree_sblock_t *)XFS_BUF_PTR(bp)) - +#define XFS_BUF_TO_BLOCK(bp) ((struct xfs_btree_block *)XFS_BUF_PTR(bp)) -/* - * Check that long form block header is ok. - */ -int /* error (0 or EFSCORRUPTED) */ -xfs_btree_check_lblock( - struct xfs_btree_cur *cur, /* btree cursor */ - struct xfs_btree_lblock *block, /* btree long form block pointer */ - int level, /* level of the btree block */ - struct xfs_buf *bp); /* buffer containing block, if any */ /* * Check that block header is ok. diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index 2a00fcc36d8..d7cf392cc85 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -165,7 +165,7 @@ typedef enum xfs_dinode_fmt */ #define XFS_LITINO(mp) ((mp)->m_litino) #define XFS_BROOT_SIZE_ADJ \ - (sizeof(xfs_bmbt_block_t) - sizeof(xfs_bmdr_block_t)) + (XFS_BTREE_LBLOCK_LEN - sizeof(xfs_bmdr_block_t)) /* * Inode data & attribute fork sizes, per inode. 
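The record, key and pointer address macros earlier and later in this patch (XFS_ALLOC_*_ADDR and the BMBT/INOBT equivalents) all encode the same layout: a fixed-size block header, then the records in a leaf block, or maxrecs keys followed by the pointers in a node block. The following is only a rough user-space model of that arithmetic, with made-up entry types rather than the real on-disk XFS structures; the 16-byte header matches the XFS_BTREE_SBLOCK_LEN value defined above.

#include <stdint.h>
#include <stdio.h>

#define SBLOCK_LEN	16	/* cf. XFS_BTREE_SBLOCK_LEN; example value */

/* placeholder entry types, not the real XFS record/key/pointer layouts */
typedef struct { uint32_t startblock; uint32_t blockcount; } bt_rec_t;
typedef struct { uint32_t startblock; } bt_key_t;
typedef uint32_t bt_ptr_t;

/* leaf blocks: records start right after the header */
static bt_rec_t *rec_addr(void *block, int idx)
{
	return (bt_rec_t *)((char *)block + SBLOCK_LEN +
			    (idx - 1) * sizeof(bt_rec_t));
}

/* node blocks: maxrecs keys come first, then the matching pointers */
static bt_ptr_t *ptr_addr(void *block, int idx, int maxrecs)
{
	return (bt_ptr_t *)((char *)block + SBLOCK_LEN +
			    maxrecs * sizeof(bt_key_t) +
			    (idx - 1) * sizeof(bt_ptr_t));
}

/* how many entries fit in a block, cf. xfs_allocbt_maxrecs() */
static int maxrecs(int blocklen, int leaf)
{
	blocklen -= SBLOCK_LEN;
	if (leaf)
		return (int)(blocklen / sizeof(bt_rec_t));
	return (int)(blocklen / (sizeof(bt_key_t) + sizeof(bt_ptr_t)));
}

int main(void)
{
	char block[4096] = { 0 };
	int mxr = maxrecs(sizeof(block), 0);

	printf("node maxrecs %d, rec 1 at +%td, ptr 1 at +%td\n", mxr,
	       (char *)rec_addr(block, 1) - block,
	       (char *)ptr_addr(block, 1, mxr) - block);
	return 0;
}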
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 8ce72aba027..f1d0585041b 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -126,7 +126,7 @@ xfs_growfs_data_private( xfs_extlen_t agsize; xfs_extlen_t tmpsize; xfs_alloc_rec_t *arec; - xfs_btree_sblock_t *block; + struct xfs_btree_block *block; xfs_buf_t *bp; int bucket; int dpct; @@ -251,13 +251,13 @@ xfs_growfs_data_private( bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), 0); - block = XFS_BUF_TO_SBLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC); block->bb_level = 0; block->bb_numrecs = cpu_to_be16(1); - block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); - block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( @@ -272,13 +272,13 @@ xfs_growfs_data_private( bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), 0); - block = XFS_BUF_TO_SBLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC); block->bb_level = 0; block->bb_numrecs = cpu_to_be16(1); - block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); - block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); arec = XFS_ALLOC_REC_ADDR(mp, block, 1); arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp)); arec->ar_blockcount = cpu_to_be32( @@ -294,13 +294,13 @@ xfs_growfs_data_private( bp = xfs_buf_get(mp->m_ddev_targp, XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)), BTOBB(mp->m_sb.sb_blocksize), 0); - block = XFS_BUF_TO_SBLOCK(bp); + block = XFS_BUF_TO_BLOCK(bp); memset(block, 0, mp->m_sb.sb_blocksize); block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC); block->bb_level = 0; block->bb_numrecs = 0; - block->bb_leftsib = cpu_to_be32(NULLAGBLOCK); - block->bb_rightsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK); + block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK); error = xfs_bwrite(mp, bp); if (error) { goto error0; diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 46aabb3fcbf..99f2408e8d8 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -375,7 +375,7 @@ xfs_inobt_maxrecs( int blocklen, int leaf) { - blocklen -= sizeof(struct xfs_btree_sblock); + blocklen -= XFS_INOBT_BLOCK_LEN(mp); if (leaf) return blocklen / sizeof(xfs_inobt_rec_t); diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h index fa12c85db34..37e5dd01a57 100644 --- a/fs/xfs/xfs_ialloc_btree.h +++ b/fs/xfs/xfs_ialloc_btree.h @@ -24,7 +24,6 @@ struct xfs_buf; struct xfs_btree_cur; -struct xfs_btree_sblock; struct xfs_mount; /* @@ -70,11 +69,6 @@ typedef struct xfs_inobt_key { /* btree pointer type */ typedef __be32 xfs_inobt_ptr_t; -/* btree block header type */ -typedef struct xfs_btree_sblock xfs_inobt_block_t; - -#define XFS_BUF_TO_INOBT_BLOCK(bp) ((xfs_inobt_block_t *)XFS_BUF_PTR(bp)) - /* * Bit manipulations for ir_free. 
*/ @@ -95,6 +89,13 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; #define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1)) #define XFS_PREALLOC_BLOCKS(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1)) +/* + * Btree block header size depends on a superblock flag. + * + * (not quite yet, but soon) + */ +#define XFS_INOBT_BLOCK_LEN(mp) XFS_BTREE_SBLOCK_LEN + /* * Record, key, and pointer address macros for btree blocks. * @@ -103,19 +104,19 @@ typedef struct xfs_btree_sblock xfs_inobt_block_t; #define XFS_INOBT_REC_ADDR(mp, block, index) \ ((xfs_inobt_rec_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_INOBT_BLOCK_LEN(mp) + \ (((index) - 1) * sizeof(xfs_inobt_rec_t)))) #define XFS_INOBT_KEY_ADDR(mp, block, index) \ ((xfs_inobt_key_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_INOBT_BLOCK_LEN(mp) + \ ((index) - 1) * sizeof(xfs_inobt_key_t))) #define XFS_INOBT_PTR_ADDR(mp, block, index, maxrecs) \ ((xfs_inobt_ptr_t *) \ ((char *)(block) + \ - sizeof(struct xfs_btree_sblock) + \ + XFS_INOBT_BLOCK_LEN(mp) + \ (maxrecs) * sizeof(xfs_inobt_key_t) + \ ((index) - 1) * sizeof(xfs_inobt_ptr_t))) diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7b4f13c710d..bc33762abc4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2352,7 +2352,7 @@ xfs_iroot_realloc( struct xfs_mount *mp = ip->i_mount; int cur_max; xfs_ifork_t *ifp; - xfs_bmbt_block_t *new_broot; + struct xfs_btree_block *new_broot; int new_max; size_t new_size; char *np; @@ -2373,8 +2373,7 @@ xfs_iroot_realloc( */ if (ifp->if_broot_bytes == 0) { new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff); - ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size, - KM_SLEEP); + ifp->if_broot = kmem_alloc(new_size, KM_SLEEP); ifp->if_broot_bytes = (int)new_size; return; } @@ -2388,9 +2387,7 @@ xfs_iroot_realloc( cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); new_max = cur_max + rec_diff; new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max); - ifp->if_broot = (xfs_bmbt_block_t *) - kmem_realloc(ifp->if_broot, - new_size, + ifp->if_broot = kmem_realloc(ifp->if_broot, new_size, (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */ KM_SLEEP); op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, @@ -2418,11 +2415,11 @@ xfs_iroot_realloc( else new_size = 0; if (new_size > 0) { - new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP); + new_broot = kmem_alloc(new_size, KM_SLEEP); /* * First copy over the btree block header. 
*/ - memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t)); + memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN); } else { new_broot = NULL; ifp->if_flags &= ~XFS_IFBROOT; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 55d50b888b6..6fd20fc179a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -63,7 +63,7 @@ typedef struct xfs_ext_irec { typedef struct xfs_ifork { int if_bytes; /* bytes in if_u1 */ int if_real_bytes; /* bytes allocated in if_u1 */ - xfs_bmbt_block_t *if_broot; /* file's incore btree root */ + struct xfs_btree_block *if_broot; /* file's incore btree root */ short if_broot_bytes; /* bytes allocated for root */ unsigned char if_flags; /* per-fork flags */ unsigned char if_ext_max; /* max # of extent records */ @@ -213,7 +213,6 @@ struct ktrace; struct xfs_buf; struct xfs_bmap_free; struct xfs_bmbt_irec; -struct xfs_bmbt_block; struct xfs_inode_log_item; struct xfs_mount; struct xfs_trans; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 23c3a782a9e..199c8ea3647 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2452,8 +2452,8 @@ xlog_recover_do_inode_trans( break; case XFS_ILOG_DBROOT: - xfs_bmbt_to_bmdr(mp, (xfs_bmbt_block_t *)src, len, - &(dip->di_u.di_bmbt), + xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, + &dip->di_u.di_bmbt, XFS_DFORK_DSIZE(dip, mp)); break; @@ -2490,8 +2490,8 @@ xlog_recover_do_inode_trans( case XFS_ILOG_ABROOT: dest = XFS_DFORK_APTR(dip); - xfs_bmbt_to_bmdr(mp, (xfs_bmbt_block_t *)src, len, - (xfs_bmdr_block_t*)dest, + xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, + len, (xfs_bmdr_block_t*)dest, XFS_DFORK_ASIZE(dip, mp)); break; -- cgit v1.2.3-70-g09d2 From 4e8938feba770b583fb13d249c17943961731a3e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:14:43 +1100 Subject: [XFS] Move XFS_BMAP_SANITY_CHECK out of line. Move the XFS_BMAP_SANITY_CHECK macro out of line and make it a properly typed function. Also pass the xfs_buf for the btree block instead of just the btree block header, as we will need some additional information for it to implement CRC checking of btree blocks. SGI-PV: 988146 SGI-Modid: xfs-linux-melb:xfs-kern:32301a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_bmap.c | 22 +++++++++++++++++++--- fs/xfs/xfs_bmap_btree.h | 7 ------- 2 files changed, 19 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 7796a0c140e..db289050692 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4524,6 +4524,22 @@ xfs_bmap_one_block( return rval; } +STATIC int +xfs_bmap_sanity_check( + struct xfs_mount *mp, + struct xfs_buf *bp, + int level) +{ + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + + if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC || + be16_to_cpu(block->bb_level) != level || + be16_to_cpu(block->bb_numrecs) == 0 || + be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0]) + return 0; + return 1; +} + /* * Read in the extents to if_extents. 
* All inode fields are set up by caller, we just traverse the btree @@ -4575,7 +4591,7 @@ xfs_bmap_read_extents( return error; block = XFS_BUF_TO_BLOCK(bp); XFS_WANT_CORRUPTED_GOTO( - XFS_BMAP_SANITY_CHECK(mp, block, level), + xfs_bmap_sanity_check(mp, bp, level), error0); if (level == 0) break; @@ -4611,7 +4627,7 @@ xfs_bmap_read_extents( goto error0; } XFS_WANT_CORRUPTED_GOTO( - XFS_BMAP_SANITY_CHECK(mp, block, 0), + xfs_bmap_sanity_check(mp, bp, 0), error0); /* * Read-ahead the next leaf block, if any. @@ -6287,7 +6303,7 @@ xfs_bmap_check_leaf_extents( goto error_norelse; block = XFS_BUF_TO_BLOCK(bp); XFS_WANT_CORRUPTED_GOTO( - XFS_BMAP_SANITY_CHECK(mp, block, level), + xfs_bmap_sanity_check(mp, bp, level), error0); if (level == 0) break; diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h index 735a42418c9..a4555abb662 100644 --- a/fs/xfs/xfs_bmap_btree.h +++ b/fs/xfs/xfs_bmap_btree.h @@ -213,13 +213,6 @@ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; */ #define XFS_BM_MAXLEVELS(mp,w) ((mp)->m_bm_maxlevels[(w)]) -#define XFS_BMAP_SANITY_CHECK(mp,bb,level) \ - (be32_to_cpu((bb)->bb_magic) == XFS_BMAP_MAGIC && \ - be16_to_cpu((bb)->bb_level) == level && \ - be16_to_cpu((bb)->bb_numrecs) > 0 && \ - be16_to_cpu((bb)->bb_numrecs) <= (mp)->m_bmap_dmxr[(level) != 0]) - - /* * Prototypes for xfs_bmap.c to call. */ -- cgit v1.2.3-70-g09d2 From 2af75df7be7ca86965bf73766f827575d1c26fbd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:14:53 +1100 Subject: [XFS] split out two helpers from xfs_syncsub Split out two helpers from xfs_syncsub for the dummy log commit and the superblock writeout. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32303a Signed-off-by: Christoph Hellwig Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_sync.c | 162 +++++++++++++++++++++++++------------------- 1 file changed, 93 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 53d85ecb1d5..59da3327a6b 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -315,6 +315,93 @@ xfs_sync_inodes( return XFS_ERROR(last_error); } +STATIC int +xfs_commit_dummy_trans( + struct xfs_mount *mp, + uint log_flags) +{ + struct xfs_inode *ip = mp->m_rootip; + struct xfs_trans *tp; + int error; + + /* + * Put a dummy transaction in the log to tell recovery + * that all others are OK. + */ + tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); + error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_ihold(tp, ip); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + /* XXX(hch): ignoring the error here.. */ + error = xfs_trans_commit(tp, 0); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + xfs_log_force(mp, 0, log_flags); + return 0; +} + +STATIC int +xfs_sync_fsdata( + struct xfs_mount *mp, + int flags) +{ + struct xfs_buf *bp; + struct xfs_buf_log_item *bip; + int error = 0; + + /* + * If this is xfssyncd() then only sync the superblock if we can + * lock it without sleeping and it is not pinned. 
+ */ + if (flags & SYNC_BDFLUSH) { + ASSERT(!(flags & SYNC_WAIT)); + + bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); + if (!bp) + goto out; + + bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *); + if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp)) + goto out_brelse; + } else { + bp = xfs_getsb(mp, 0); + + /* + * If the buffer is pinned then push on the log so we won't + * get stuck waiting in the write for someone, maybe + * ourselves, to flush the log. + * + * Even though we just pushed the log above, we did not have + * the superblock buffer locked at that point so it can + * become pinned in between there and here. + */ + if (XFS_BUF_ISPINNED(bp)) + xfs_log_force(mp, 0, XFS_LOG_FORCE); + } + + + if (flags & SYNC_WAIT) + XFS_BUF_UNASYNC(bp); + else + XFS_BUF_ASYNC(bp); + + return xfs_bwrite(mp, bp); + + out_brelse: + xfs_buf_relse(bp); + out: + return error; +} + /* * xfs sync routine for internal use * @@ -331,8 +418,6 @@ xfs_syncsub( int error = 0; int last_error = 0; uint log_flags = XFS_LOG_FORCE; - xfs_buf_t *bp; - xfs_buf_log_item_t *bip; /* * Sync out the log. This ensures that the log is periodically @@ -355,83 +440,22 @@ xfs_syncsub( * log activity, so if this isn't vfs_sync() then flush * the log again. */ - if (flags & SYNC_DELWRI) { - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - } + if (flags & SYNC_DELWRI) + xfs_log_force(mp, 0, log_flags); if (flags & SYNC_FSDATA) { - /* - * If this is vfs_sync() then only sync the superblock - * if we can lock it without sleeping and it is not pinned. - */ - if (flags & SYNC_BDFLUSH) { - bp = xfs_getsb(mp, XFS_BUF_TRYLOCK); - if (bp != NULL) { - bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); - if ((bip != NULL) && - xfs_buf_item_dirty(bip)) { - if (!(XFS_BUF_ISPINNED(bp))) { - XFS_BUF_ASYNC(bp); - error = xfs_bwrite(mp, bp); - } else { - xfs_buf_relse(bp); - } - } else { - xfs_buf_relse(bp); - } - } - } else { - bp = xfs_getsb(mp, 0); - /* - * If the buffer is pinned then push on the log so - * we won't get stuck waiting in the write for - * someone, maybe ourselves, to flush the log. - * Even though we just pushed the log above, we - * did not have the superblock buffer locked at - * that point so it can become pinned in between - * there and here. - */ - if (XFS_BUF_ISPINNED(bp)) - xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); - if (flags & SYNC_WAIT) - XFS_BUF_UNASYNC(bp); - else - XFS_BUF_ASYNC(bp); - error = xfs_bwrite(mp, bp); - } - if (error) { + error = xfs_sync_fsdata(mp, flags); + if (error) last_error = error; - } } /* * Now check to see if the log needs a "dummy" transaction. */ if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { - xfs_trans_t *tp; - xfs_inode_t *ip; - - /* - * Put a dummy transaction in the log to tell - * recovery that all others are OK. 
- */ - tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1); - if ((error = xfs_trans_reserve(tp, 0, - XFS_ICHANGE_LOG_RES(mp), - 0, 0, 0))) { - xfs_trans_cancel(tp, 0); + error = xfs_commit_dummy_trans(mp, log_flags); + if (error) return error; - } - - ip = mp->m_rootip; - xfs_ilock(ip, XFS_ILOCK_EXCL); - - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_ihold(tp, ip); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_trans_commit(tp, 0); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); } /* -- cgit v1.2.3-70-g09d2 From bc60a99323b3ec628273b5fa998285c87d464ca6 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:03 +1100 Subject: [XFS] Use struct inodes instead of vnodes to kill vn_grab With the sync code relocated to the linux-2.6 directory we can use struct inodes directly. If we do the same thing for the quota release code, we can remove vn_grab altogether. While here, convert the VN_BAD() checks to is_bad_inode() so we can remove vnodes entirely from this code. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32304a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 53 +++++++++++++++++++++--------------------- fs/xfs/linux-2.6/xfs_vnode.c | 6 ++--- fs/xfs/linux-2.6/xfs_vnode.h | 5 ---- fs/xfs/quota/xfs_qm_syscalls.c | 16 ++++++------- 4 files changed, 37 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 59da3327a6b..461c1dc35d3 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -131,10 +131,7 @@ xfs_sync_inodes_ag( int flags, int *bypassed) { - xfs_inode_t *ip = NULL; - struct inode *vp = NULL; xfs_perag_t *pag = &mp->m_perag[ag]; - boolean_t vnode_refed = B_FALSE; int nr_found; int first_index = 0; int error = 0; @@ -156,6 +153,10 @@ xfs_sync_inodes_ag( } do { + struct inode *inode; + boolean_t inode_refed; + xfs_inode_t *ip = NULL; + /* * use a gang lookup to find the next inode in the tree * as the tree is sparse and a gang lookup walks to find @@ -177,14 +178,14 @@ xfs_sync_inodes_ag( * skip inodes in reclaim. Let xfs_syncsub do that for * us so we don't need to worry. */ - vp = VFS_I(ip); - if (!vp) { + if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) { read_unlock(&pag->pag_ici_lock); continue; } /* bad inodes are dealt with elsewhere */ - if (VN_BAD(vp)) { + inode = VFS_I(ip); + if (is_bad_inode(inode)) { read_unlock(&pag->pag_ici_lock); continue; } @@ -196,30 +197,29 @@ xfs_sync_inodes_ag( } /* - * The inode lock here actually coordinates with the almost - * spurious inode lock in xfs_ireclaim() to prevent the vnode - * we handle here without a reference from being freed while we - * reference it. If we lock the inode while it's on the mount - * list here, then the spurious inode lock in xfs_ireclaim() - * after the inode is pulled from the mount list will sleep - * until we release it here. This keeps the vnode from being - * freed while we reference it. + * If we can't get a reference on the VFS_I, the inode must be + * in reclaim. If we can get the inode lock without blocking, + * it is safe to flush the inode because we hold the tree lock + * and xfs_iextract will block right now. Hence if we lock the + * inode while holding the tree lock, xfs_ireclaim() is + * guaranteed to block on the inode lock we now hold and hence + * it is safe to reference the inode until we drop the inode + * locks completely. 
*/ - if (xfs_ilock_nowait(ip, lock_flags) == 0) { - vp = vn_grab(vp); + inode_refed = B_FALSE; + if (igrab(inode)) { read_unlock(&pag->pag_ici_lock); - if (!vp) - continue; xfs_ilock(ip, lock_flags); - - ASSERT(vp == VFS_I(ip)); - ASSERT(ip->i_mount == mp); - - vnode_refed = B_TRUE; + inode_refed = B_TRUE; } else { - /* safe to unlock here as we have a reference */ + if (!xfs_ilock_nowait(ip, lock_flags)) { + /* leave it to reclaim */ + read_unlock(&pag->pag_ici_lock); + continue; + } read_unlock(&pag->pag_ici_lock); } + /* * If we have to flush data or wait for I/O completion * we need to drop the ilock that we currently hold. @@ -240,7 +240,7 @@ xfs_sync_inodes_ag( xfs_ilock(ip, XFS_ILOCK_SHARED); } - if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { + if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) { xfs_iunlock(ip, XFS_ILOCK_SHARED); error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); if (flags & SYNC_IOWAIT) @@ -268,9 +268,8 @@ xfs_sync_inodes_ag( if (lock_flags) xfs_iunlock(ip, lock_flags); - if (vnode_refed) { + if (inode_refed) { IRELE(ip); - vnode_refed = B_FALSE; } if (error) diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index b52528bbbff..dceb6dbaa2d 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -90,10 +90,10 @@ vn_ioerror( */ static inline int xfs_icount(struct xfs_inode *ip) { - struct inode *vp = VFS_I(ip); + struct inode *inode = VFS_I(ip); - if (vp) - return vn_count(vp); + if (!inode) + return atomic_read(&inode->i_count); return -1; } diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 683ce16210f..bf89e41c3b8 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -80,11 +80,6 @@ do { \ iput(VFS_I(ip)); \ } while (0) -static inline struct inode *vn_grab(struct inode *vp) -{ - return igrab(vp); -} - /* * Dealing with bad inodes */ diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 26152b9ccc6..4254b074223 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -1031,13 +1031,13 @@ xfs_qm_dqrele_inodes_ag( uint flags) { xfs_inode_t *ip = NULL; - struct inode *vp = NULL; xfs_perag_t *pag = &mp->m_perag[ag]; int first_index = 0; int nr_found; do { - boolean_t vnode_refd = B_FALSE; + boolean_t inode_refed; + struct inode *inode; /* * use a gang lookup to find the next inode in the tree @@ -1057,19 +1057,19 @@ xfs_qm_dqrele_inodes_ag( first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); /* skip quota inodes and those in reclaim */ - vp = VFS_I(ip); - if (!vp || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { + inode = VFS_I(ip); + if (!inode || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); read_unlock(&pag->pag_ici_lock); continue; } if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { - vp = vn_grab(vp); + inode = igrab(inode); read_unlock(&pag->pag_ici_lock); - if (!vp) + if (!inode) continue; - vnode_refd = B_TRUE; + inode_refed = B_TRUE; xfs_ilock(ip, XFS_ILOCK_EXCL); } else { read_unlock(&pag->pag_ici_lock); @@ -1084,7 +1084,7 @@ xfs_qm_dqrele_inodes_ag( ip->i_gdquot = NULL; } xfs_iunlock(ip, XFS_ILOCK_EXCL); - if (vnode_refd) + if (inode_refed) IRELE(ip); } while (nr_found); } -- cgit v1.2.3-70-g09d2 From 2030b5aba8a4bcaca5aca85968514fa58207d3bd Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:12 +1100 Subject: [XFS] use xfs_sync_inodes rather than xfs_syncsub Kill the unused arg in xfs_syncsub() and xfs_sync_inodes(). 
For callers of xfs_syncsub() that only want to flush inodes, replace xfs_syncsub() with direct calls to xfs_sync_inodes() as that is all that is being done with the specific flags being passed in. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32305a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 21 +++++++-------------- fs/xfs/linux-2.6/xfs_sync.h | 2 +- fs/xfs/quota/xfs_qm_syscalls.c | 2 +- fs/xfs/xfs_mount.h | 2 -- fs/xfs/xfs_vfsops.c | 14 +++++++------- 5 files changed, 16 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 461c1dc35d3..7e9fb5251b2 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -117,7 +117,7 @@ xfs_sync( if (flags & SYNC_IOWAIT) xfs_filestream_flush(mp); - return xfs_syncsub(mp, flags, NULL); + return xfs_syncsub(mp, flags); } /* @@ -128,8 +128,7 @@ STATIC int xfs_sync_inodes_ag( xfs_mount_t *mp, int ag, - int flags, - int *bypassed) + int flags) { xfs_perag_t *pag = &mp->m_perag[ag]; int nr_found; @@ -260,8 +259,6 @@ xfs_sync_inodes_ag( error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); else xfs_ifunlock(ip); - } else if (bypassed) { - (*bypassed)++; } } @@ -288,15 +285,12 @@ xfs_sync_inodes_ag( int xfs_sync_inodes( xfs_mount_t *mp, - int flags, - int *bypassed) + int flags) { int error; int last_error; int i; - if (bypassed) - *bypassed = 0; if (mp->m_flags & XFS_MOUNT_RDONLY) return 0; error = 0; @@ -305,7 +299,7 @@ xfs_sync_inodes( for (i = 0; i < mp->m_sb.sb_agcount; i++) { if (!mp->m_perag[i].pag_ici_init) continue; - error = xfs_sync_inodes_ag(mp, i, flags, bypassed); + error = xfs_sync_inodes_ag(mp, i, flags); if (error) last_error = error; if (error == EFSCORRUPTED) @@ -408,11 +402,10 @@ xfs_sync_fsdata( * interface as explained above under xfs_sync. 
* */ -int +STATIC int xfs_syncsub( xfs_mount_t *mp, - int flags, - int *bypassed) + int flags) { int error = 0; int last_error = 0; @@ -431,7 +424,7 @@ xfs_syncsub( if (flags & SYNC_BDFLUSH) xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); else - error = xfs_sync_inodes(mp, flags, bypassed); + error = xfs_sync_inodes(mp, flags); } /* diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 3746d153ec8..29548619940 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -55,7 +55,7 @@ int xfs_syncd_init(struct xfs_mount *mp); void xfs_syncd_stop(struct xfs_mount *mp); int xfs_sync(struct xfs_mount *mp, int flags); -int xfs_syncsub(struct xfs_mount *mp, int flags, int *bypassed); +int xfs_sync_inodes(struct xfs_mount *mp, int flags); void xfs_flush_inode(struct xfs_inode *ip); void xfs_flush_device(struct xfs_inode *ip); diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 4254b074223..9ff28e6c5b8 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -127,7 +127,7 @@ xfs_qm_quotactl( break; case Q_XQUOTASYNC: - return (xfs_sync_inodes(mp, SYNC_DELWRI, NULL)); + return xfs_sync_inodes(mp, SYNC_DELWRI); default: break; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 0ba05269112..4e62802b6ab 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -523,8 +523,6 @@ extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); extern int xfs_readsb(xfs_mount_t *, int); extern void xfs_freesb(xfs_mount_t *); extern int xfs_fs_writable(xfs_mount_t *); -extern int xfs_syncsub(xfs_mount_t *, int, int *); -extern int xfs_sync_inodes(xfs_mount_t *, int, int *); extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); extern int xfs_dmops_get(struct xfs_mount *, struct xfs_mount_args *); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 0c5ee5ec7ee..d5396d6f517 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -68,15 +68,15 @@ xfs_quiesce_fs( xfs_flush_buftarg(mp->m_ddev_targp, 0); xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); - /* This loop must run at least twice. - * The first instance of the loop will flush - * most meta data but that will generate more - * meta data (typically directory updates). - * Which then must be flushed and logged before - * we can write the unmount record. + /* + * This loop must run at least twice. The first instance of the loop + * will flush most meta data but that will generate more meta data + * (typically directory updates). Which then must be flushed and + * logged before we can write the unmount record. */ do { - xfs_syncsub(mp, SYNC_INODE_QUIESCE, NULL); + xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_sync_inodes(mp, SYNC_INODE_QUIESCE); pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); if (!pincount) { delay(50); -- cgit v1.2.3-70-g09d2 From dfd837a9eb79de4e50323a6f4e1ad8138d806cb7 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:21 +1100 Subject: [XFS] kill xfs_syncsub Now that the only caller is xfs_sync(), merge the two together as it makes no sense to keep them separate. 
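A rough user-space model of the merged entry point may help picture the result: one routine dispatches on the sync flags directly and remembers the last error while attempting every requested piece of work, instead of tail-calling a separate xfs_syncsub(). All names and flag values below are illustrative stand-ins, not the kernel API.

#include <stdio.h>

#define SYNC_DELWRI	0x01	/* made-up values for this model */
#define SYNC_FSDATA	0x02

static int sync_inodes(int flags) { printf("inodes 0x%x\n", flags); return 0; }
static int sync_fsdata(int flags) { printf("fsdata 0x%x\n", flags); return 0; }

/* one merged entry point: dispatch on flags, keep going on errors
 * and report the last one seen */
static int sync_fs(int flags)
{
	int error, last_error = 0;

	if (flags & SYNC_DELWRI) {
		error = sync_inodes(flags);
		if (error)
			last_error = error;
	}

	if (flags & SYNC_FSDATA) {
		error = sync_fsdata(flags);
		if (error)
			last_error = error;
	}

	return last_error;
}

int main(void)
{
	return sync_fs(SYNC_DELWRI | SYNC_FSDATA);
}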
SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32306a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 141 +++++++++++++++++++------------------------- 1 file changed, 62 insertions(+), 79 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 7e9fb5251b2..d4b7b21a6e5 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -47,79 +47,6 @@ #include #include -/* - * xfs_sync flushes any pending I/O to file system vfsp. - * - * This routine is called by vfs_sync() to make sure that things make it - * out to disk eventually, on sync() system calls to flush out everything, - * and when the file system is unmounted. For the vfs_sync() case, all - * we really need to do is sync out the log to make all of our meta-data - * updates permanent (except for timestamps). For calls from pflushd(), - * dirty pages are kept moving by calling pdflush() on the inodes - * containing them. We also flush the inodes that we can lock without - * sleeping and the superblock if we can lock it without sleeping from - * vfs_sync() so that items at the tail of the log are always moving out. - * - * Flags: - * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want - * to sleep if we can help it. All we really need - * to do is ensure that the log is synced at least - * periodically. We also push the inodes and - * superblock if we can lock them without sleeping - * and they are not pinned. - * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not - * set, then we really want to lock each inode and flush - * it. - * SYNC_WAIT - All the flushes that take place in this call should - * be synchronous. - * SYNC_DELWRI - This tells us to push dirty pages associated with - * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to - * determine if they should be flushed sync, async, or - * delwri. - * SYNC_CLOSE - This flag is passed when the system is being - * unmounted. We should sync and invalidate everything. - * SYNC_FSDATA - This indicates that the caller would like to make - * sure the superblock is safe on disk. We can ensure - * this by simply making sure the log gets flushed - * if SYNC_BDFLUSH is set, and by actually writing it - * out otherwise. - * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete - * before we return (including direct I/O). Forms the drain - * side of the write barrier needed to safely quiesce the - * filesystem. - * - */ -int -xfs_sync( - xfs_mount_t *mp, - int flags) -{ - int error; - - /* - * Get the Quota Manager to flush the dquots. - * - * If XFS quota support is not enabled or this filesystem - * instance does not use quotas XFS_QM_DQSYNC will always - * return zero. - */ - error = XFS_QM_DQSYNC(mp, flags); - if (error) { - /* - * If we got an IO error, we will be shutting down. - * So, there's nothing more for us to do here. - */ - ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(error); - } - - if (flags & SYNC_IOWAIT) - xfs_filestream_flush(mp); - - return xfs_syncsub(mp, flags); -} - /* * Sync all the inodes in the given AG according to the * direction given by the flags. @@ -396,21 +323,77 @@ xfs_sync_fsdata( } /* - * xfs sync routine for internal use + * xfs_sync flushes any pending I/O to file system vfsp. * - * This routine supports all of the flags defined for the generic vfs_sync - * interface as explained above under xfs_sync. 
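The shape of such a de-multiplexed worker, in miniature: call each maintenance step directly and deliberately ignore per-step errors, as the "errors ignored here" note in the diff below does. This is a compilable sketch with placeholder names only, not the kernel code.

#include <stdbool.h>

struct mount {
	bool		read_only;
	unsigned	sync_seq;
};

static void force_log(struct mount *mp)        { (void)mp; }
static void reclaim_inodes(struct mount *mp)   { (void)mp; }
static int  sync_quota(struct mount *mp)       { (void)mp; return 0; }
static int  write_superblock(struct mount *mp) { (void)mp; return 0; }

static void sync_worker(struct mount *mp)
{
	if (!mp->read_only) {
		force_log(mp);
		reclaim_inodes(mp);
		/* errors from the individual steps are deliberately ignored */
		(void)sync_quota(mp);
		(void)write_superblock(mp);
	}
	mp->sync_seq++;
}

int main(void)
{
	struct mount m = { .read_only = false, .sync_seq = 0 };

	sync_worker(&m);
	return 0;
}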
+ * This routine is called by vfs_sync() to make sure that things make it + * out to disk eventually, on sync() system calls to flush out everything, + * and when the file system is unmounted. For the vfs_sync() case, all + * we really need to do is sync out the log to make all of our meta-data + * updates permanent (except for timestamps). For calls from pflushd(), + * dirty pages are kept moving by calling pdflush() on the inodes + * containing them. We also flush the inodes that we can lock without + * sleeping and the superblock if we can lock it without sleeping from + * vfs_sync() so that items at the tail of the log are always moving out. + * + * Flags: + * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want + * to sleep if we can help it. All we really need + * to do is ensure that the log is synced at least + * periodically. We also push the inodes and + * superblock if we can lock them without sleeping + * and they are not pinned. + * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not + * set, then we really want to lock each inode and flush + * it. + * SYNC_WAIT - All the flushes that take place in this call should + * be synchronous. + * SYNC_DELWRI - This tells us to push dirty pages associated with + * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to + * determine if they should be flushed sync, async, or + * delwri. + * SYNC_CLOSE - This flag is passed when the system is being + * unmounted. We should sync and invalidate everything. + * SYNC_FSDATA - This indicates that the caller would like to make + * sure the superblock is safe on disk. We can ensure + * this by simply making sure the log gets flushed + * if SYNC_BDFLUSH is set, and by actually writing it + * out otherwise. + * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete + * before we return (including direct I/O). Forms the drain + * side of the write barrier needed to safely quiesce the + * filesystem. * */ -STATIC int -xfs_syncsub( +int +xfs_sync( xfs_mount_t *mp, int flags) { - int error = 0; + int error; int last_error = 0; uint log_flags = XFS_LOG_FORCE; + /* + * Get the Quota Manager to flush the dquots. + * + * If XFS quota support is not enabled or this filesystem + * instance does not use quotas XFS_QM_DQSYNC will always + * return zero. + */ + error = XFS_QM_DQSYNC(mp, flags); + if (error) { + /* + * If we got an IO error, we will be shutting down. + * So, there's nothing more for us to do here. + */ + ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(error); + } + + if (flags & SYNC_IOWAIT) + xfs_filestream_flush(mp); + /* * Sync out the log. This ensures that the log is periodically * flushed even if there is not enough activity to fill it up. -- cgit v1.2.3-70-g09d2 From aacaa880bfac8fecd44b279a49688643890358f5 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:29 +1100 Subject: [XFS] xfssyncd: don't call xfs_sync Start de-multiplexing xfs_sync() by making xfs_sync_worker() call the specific sync functions it needs. This is only a small, unique subset of the entire xfs_sync() code so is easier to follow. 
SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32307a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index d4b7b21a6e5..3c31137cdc7 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -526,6 +526,11 @@ xfs_flush_device( xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC); } +/* + * Every sync period we need to unpin all items, reclaim inodes, sync + * quota and write out the superblock. We might need to cover the log + * to indicate it is idle. + */ STATIC void xfs_sync_worker( struct xfs_mount *mp, @@ -533,8 +538,15 @@ xfs_sync_worker( { int error; - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) - error = xfs_sync(mp, SYNC_FSDATA | SYNC_BDFLUSH | SYNC_ATTR); + if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); + xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); + /* dgc: errors ignored here */ + error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); + error = xfs_sync_fsdata(mp, SYNC_BDFLUSH); + if (xfs_log_need_covered(mp)) + error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE); + } mp->m_sync_seq++; wake_up(&mp->m_wait_single_sync_task); } -- cgit v1.2.3-70-g09d2 From be97d9d5577f6c8a36588e2f262c772c5422b128 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:38 +1100 Subject: [XFS] make SYNC_ATTR no longer use xfs_sync Continue to de-multiplex xfs_sync be replacing all SYNC_ATTR callers with direct calls xfs_sync_inodes(). Add an assert into xfs_sync() to ensure we caught all the SYNC_ATTR callers. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32308a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 3 ++- fs/xfs/linux-2.6/xfs_sync.c | 23 +++++++++++------------ fs/xfs/linux-2.6/xfs_sync.h | 1 - fs/xfs/xfs_vfsops.c | 2 +- 4 files changed, 14 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 767f38e0e0b..38d380a7ca2 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -998,7 +998,8 @@ xfs_fs_put_super( int error; xfs_syncd_stop(mp); - xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI); + xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); + xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI); #ifdef HAVE_DMAPI if (mp->m_flags & XFS_MOUNT_DMAPI) { diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 3c31137cdc7..002ccb6f0cb 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -342,9 +342,8 @@ xfs_sync_fsdata( * periodically. We also push the inodes and * superblock if we can lock them without sleeping * and they are not pinned. - * SYNC_ATTR - We need to flush the inodes. If SYNC_BDFLUSH is not - * set, then we really want to lock each inode and flush - * it. + * SYNC_ATTR - We need to flush the inodes. Now handled by direct calls + * to xfs_sync_inodes(). * SYNC_WAIT - All the flushes that take place in this call should * be synchronous. * SYNC_DELWRI - This tells us to push dirty pages associated with @@ -373,6 +372,8 @@ xfs_sync( int last_error = 0; uint log_flags = XFS_LOG_FORCE; + ASSERT(!(flags & SYNC_ATTR)); + /* * Get the Quota Manager to flush the dquots. 
* @@ -403,20 +404,18 @@ xfs_sync( xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - if (flags & (SYNC_ATTR|SYNC_DELWRI)) { + if (flags & SYNC_DELWRI) { if (flags & SYNC_BDFLUSH) xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); else error = xfs_sync_inodes(mp, flags); - } - - /* - * Flushing out dirty data above probably generated more - * log activity, so if this isn't vfs_sync() then flush - * the log again. - */ - if (flags & SYNC_DELWRI) + /* + * Flushing out dirty data above probably generated more + * log activity, so if this isn't vfs_sync() then flush + * the log again. + */ xfs_log_force(mp, 0, log_flags); + } if (flags & SYNC_FSDATA) { error = xfs_sync_fsdata(mp, flags); diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 29548619940..5316915c083 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -49,7 +49,6 @@ typedef struct bhv_vfs_sync_work { * to disk (this is the main difference between a sync and a quiesce). */ #define SYNC_DATA_QUIESCE (SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT) -#define SYNC_INODE_QUIESCE (SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT) int xfs_syncd_init(struct xfs_mount *mp); void xfs_syncd_stop(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index d5396d6f517..c82b9555959 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -76,7 +76,7 @@ xfs_quiesce_fs( */ do { xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); - xfs_sync_inodes(mp, SYNC_INODE_QUIESCE); + xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); if (!pincount) { delay(50); -- cgit v1.2.3-70-g09d2 From e9f1c6ee12955fd8657f6f0f9a3d09112b1f1fdd Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:15:50 +1100 Subject: [XFS] make SYNC_DELWRI no longer use xfs_sync Continue to de-multiplex xfs_sync be replacing all SYNC_DELWRI callers with direct calls functions that do the work. Isolate the data quiesce case to a function in xfs_sync.c. Isolate the FSDATA case with explicit calls to xfs_sync_fsdata(). Version 2: o Push delwri related log forces into xfs_sync_inodes(). SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32309a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 25 ++++++------------------- fs/xfs/linux-2.6/xfs_sync.c | 42 +++++++++++++++++++++++++++++++++++++++++- fs/xfs/linux-2.6/xfs_sync.h | 3 +++ fs/xfs/xfs_vfsops.c | 1 - 4 files changed, 50 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 38d380a7ca2..376f32d9247 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -998,7 +998,6 @@ xfs_fs_put_super( int error; xfs_syncd_stop(mp); - xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI); #ifdef HAVE_DMAPI @@ -1057,7 +1056,7 @@ xfs_fs_write_super( struct super_block *sb) { if (!(sb->s_flags & MS_RDONLY)) - xfs_sync(XFS_M(sb), SYNC_FSDATA); + xfs_sync_fsdata(XFS_M(sb), 0); sb->s_dirt = 0; } @@ -1068,7 +1067,6 @@ xfs_fs_sync_super( { struct xfs_mount *mp = XFS_M(sb); int error; - int flags; /* * Treat a sync operation like a freeze. This is to work @@ -1082,20 +1080,10 @@ xfs_fs_sync_super( * dirty the Linux inode until after the transaction I/O * completes. 
*/ - if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) { - /* - * First stage of freeze - no more writers will make progress - * now we are here, so we flush delwri and delalloc buffers - * here, then wait for all I/O to complete. Data is frozen at - * that point. Metadata is not frozen, transactions can still - * occur here so don't bother flushing the buftarg (i.e - * SYNC_QUIESCE) because it'll just get dirty again. - */ - flags = SYNC_DATA_QUIESCE; - } else - flags = SYNC_FSDATA; - - error = xfs_sync(mp, flags); + if (wait || unlikely(sb->s_frozen == SB_FREEZE_WRITE)) + error = xfs_quiesce_data(mp); + else + error = xfs_sync_fsdata(mp, 0); sb->s_dirt = 0; if (unlikely(laptop_mode)) { @@ -1233,8 +1221,7 @@ xfs_fs_remount( /* rw -> ro */ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { - xfs_filestream_flush(mp); - xfs_sync(mp, SYNC_DATA_QUIESCE); + xfs_quiesce_data(mp); xfs_attr_quiesce(mp); mp->m_flags |= XFS_MOUNT_RDONLY; } diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 002ccb6f0cb..838070ce724 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -217,12 +217,16 @@ xfs_sync_inodes( int error; int last_error; int i; + int lflags = XFS_LOG_FORCE; if (mp->m_flags & XFS_MOUNT_RDONLY) return 0; error = 0; last_error = 0; + if (flags & SYNC_WAIT) + lflags |= XFS_LOG_SYNC; + for (i = 0; i < mp->m_sb.sb_agcount; i++) { if (!mp->m_perag[i].pag_ici_init) continue; @@ -232,6 +236,9 @@ xfs_sync_inodes( if (error == EFSCORRUPTED) break; } + if (flags & SYNC_DELWRI) + xfs_log_force(mp, 0, lflags); + return XFS_ERROR(last_error); } @@ -269,7 +276,7 @@ xfs_commit_dummy_trans( return 0; } -STATIC int +int xfs_sync_fsdata( struct xfs_mount *mp, int flags) @@ -322,6 +329,39 @@ xfs_sync_fsdata( return error; } +/* + * First stage of freeze - no more writers will make progress now we are here, + * so we flush delwri and delalloc buffers here, then wait for all I/O to + * complete. Data is frozen at that point. Metadata is not frozen, + * transactions can still occur here so don't bother flushing the buftarg (i.e + * SYNC_QUIESCE) because it'll just get dirty again. + */ +int +xfs_quiesce_data( + struct xfs_mount *mp) +{ + int error; + + /* push non-blocking */ + xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH); + XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); + xfs_filestream_flush(mp); + + /* push and block */ + xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT); + XFS_QM_DQSYNC(mp, SYNC_WAIT); + + /* write superblock and hoover shutdown errors */ + error = xfs_sync_fsdata(mp, 0); + + /* flush devices */ + XFS_bflush(mp->m_ddev_targp); + if (mp->m_rtdev_targp) + XFS_bflush(mp->m_rtdev_targp); + + return error; +} + /* * xfs_sync flushes any pending I/O to file system vfsp. * diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 5316915c083..fcd4040c9ad 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -55,6 +55,9 @@ void xfs_syncd_stop(struct xfs_mount *mp); int xfs_sync(struct xfs_mount *mp, int flags); int xfs_sync_inodes(struct xfs_mount *mp, int flags); +int xfs_sync_fsdata(struct xfs_mount *mp, int flags); + +int xfs_quiesce_data(struct xfs_mount *mp); void xfs_flush_inode(struct xfs_inode *ip); void xfs_flush_device(struct xfs_inode *ip); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index c82b9555959..b55a9bb3a6e 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -75,7 +75,6 @@ xfs_quiesce_fs( * logged before we can write the unmount record. 
*/ do { - xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC); xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); if (!pincount) { -- cgit v1.2.3-70-g09d2 From cb56a4b995d44b7990ca3acd18db571eedd0649f Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:16:00 +1100 Subject: [XFS] Kill SYNC_CLOSE SYNC_CLOSE is only ever used and checked in conjunction with SYNC_WAIT, and this only done in one spot. The only thing this does is make XFS_bflush() calls to the data buftargs. This will happen very shortly afterwards the xfs_sync() call anyway in the unmount path via the xfs_close_devices(), so this code is redundant and can be removed. That only user of SYNC_CLOSE is now gone, so kill the flag completely. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32310a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 10 ---------- fs/xfs/linux-2.6/xfs_sync.c | 31 ++----------------------------- fs/xfs/linux-2.6/xfs_sync.h | 1 - 3 files changed, 2 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 376f32d9247..60ecf47b9f0 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1026,16 +1026,6 @@ xfs_fs_put_super( error = xfs_unmount_flush(mp, 0); WARN_ON(error); - /* - * If we're forcing a shutdown, typically because of a media error, - * we want to make sure we invalidate dirty pages that belong to - * referenced vnodes as well. - */ - if (XFS_FORCED_SHUTDOWN(mp)) { - error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE); - ASSERT(error != EFSCORRUPTED); - } - if (mp->m_flags & XFS_MOUNT_DMAPI) { XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0, unmount_event_flags); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 838070ce724..91a54a79a09 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -70,7 +70,7 @@ xfs_sync_inodes_ag( if (flags & SYNC_WAIT) fflag = 0; /* synchronous overrides all */ - if (flags & (SYNC_DELWRI | SYNC_CLOSE)) { + if (flags & SYNC_DELWRI) { /* * We need the I/O lock if we're going to call any of * the flush/inval routines. @@ -117,7 +117,7 @@ xfs_sync_inodes_ag( } /* nothing to sync during shutdown */ - if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) { + if (XFS_FORCED_SHUTDOWN(mp)) { read_unlock(&pag->pag_ici_lock); return 0; } @@ -152,20 +152,6 @@ xfs_sync_inodes_ag( * If we need to drop the lock, insert a marker if we * have not already done so. */ - if (flags & SYNC_CLOSE) { - xfs_iunlock(ip, XFS_ILOCK_SHARED); - if (XFS_FORCED_SHUTDOWN(mp)) - xfs_tosspages(ip, 0, -1, FI_REMAPF); - else - error = xfs_flushinval_pages(ip, 0, -1, - FI_REMAPF); - /* wait for I/O on freeze */ - if (flags & SYNC_IOWAIT) - vn_iowait(ip); - - xfs_ilock(ip, XFS_ILOCK_SHARED); - } - if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) { xfs_iunlock(ip, XFS_ILOCK_SHARED); error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); @@ -390,8 +376,6 @@ xfs_quiesce_data( * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to * determine if they should be flushed sync, async, or * delwri. - * SYNC_CLOSE - This flag is passed when the system is being - * unmounted. We should sync and invalidate everything. * SYNC_FSDATA - This indicates that the caller would like to make * sure the superblock is safe on disk. 
We can ensure * this by simply making sure the log gets flushed @@ -472,17 +456,6 @@ xfs_sync( return error; } - /* - * When shutting down, we need to insure that the AIL is pushed - * to disk or the filesystem can appear corrupt from the PROM. - */ - if ((flags & (SYNC_CLOSE|SYNC_WAIT)) == (SYNC_CLOSE|SYNC_WAIT)) { - XFS_bflush(mp->m_ddev_targp); - if (mp->m_rtdev_targp) { - XFS_bflush(mp->m_rtdev_targp); - } - } - return XFS_ERROR(last_error); } diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index fcd4040c9ad..2509db021f7 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -28,7 +28,6 @@ typedef struct bhv_vfs_sync_work { } bhv_vfs_sync_work_t; #define SYNC_ATTR 0x0001 /* sync attributes */ -#define SYNC_CLOSE 0x0002 /* close file system down */ #define SYNC_DELWRI 0x0004 /* look at delayed writes */ #define SYNC_WAIT 0x0008 /* wait for i/o to complete */ #define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */ -- cgit v1.2.3-70-g09d2 From a4e4c4f4a8f9376158f8181a75285091f52a79e3 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:16:11 +1100 Subject: [XFS] Kill xfs_sync() There are no more callers to xfs_sync() now, so remove the function altogther. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32311a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 132 +++++--------------------------------------- fs/xfs/linux-2.6/xfs_sync.h | 25 ++------- fs/xfs/quota/xfs_qm.c | 10 +--- fs/xfs/xfs_iget.c | 15 +++-- 4 files changed, 29 insertions(+), 153 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 91a54a79a09..ed24435af65 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -316,11 +316,21 @@ xfs_sync_fsdata( } /* - * First stage of freeze - no more writers will make progress now we are here, + * When remounting a filesystem read-only or freezing the filesystem, we have + * two phases to execute. This first phase is syncing the data before we + * quiesce the filesystem, and the second is flushing all the inodes out after + * we've waited for all the transactions created by the first phase to + * complete. The second phase ensures that the inodes are written to their + * location on disk rather than just existing in transactions in the log. This + * means after a quiesce there is no log replay required to write the inodes to + * disk (this is the main difference between a sync and a quiesce). + */ +/* + * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, - * transactions can still occur here so don't bother flushing the buftarg (i.e - * SYNC_QUIESCE) because it'll just get dirty again. + * transactions can still occur here so don't bother flushing the buftarg + * because it'll just get dirty again. */ int xfs_quiesce_data( @@ -337,128 +347,16 @@ xfs_quiesce_data( xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT); XFS_QM_DQSYNC(mp, SYNC_WAIT); - /* write superblock and hoover shutdown errors */ + /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp, 0); - /* flush devices */ - XFS_bflush(mp->m_ddev_targp); + /* flush data-only devices */ if (mp->m_rtdev_targp) XFS_bflush(mp->m_rtdev_targp); return error; } -/* - * xfs_sync flushes any pending I/O to file system vfsp. 
- * - * This routine is called by vfs_sync() to make sure that things make it - * out to disk eventually, on sync() system calls to flush out everything, - * and when the file system is unmounted. For the vfs_sync() case, all - * we really need to do is sync out the log to make all of our meta-data - * updates permanent (except for timestamps). For calls from pflushd(), - * dirty pages are kept moving by calling pdflush() on the inodes - * containing them. We also flush the inodes that we can lock without - * sleeping and the superblock if we can lock it without sleeping from - * vfs_sync() so that items at the tail of the log are always moving out. - * - * Flags: - * SYNC_BDFLUSH - We're being called from vfs_sync() so we don't want - * to sleep if we can help it. All we really need - * to do is ensure that the log is synced at least - * periodically. We also push the inodes and - * superblock if we can lock them without sleeping - * and they are not pinned. - * SYNC_ATTR - We need to flush the inodes. Now handled by direct calls - * to xfs_sync_inodes(). - * SYNC_WAIT - All the flushes that take place in this call should - * be synchronous. - * SYNC_DELWRI - This tells us to push dirty pages associated with - * inodes. SYNC_WAIT and SYNC_BDFLUSH are used to - * determine if they should be flushed sync, async, or - * delwri. - * SYNC_FSDATA - This indicates that the caller would like to make - * sure the superblock is safe on disk. We can ensure - * this by simply making sure the log gets flushed - * if SYNC_BDFLUSH is set, and by actually writing it - * out otherwise. - * SYNC_IOWAIT - The caller wants us to wait for all data I/O to complete - * before we return (including direct I/O). Forms the drain - * side of the write barrier needed to safely quiesce the - * filesystem. - * - */ -int -xfs_sync( - xfs_mount_t *mp, - int flags) -{ - int error; - int last_error = 0; - uint log_flags = XFS_LOG_FORCE; - - ASSERT(!(flags & SYNC_ATTR)); - - /* - * Get the Quota Manager to flush the dquots. - * - * If XFS quota support is not enabled or this filesystem - * instance does not use quotas XFS_QM_DQSYNC will always - * return zero. - */ - error = XFS_QM_DQSYNC(mp, flags); - if (error) { - /* - * If we got an IO error, we will be shutting down. - * So, there's nothing more for us to do here. - */ - ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp)); - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(error); - } - - if (flags & SYNC_IOWAIT) - xfs_filestream_flush(mp); - - /* - * Sync out the log. This ensures that the log is periodically - * flushed even if there is not enough activity to fill it up. - */ - if (flags & SYNC_WAIT) - log_flags |= XFS_LOG_SYNC; - - xfs_log_force(mp, (xfs_lsn_t)0, log_flags); - - if (flags & SYNC_DELWRI) { - if (flags & SYNC_BDFLUSH) - xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); - else - error = xfs_sync_inodes(mp, flags); - /* - * Flushing out dirty data above probably generated more - * log activity, so if this isn't vfs_sync() then flush - * the log again. - */ - xfs_log_force(mp, 0, log_flags); - } - - if (flags & SYNC_FSDATA) { - error = xfs_sync_fsdata(mp, flags); - if (error) - last_error = error; - } - - /* - * Now check to see if the log needs a "dummy" transaction. - */ - if (!(flags & SYNC_REMOUNT) && xfs_log_need_covered(mp)) { - error = xfs_commit_dummy_trans(mp, log_flags); - if (error) - return error; - } - - return XFS_ERROR(last_error); -} - /* * Enqueue a work item to be picked up by the vfs xfssyncd thread. 
* Doing this has two advantages: diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 2509db021f7..4591dc0c788 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -28,31 +28,14 @@ typedef struct bhv_vfs_sync_work { } bhv_vfs_sync_work_t; #define SYNC_ATTR 0x0001 /* sync attributes */ -#define SYNC_DELWRI 0x0004 /* look at delayed writes */ -#define SYNC_WAIT 0x0008 /* wait for i/o to complete */ -#define SYNC_BDFLUSH 0x0010 /* BDFLUSH is calling -- don't block */ -#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ -#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ -#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ -#define SYNC_IOWAIT 0x0100 /* wait for all I/O to complete */ - -/* - * When remounting a filesystem read-only or freezing the filesystem, - * we have two phases to execute. This first phase is syncing the data - * before we quiesce the fielsystem, and the second is flushing all the - * inodes out after we've waited for all the transactions created by - * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE - * to ensure that the inodes are written to their location on disk - * rather than just existing in transactions in the log. This means - * after a quiesce there is no log replay required to write the inodes - * to disk (this is the main difference between a sync and a quiesce). - */ -#define SYNC_DATA_QUIESCE (SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT) +#define SYNC_DELWRI 0x0002 /* look at delayed writes */ +#define SYNC_WAIT 0x0004 /* wait for i/o to complete */ +#define SYNC_BDFLUSH 0x0008 /* BDFLUSH is calling -- don't block */ +#define SYNC_IOWAIT 0x0010 /* wait for all I/O to complete */ int xfs_syncd_init(struct xfs_mount *mp); void xfs_syncd_stop(struct xfs_mount *mp); -int xfs_sync(struct xfs_mount *mp, int flags); int xfs_sync_inodes(struct xfs_mount *mp, int flags); int xfs_sync_fsdata(struct xfs_mount *mp, int flags); diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 270f775974e..db1986a205a 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -987,14 +987,10 @@ xfs_qm_dqdetach( } /* - * This is called by VFS_SYNC and flags arg determines the caller, - * and its motives, as done in xfs_sync. - * - * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31 - * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25 - * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA + * This is called to sync quotas. We can be told to use non-blocking + * semantics by either the SYNC_BDFLUSH flag or the absence of the + * SYNC_WAIT flag. */ - int xfs_qm_sync( xfs_mount_t *mp, diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 1256746b249..58865fe4780 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -431,14 +431,13 @@ xfs_ireclaim(xfs_inode_t *ip) xfs_iextract(ip); /* - * Here we do a spurious inode lock in order to coordinate with - * xfs_sync(). This is because xfs_sync() references the inodes - * in the mount list without taking references on the corresponding - * vnodes. We make that OK here by ensuring that we wait until - * the inode is unlocked in xfs_sync() before we go ahead and - * free it. We get both the regular lock and the io lock because - * the xfs_sync() code may need to drop the regular one but will - * still hold the io lock. + * Here we do a spurious inode lock in order to coordinate with inode + * cache radix tree lookups. 
This is because the lookup can reference + * the inodes in the cache without taking references. We make that OK + * here by ensuring that we wait until the inode is unlocked after the + * lookup before we go ahead and free it. We get both the ilock and + * the iolock because the code may need to drop the ilock one but will + * still hold the iolock. */ xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); -- cgit v1.2.3-70-g09d2 From 76bf105cb16da6c847a13a3c77dc962ba1081713 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:16:21 +1100 Subject: [XFS] Move remaining quiesce code. With all the other filesystem sync code it in xfs_sync.c including the data quiesce code, it makes sense to move the remaining quiesce code to the same place. SGI-PV: 988140 SGI-Modid: xfs-linux-melb:xfs-kern:32312a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 6 ++--- fs/xfs/linux-2.6/xfs_sync.c | 55 ++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_sync.h | 1 + fs/xfs/xfs_vfsops.c | 55 -------------------------------------------- fs/xfs/xfs_vfsops.h | 1 - 5 files changed, 59 insertions(+), 59 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 60ecf47b9f0..15fb262ef86 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1212,7 +1212,7 @@ xfs_fs_remount( /* rw -> ro */ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) { xfs_quiesce_data(mp); - xfs_attr_quiesce(mp); + xfs_quiesce_attr(mp); mp->m_flags |= XFS_MOUNT_RDONLY; } @@ -1221,7 +1221,7 @@ xfs_fs_remount( /* * Second stage of a freeze. The data is already frozen so we only - * need to take care of themetadata. Once that's done write a dummy + * need to take care of the metadata. Once that's done write a dummy * record to dirty the log in case of a crash while frozen. */ STATIC void @@ -1230,7 +1230,7 @@ xfs_fs_lockfs( { struct xfs_mount *mp = XFS_M(sb); - xfs_attr_quiesce(mp); + xfs_quiesce_attr(mp); xfs_fs_log_dummy(mp); } diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index ed24435af65..b2b708254ae 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -357,6 +357,61 @@ xfs_quiesce_data( return error; } +STATIC void +xfs_quiesce_fs( + struct xfs_mount *mp) +{ + int count = 0, pincount; + + xfs_flush_buftarg(mp->m_ddev_targp, 0); + xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); + + /* + * This loop must run at least twice. The first instance of the loop + * will flush most meta data but that will generate more meta data + * (typically directory updates). Which then must be flushed and + * logged before we can write the unmount record. + */ + do { + xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); + pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); + if (!pincount) { + delay(50); + count++; + } + } while (count < 2); +} + +/* + * Second stage of a quiesce. The data is already synced, now we have to take + * care of the metadata. New transactions are already blocked, so we need to + * wait for any remaining transactions to drain out before proceding. 
+ */ +void +xfs_quiesce_attr( + struct xfs_mount *mp) +{ + int error = 0; + + /* wait for all modifications to complete */ + while (atomic_read(&mp->m_active_trans) > 0) + delay(100); + + /* flush inodes and push all remaining buffers out to disk */ + xfs_quiesce_fs(mp); + + ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); + + /* Push the superblock and write an unmount record */ + error = xfs_log_sbcount(mp, 1); + if (error) + xfs_fs_cmn_err(CE_WARN, mp, + "xfs_attr_quiesce: failed to log sb changes. " + "Frozen image may not be consistent."); + xfs_log_unmount_write(mp); + xfs_unmountfs_writesb(mp); +} + /* * Enqueue a work item to be picked up by the vfs xfssyncd thread. * Doing this has two advantages: diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 4591dc0c788..3b49aa3bb5f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -40,6 +40,7 @@ int xfs_sync_inodes(struct xfs_mount *mp, int flags); int xfs_sync_fsdata(struct xfs_mount *mp, int flags); int xfs_quiesce_data(struct xfs_mount *mp); +void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inode(struct xfs_inode *ip); void xfs_flush_device(struct xfs_inode *ip); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index b55a9bb3a6e..883dd0f68e9 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -59,61 +59,6 @@ #include "xfs_sync.h" -STATIC void -xfs_quiesce_fs( - xfs_mount_t *mp) -{ - int count = 0, pincount; - - xfs_flush_buftarg(mp->m_ddev_targp, 0); - xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); - - /* - * This loop must run at least twice. The first instance of the loop - * will flush most meta data but that will generate more meta data - * (typically directory updates). Which then must be flushed and - * logged before we can write the unmount record. - */ - do { - xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT); - pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); - if (!pincount) { - delay(50); - count++; - } - } while (count < 2); -} - -/* - * Second stage of a quiesce. The data is already synced, now we have to take - * care of the metadata. New transactions are already blocked, so we need to - * wait for any remaining transactions to drain out before proceding. - */ -void -xfs_attr_quiesce( - xfs_mount_t *mp) -{ - int error = 0; - - /* wait for all modifications to complete */ - while (atomic_read(&mp->m_active_trans) > 0) - delay(100); - - /* flush inodes and push all remaining buffers out to disk */ - xfs_quiesce_fs(mp); - - ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); - - /* Push the superblock and write an unmount record */ - error = xfs_log_sbcount(mp, 1); - if (error) - xfs_fs_cmn_err(CE_WARN, mp, - "xfs_attr_quiesce: failed to log sb changes. 
" - "Frozen image may not be consistent."); - xfs_log_unmount_write(mp); - xfs_unmountfs_writesb(mp); -} - /* * xfs_unmount_flush implements a set of flush operation on special * inodes, which are needed as a separate set of operations so that diff --git a/fs/xfs/xfs_vfsops.h b/fs/xfs/xfs_vfsops.h index 6701d0ed8ad..6b8e0b52b95 100644 --- a/fs/xfs/xfs_vfsops.h +++ b/fs/xfs/xfs_vfsops.h @@ -10,6 +10,5 @@ struct xfs_mount_args; void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, int lnnum); -void xfs_attr_quiesce(struct xfs_mount *mp); #endif /* _XFS_VFSOPS_H */ -- cgit v1.2.3-70-g09d2 From 3471394ba56f44761ce1c300f139478dbfb49d4a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:21:10 +1100 Subject: [XFS] fix instant oops with tracing enabled We can only read inode->i_count if the inode is actually there and not a NULL pointer. This was introduced in one of the recent sync patches. SGI-PV: 988255 SGI-Modid: xfs-linux-melb:xfs-kern:32315a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_vnode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index dceb6dbaa2d..ac827d23149 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -92,7 +92,7 @@ static inline int xfs_icount(struct xfs_inode *ip) { struct inode *inode = VFS_I(ip); - if (!inode) + if (inode) return atomic_read(&inode->i_count); return -1; } -- cgit v1.2.3-70-g09d2 From 6441e549157b749bae003cce70b4c8b62e4801fa Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:21:19 +1100 Subject: [XFS] factor xfs_iget_core() into hit and miss cases There are really two cases in xfs_iget_core(). The first is the cache hit case, the second is the miss case. They share very little code, and hence can easily be factored out into separate functions. This makes the code much easier to understand and subsequently modify. SGI-PV: 988141 SGI-Modid: xfs-linux-melb:xfs-kern:32317a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_iget.c | 348 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 191 insertions(+), 157 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 58865fe4780..b2539b17c95 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -40,161 +40,119 @@ #include "xfs_utils.h" /* - * Look up an inode by number in the given file system. - * The inode is looked up in the cache held in each AG. - * If the inode is found in the cache, attach it to the provided - * vnode. - * - * If it is not in core, read it in from the file system's device, - * add it to the cache and attach the provided vnode. - * - * The inode is locked according to the value of the lock_flags parameter. - * This flag parameter indicates how and if the inode's IO lock and inode lock - * should be taken. - * - * mp -- the mount point structure for the current file system. It points - * to the inode hash table. - * tp -- a pointer to the current transaction if there is one. This is - * simply passed through to the xfs_iread() call. - * ino -- the number of the inode desired. This is the unique identifier - * within the file system for the inode being requested. - * lock_flags -- flags indicating how to lock the inode. See the comment - * for xfs_ilock() for a list of valid values. 
- * bno -- the block number starting the buffer containing the inode, - * if known (as by bulkstat), else 0. + * Check the validity of the inode we just found it the cache */ -STATIC int -xfs_iget_core( - struct inode *inode, - xfs_mount_t *mp, - xfs_trans_t *tp, - xfs_ino_t ino, - uint flags, - uint lock_flags, - xfs_inode_t **ipp, - xfs_daddr_t bno) +static int +xfs_iget_cache_hit( + struct inode *inode, + struct xfs_perag *pag, + struct xfs_inode *ip, + int flags, + int lock_flags) __releases(pag->pag_ici_lock) { - struct inode *old_inode; - xfs_inode_t *ip; - int error; - unsigned long first_index, mask; - xfs_perag_t *pag; - xfs_agino_t agino; + struct xfs_mount *mp = ip->i_mount; + struct inode *old_inode; + int error = 0; - /* the radix tree exists only in inode capable AGs */ - if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi) - return EINVAL; - - /* get the perag structure and ensure that it's inode capable */ - pag = xfs_get_perag(mp, ino); - if (!pag->pagi_inodeok) - return EINVAL; - ASSERT(pag->pag_ici_init); - agino = XFS_INO_TO_AGINO(mp, ino); - -again: - read_lock(&pag->pag_ici_lock); - ip = radix_tree_lookup(&pag->pag_ici_root, agino); + /* + * If INEW is set this inode is being set up + * Pause and try again. + */ + if (xfs_iflags_test(ip, XFS_INEW)) { + error = EAGAIN; + XFS_STATS_INC(xs_ig_frecycle); + goto out_error; + } - if (ip != NULL) { + old_inode = ip->i_vnode; + if (old_inode == NULL) { /* - * If INEW is set this inode is being set up + * If IRECLAIM is set this inode is + * on its way out of the system, * we need to pause and try again. */ - if (xfs_iflags_test(ip, XFS_INEW)) { - read_unlock(&pag->pag_ici_lock); - delay(1); + if (xfs_iflags_test(ip, XFS_IRECLAIM)) { + error = EAGAIN; XFS_STATS_INC(xs_ig_frecycle); + goto out_error; + } + ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); - goto again; + /* + * If lookup is racing with unlink, then we + * should return an error immediately so we + * don't remove it from the reclaim list and + * potentially leak the inode. + */ + if ((ip->i_d.di_mode == 0) && + !(flags & XFS_IGET_CREATE)) { + error = ENOENT; + goto out_error; } + xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); - old_inode = ip->i_vnode; - if (old_inode == NULL) { - /* - * If IRECLAIM is set this inode is - * on its way out of the system, - * we need to pause and try again. - */ - if (xfs_iflags_test(ip, XFS_IRECLAIM)) { - read_unlock(&pag->pag_ici_lock); - delay(1); - XFS_STATS_INC(xs_ig_frecycle); - - goto again; - } - ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); - - /* - * If lookup is racing with unlink, then we - * should return an error immediately so we - * don't remove it from the reclaim list and - * potentially leak the inode. - */ - if ((ip->i_d.di_mode == 0) && - !(flags & XFS_IGET_CREATE)) { - read_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); - return ENOENT; - } - - xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); - - XFS_STATS_INC(xs_ig_found); - xfs_iflags_clear(ip, XFS_IRECLAIMABLE); - read_unlock(&pag->pag_ici_lock); - - XFS_MOUNT_ILOCK(mp); - list_del_init(&ip->i_reclaim); - XFS_MOUNT_IUNLOCK(mp); - - goto finish_inode; - - } else if (inode != old_inode) { - /* The inode is being torn down, pause and - * try again. 
- */ - if (old_inode->i_state & (I_FREEING | I_CLEAR)) { - read_unlock(&pag->pag_ici_lock); - delay(1); - XFS_STATS_INC(xs_ig_frecycle); - - goto again; - } + xfs_iflags_clear(ip, XFS_IRECLAIMABLE); + read_unlock(&pag->pag_ici_lock); + + XFS_MOUNT_ILOCK(mp); + list_del_init(&ip->i_reclaim); + XFS_MOUNT_IUNLOCK(mp); + + } else if (inode != old_inode) { + /* The inode is being torn down, pause and + * try again. + */ + if (old_inode->i_state & (I_FREEING | I_CLEAR)) { + error = EAGAIN; + XFS_STATS_INC(xs_ig_frecycle); + goto out_error; + } /* Chances are the other vnode (the one in the inode) is being torn * down right now, and we landed on top of it. Question is, what do * we do? Unhook the old inode and hook up the new one? */ - cmn_err(CE_PANIC, - "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", - old_inode, inode); - } - - /* - * Inode cache hit - */ + cmn_err(CE_PANIC, + "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", + old_inode, inode); + } else { read_unlock(&pag->pag_ici_lock); - XFS_STATS_INC(xs_ig_found); + } -finish_inode: - if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { - xfs_put_perag(mp, pag); - return ENOENT; - } + if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { + error = ENOENT; + goto out; + } - if (lock_flags != 0) - xfs_ilock(ip, lock_flags); + if (lock_flags != 0) + xfs_ilock(ip, lock_flags); - xfs_iflags_clear(ip, XFS_ISTALE); - xfs_itrace_exit_tag(ip, "xfs_iget.found"); - goto return_ip; - } + xfs_iflags_clear(ip, XFS_ISTALE); + xfs_itrace_exit_tag(ip, "xfs_iget.found"); + XFS_STATS_INC(xs_ig_found); + return 0; - /* - * Inode cache miss - */ +out_error: read_unlock(&pag->pag_ici_lock); - XFS_STATS_INC(xs_ig_missed); +out: + return error; +} + + +static int +xfs_iget_cache_miss( + struct xfs_mount *mp, + struct xfs_perag *pag, + xfs_trans_t *tp, + xfs_ino_t ino, + struct xfs_inode **ipp, + xfs_daddr_t bno, + int flags, + int lock_flags) __releases(pag->pag_ici_lock) +{ + struct xfs_inode *ip; + int error; + unsigned long first_index, mask; + xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); /* * Read the disk inode attributes into a new inode structure and get @@ -202,17 +160,14 @@ finish_inode: */ error = xfs_iread(mp, tp, ino, &ip, bno, (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0); - if (error) { - xfs_put_perag(mp, pag); + if (error) return error; - } xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { - xfs_idestroy(ip); - xfs_put_perag(mp, pag); - return ENOENT; + error = ENOENT; + goto out_destroy; } /* @@ -220,9 +175,8 @@ finish_inode: * write spinlock. */ if (radix_tree_preload(GFP_KERNEL)) { - xfs_idestroy(ip); - delay(1); - goto again; + error = EAGAIN; + goto out_destroy; } if (lock_flags) @@ -231,32 +185,104 @@ finish_inode: mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = agino & mask; write_lock(&pag->pag_ici_lock); - /* - * insert the new inode - */ + + /* insert the new inode */ error = radix_tree_insert(&pag->pag_ici_root, agino, ip); if (unlikely(error)) { - BUG_ON(error != -EEXIST); - write_unlock(&pag->pag_ici_lock); - radix_tree_preload_end(); - if (lock_flags) - xfs_iunlock(ip, lock_flags); - xfs_idestroy(ip); + WARN_ON(error != -EEXIST); XFS_STATS_INC(xs_ig_dup); - goto again; + error = EAGAIN; + goto out_unlock; } - /* - * These values _must_ be set before releasing the radix tree lock! - */ + /* These values _must_ be set before releasing the radix tree lock! 
*/ ip->i_udquot = ip->i_gdquot = NULL; xfs_iflags_set(ip, XFS_INEW); write_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); + *ipp = ip; + return 0; + +out_unlock: + write_unlock(&pag->pag_ici_lock); + radix_tree_preload_end(); +out_destroy: + xfs_idestroy(ip); + return error; +} + +/* + * Look up an inode by number in the given file system. + * The inode is looked up in the cache held in each AG. + * If the inode is found in the cache, attach it to the provided + * vnode. + * + * If it is not in core, read it in from the file system's device, + * add it to the cache and attach the provided vnode. + * + * The inode is locked according to the value of the lock_flags parameter. + * This flag parameter indicates how and if the inode's IO lock and inode lock + * should be taken. + * + * mp -- the mount point structure for the current file system. It points + * to the inode hash table. + * tp -- a pointer to the current transaction if there is one. This is + * simply passed through to the xfs_iread() call. + * ino -- the number of the inode desired. This is the unique identifier + * within the file system for the inode being requested. + * lock_flags -- flags indicating how to lock the inode. See the comment + * for xfs_ilock() for a list of valid values. + * bno -- the block number starting the buffer containing the inode, + * if known (as by bulkstat), else 0. + */ +STATIC int +xfs_iget_core( + struct inode *inode, + xfs_mount_t *mp, + xfs_trans_t *tp, + xfs_ino_t ino, + uint flags, + uint lock_flags, + xfs_inode_t **ipp, + xfs_daddr_t bno) +{ + xfs_inode_t *ip; + int error; + xfs_perag_t *pag; + xfs_agino_t agino; + + /* the radix tree exists only in inode capable AGs */ + if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi) + return EINVAL; + + /* get the perag structure and ensure that it's inode capable */ + pag = xfs_get_perag(mp, ino); + if (!pag->pagi_inodeok) + return EINVAL; + ASSERT(pag->pag_ici_init); + agino = XFS_INO_TO_AGINO(mp, ino); + +again: + error = 0; + read_lock(&pag->pag_ici_lock); + ip = radix_tree_lookup(&pag->pag_ici_root, agino); + + if (ip) { + error = xfs_iget_cache_hit(inode, pag, ip, flags, lock_flags); + if (error) + goto out_error_or_again; + } else { + read_unlock(&pag->pag_ici_lock); + XFS_STATS_INC(xs_ig_missed); + + error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno, + flags, lock_flags); + if (error) + goto out_error_or_again; + } xfs_put_perag(mp, pag); - return_ip: ASSERT(ip->i_df.if_ext_max == XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); @@ -276,6 +302,14 @@ finish_inode: if (ip->i_d.di_mode != 0) xfs_setup_inode(ip); return 0; + +out_error_or_again: + if (error == EAGAIN) { + delay(1); + goto again; + } + xfs_put_perag(mp, pag); + return error; } -- cgit v1.2.3-70-g09d2 From 94b97e39b0c983e86f0028c456dcf213abc722a0 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:21:30 +1100 Subject: [XFS] Never call mark_inode_dirty_sync() directly Once the Linux inode and the XFS inode are combined, we cannot rely on just checking if the linux inode exists as a method of determining if it is valid or not. Hence we should always call xfs_mark_inode_dirty_sync() instead as it does the correct checks to determine if the linux inode is in a valid state or not.
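With this change the helper itself decides whether the Linux inode can be dirtied; its body reduces to roughly the following (condensed from the xfs_iops.c hunk below; the existing xfs_inode argument is elided by the hunk header):

	struct inode	*inode = VFS_I(ip);

	/* only dirty the Linux inode if it is not being freed or cleared */
	if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR)))
		mark_inode_dirty_sync(inode);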
SGI-PV: 988141 SGI-Modid: xfs-linux-melb:xfs-kern:32318a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_aops.c | 2 +- fs/xfs/linux-2.6/xfs_iops.c | 2 +- fs/xfs/linux-2.6/xfs_super.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index a44d68eb50b..8fbc97df360 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -191,7 +191,7 @@ xfs_setfilesize( ip->i_d.di_size = isize; ip->i_update_core = 1; ip->i_update_size = 1; - mark_inode_dirty_sync(ioend->io_inode); + xfs_mark_inode_dirty_sync(ip); } xfs_iunlock(ip, XFS_ILOCK_EXCL); diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 095d271f343..3bfb3c0f8e2 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -128,7 +128,7 @@ xfs_ichgtime( if (sync_it) { SYNCHRONIZE(); ip->i_update_core = 1; - mark_inode_dirty_sync(inode); + xfs_mark_inode_dirty_sync(ip); } } diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 15fb262ef86..b87e45577a5 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -948,7 +948,7 @@ xfs_fs_write_inode( * it dirty again so we'll try again later. */ if (error) - mark_inode_dirty_sync(inode); + xfs_mark_inode_dirty_sync(XFS_I(inode)); return -error; } -- cgit v1.2.3-70-g09d2 From 2cb1599f9b2ecdd7a9e59feeee647eb258966839 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:32:23 +1100 Subject: Inode: Allow external initialisers To allow XFS to combine the XFS and linux inodes into a single structure, we need to drive inode lookup from the XFS inode cache, not the generic inode cache. This means that we need initialise a struct inode from a context outside alloc_inode() as it is no longer used by XFS. Factor and export the struct inode initialisation code from alloc_inode() to inode_init_always() as a counterpart to inode_init_once(). i.e. we have to call this init function for each inode instantiation (always), as opposed inode_init_once() which is only called on slab object instantiation (once). Signed-off-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/inode.c | 140 +++++++++++++++++++++++++++++------------------------ include/linux/fs.h | 1 + 2 files changed, 79 insertions(+), 62 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 0487ddba139..e7ee99907d6 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -108,84 +108,100 @@ static void wake_up_inode(struct inode *inode) wake_up_bit(&inode->i_state, __I_LOCK); } -static struct inode *alloc_inode(struct super_block *sb) +/** + * inode_init_always - perform inode structure intialisation + * @sb - superblock inode belongs to. + * @inode - inode to initialise + * + * These are initializations that need to be done on every inode + * allocation as the fields are not initialised by slab allocation. 
+ */ +struct inode *inode_init_always(struct super_block *sb, struct inode *inode) { static const struct address_space_operations empty_aops; static struct inode_operations empty_iops; static const struct file_operations empty_fops; - struct inode *inode; - - if (sb->s_op->alloc_inode) - inode = sb->s_op->alloc_inode(sb); - else - inode = (struct inode *) kmem_cache_alloc(inode_cachep, GFP_KERNEL); - if (inode) { - struct address_space * const mapping = &inode->i_data; - - inode->i_sb = sb; - inode->i_blkbits = sb->s_blocksize_bits; - inode->i_flags = 0; - atomic_set(&inode->i_count, 1); - inode->i_op = &empty_iops; - inode->i_fop = &empty_fops; - inode->i_nlink = 1; - atomic_set(&inode->i_writecount, 0); - inode->i_size = 0; - inode->i_blocks = 0; - inode->i_bytes = 0; - inode->i_generation = 0; + struct address_space * const mapping = &inode->i_data; + + inode->i_sb = sb; + inode->i_blkbits = sb->s_blocksize_bits; + inode->i_flags = 0; + atomic_set(&inode->i_count, 1); + inode->i_op = &empty_iops; + inode->i_fop = &empty_fops; + inode->i_nlink = 1; + atomic_set(&inode->i_writecount, 0); + inode->i_size = 0; + inode->i_blocks = 0; + inode->i_bytes = 0; + inode->i_generation = 0; #ifdef CONFIG_QUOTA - memset(&inode->i_dquot, 0, sizeof(inode->i_dquot)); + memset(&inode->i_dquot, 0, sizeof(inode->i_dquot)); #endif - inode->i_pipe = NULL; - inode->i_bdev = NULL; - inode->i_cdev = NULL; - inode->i_rdev = 0; - inode->dirtied_when = 0; - if (security_inode_alloc(inode)) { - if (inode->i_sb->s_op->destroy_inode) - inode->i_sb->s_op->destroy_inode(inode); - else - kmem_cache_free(inode_cachep, (inode)); - return NULL; - } + inode->i_pipe = NULL; + inode->i_bdev = NULL; + inode->i_cdev = NULL; + inode->i_rdev = 0; + inode->dirtied_when = 0; + if (security_inode_alloc(inode)) { + if (inode->i_sb->s_op->destroy_inode) + inode->i_sb->s_op->destroy_inode(inode); + else + kmem_cache_free(inode_cachep, (inode)); + return NULL; + } - spin_lock_init(&inode->i_lock); - lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); + spin_lock_init(&inode->i_lock); + lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); - mutex_init(&inode->i_mutex); - lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); + mutex_init(&inode->i_mutex); + lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); - init_rwsem(&inode->i_alloc_sem); - lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); + init_rwsem(&inode->i_alloc_sem); + lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); - mapping->a_ops = &empty_aops; - mapping->host = inode; - mapping->flags = 0; - mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE); - mapping->assoc_mapping = NULL; - mapping->backing_dev_info = &default_backing_dev_info; - mapping->writeback_index = 0; + mapping->a_ops = &empty_aops; + mapping->host = inode; + mapping->flags = 0; + mapping_set_gfp_mask(mapping, GFP_HIGHUSER_PAGECACHE); + mapping->assoc_mapping = NULL; + mapping->backing_dev_info = &default_backing_dev_info; + mapping->writeback_index = 0; - /* - * If the block_device provides a backing_dev_info for client - * inodes then use that. Otherwise the inode share the bdev's - * backing_dev_info. - */ - if (sb->s_bdev) { - struct backing_dev_info *bdi; + /* + * If the block_device provides a backing_dev_info for client + * inodes then use that. Otherwise the inode share the bdev's + * backing_dev_info. 
+ */ + if (sb->s_bdev) { + struct backing_dev_info *bdi; - bdi = sb->s_bdev->bd_inode_backing_dev_info; - if (!bdi) - bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; - mapping->backing_dev_info = bdi; - } - inode->i_private = NULL; - inode->i_mapping = mapping; + bdi = sb->s_bdev->bd_inode_backing_dev_info; + if (!bdi) + bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; + mapping->backing_dev_info = bdi; } + inode->i_private = NULL; + inode->i_mapping = mapping; + return inode; } +EXPORT_SYMBOL(inode_init_always); + +static struct inode *alloc_inode(struct super_block *sb) +{ + struct inode *inode; + + if (sb->s_op->alloc_inode) + inode = sb->s_op->alloc_inode(sb); + else + inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL); + + if (inode) + return inode_init_always(sb, inode); + return NULL; +} void destroy_inode(struct inode *inode) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 5b248d61430..04abead4b02 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1881,6 +1881,7 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin); extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); +extern struct inode * inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); -- cgit v1.2.3-70-g09d2 From 8290c35f87304a6b73d4fd17b03580b4f7425de8 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:35:24 +1100 Subject: Inode: Allow external list initialisation To allow XFS to combine the XFS and linux inodes into a single structure, we need to drive inode lookup from the XFS inode cache, not the generic inode cache. This means that we need initialise a struct inode from a context outside alloc_inode() as it is no longer used by XFS. After inode allocation and initialisation, we need to add the inode to the superblock list, the in-use list, hash it and do some accounting. This all needs to be done with the inode_lock held and there are already several places in fs/inode.c that do this list manipulation. Factor out the common code, add a locking wrapper and export the function so ti can be called from XFS. Signed-off-by: Dave Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/inode.c | 67 +++++++++++++++++++++++++++++++++++++----------------- include/linux/fs.h | 1 + 2 files changed, 47 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index e7ee99907d6..fbcf6c5e760 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -550,6 +550,49 @@ repeat: return node ? inode : NULL; } +static unsigned long hash(struct super_block *sb, unsigned long hashval) +{ + unsigned long tmp; + + tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / + L1_CACHE_BYTES; + tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS); + return tmp & I_HASHMASK; +} + +static inline void +__inode_add_to_lists(struct super_block *sb, struct hlist_head *head, + struct inode *inode) +{ + inodes_stat.nr_inodes++; + list_add(&inode->i_list, &inode_in_use); + list_add(&inode->i_sb_list, &sb->s_inodes); + if (head) + hlist_add_head(&inode->i_hash, head); +} + +/** + * inode_add_to_lists - add a new inode to relevant lists + * @sb - superblock inode belongs to. + * @inode - inode to mark in use + * + * When an inode is allocated it needs to be accounted for, added to the in use + * list, the owning superblock and the inode hash. 
This needs to be done under + * the inode_lock, so export a function to do this rather than the inode lock + * itself. We calculate the hash list to add to here so it is all internal + * which requires the caller to have already set up the inode number in the + * inode to add. + */ +void inode_add_to_lists(struct super_block *sb, struct inode *inode) +{ + struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino); + + spin_lock(&inode_lock); + __inode_add_to_lists(sb, head, inode); + spin_unlock(&inode_lock); +} +EXPORT_SYMBOL_GPL(inode_add_to_lists); + /** * new_inode - obtain an inode * @sb: superblock @@ -577,9 +620,7 @@ struct inode *new_inode(struct super_block *sb) inode = alloc_inode(sb); if (inode) { spin_lock(&inode_lock); - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); + __inode_add_to_lists(sb, NULL, inode); inode->i_ino = ++last_ino; inode->i_state = 0; spin_unlock(&inode_lock); @@ -638,10 +679,7 @@ static struct inode * get_new_inode(struct super_block *sb, struct hlist_head *h if (set(inode, data)) goto set_failed; - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); - hlist_add_head(&inode->i_hash, head); + __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -687,10 +725,7 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he old = find_inode_fast(sb, head, ino); if (!old) { inode->i_ino = ino; - inodes_stat.nr_inodes++; - list_add(&inode->i_list, &inode_in_use); - list_add(&inode->i_sb_list, &sb->s_inodes); - hlist_add_head(&inode->i_hash, head); + __inode_add_to_lists(sb, head, inode); inode->i_state = I_LOCK|I_NEW; spin_unlock(&inode_lock); @@ -714,16 +749,6 @@ static struct inode * get_new_inode_fast(struct super_block *sb, struct hlist_he return inode; } -static unsigned long hash(struct super_block *sb, unsigned long hashval) -{ - unsigned long tmp; - - tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) / - L1_CACHE_BYTES; - tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS); - return tmp & I_HASHMASK; -} - /** * iunique - get a unique inode number * @sb: superblock diff --git a/include/linux/fs.h b/include/linux/fs.h index 04abead4b02..1deedf235d5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1883,6 +1883,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); extern struct inode * inode_init_always(struct super_block *, struct inode *); extern void inode_init_once(struct inode *); +extern void inode_add_to_lists(struct super_block *, struct inode *); extern void iput(struct inode *); extern struct inode * igrab(struct inode *); extern ino_t iunique(struct super_block *, ino_t); -- cgit v1.2.3-70-g09d2 From bf904248a2adb3f3be4eb4fb1837ce3bb28cca76 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:36:14 +1100 Subject: [XFS] Combine the XFS and Linux inodes To avoid issues with different lifecycles of XFS and Linux inodes, embedd the linux inode inside the XFS inode. This means that the linux inode has the same lifecycle as the XFS inode, even when it has been released by the OS. XFS inodes don't live much longer than this (a short stint in reclaim at most), so there isn't significant memory usage penalties here. Version 3 o kill xfs_icount() Version 2 o remove unused commented out code from xfs_iget(). 
o kill useless cast in VFS_I() SGI-PV: 988141 SGI-Modid: xfs-linux-melb:xfs-kern:32323a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_iops.c | 17 +++-- fs/xfs/linux-2.6/xfs_super.c | 47 +++++------- fs/xfs/linux-2.6/xfs_vnode.c | 15 +--- fs/xfs/xfs_iget.c | 167 +++++++++---------------------------------- fs/xfs/xfs_inode.c | 43 ++++++++--- fs/xfs/xfs_inode.h | 9 ++- fs/xfs/xfs_vnodeops.c | 13 +--- 7 files changed, 108 insertions(+), 203 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 3bfb3c0f8e2..37bb1012aff 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -64,14 +64,14 @@ xfs_synchronize_atime( { struct inode *inode = VFS_I(ip); - if (inode) { + if (!(inode->i_state & I_CLEAR)) { ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec; ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec; } } /* - * If the linux inode exists, mark it dirty. + * If the linux inode is valid, mark it dirty. * Used when commiting a dirty inode into a transaction so that * the inode will get written back by the linux code */ @@ -81,7 +81,7 @@ xfs_mark_inode_dirty_sync( { struct inode *inode = VFS_I(ip); - if (inode) + if (!(inode->i_state & (I_WILL_FREE|I_FREEING|I_CLEAR))) mark_inode_dirty_sync(inode); } @@ -766,12 +766,21 @@ xfs_diflags_to_iflags( * When reading existing inodes from disk this is called directly * from xfs_iget, when creating a new inode it is called from * xfs_ialloc after setting up the inode. + * + * We are always called with an uninitialised linux inode here. + * We need to initialise the necessary fields and take a reference + * on it. */ void xfs_setup_inode( struct xfs_inode *ip) { - struct inode *inode = ip->i_vnode; + struct inode *inode = &ip->i_vnode; + + inode->i_ino = ip->i_ino; + inode->i_state = I_NEW|I_LOCK; + inode_add_to_lists(ip->i_mount->m_super, inode); + ASSERT(atomic_read(&inode->i_count) == 1); inode->i_mode = ip->i_d.di_mode; inode->i_nlink = ip->i_d.di_nlink; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index b87e45577a5..c6ef684bf2e 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -72,7 +72,6 @@ static struct quotactl_ops xfs_quotactl_operations; static struct super_operations xfs_super_operations; -static kmem_zone_t *xfs_vnode_zone; static kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; @@ -867,29 +866,24 @@ xfsaild_stop( } - +/* Catch misguided souls that try to use this interface on XFS */ STATIC struct inode * xfs_fs_alloc_inode( struct super_block *sb) { - return kmem_zone_alloc(xfs_vnode_zone, KM_SLEEP); + BUG(); } +/* + * we need to provide an empty inode free function to prevent + * the generic code from trying to free our combined inode. + */ STATIC void xfs_fs_destroy_inode( - struct inode *inode) -{ - kmem_zone_free(xfs_vnode_zone, inode); -} - -STATIC void -xfs_fs_inode_init_once( - void *vnode) + struct inode *inode) { - inode_init_once((struct inode *)vnode); } - /* * Slab object creation initialisation for the XFS inode. * This covers only the idempotent fields in the XFS inode; @@ -898,13 +892,18 @@ xfs_fs_inode_init_once( * fields in the xfs inode that left in the initialise state * when freeing the inode. 
*/ -void -xfs_inode_init_once( +STATIC void +xfs_fs_inode_init_once( void *inode) { struct xfs_inode *ip = inode; memset(ip, 0, sizeof(struct xfs_inode)); + + /* vfs inode */ + inode_init_once(VFS_I(ip)); + + /* xfs inode */ atomic_set(&ip->i_iocount, 0); atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); @@ -975,8 +974,6 @@ xfs_fs_clear_inode( if (xfs_reclaim(ip)) panic("%s: cannot reclaim 0x%p\n", __func__, inode); } - - ASSERT(XFS_I(inode) == NULL); } STATIC void @@ -1829,16 +1826,10 @@ xfs_free_trace_bufs(void) STATIC int __init xfs_init_zones(void) { - xfs_vnode_zone = kmem_zone_init_flags(sizeof(struct inode), "xfs_vnode", - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | - KM_ZONE_SPREAD, - xfs_fs_inode_init_once); - if (!xfs_vnode_zone) - goto out; xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend"); if (!xfs_ioend_zone) - goto out_destroy_vnode_zone; + goto out; xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, xfs_ioend_zone); @@ -1854,6 +1845,7 @@ xfs_init_zones(void) "xfs_bmap_free_item"); if (!xfs_bmap_free_item_zone) goto out_destroy_log_ticket_zone; + xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t), "xfs_btree_cur"); if (!xfs_btree_cur_zone) @@ -1901,8 +1893,8 @@ xfs_init_zones(void) xfs_inode_zone = kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode", - KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | - KM_ZONE_SPREAD, xfs_inode_init_once); + KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD, + xfs_fs_inode_init_once); if (!xfs_inode_zone) goto out_destroy_efi_zone; @@ -1950,8 +1942,6 @@ xfs_init_zones(void) mempool_destroy(xfs_ioend_pool); out_destroy_ioend_zone: kmem_zone_destroy(xfs_ioend_zone); - out_destroy_vnode_zone: - kmem_zone_destroy(xfs_vnode_zone); out: return -ENOMEM; } @@ -1976,7 +1966,6 @@ xfs_destroy_zones(void) kmem_zone_destroy(xfs_log_ticket_zone); mempool_destroy(xfs_ioend_pool); kmem_zone_destroy(xfs_ioend_zone); - kmem_zone_destroy(xfs_vnode_zone); } diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index ac827d23149..ad18262d651 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -84,25 +84,12 @@ vn_ioerror( #ifdef XFS_INODE_TRACE -/* - * Reference count of Linux inode if present, -1 if the xfs_inode - * has no associated Linux inode. - */ -static inline int xfs_icount(struct xfs_inode *ip) -{ - struct inode *inode = VFS_I(ip); - - if (inode) - return atomic_read(&inode->i_count); - return -1; -} - #define KTRACE_ENTER(ip, vk, s, line, ra) \ ktrace_enter( (ip)->i_trace, \ /* 0 */ (void *)(__psint_t)(vk), \ /* 1 */ (void *)(s), \ /* 2 */ (void *)(__psint_t) line, \ -/* 3 */ (void *)(__psint_t)xfs_icount(ip), \ +/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \ /* 4 */ (void *)(ra), \ /* 5 */ NULL, \ /* 6 */ (void *)(__psint_t)current_cpu(), \ diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index b2539b17c95..c4414e8bce8 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -44,77 +44,65 @@ */ static int xfs_iget_cache_hit( - struct inode *inode, struct xfs_perag *pag, struct xfs_inode *ip, int flags, int lock_flags) __releases(pag->pag_ici_lock) { struct xfs_mount *mp = ip->i_mount; - struct inode *old_inode; int error = 0; /* * If INEW is set this inode is being set up + * If IRECLAIM is set this inode is being torn down * Pause and try again. 
*/ - if (xfs_iflags_test(ip, XFS_INEW)) { + if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) { error = EAGAIN; XFS_STATS_INC(xs_ig_frecycle); goto out_error; } - old_inode = ip->i_vnode; - if (old_inode == NULL) { + /* If IRECLAIMABLE is set, we've torn down the vfs inode part */ + if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { + /* - * If IRECLAIM is set this inode is - * on its way out of the system, - * we need to pause and try again. + * If lookup is racing with unlink, then we should return an + * error immediately so we don't remove it from the reclaim + * list and potentially leak the inode. */ - if (xfs_iflags_test(ip, XFS_IRECLAIM)) { - error = EAGAIN; - XFS_STATS_INC(xs_ig_frecycle); + + if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { + error = ENOENT; goto out_error; } - ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); + + xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); /* - * If lookup is racing with unlink, then we - * should return an error immediately so we - * don't remove it from the reclaim list and - * potentially leak the inode. + * We need to re-initialise the VFS inode as it has been + * 'freed' by the VFS. Do this here so we can deal with + * errors cleanly, then tag it so it can be set up correctly + * later. */ - if ((ip->i_d.di_mode == 0) && - !(flags & XFS_IGET_CREATE)) { - error = ENOENT; + if (!inode_init_always(mp->m_super, VFS_I(ip))) { + error = ENOMEM; goto out_error; } - xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); - + xfs_iflags_set(ip, XFS_INEW); xfs_iflags_clear(ip, XFS_IRECLAIMABLE); read_unlock(&pag->pag_ici_lock); XFS_MOUNT_ILOCK(mp); list_del_init(&ip->i_reclaim); XFS_MOUNT_IUNLOCK(mp); - - } else if (inode != old_inode) { - /* The inode is being torn down, pause and - * try again. - */ - if (old_inode->i_state & (I_FREEING | I_CLEAR)) { - error = EAGAIN; - XFS_STATS_INC(xs_ig_frecycle); - goto out_error; - } -/* Chances are the other vnode (the one in the inode) is being torn -* down right now, and we landed on top of it. Question is, what do -* we do? Unhook the old inode and hook up the new one? -*/ - cmn_err(CE_PANIC, - "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", - old_inode, inode); + } else if (!igrab(VFS_I(ip))) { + /* If the VFS inode is being torn down, pause and try again. */ + error = EAGAIN; + XFS_STATS_INC(xs_ig_frecycle); + goto out_error; } else { + /* we've got a live one */ read_unlock(&pag->pag_ici_lock); } @@ -215,11 +203,11 @@ out_destroy: /* * Look up an inode by number in the given file system. * The inode is looked up in the cache held in each AG. - * If the inode is found in the cache, attach it to the provided - * vnode. + * If the inode is found in the cache, initialise the vfs inode + * if necessary. * * If it is not in core, read it in from the file system's device, - * add it to the cache and attach the provided vnode. + * add it to the cache and initialise the vfs inode. * * The inode is locked according to the value of the lock_flags parameter. * This flag parameter indicates how and if the inode's IO lock and inode lock @@ -236,9 +224,8 @@ out_destroy: * bno -- the block number starting the buffer containing the inode, * if known (as by bulkstat), else 0. 
*/ -STATIC int -xfs_iget_core( - struct inode *inode, +int +xfs_iget( xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, @@ -269,7 +256,7 @@ again: ip = radix_tree_lookup(&pag->pag_ici_root, agino); if (ip) { - error = xfs_iget_cache_hit(inode, pag, ip, flags, lock_flags); + error = xfs_iget_cache_hit(pag, ip, flags, lock_flags); if (error) goto out_error_or_again; } else { @@ -283,23 +270,16 @@ again: } xfs_put_perag(mp, pag); - ASSERT(ip->i_df.if_ext_max == - XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); - xfs_iflags_set(ip, XFS_IMODIFIED); *ipp = ip; - /* - * Set up the Linux with the Linux inode. - */ - ip->i_vnode = inode; - inode->i_private = ip; - + ASSERT(ip->i_df.if_ext_max == + XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); /* * If we have a real type for an on-disk inode, we can set ops(&unlock) * now. If it's a new inode being created, xfs_ialloc will handle it. */ - if (ip->i_d.di_mode != 0) + if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0) xfs_setup_inode(ip); return 0; @@ -313,75 +293,6 @@ out_error_or_again: } -/* - * The 'normal' internal xfs_iget, if needed it will - * 'allocate', or 'get', the vnode. - */ -int -xfs_iget( - xfs_mount_t *mp, - xfs_trans_t *tp, - xfs_ino_t ino, - uint flags, - uint lock_flags, - xfs_inode_t **ipp, - xfs_daddr_t bno) -{ - struct inode *inode; - xfs_inode_t *ip; - int error; - - XFS_STATS_INC(xs_ig_attempts); - -retry: - inode = iget_locked(mp->m_super, ino); - if (!inode) - /* If we got no inode we are out of memory */ - return ENOMEM; - - if (inode->i_state & I_NEW) { - XFS_STATS_INC(vn_active); - XFS_STATS_INC(vn_alloc); - - error = xfs_iget_core(inode, mp, tp, ino, flags, - lock_flags, ipp, bno); - if (error) { - make_bad_inode(inode); - if (inode->i_state & I_NEW) - unlock_new_inode(inode); - iput(inode); - } - return error; - } - - /* - * If the inode is not fully constructed due to - * filehandle mismatches wait for the inode to go - * away and try again. - * - * iget_locked will call __wait_on_freeing_inode - * to wait for the inode to go away. - */ - if (is_bad_inode(inode)) { - iput(inode); - delay(1); - goto retry; - } - - ip = XFS_I(inode); - if (!ip) { - iput(inode); - delay(1); - goto retry; - } - - if (lock_flags != 0) - xfs_ilock(ip, lock_flags); - XFS_STATS_INC(xs_ig_found); - *ipp = ip; - return 0; -} - /* * Look for the inode corresponding to the given ino in the hash table. * If it is there and its i_transp pointer matches tp, return it. @@ -481,14 +392,6 @@ xfs_ireclaim(xfs_inode_t *ip) */ XFS_QM_DQDETACH(ip->i_mount, ip); - /* - * Pull our behavior descriptor from the vnode chain. - */ - if (ip->i_vnode) { - ip->i_vnode->i_private = NULL; - ip->i_vnode = NULL; - } - /* * Free all memory associated with the inode. */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index bc33762abc4..99d9118c4a4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -813,6 +813,16 @@ xfs_inode_alloc( ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(list_empty(&ip->i_reclaim)); + /* + * initialise the VFS inode here to get failures + * out of the way early. + */ + if (!inode_init_always(mp->m_super, VFS_I(ip))) { + kmem_zone_free(xfs_inode_zone, ip); + return NULL; + } + + /* initialise the xfs inode */ ip->i_ino = ino; ip->i_mount = mp; ip->i_blkno = 0; @@ -1086,6 +1096,7 @@ xfs_ialloc( uint flags; int error; timespec_t tv; + int filestreams = 0; /* * Call the space management code to pick @@ -1093,9 +1104,8 @@ xfs_ialloc( */ error = xfs_dialloc(tp, pip ? 
pip->i_ino : 0, mode, okalloc, ialloc_context, call_again, &ino); - if (error != 0) { + if (error) return error; - } if (*call_again || ino == NULLFSINO) { *ipp = NULL; return 0; @@ -1109,9 +1119,8 @@ xfs_ialloc( */ error = xfs_trans_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip); - if (error != 0) { + if (error) return error; - } ASSERT(ip != NULL); ip->i_d.di_mode = (__uint16_t)mode; @@ -1192,13 +1201,12 @@ xfs_ialloc( flags |= XFS_ILOG_DEV; break; case S_IFREG: - if (pip && xfs_inode_is_filestream(pip)) { - error = xfs_filestream_associate(pip, ip); - if (error < 0) - return -error; - if (!error) - xfs_iflags_set(ip, XFS_IFILESTREAM); - } + /* + * we can't set up filestreams until after the VFS inode + * is set up properly. + */ + if (pip && xfs_inode_is_filestream(pip)) + filestreams = 1; /* fall through */ case S_IFDIR: if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { @@ -1264,6 +1272,15 @@ xfs_ialloc( /* now that we have an i_mode we can setup inode ops and unlock */ xfs_setup_inode(ip); + /* now we have set up the vfs inode we can associate the filestream */ + if (filestreams) { + error = xfs_filestream_associate(pip, ip); + if (error < 0) + return -error; + if (!error) + xfs_iflags_set(ip, XFS_IFILESTREAM); + } + *ipp = ip; return 0; } @@ -2650,6 +2667,10 @@ xfs_idestroy_fork( * It must free the inode itself and any buffers allocated for * if_extents/if_data and if_broot. It must also free the lock * associated with the inode. + * + * Note: because we don't initialise everything on reallocation out + * of the zone, we must ensure we nullify everything correctly before + * freeing the structure. */ void xfs_idestroy( diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 6fd20fc179a..345b43a90eb 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -236,7 +236,6 @@ typedef struct xfs_inode { /* Inode linking and identification information. */ struct xfs_mount *i_mount; /* fs mount struct ptr */ struct list_head i_reclaim; /* reclaim list */ - struct inode *i_vnode; /* vnode backpointer */ struct xfs_dquot *i_udquot; /* user dquot */ struct xfs_dquot *i_gdquot; /* group dquot */ @@ -271,6 +270,10 @@ typedef struct xfs_inode { xfs_fsize_t i_size; /* in-memory size */ xfs_fsize_t i_new_size; /* size when write completes */ atomic_t i_iocount; /* outstanding I/O count */ + + /* VFS inode */ + struct inode i_vnode; /* embedded VFS inode */ + /* Trace buffers per inode. 
*/ #ifdef XFS_INODE_TRACE struct ktrace *i_trace; /* general inode trace */ @@ -298,13 +301,13 @@ typedef struct xfs_inode { /* Convert from vfs inode to xfs inode */ static inline struct xfs_inode *XFS_I(struct inode *inode) { - return (struct xfs_inode *)inode->i_private; + return container_of(inode, struct xfs_inode, i_vnode); } /* convert from xfs inode to vfs inode */ static inline struct inode *VFS_I(struct xfs_inode *ip) { - return (struct inode *)ip->i_vnode; + return &ip->i_vnode; } /* diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index a6714579a41..7fb577c9f9d 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2833,6 +2833,7 @@ xfs_reclaim( if (!ip->i_update_core && (ip->i_itemp == NULL)) { xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_iflock(ip); + xfs_iflags_set(ip, XFS_IRECLAIMABLE); return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); } else { xfs_mount_t *mp = ip->i_mount; @@ -2841,8 +2842,6 @@ xfs_reclaim( XFS_MOUNT_ILOCK(mp); spin_lock(&ip->i_flags_lock); __xfs_iflags_set(ip, XFS_IRECLAIMABLE); - VFS_I(ip)->i_private = NULL; - ip->i_vnode = NULL; spin_unlock(&ip->i_flags_lock); list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); XFS_MOUNT_IUNLOCK(mp); @@ -2857,10 +2856,6 @@ xfs_finish_reclaim( int sync_mode) { xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); - struct inode *vp = VFS_I(ip); - - if (vp && VN_BAD(vp)) - goto reclaim; /* The hash lock here protects a thread in xfs_iget_core from * racing with us on linking the inode back with a vnode. @@ -2870,7 +2865,7 @@ xfs_finish_reclaim( write_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); if (__xfs_iflags_test(ip, XFS_IRECLAIM) || - (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) && vp == NULL)) { + !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { spin_unlock(&ip->i_flags_lock); write_unlock(&pag->pag_ici_lock); if (locked) { @@ -2904,15 +2899,13 @@ xfs_finish_reclaim( * In the case of a forced shutdown we rely on xfs_iflush() to * wait for the inode to be unpinned before returning an error. */ - if (xfs_iflush(ip, sync_mode) == 0) { + if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) { /* synchronize with xfs_iflush_done */ xfs_iflock(ip); xfs_ifunlock(ip); } xfs_iunlock(ip, XFS_ILOCK_EXCL); - - reclaim: xfs_ireclaim(ip); return 0; } -- cgit v1.2.3-70-g09d2 From 99fa8cb3c580d4445fe8fc239454e8f37a3b6847 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:36:40 +1100 Subject: [XFS] Prevent use-after-free caused by synchronous inode reclaim With the combined linux and XFS inode, we need to ensure that the combined structure is not freed before the generic code is finished with the inode. As it turns out, there is a case where the XFS inode is freed before the linux inode - when xfs_reclaim() is called from ->clear_inode() on a clean inode, the xfs inode is freed during that call. The generic code references the inode after the ->clear_inode() call, so this is a use after free situation. Fix the problem by moving the xfs_reclaim() call to ->destroy_inode() instead of in ->clear_inode(). This ensures the combined inode structure is not freed until after the generic code has finished with it. 
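For context, a minimal sketch of the ordering this relies on (illustrative only, not part of the patch; the intermediate step is an assumption about the generic teardown path of this era rather than a quote from fs/inode.c):

	sb->s_op->clear_inode(inode);	/* too early to free: the VFS still */
					/* marks the inode clear and wakes waiters */
	sb->s_op->destroy_inode(inode);	/* last VFS use: only here may the */
					/* containing xfs_inode be released */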
SGI-PV: 988141 SGI-Modid: xfs-linux-melb:xfs-kern:32324a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c6ef684bf2e..bb535a7737b 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -875,13 +875,18 @@ xfs_fs_alloc_inode( } /* - * we need to provide an empty inode free function to prevent - * the generic code from trying to free our combined inode. + * Now that the generic code is guaranteed not to be accessing + * the linux inode, we can reclaim the inode. */ STATIC void xfs_fs_destroy_inode( struct inode *inode) { + xfs_inode_t *ip = XFS_I(inode); + + XFS_STATS_INC(vn_reclaim); + if (xfs_reclaim(ip)) + panic("%s: cannot reclaim 0x%p\n", __func__, inode); } /* @@ -958,22 +963,13 @@ xfs_fs_clear_inode( { xfs_inode_t *ip = XFS_I(inode); - /* - * ip can be null when xfs_iget_core calls xfs_idestroy if we - * find an inode with di_mode == 0 but without IGET_CREATE set. - */ - if (ip) { - xfs_itrace_entry(ip); - XFS_STATS_INC(vn_rele); - XFS_STATS_INC(vn_remove); - XFS_STATS_INC(vn_reclaim); - XFS_STATS_DEC(vn_active); - - xfs_inactive(ip); - xfs_iflags_clear(ip, XFS_IMODIFIED); - if (xfs_reclaim(ip)) - panic("%s: cannot reclaim 0x%p\n", __func__, inode); - } + xfs_itrace_entry(ip); + XFS_STATS_INC(vn_rele); + XFS_STATS_INC(vn_remove); + XFS_STATS_DEC(vn_active); + + xfs_inactive(ip); + xfs_iflags_clear(ip, XFS_IMODIFIED); } STATIC void -- cgit v1.2.3-70-g09d2 From 493dca6178cf4a6ae2a16c602d6cb455bb7d31bd Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Thu, 30 Oct 2008 17:36:52 +1100 Subject: [XFS] Fix build warning - xfs_fs_alloc_inode() needs a return statement SGI-PV: 988141 SGI-Modid: xfs-linux-melb:xfs-kern:32325a Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index bb535a7737b..1d67d7f92a4 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -872,6 +872,7 @@ xfs_fs_alloc_inode( struct super_block *sb) { BUG(); + return NULL; } /* -- cgit v1.2.3-70-g09d2 From fce08f2f3bd0d08feeb4cea70e44aa3471d9bb4c Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:37:03 +1100 Subject: [XFS] move inode reclaim functions to xfs_sync.c Background inode reclaim is run by the xfssyncd. Move the reclaim worker functions to be close to the sync code as they are very similar in structure and are both run from the same background thread.
SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32329a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 91 +++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_sync.h | 3 ++ fs/xfs/xfs_inode.h | 2 - fs/xfs/xfs_vnodeops.c | 90 -------------------------------------------- 4 files changed, 94 insertions(+), 92 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index b2b708254ae..79038ea55b0 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -583,3 +583,94 @@ xfs_syncd_stop( kthread_stop(mp->m_sync_task); } +int +xfs_finish_reclaim( + xfs_inode_t *ip, + int locked, + int sync_mode) +{ + xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); + + /* The hash lock here protects a thread in xfs_iget_core from + * racing with us on linking the inode back with a vnode. + * Once we have the XFS_IRECLAIM flag set it will not touch + * us. + */ + write_lock(&pag->pag_ici_lock); + spin_lock(&ip->i_flags_lock); + if (__xfs_iflags_test(ip, XFS_IRECLAIM) || + !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { + spin_unlock(&ip->i_flags_lock); + write_unlock(&pag->pag_ici_lock); + if (locked) { + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } + return 1; + } + __xfs_iflags_set(ip, XFS_IRECLAIM); + spin_unlock(&ip->i_flags_lock); + write_unlock(&pag->pag_ici_lock); + xfs_put_perag(ip->i_mount, pag); + + /* + * If the inode is still dirty, then flush it out. If the inode + * is not in the AIL, then it will be OK to flush it delwri as + * long as xfs_iflush() does not keep any references to the inode. + * We leave that decision up to xfs_iflush() since it has the + * knowledge of whether it's OK to simply do a delwri flush of + * the inode or whether we need to wait until the inode is + * pulled from the AIL. + * We get the flush lock regardless, though, just to make sure + * we don't free it while it is being flushed. + */ + if (!locked) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_iflock(ip); + } + + /* + * In the case of a forced shutdown we rely on xfs_iflush() to + * wait for the inode to be unpinned before returning an error. 
+ */ + if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) { + /* synchronize with xfs_iflush_done */ + xfs_iflock(ip); + xfs_ifunlock(ip); + } + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_ireclaim(ip); + return 0; +} + +int +xfs_finish_reclaim_all( + xfs_mount_t *mp, + int noblock, + int mode) +{ + xfs_inode_t *ip, *n; + +restart: + XFS_MOUNT_ILOCK(mp); + list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { + if (noblock) { + if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) + continue; + if (xfs_ipincount(ip) || + !xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + continue; + } + } + XFS_MOUNT_IUNLOCK(mp); + if (xfs_finish_reclaim(ip, noblock, mode)) + delay(1); + goto restart; + } + XFS_MOUNT_IUNLOCK(mp); + return 0; +} + + diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 3b49aa3bb5f..23117a17fde 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -45,4 +45,7 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inode(struct xfs_inode *ip); void xfs_flush_device(struct xfs_inode *ip); +int xfs_finish_reclaim(struct xfs_inode *ip, int locked, int sync_mode); +int xfs_finish_reclaim_all(struct xfs_mount *mp, int noblock, int mode); + #endif diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 345b43a90eb..64e50ff9ad2 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -496,8 +496,6 @@ int xfs_isilocked(xfs_inode_t *, uint); uint xfs_ilock_map_shared(xfs_inode_t *); void xfs_iunlock_map_shared(xfs_inode_t *, uint); void xfs_ireclaim(xfs_inode_t *); -int xfs_finish_reclaim(xfs_inode_t *, int, int); -int xfs_finish_reclaim_all(struct xfs_mount *, int, int); /* * xfs_inode.c prototypes. diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 7fb577c9f9d..cdcc835bc5a 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2849,96 +2849,6 @@ xfs_reclaim( return 0; } -int -xfs_finish_reclaim( - xfs_inode_t *ip, - int locked, - int sync_mode) -{ - xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino); - - /* The hash lock here protects a thread in xfs_iget_core from - * racing with us on linking the inode back with a vnode. - * Once we have the XFS_IRECLAIM flag set it will not touch - * us. - */ - write_lock(&pag->pag_ici_lock); - spin_lock(&ip->i_flags_lock); - if (__xfs_iflags_test(ip, XFS_IRECLAIM) || - !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) { - spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); - if (locked) { - xfs_ifunlock(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } - return 1; - } - __xfs_iflags_set(ip, XFS_IRECLAIM); - spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); - xfs_put_perag(ip->i_mount, pag); - - /* - * If the inode is still dirty, then flush it out. If the inode - * is not in the AIL, then it will be OK to flush it delwri as - * long as xfs_iflush() does not keep any references to the inode. - * We leave that decision up to xfs_iflush() since it has the - * knowledge of whether it's OK to simply do a delwri flush of - * the inode or whether we need to wait until the inode is - * pulled from the AIL. - * We get the flush lock regardless, though, just to make sure - * we don't free it while it is being flushed. - */ - if (!locked) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_iflock(ip); - } - - /* - * In the case of a forced shutdown we rely on xfs_iflush() to - * wait for the inode to be unpinned before returning an error. 
- */ - if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) { - /* synchronize with xfs_iflush_done */ - xfs_iflock(ip); - xfs_ifunlock(ip); - } - - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_ireclaim(ip); - return 0; -} - -int -xfs_finish_reclaim_all( - xfs_mount_t *mp, - int noblock, - int mode) -{ - xfs_inode_t *ip, *n; - -restart: - XFS_MOUNT_ILOCK(mp); - list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { - if (noblock) { - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) - continue; - if (xfs_ipincount(ip) || - !xfs_iflock_nowait(ip)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - continue; - } - } - XFS_MOUNT_IUNLOCK(mp); - if (xfs_finish_reclaim(ip, noblock, mode)) - delay(1); - goto restart; - } - XFS_MOUNT_IUNLOCK(mp); - return 0; -} - /* * xfs_alloc_file_space() * This routine allocates disk space for the given file. -- cgit v1.2.3-70-g09d2 From 1dc3318ae1c1cc11f9fb8279a806de448e2b90e8 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:37:15 +1100 Subject: [XFS] rename inode reclaim functions The function names xfs_finish_reclaim and xfs_finish_reclaim_all are not very descriptive of what they are reclaiming. Rename to xfs_reclaim_inode[s] to match the xfs_sync_inodes() function. SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32330a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 10 +++++----- fs/xfs/linux-2.6/xfs_sync.h | 4 ++-- fs/xfs/xfs_mount.c | 2 +- fs/xfs/xfs_vnodeops.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 79038ea55b0..34413ceaea9 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -364,7 +364,7 @@ xfs_quiesce_fs( int count = 0, pincount; xfs_flush_buftarg(mp->m_ddev_targp, 0); - xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); + xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); /* * This loop must run at least twice. 
The first instance of the loop @@ -505,7 +505,7 @@ xfs_sync_worker( if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); - xfs_finish_reclaim_all(mp, 1, XFS_IFLUSH_DELWRI_ELSE_ASYNC); + xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC); /* dgc: errors ignored here */ error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH); error = xfs_sync_fsdata(mp, SYNC_BDFLUSH); @@ -584,7 +584,7 @@ xfs_syncd_stop( } int -xfs_finish_reclaim( +xfs_reclaim_inode( xfs_inode_t *ip, int locked, int sync_mode) @@ -645,7 +645,7 @@ xfs_finish_reclaim( } int -xfs_finish_reclaim_all( +xfs_reclaim_inodes( xfs_mount_t *mp, int noblock, int mode) @@ -665,7 +665,7 @@ restart: } } XFS_MOUNT_IUNLOCK(mp); - if (xfs_finish_reclaim(ip, noblock, mode)) + if (xfs_reclaim_inode(ip, noblock, mode)) delay(1); goto restart; } diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index 23117a17fde..c1bcd500509 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -45,7 +45,7 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inode(struct xfs_inode *ip); void xfs_flush_device(struct xfs_inode *ip); -int xfs_finish_reclaim(struct xfs_inode *ip, int locked, int sync_mode); -int xfs_finish_reclaim_all(struct xfs_mount *mp, int noblock, int mode); +int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); +int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode); #endif diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 43e5917465a..3704baefe2e 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1235,7 +1235,7 @@ xfs_unmountfs( * need to force the log first. */ xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); - xfs_finish_reclaim_all(mp, 0, XFS_IFLUSH_ASYNC); + xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_ASYNC); XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index cdcc835bc5a..07945634923 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2834,7 +2834,7 @@ xfs_reclaim( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_iflock(ip); xfs_iflags_set(ip, XFS_IRECLAIMABLE); - return xfs_finish_reclaim(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); + return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); } else { xfs_mount_t *mp = ip->i_mount; -- cgit v1.2.3-70-g09d2 From 396beb85311689e38634926058d9a3bb0576ca8a Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:37:26 +1100 Subject: [XFS] mark inodes for reclaim via a tag in the inode radix tree Prepare for removing the deleted inode list by marking inodes for reclaim in the inode radix trees so that we can use the radix trees to find reclaimable inodes. 
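A minimal sketch of the mechanism (simplified from the patch below; locking and the per-AG lookup are omitted): the reclaim state becomes a tag on the inode's slot in the per-AG inode radix tree, so a tagged gang lookup can later find reclaim candidates without maintaining a separate list.

	/* mark: tag the inode's slot in its AG's inode radix tree */
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	/* find: a gang lookup that visits only tagged slots */
	nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					      (void **)&ip, first_index, 1,
					      XFS_ICI_RECLAIM_TAG);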
SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32331a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 41 +++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_sync.h | 4 ++++ fs/xfs/xfs_ag.h | 5 +++++ fs/xfs/xfs_iget.c | 3 +++ fs/xfs/xfs_vnodeops.c | 1 + 5 files changed, 54 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 34413ceaea9..9e7f4dccab7 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -644,6 +644,47 @@ xfs_reclaim_inode( return 0; } +void +xfs_inode_set_reclaim_tag( + xfs_inode_t *ip) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); + + read_lock(&pag->pag_ici_lock); + spin_lock(&ip->i_flags_lock); + radix_tree_tag_set(&pag->pag_ici_root, + XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); + spin_unlock(&ip->i_flags_lock); + read_unlock(&pag->pag_ici_lock); + xfs_put_perag(mp, pag); +} + +void +__xfs_inode_clear_reclaim_tag( + xfs_mount_t *mp, + xfs_perag_t *pag, + xfs_inode_t *ip) +{ + radix_tree_tag_clear(&pag->pag_ici_root, + XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); +} + +void +xfs_inode_clear_reclaim_tag( + xfs_inode_t *ip) +{ + xfs_mount_t *mp = ip->i_mount; + xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); + + read_lock(&pag->pag_ici_lock); + spin_lock(&ip->i_flags_lock); + __xfs_inode_clear_reclaim_tag(mp, pag, ip); + spin_unlock(&ip->i_flags_lock); + read_unlock(&pag->pag_ici_lock); + xfs_put_perag(mp, pag); +} + int xfs_reclaim_inodes( xfs_mount_t *mp, diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index c1bcd500509..5f6de1efe1f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -48,4 +48,8 @@ void xfs_flush_device(struct xfs_inode *ip); int xfs_reclaim_inode(struct xfs_inode *ip, int locked, int sync_mode); int xfs_reclaim_inodes(struct xfs_mount *mp, int noblock, int mode); +void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); +void xfs_inode_clear_reclaim_tag(struct xfs_inode *ip); +void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, + struct xfs_inode *ip); #endif diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 729ee3eb39a..2bfd8632914 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -204,6 +204,11 @@ typedef struct xfs_perag #endif } xfs_perag_t; +/* + * tags for inode radix tree + */ +#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */ + #define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels) #define XFS_MIN_FREELIST_RAW(bl,cl,mp) \ (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp))) diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index c4414e8bce8..a0387f14c20 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -91,6 +91,9 @@ xfs_iget_cache_hit( } xfs_iflags_set(ip, XFS_INEW); xfs_iflags_clear(ip, XFS_IRECLAIMABLE); + + /* clear the radix tree reclaim flag as well. 
*/ + __xfs_inode_clear_reclaim_tag(mp, pag, ip); read_unlock(&pag->pag_ici_lock); XFS_MOUNT_ILOCK(mp); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 07945634923..f89a73eb016 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2845,6 +2845,7 @@ xfs_reclaim( spin_unlock(&ip->i_flags_lock); list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); XFS_MOUNT_IUNLOCK(mp); + xfs_inode_set_reclaim_tag(ip); } return 0; } -- cgit v1.2.3-70-g09d2 From 7a3be02baef7bdec43965103441bde5de4dd8601 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:37:37 +1100 Subject: [XFS] use the inode radix tree for reclaiming inodes Use the reclaim tag to walk the radix tree and find the inodes under reclaim. This was the only user of the deleted inode list. SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32333a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 81 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 71 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 9e7f4dccab7..bbb40e27840 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -685,32 +685,93 @@ xfs_inode_clear_reclaim_tag( xfs_put_perag(mp, pag); } -int -xfs_reclaim_inodes( + +STATIC void +xfs_reclaim_inodes_ag( xfs_mount_t *mp, - int noblock, + int ag, + int noblock, int mode) { - xfs_inode_t *ip, *n; + xfs_inode_t *ip = NULL; + xfs_perag_t *pag = &mp->m_perag[ag]; + int nr_found; + int first_index; + int skipped; restart: - XFS_MOUNT_ILOCK(mp); - list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) { + first_index = 0; + skipped = 0; + do { + /* + * use a gang lookup to find the next inode in the tree + * as the tree is sparse and a gang lookup walks to find + * the number of objects requested. + */ + read_lock(&pag->pag_ici_lock); + nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, + (void**)&ip, first_index, 1, + XFS_ICI_RECLAIM_TAG); + + if (!nr_found) { + read_unlock(&pag->pag_ici_lock); + break; + } + + /* update the index for the next lookup */ + first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); + + ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM))); + + /* ignore if already under reclaim */ + if (xfs_iflags_test(ip, XFS_IRECLAIM)) { + read_unlock(&pag->pag_ici_lock); + continue; + } + if (noblock) { - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { + read_unlock(&pag->pag_ici_lock); continue; + } if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) { xfs_iunlock(ip, XFS_ILOCK_EXCL); + read_unlock(&pag->pag_ici_lock); continue; } } - XFS_MOUNT_IUNLOCK(mp); + read_unlock(&pag->pag_ici_lock); + + /* + * hmmm - this is an inode already in reclaim. Do + * we even bother catching it here? + */ if (xfs_reclaim_inode(ip, noblock, mode)) - delay(1); + skipped++; + } while (nr_found); + + if (skipped) { + delay(1); goto restart; } - XFS_MOUNT_IUNLOCK(mp); + return; + +} + +int +xfs_reclaim_inodes( + xfs_mount_t *mp, + int noblock, + int mode) +{ + int i; + + for (i = 0; i < mp->m_sb.sb_agcount; i++) { + if (!mp->m_perag[i].pag_ici_init) + continue; + xfs_reclaim_inodes_ag(mp, i, noblock, mode); + } return 0; } -- cgit v1.2.3-70-g09d2 From 116545130cbc5214523c2f994a11c81ef9eb9186 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:37:49 +1100 Subject: [XFS] kill deleted inodes list Now that the deleted inodes list is unused, kill it. 
This also removes the i_reclaim list head from the xfs_inode, shrinking it by two pointers. SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32334a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 2 -- fs/xfs/linux-2.6/xfs_sync.c | 6 ++++++ fs/xfs/xfs_iget.c | 8 -------- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_inode.h | 1 - fs/xfs/xfs_mount.c | 1 - fs/xfs/xfs_mount.h | 5 +---- fs/xfs/xfs_vnodeops.c | 12 +----------- 8 files changed, 10 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 1d67d7f92a4..206a949e387 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -913,7 +913,6 @@ xfs_fs_inode_init_once( atomic_set(&ip->i_iocount, 0); atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); - INIT_LIST_HEAD(&ip->i_reclaim); init_waitqueue_head(&ip->i_ipin_wait); /* * Because we want to use a counting completion, complete @@ -1546,7 +1545,6 @@ xfs_fs_fill_super( goto out_free_args; spin_lock_init(&mp->m_sb_lock); - mutex_init(&mp->m_ilock); mutex_init(&mp->m_growlock); atomic_set(&mp->m_active_trans, 0); INIT_LIST_HEAD(&mp->m_sync_list); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index bbb40e27840..22006b5733c 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -644,6 +644,11 @@ xfs_reclaim_inode( return 0; } +/* + * We set the inode flag atomically with the radix tree tag. + * Once we get tag lookups on the radix tree, this inode flag + * can go away. + */ void xfs_inode_set_reclaim_tag( xfs_inode_t *ip) @@ -655,6 +660,7 @@ xfs_inode_set_reclaim_tag( spin_lock(&ip->i_flags_lock); radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); + __xfs_iflags_set(ip, XFS_IRECLAIMABLE); spin_unlock(&ip->i_flags_lock); read_unlock(&pag->pag_ici_lock); xfs_put_perag(mp, pag); diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index a0387f14c20..800133805ca 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -95,10 +95,6 @@ xfs_iget_cache_hit( /* clear the radix tree reclaim flag as well. */ __xfs_inode_clear_reclaim_tag(mp, pag, ip); read_unlock(&pag->pag_ici_lock); - - XFS_MOUNT_ILOCK(mp); - list_del_init(&ip->i_reclaim); - XFS_MOUNT_IUNLOCK(mp); } else if (!igrab(VFS_I(ip))) { /* If the VFS inode is being torn down, pause and try again. 
*/ error = EAGAIN; @@ -419,11 +415,7 @@ xfs_iextract( write_unlock(&pag->pag_ici_lock); xfs_put_perag(mp, pag); - /* Deal with the deleted inodes list */ - XFS_MOUNT_ILOCK(mp); - list_del_init(&ip->i_reclaim); mp->m_ireclaims++; - XFS_MOUNT_IUNLOCK(mp); } /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 99d9118c4a4..4eb629f0513 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -811,7 +811,7 @@ xfs_inode_alloc( ASSERT(atomic_read(&ip->i_iocount) == 0); ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(list_empty(&ip->i_reclaim)); + ASSERT(completion_done(&ip->i_flush)); /* * initialise the VFS inode here to get failures @@ -2729,7 +2729,7 @@ xfs_idestroy( ASSERT(atomic_read(&ip->i_iocount) == 0); ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(list_empty(&ip->i_reclaim)); + ASSERT(completion_done(&ip->i_flush)); kmem_zone_free(xfs_inode_zone, ip); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 64e50ff9ad2..a5aeb9cfeae 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -235,7 +235,6 @@ typedef struct dm_attrs_s { typedef struct xfs_inode { /* Inode linking and identification information. */ struct xfs_mount *i_mount; /* fs mount struct ptr */ - struct list_head i_reclaim; /* reclaim list */ struct xfs_dquot *i_udquot; /* user dquot */ struct xfs_dquot *i_gdquot; /* group dquot */ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 3704baefe2e..177976dfea0 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -580,7 +580,6 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) mp->m_blockmask = sbp->sb_blocksize - 1; mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; mp->m_blockwmask = mp->m_blockwsize - 1; - INIT_LIST_HEAD(&mp->m_del_inodes); /* * Setup for attributes, in case they get created. diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 4e62802b6ab..67cf0b2bb84 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -248,8 +248,6 @@ typedef struct xfs_mount { xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ spinlock_t m_agirotor_lock;/* .. and lock protecting it */ xfs_agnumber_t m_maxagi; /* highest inode alloc group */ - struct list_head m_del_inodes; /* inodes to reclaim */ - mutex_t m_ilock; /* inode list mutex */ uint m_ireclaims; /* count of calls to reclaim*/ uint m_readio_log; /* min read size log bytes */ uint m_readio_blocks; /* min read size blocks */ @@ -312,8 +310,7 @@ typedef struct xfs_mount { int m_attr_magicpct;/* 37% of the blocksize */ int m_dir_magicpct; /* 37% of the dir blocksize */ __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ - __uint8_t m_inode_quiesce;/* call quiesce on new inodes. - field governed by m_ilock */ + __uint8_t m_inode_quiesce;/* call quiesce on new inodes. 
*/ __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */ int m_dirblksize; /* directory block sz--bytes */ diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index f89a73eb016..1d15a320b9a 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2835,18 +2835,8 @@ xfs_reclaim( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_iflock(ip); xfs_iflags_set(ip, XFS_IRECLAIMABLE); return xfs_reclaim_inode(ip, 1, XFS_IFLUSH_DELWRI_ELSE_SYNC); - } else { - xfs_mount_t *mp = ip->i_mount; - - /* Protect sync and unpin from us */ - XFS_MOUNT_ILOCK(mp); - spin_lock(&ip->i_flags_lock); - __xfs_iflags_set(ip, XFS_IRECLAIMABLE); - spin_unlock(&ip->i_flags_lock); - list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); - XFS_MOUNT_IUNLOCK(mp); - xfs_inode_set_reclaim_tag(ip); } + xfs_inode_set_reclaim_tag(ip); return 0; } -- cgit v1.2.3-70-g09d2 From 8c38ab032094ff1d903c79db689607b1ebae13ca Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:38:00 +1100 Subject: [XFS] Prevent looping in xfs_sync_inodes_ag If the last block of the AG has inodes in it and the AG is an exact power-of-2 size then the last inode in the AG points to the last block in the AG. If we try to find the next inode in the AG by adding one to the inode number, we increment the inode number past the size of the AG. The result is that the macro XFS_INO_TO_AGINO() will strip the AG portion of the inode number and return an inode number of zero. That is, instead of terminating the lookup loop because the inode number went outside the valid range for the AG, the search index returns to zero and we start traversing the radix tree from the start again. This results in an endless loop in xfs_sync_inodes_ag(). Fix it by detecting if the new search index decreases as a result of incrementing the current inode number. That indicates an overflow and hence that we have finished processing the AG so we can terminate the loop. SGI-PV: 988142 SGI-Modid: xfs-linux-melb:xfs-kern:32335a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 22006b5733c..ee1648b179f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -59,7 +59,7 @@ xfs_sync_inodes_ag( { xfs_perag_t *pag = &mp->m_perag[ag]; int nr_found; - int first_index = 0; + uint32_t first_index = 0; int error = 0; int last_error = 0; int fflag = XFS_B_ASYNC; @@ -97,8 +97,17 @@ xfs_sync_inodes_ag( break; } - /* update the index for the next lookup */ + /* + * Update the index for the next lookup. Catch overflows + * into the next AG range which can occur if we have inodes + * in the last block of the AG and we are currently + * pointing to the last inode. + */ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); + if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { + read_unlock(&pag->pag_ici_lock); + break; + } /* * skip inodes in reclaim. Let xfs_syncsub do that for @@ -702,7 +711,7 @@ xfs_reclaim_inodes_ag( xfs_inode_t *ip = NULL; xfs_perag_t *pag = &mp->m_perag[ag]; int nr_found; - int first_index; + uint32_t first_index; int skipped; restart: @@ -724,8 +733,17 @@ restart: break; } - /* update the index for the next lookup */ + /* + * Update the index for the next lookup.
Catch overflows + * into the next AG range which can occur if we have inodes + * in the last block of the AG and we are currently + * pointing to the last inode. + */ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); + if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { + read_unlock(&pag->pag_ici_lock); + break; + } ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM))); -- cgit v1.2.3-70-g09d2 From a7444053fb3ebd3d905e3c7a7bd5ea80a54b083a Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:38:12 +1100 Subject: [XFS] Account for allocated blocks when expanding directories When we create a directory, we reserve a number of blocks for the maximum possible expansion of the directory due to various btree splits, freespace allocation, etc. Unfortunately, each allocation is not reflected in the total number of blocks still available to the transaction, so the maximal reservation is used over and over again. This leads to problems where an allocation group has only enough blocks for *some* of the allocations required for the directory modification. After the first N allocations, the remaining blocks in the allocation group drop below the total reservation, and subsequent allocations fail because the allocator will not allow the allocation to proceed if the AG does not have enough blocks available for the entire allocation total. This results in an ENOSPC occurring after an allocation has already occurred. This results in aborting the directory operation (leaving the directory in an inconsistent state) and cancelling a dirty transaction, which results in a filesystem shutdown. Avoid the problem by reflecting the number of blocks allocated in any directory expansion in the total number of blocks available to the modification in progress. This prevents a directory modification from being aborted part way through with an ENOSPC. SGI-PV: 988144 SGI-Modid: xfs-linux-melb:xfs-kern:32340a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_da_btree.c | 5 +++++ fs/xfs/xfs_dir2.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 9e561a9cefc..a11a8390bf6 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -1566,11 +1566,14 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) int nmap, error, w, count, c, got, i, mapi; xfs_trans_t *tp; xfs_mount_t *mp; + xfs_drfsbno_t nblks; dp = args->dp; mp = dp->i_mount; w = args->whichfork; tp = args->trans; + nblks = dp->i_d.di_nblocks; + /* * For new directories adjust the file offset and block count. */ @@ -1647,6 +1650,8 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) } if (mapp != &map) kmem_free(mapp); + /* account for newly allocated blocks in reserved blocks total */ + args->total -= dp->i_d.di_nblocks - nblks; *new_blkno = (xfs_dablk_t)bno; return 0; } diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 80e0dc51361..1afb12278b8 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -525,11 +525,13 @@ xfs_dir2_grow_inode( xfs_mount_t *mp; int nmap; /* number of bmap entries */ xfs_trans_t *tp; + xfs_drfsbno_t nblks; xfs_dir2_trace_args_s("grow_inode", args, space); dp = args->dp; tp = args->trans; mp = dp->i_mount; + nblks = dp->i_d.di_nblocks; /* * Set lowest possible block in the space requested.
*/ @@ -622,7 +624,11 @@ xfs_dir2_grow_inode( */ if (mapp != &map) kmem_free(mapp); + + /* account for newly allocated blocks in reserved blocks total */ + args->total -= dp->i_d.di_nblocks - nblks; *dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno); + /* * Update file's size if this is the data space and it grew. */ -- cgit v1.2.3-70-g09d2 From 82fa9012458d867936d7bf130e6e14bdebc6873c Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:38:26 +1100 Subject: [XFS] Allocate the struct xfs_ail Rather than embedding the struct xfs_ail in the struct xfs_mount, allocate it during AIL initialisation. Add a back pointer to the struct xfs_ail so that we can pass around the xfs_ail and still be able to access the xfs_mount if need be. This is th first step involved in isolating the AIL implementation from the surrounding filesystem code. SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32346a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 28 +++++++------- fs/xfs/xfs_mount.h | 10 +---- fs/xfs/xfs_trans_ail.c | 87 +++++++++++++++++++++++++------------------- fs/xfs/xfs_trans_priv.h | 17 ++++++--- 4 files changed, 77 insertions(+), 65 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 206a949e387..655508ce2b7 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -814,18 +814,18 @@ xfs_setup_devices( */ void xfsaild_wakeup( - xfs_mount_t *mp, + struct xfs_ail *ailp, xfs_lsn_t threshold_lsn) { - mp->m_ail.xa_target = threshold_lsn; - wake_up_process(mp->m_ail.xa_task); + ailp->xa_target = threshold_lsn; + wake_up_process(ailp->xa_task); } int xfsaild( void *data) { - xfs_mount_t *mp = (xfs_mount_t *)data; + struct xfs_ail *ailp = data; xfs_lsn_t last_pushed_lsn = 0; long tout = 0; @@ -837,11 +837,11 @@ xfsaild( /* swsusp */ try_to_freeze(); - ASSERT(mp->m_log); - if (XFS_FORCED_SHUTDOWN(mp)) + ASSERT(ailp->xa_mount->m_log); + if (XFS_FORCED_SHUTDOWN(ailp->xa_mount)) continue; - tout = xfsaild_push(mp, &last_pushed_lsn); + tout = xfsaild_push(ailp, &last_pushed_lsn); } return 0; @@ -849,20 +849,20 @@ xfsaild( int xfsaild_start( - xfs_mount_t *mp) + struct xfs_ail *ailp) { - mp->m_ail.xa_target = 0; - mp->m_ail.xa_task = kthread_run(xfsaild, mp, "xfsaild"); - if (IS_ERR(mp->m_ail.xa_task)) - return -PTR_ERR(mp->m_ail.xa_task); + ailp->xa_target = 0; + ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild"); + if (IS_ERR(ailp->xa_task)) + return -PTR_ERR(ailp->xa_task); return 0; } void xfsaild_stop( - xfs_mount_t *mp) + struct xfs_ail *ailp) { - kthread_stop(mp->m_ail.xa_task); + kthread_stop(ailp->xa_task); } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 67cf0b2bb84..287f90011ed 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -63,6 +63,7 @@ struct xfs_extdelta; struct xfs_swapext; struct xfs_mru_cache; struct xfs_nameops; +struct xfs_ail; /* * Prototypes and functions for the Data Migration subsystem. 
@@ -224,18 +225,11 @@ extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); #define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) #endif -typedef struct xfs_ail { - struct list_head xa_ail; - uint xa_gen; - struct task_struct *xa_task; - xfs_lsn_t xa_target; -} xfs_ail_t; - typedef struct xfs_mount { struct super_block *m_super; xfs_tid_t m_tid; /* next unused tid for fs */ spinlock_t m_ail_lock; /* fs AIL mutex */ - xfs_ail_t m_ail; /* fs active log item list */ + struct xfs_ail *m_ail; /* fs active log item list */ xfs_sb_t m_sb; /* copy of fs superblock */ spinlock_t m_sb_lock; /* sb counter lock */ struct xfs_buf *m_sb_bp; /* buffer for superblock */ diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 1f77c00af56..db72b52cd42 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -28,13 +28,13 @@ #include "xfs_trans_priv.h" #include "xfs_error.h" -STATIC void xfs_ail_insert(xfs_ail_t *, xfs_log_item_t *); -STATIC xfs_log_item_t * xfs_ail_delete(xfs_ail_t *, xfs_log_item_t *); -STATIC xfs_log_item_t * xfs_ail_min(xfs_ail_t *); -STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_t *, xfs_log_item_t *); +STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *); +STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); +STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); +STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); #ifdef DEBUG -STATIC void xfs_ail_check(xfs_ail_t *, xfs_log_item_t *); +STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); #else #define xfs_ail_check(a,l) #endif /* DEBUG */ @@ -57,7 +57,7 @@ xfs_trans_tail_ail( xfs_log_item_t *lip; spin_lock(&mp->m_ail_lock); - lip = xfs_ail_min(&mp->m_ail); + lip = xfs_ail_min(mp->m_ail); if (lip == NULL) { lsn = (xfs_lsn_t)0; } else { @@ -91,10 +91,10 @@ xfs_trans_push_ail( { xfs_log_item_t *lip; - lip = xfs_ail_min(&mp->m_ail); + lip = xfs_ail_min(mp->m_ail); if (lip && !XFS_FORCED_SHUTDOWN(mp)) { - if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0) - xfsaild_wakeup(mp, threshold_lsn); + if (XFS_LSN_CMP(threshold_lsn, mp->m_ail->xa_target) > 0) + xfsaild_wakeup(mp->m_ail, threshold_lsn); } } @@ -111,12 +111,12 @@ xfs_trans_first_push_ail( { xfs_log_item_t *lip; - lip = xfs_ail_min(&mp->m_ail); - *gen = (int)mp->m_ail.xa_gen; + lip = xfs_ail_min(mp->m_ail); + *gen = (int)mp->m_ail->xa_gen; if (lsn == 0) return lip; - list_for_each_entry(lip, &mp->m_ail.xa_ail, li_ail) { + list_for_each_entry(lip, &mp->m_ail->xa_ail, li_ail) { if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) return lip; } @@ -129,17 +129,18 @@ xfs_trans_first_push_ail( */ long xfsaild_push( - xfs_mount_t *mp, + struct xfs_ail *ailp, xfs_lsn_t *last_lsn) { long tout = 1000; /* milliseconds */ xfs_lsn_t last_pushed_lsn = *last_lsn; - xfs_lsn_t target = mp->m_ail.xa_target; + xfs_lsn_t target = ailp->xa_target; xfs_lsn_t lsn; xfs_log_item_t *lip; int gen; int restarts; int flush_log, count, stuck; + xfs_mount_t *mp = ailp->xa_mount; #define XFS_TRANS_PUSH_AIL_RESTARTS 10 @@ -331,7 +332,7 @@ xfs_trans_unlocked_item( * the call to xfs_log_move_tail() doesn't do anything if there's * not enough free space to wake people up so we're safe calling it. 
*/ - min_lip = xfs_ail_min(&mp->m_ail); + min_lip = xfs_ail_min(mp->m_ail); if (min_lip == lip) xfs_log_move_tail(mp, 1); @@ -362,10 +363,10 @@ xfs_trans_update_ail( xfs_log_item_t *dlip=NULL; xfs_log_item_t *mlip; /* ptr to minimum lip */ - mlip = xfs_ail_min(&mp->m_ail); + mlip = xfs_ail_min(mp->m_ail); if (lip->li_flags & XFS_LI_IN_AIL) { - dlip = xfs_ail_delete(&mp->m_ail, lip); + dlip = xfs_ail_delete(mp->m_ail, lip); ASSERT(dlip == lip); } else { lip->li_flags |= XFS_LI_IN_AIL; @@ -373,11 +374,11 @@ xfs_trans_update_ail( lip->li_lsn = lsn; - xfs_ail_insert(&mp->m_ail, lip); - mp->m_ail.xa_gen++; + xfs_ail_insert(mp->m_ail, lip); + mp->m_ail->xa_gen++; if (mlip == dlip) { - mlip = xfs_ail_min(&mp->m_ail); + mlip = xfs_ail_min(mp->m_ail); spin_unlock(&mp->m_ail_lock); xfs_log_move_tail(mp, mlip->li_lsn); } else { @@ -411,17 +412,17 @@ xfs_trans_delete_ail( xfs_log_item_t *mlip; if (lip->li_flags & XFS_LI_IN_AIL) { - mlip = xfs_ail_min(&mp->m_ail); - dlip = xfs_ail_delete(&mp->m_ail, lip); + mlip = xfs_ail_min(mp->m_ail); + dlip = xfs_ail_delete(mp->m_ail, lip); ASSERT(dlip == lip); lip->li_flags &= ~XFS_LI_IN_AIL; lip->li_lsn = 0; - mp->m_ail.xa_gen++; + mp->m_ail->xa_gen++; if (mlip == dlip) { - mlip = xfs_ail_min(&mp->m_ail); + mlip = xfs_ail_min(mp->m_ail); spin_unlock(&mp->m_ail_lock); xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); } else { @@ -459,8 +460,8 @@ xfs_trans_first_ail( { xfs_log_item_t *lip; - lip = xfs_ail_min(&mp->m_ail); - *gen = (int)mp->m_ail.xa_gen; + lip = xfs_ail_min(mp->m_ail); + *gen = (int)mp->m_ail->xa_gen; return lip; } @@ -482,11 +483,11 @@ xfs_trans_next_ail( xfs_log_item_t *nlip; ASSERT(mp && lip && gen); - if (mp->m_ail.xa_gen == *gen) { - nlip = xfs_ail_next(&mp->m_ail, lip); + if (mp->m_ail->xa_gen == *gen) { + nlip = xfs_ail_next(mp->m_ail, lip); } else { - nlip = xfs_ail_min(&mp->m_ail); - *gen = (int)mp->m_ail.xa_gen; + nlip = xfs_ail_min(mp->m_ail); + *gen = (int)mp->m_ail->xa_gen; if (restarts != NULL) { XFS_STATS_INC(xs_push_ail_restarts); (*restarts)++; @@ -515,15 +516,25 @@ int xfs_trans_ail_init( xfs_mount_t *mp) { - INIT_LIST_HEAD(&mp->m_ail.xa_ail); - return xfsaild_start(mp); + struct xfs_ail *ailp; + + ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); + if (!ailp) + return ENOMEM; + + ailp->xa_mount = mp; + INIT_LIST_HEAD(&ailp->xa_ail); + return xfsaild_start(ailp); } void xfs_trans_ail_destroy( xfs_mount_t *mp) { - xfsaild_stop(mp); + struct xfs_ail *ailp = mp->m_ail; + + xfsaild_stop(ailp); + kmem_free(ailp); } /* @@ -534,7 +545,7 @@ xfs_trans_ail_destroy( */ STATIC void xfs_ail_insert( - xfs_ail_t *ailp, + struct xfs_ail *ailp, xfs_log_item_t *lip) /* ARGSUSED */ { @@ -568,7 +579,7 @@ xfs_ail_insert( /*ARGSUSED*/ STATIC xfs_log_item_t * xfs_ail_delete( - xfs_ail_t *ailp, + struct xfs_ail *ailp, xfs_log_item_t *lip) /* ARGSUSED */ { @@ -585,7 +596,7 @@ xfs_ail_delete( */ STATIC xfs_log_item_t * xfs_ail_min( - xfs_ail_t *ailp) + struct xfs_ail *ailp) /* ARGSUSED */ { if (list_empty(&ailp->xa_ail)) @@ -601,7 +612,7 @@ xfs_ail_min( */ STATIC xfs_log_item_t * xfs_ail_next( - xfs_ail_t *ailp, + struct xfs_ail *ailp, xfs_log_item_t *lip) /* ARGSUSED */ { @@ -617,7 +628,7 @@ xfs_ail_next( */ STATIC void xfs_ail_check( - xfs_ail_t *ailp, + struct xfs_ail *ailp, xfs_log_item_t *lip) { xfs_log_item_t *prev_lip; diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 3c748c456ed..98317fdc33b 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -56,13 +56,20 @@ struct xfs_log_item *xfs_trans_first_ail(struct 
xfs_mount *, int *); struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, struct xfs_log_item *, int *, int *); - /* * AIL push thread support */ -long xfsaild_push(struct xfs_mount *, xfs_lsn_t *); -void xfsaild_wakeup(struct xfs_mount *, xfs_lsn_t); -int xfsaild_start(struct xfs_mount *); -void xfsaild_stop(struct xfs_mount *); +struct xfs_ail { + struct xfs_mount *xa_mount; + struct list_head xa_ail; + uint xa_gen; + struct task_struct *xa_task; + xfs_lsn_t xa_target; +}; + +long xfsaild_push(struct xfs_ail *, xfs_lsn_t *); +void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t); +int xfsaild_start(struct xfs_ail *); +void xfsaild_stop(struct xfs_ail *); #endif /* __XFS_TRANS_PRIV_H__ */ -- cgit v1.2.3-70-g09d2 From 27d8d5fe0ef9daeaafbdd32b14b32a2211930062 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:38:39 +1100 Subject: [XFS] Use a cursor for AIL traversal. To replace the current generation number ensuring sanity of the AIL traversal, replace it with an external cursor that is linked to the AIL. Basically, we store the next item in the cursor whenever we want to drop the AIL lock to do something to the current item. When we regain the lock, the current item may already be free, so we can't reference it, but the next item in the traversal is already held in the cursor. When we move or delete an object, we search all the active cursors and if there is an item match we clear the cursor(s) that point to the object. This forces the traversal to restart transparently. We don't invalidate the cursor on insert because the cursor still points to a valid item. If the item is inserted between the current item and the cursor it does not matter; the traversal is considered to be past the insertion point so it will be picked up in the next traversal. Hence traversal restarts pretty much disappear altogether with this method of traversal, which should substantially reduce the overhead of pushing on a busy AIL.
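The cursor idea above can be pictured with a small stand-alone model. The following user-space C sketch is only an illustration of the mechanism, not the kernel code: the names (ail, ail_cursor, item) are invented, a bare singly linked list stands in for the AIL, and locking is omitted. It shows the two moves the message describes, storing the next item in the cursor before the lock would be dropped, and poisoning the low bit of that pointer when the item it names is removed so the traversal restarts from the head.

#include <stdint.h>
#include <stddef.h>

struct item {
	struct item		*next;		/* next item in the model "AIL" */
};

struct ail_cursor {
	struct ail_cursor	*next;		/* next active cursor */
	struct item		*item;		/* next item to visit; low bit set = invalidated */
};

struct ail {
	struct item		*head;		/* items in ascending LSN order */
	struct ail_cursor	*cursors;	/* all active traversal cursors */
};

/* Remember the item after lip so lip itself may be freed while we are away. */
static void cursor_set(struct ail_cursor *cur, struct item *lip)
{
	cur->item = lip ? lip->next : NULL;
}

/* Return the item to work on and advance; restart from the head if poisoned. */
static struct item *cursor_next(struct ail *ailp, struct ail_cursor *cur)
{
	struct item *lip = cur->item;

	if ((uintptr_t)lip & 1)			/* cursor was invalidated */
		lip = ailp->head;
	cursor_set(cur, lip);
	return lip;
}

/* On removal of lip, poison every cursor that still points at it. */
static void cursor_clear(struct ail *ailp, struct item *lip)
{
	struct ail_cursor *cur;

	for (cur = ailp->cursors; cur; cur = cur->next)
		if (cur->item == lip)
			cur->item = (struct item *)((uintptr_t)lip | 1);
}

In the real patch the push cursor is embedded in struct xfs_ail and only additional, stack-declared cursors get linked onto the list, which is what xfs_trans_ail_cursor_init() in the diff below takes care of.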
Version 2 o add restart logic o comment cursor interface o minor cleanups SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32347a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 4 +- fs/xfs/xfs_log_recover.c | 61 +++++--------- fs/xfs/xfs_trans_ail.c | 207 ++++++++++++++++++++++++++++++++++------------- fs/xfs/xfs_trans_priv.h | 55 ++++++++++--- 4 files changed, 218 insertions(+), 109 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 0b02c644355..4184085d44a 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -900,7 +900,7 @@ xfs_log_move_tail(xfs_mount_t *mp, int xfs_log_need_covered(xfs_mount_t *mp) { - int needed = 0, gen; + int needed = 0; xlog_t *log = mp->m_log; if (!xfs_fs_writable(mp)) @@ -909,7 +909,7 @@ xfs_log_need_covered(xfs_mount_t *mp) spin_lock(&log->l_icloglock); if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || (log->l_covered_state == XLOG_STATE_COVER_NEED2)) - && !xfs_trans_first_ail(mp, &gen) + && !xfs_trans_first_ail(mp, NULL) && xlog_iclogs_empty(log)) { if (log->l_covered_state == XLOG_STATE_COVER_NEED) log->l_covered_state = XLOG_STATE_COVER_DONE; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 199c8ea3647..37ba4899f3e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -54,10 +54,8 @@ STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q, xlog_recover_item_t *item); #if defined(DEBUG) STATIC void xlog_recover_check_summary(xlog_t *); -STATIC void xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int); #else #define xlog_recover_check_summary(log) -#define xlog_recover_check_ail(mp, lip, gen) #endif @@ -2710,8 +2708,8 @@ xlog_recover_do_efd_trans( xfs_efd_log_format_t *efd_formatp; xfs_efi_log_item_t *efip = NULL; xfs_log_item_t *lip; - int gen; __uint64_t efi_id; + struct xfs_ail_cursor cur; if (pass == XLOG_RECOVER_PASS1) { return; @@ -2730,7 +2728,8 @@ xlog_recover_do_efd_trans( */ mp = log->l_mp; spin_lock(&mp->m_ail_lock); - lip = xfs_trans_first_ail(mp, &gen); + xfs_trans_ail_cursor_init(mp->m_ail, &cur); + lip = xfs_trans_first_ail(mp, &cur); while (lip != NULL) { if (lip->li_type == XFS_LI_EFI) { efip = (xfs_efi_log_item_t *)lip; @@ -2741,11 +2740,13 @@ xlog_recover_do_efd_trans( */ xfs_trans_delete_ail(mp, lip); xfs_efi_item_free(efip); - return; + spin_lock(&mp->m_ail_lock); + break; } } - lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + lip = xfs_trans_next_ail(mp, &cur); } + xfs_trans_ail_cursor_done(mp->m_ail, &cur); spin_unlock(&mp->m_ail_lock); } @@ -3029,33 +3030,6 @@ abort_error: return error; } -/* - * Verify that once we've encountered something other than an EFI - * in the AIL that there are no more EFIs in the AIL. - */ -#if defined(DEBUG) -STATIC void -xlog_recover_check_ail( - xfs_mount_t *mp, - xfs_log_item_t *lip, - int gen) -{ - int orig_gen = gen; - - do { - ASSERT(lip->li_type != XFS_LI_EFI); - lip = xfs_trans_next_ail(mp, lip, &gen, NULL); - /* - * The check will be bogus if we restart from the - * beginning of the AIL, so ASSERT that we don't. - * We never should since we're holding the AIL lock - * the entire time. - */ - ASSERT(gen == orig_gen); - } while (lip != NULL); -} -#endif /* DEBUG */ - /* * When this is called, all of the EFIs which did not have * corresponding EFDs should be in the AIL. 
What we do now @@ -3080,20 +3054,25 @@ xlog_recover_process_efis( { xfs_log_item_t *lip; xfs_efi_log_item_t *efip; - int gen; xfs_mount_t *mp; int error = 0; + struct xfs_ail_cursor cur; mp = log->l_mp; spin_lock(&mp->m_ail_lock); - lip = xfs_trans_first_ail(mp, &gen); + xfs_trans_ail_cursor_init(mp->m_ail, &cur); + lip = xfs_trans_first_ail(mp, &cur); while (lip != NULL) { /* * We're done when we see something other than an EFI. + * There should be no EFIs left in the AIL now. */ if (lip->li_type != XFS_LI_EFI) { - xlog_recover_check_ail(mp, lip, gen); +#ifdef DEBUG + for (; lip; lip = xfs_trans_next_ail(mp, &cur)) + ASSERT(lip->li_type != XFS_LI_EFI); +#endif break; } @@ -3102,17 +3081,19 @@ xlog_recover_process_efis( */ efip = (xfs_efi_log_item_t *)lip; if (efip->efi_flags & XFS_EFI_RECOVERED) { - lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + lip = xfs_trans_next_ail(mp, &cur); continue; } spin_unlock(&mp->m_ail_lock); error = xlog_recover_process_efi(mp, efip); - if (error) - return error; spin_lock(&mp->m_ail_lock); - lip = xfs_trans_next_ail(mp, lip, &gen, NULL); + if (error) + goto out; + lip = xfs_trans_next_ail(mp, &cur); } +out: + xfs_trans_ail_cursor_done(mp->m_ail, &cur); spin_unlock(&mp->m_ail_lock); return error; } diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index db72b52cd42..7b8bfcf1d3d 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -98,6 +98,115 @@ xfs_trans_push_ail( } } +/* + * AIL traversal cursor initialisation. + * + * The cursor keeps track of where our current traversal is up + * to by tracking the next item in the list for us. However, for + * this to be safe, removing an object from the AIL needs to invalidate + * any cursor that points to it. hence the traversal cursor needs to + * be linked to the struct xfs_ail so that deletion can search all the + * active cursors for invalidation. + * + * We don't link the push cursor because it is embedded in the struct + * xfs_ail and hence easily findable. + */ +void +xfs_trans_ail_cursor_init( + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur) +{ + cur->item = NULL; + if (cur == &ailp->xa_cursors) + return; + + cur->next = ailp->xa_cursors.next; + ailp->xa_cursors.next = cur; +} + +/* + * Set the cursor to the next item, because when we look + * up the cursor the current item may have been freed. + */ +STATIC void +xfs_trans_ail_cursor_set( + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + struct xfs_log_item *lip) +{ + if (lip) + cur->item = xfs_ail_next(ailp, lip); +} + +/* + * Get the next item in the traversal and advance the cursor. + * If the cursor was invalidated (inidicated by a lip of 1), + * restart the traversal. + */ +STATIC struct xfs_log_item * +xfs_trans_ail_cursor_next( + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur) +{ + struct xfs_log_item *lip = cur->item; + + if ((__psint_t)lip & 1) + lip = xfs_ail_min(ailp); + xfs_trans_ail_cursor_set(ailp, cur, lip); + return lip; +} + +/* + * Invalidate any cursor that is pointing to this item. This is + * called when an item is removed from the AIL. Any cursor pointing + * to this object is now invalid and the traversal needs to be + * terminated so it doesn't reference a freed object. We set the + * cursor item to a value of 1 so we can distinguish between an + * invalidation and the end of the list when getting the next item + * from the cursor.
+ */ +STATIC void +xfs_trans_ail_cursor_clear( + struct xfs_ail *ailp, + struct xfs_log_item *lip) +{ + struct xfs_ail_cursor *cur; + + /* need to search all cursors */ + for (cur = &ailp->xa_cursors; cur; cur = cur->next) { + if (cur->item == lip) + cur->item = (struct xfs_log_item *) + ((__psint_t)cur->item | 1); + } +} + +/* + * Now that the traversal is complete, we need to remove the cursor + * from the list of traversing cursors. Avoid removing the embedded + * push cursor, but use the fact it is alway present to make the + * list deletion simple. + */ +void +xfs_trans_ail_cursor_done( + struct xfs_ail *ailp, + struct xfs_ail_cursor *done) +{ + struct xfs_ail_cursor *prev = NULL; + struct xfs_ail_cursor *cur; + + done->item = NULL; + if (done == &ailp->xa_cursors) + return; + prev = &ailp->xa_cursors; + for (cur = prev->next; cur; prev = cur, cur = prev->next) { + if (cur == done) { + prev->next = cur->next; + break; + } + } + ASSERT(cur); +} + /* * Return the item in the AIL with the current lsn. * Return the current tree generation number for use @@ -105,20 +214,22 @@ xfs_trans_push_ail( */ STATIC xfs_log_item_t * xfs_trans_first_push_ail( - xfs_mount_t *mp, - int *gen, - xfs_lsn_t lsn) + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + xfs_lsn_t lsn) { - xfs_log_item_t *lip; + xfs_log_item_t *lip; - lip = xfs_ail_min(mp->m_ail); - *gen = (int)mp->m_ail->xa_gen; + lip = xfs_ail_min(ailp); + xfs_trans_ail_cursor_set(ailp, cur, lip); if (lsn == 0) return lip; - list_for_each_entry(lip, &mp->m_ail->xa_ail, li_ail) { - if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) + list_for_each_entry(lip, &ailp->xa_ail, li_ail) { + if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) { + xfs_trans_ail_cursor_set(ailp, cur, lip); return lip; + } } return NULL; @@ -137,22 +248,21 @@ xfsaild_push( xfs_lsn_t target = ailp->xa_target; xfs_lsn_t lsn; xfs_log_item_t *lip; - int gen; - int restarts; int flush_log, count, stuck; xfs_mount_t *mp = ailp->xa_mount; - -#define XFS_TRANS_PUSH_AIL_RESTARTS 10 + struct xfs_ail_cursor *cur = &ailp->xa_cursors; spin_lock(&mp->m_ail_lock); - lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn); + xfs_trans_ail_cursor_init(ailp, cur); + lip = xfs_trans_first_push_ail(ailp, cur, *last_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { /* * AIL is empty or our push has reached the end. */ + xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&mp->m_ail_lock); last_pushed_lsn = 0; - goto out; + return tout; } XFS_STATS_INC(xs_push_ail); @@ -170,7 +280,7 @@ xfsaild_push( */ tout = 10; lsn = lip->li_lsn; - flush_log = stuck = count = restarts = 0; + flush_log = stuck = count = 0; while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { int lock_result; /* @@ -245,13 +355,12 @@ xfsaild_push( if (stuck > 100) break; - lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); + lip = xfs_trans_ail_cursor_next(ailp, cur); if (lip == NULL) break; - if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS) - break; lsn = lip->li_lsn; } + xfs_trans_ail_cursor_done(ailp, cur); spin_unlock(&mp->m_ail_lock); if (flush_log) { @@ -275,8 +384,7 @@ xfsaild_push( */ tout += 20; last_pushed_lsn = 0; - } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) || - ((stuck * 100) / count > 90)) { + } else if ((stuck * 100) / count > 90) { /* * Either there is a lot of contention on the AIL or we * are stuck due to operations in progress. 
"Stuck" in this @@ -288,7 +396,6 @@ xfsaild_push( */ tout += 10; } -out: *last_lsn = last_pushed_lsn; return tout; } /* xfsaild_push */ @@ -348,9 +455,6 @@ xfs_trans_unlocked_item( * we move in the AIL is the minimum one, update the tail lsn in the * log manager. * - * Increment the AIL's generation count to indicate that the tree - * has changed. - * * This function must be called with the AIL lock held. The lock * is dropped before returning. */ @@ -368,14 +472,13 @@ xfs_trans_update_ail( if (lip->li_flags & XFS_LI_IN_AIL) { dlip = xfs_ail_delete(mp->m_ail, lip); ASSERT(dlip == lip); + xfs_trans_ail_cursor_clear(mp->m_ail, dlip); } else { lip->li_flags |= XFS_LI_IN_AIL; } lip->li_lsn = lsn; - xfs_ail_insert(mp->m_ail, lip); - mp->m_ail->xa_gen++; if (mlip == dlip) { mlip = xfs_ail_min(mp->m_ail); @@ -415,11 +518,11 @@ xfs_trans_delete_ail( mlip = xfs_ail_min(mp->m_ail); dlip = xfs_ail_delete(mp->m_ail, lip); ASSERT(dlip == lip); + xfs_trans_ail_cursor_clear(mp->m_ail, dlip); lip->li_flags &= ~XFS_LI_IN_AIL; lip->li_lsn = 0; - mp->m_ail->xa_gen++; if (mlip == dlip) { mlip = xfs_ail_min(mp->m_ail); @@ -455,46 +558,29 @@ xfs_trans_delete_ail( */ xfs_log_item_t * xfs_trans_first_ail( - xfs_mount_t *mp, - int *gen) + struct xfs_mount *mp, + struct xfs_ail_cursor *cur) { - xfs_log_item_t *lip; + xfs_log_item_t *lip; + struct xfs_ail *ailp = mp->m_ail; - lip = xfs_ail_min(mp->m_ail); - *gen = (int)mp->m_ail->xa_gen; + lip = xfs_ail_min(ailp); + xfs_trans_ail_cursor_set(ailp, cur, lip); return lip; } /* - * If the generation count of the tree has not changed since the - * caller last took something from the AIL, then return the elmt - * in the tree which follows the one given. If the count has changed, - * then return the minimum elmt of the AIL and bump the restarts counter - * if one is given. + * Grab the next item in the AIL from the cursor passed in. */ xfs_log_item_t * xfs_trans_next_ail( - xfs_mount_t *mp, - xfs_log_item_t *lip, - int *gen, - int *restarts) + struct xfs_mount *mp, + struct xfs_ail_cursor *cur) { - xfs_log_item_t *nlip; - - ASSERT(mp && lip && gen); - if (mp->m_ail->xa_gen == *gen) { - nlip = xfs_ail_next(mp->m_ail, lip); - } else { - nlip = xfs_ail_min(mp->m_ail); - *gen = (int)mp->m_ail->xa_gen; - if (restarts != NULL) { - XFS_STATS_INC(xs_push_ail_restarts); - (*restarts)++; - } - } + struct xfs_ail *ailp = mp->m_ail; - return (nlip); + return xfs_trans_ail_cursor_next(ailp, cur); } @@ -517,6 +603,7 @@ xfs_trans_ail_init( xfs_mount_t *mp) { struct xfs_ail *ailp; + int error; ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); if (!ailp) @@ -524,7 +611,15 @@ xfs_trans_ail_init( ailp->xa_mount = mp; INIT_LIST_HEAD(&ailp->xa_ail); - return xfsaild_start(ailp); + error = xfsaild_start(ailp); + if (error) + goto out_free_ailp; + mp->m_ail = ailp; + return 0; + +out_free_ailp: + kmem_free(ailp); + return error; } void diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 98317fdc33b..f114d388570 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -44,20 +44,33 @@ xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, xfs_extlen_t idx); /* - * From xfs_trans_ail.c + * AIL traversal cursor. + * + * Rather than using a generation number for detecting changes in the ail, use + * a cursor that is protected by the ail lock. The aild cursor exists in the + * struct xfs_ail, but other traversals can declare it on the stack and link it + * to the ail list. 
+ * + * When an object is deleted from or moved int the AIL, the cursor list is + * searched to see if the object is a designated cursor item. If it is, it is + * deleted from the cursor so that the next time the cursor is used traversal + * will return to the start. + * + * This means a traversal colliding with a removal will cause a restart of the + * list scan, rather than any insertion or deletion anywhere in the list. The + * low bit of the item pointer is set if the cursor has been invalidated so + * that we can tell the difference between invalidation and reaching the end + * of the list to trigger traversal restarts. */ -void xfs_trans_update_ail(struct xfs_mount *mp, - struct xfs_log_item *lip, xfs_lsn_t lsn) - __releases(mp->m_ail_lock); -void xfs_trans_delete_ail(struct xfs_mount *mp, - struct xfs_log_item *lip) - __releases(mp->m_ail_lock); -struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); -struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, - struct xfs_log_item *, int *, int *); +struct xfs_ail_cursor { + struct xfs_ail_cursor *next; + struct xfs_log_item *item; +}; /* - * AIL push thread support + * Private AIL structures. + * + * Eventually we need to drive the locking in here as well. */ struct xfs_ail { struct xfs_mount *xa_mount; @@ -65,8 +78,28 @@ struct xfs_ail { uint xa_gen; struct task_struct *xa_task; xfs_lsn_t xa_target; + struct xfs_ail_cursor xa_cursors; }; +/* + * From xfs_trans_ail.c + */ +void xfs_trans_update_ail(struct xfs_mount *mp, + struct xfs_log_item *lip, xfs_lsn_t lsn) + __releases(mp->m_ail_lock); +void xfs_trans_delete_ail(struct xfs_mount *mp, + struct xfs_log_item *lip) + __releases(mp->m_ail_lock); +struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *mp, + struct xfs_ail_cursor *cur); +struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *mp, + struct xfs_ail_cursor *cur); + +void xfs_trans_ail_cursor_init(struct xfs_ail *ailp, + struct xfs_ail_cursor *cur); +void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, + struct xfs_ail_cursor *cur); + long xfsaild_push(struct xfs_ail *, xfs_lsn_t *); void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t); int xfsaild_start(struct xfs_ail *); -- cgit v1.2.3-70-g09d2 From 5b00f14fbd60d42441f78c0e414a539cbfba5cb9 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:00 +1100 Subject: [XFS] move the AIL traversal over to a consistent interface With the new cursor interface, it makes sense to make all the traversing code use the cursor interface and make the old one go away. This means more of the AIL interfacing is done by passing struct xfs_ail pointers around the place instead of struct xfs_mount pointers. We can replace the use of xfs_trans_first_ail() in xfs_log_need_covered() as it is only checking if the AIL is empty. We can do that with a call to xfs_trans_ail_tail() instead, where a zero LSN returned indicates an empty AIL...
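As a rough sketch of the calling convention this settles on, a traversal now looks like the following; the walker function and inspect() are hypothetical placeholders, only the three cursor calls and the AIL lock come from the patches in this series, and error handling is omitted.

STATIC void
xfs_ail_walk_example(
	struct xfs_mount	*mp)
{
	struct xfs_ail		*ailp = mp->m_ail;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;

	spin_lock(&mp->m_ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		/*
		 * The AIL lock may be dropped and retaken around any real
		 * work on lip; the cursor keeps the traversal valid.
		 */
		inspect(lip);
	}
	xfs_trans_ail_cursor_done(ailp, &cur);
	spin_unlock(&mp->m_ail_lock);
}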
SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32348a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 4 +- fs/xfs/xfs_log_recover.c | 15 +++--- fs/xfs/xfs_trans.h | 1 - fs/xfs/xfs_trans_ail.c | 117 +++++++++++++++++------------------------------ fs/xfs/xfs_trans_priv.h | 13 +++--- 5 files changed, 58 insertions(+), 92 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 4184085d44a..31fbb2eea09 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -909,7 +909,7 @@ xfs_log_need_covered(xfs_mount_t *mp) spin_lock(&log->l_icloglock); if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || (log->l_covered_state == XLOG_STATE_COVER_NEED2)) - && !xfs_trans_first_ail(mp, NULL) + && !xfs_trans_ail_tail(mp->m_ail) && xlog_iclogs_empty(log)) { if (log->l_covered_state == XLOG_STATE_COVER_NEED) log->l_covered_state = XLOG_STATE_COVER_DONE; @@ -946,7 +946,7 @@ xlog_assign_tail_lsn(xfs_mount_t *mp) xfs_lsn_t tail_lsn; xlog_t *log = mp->m_log; - tail_lsn = xfs_trans_tail_ail(mp); + tail_lsn = xfs_trans_ail_tail(mp->m_ail); spin_lock(&log->l_grant_lock); if (tail_lsn != 0) { log->l_tail_lsn = tail_lsn; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 37ba4899f3e..45ea0d95013 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2728,8 +2728,7 @@ xlog_recover_do_efd_trans( */ mp = log->l_mp; spin_lock(&mp->m_ail_lock); - xfs_trans_ail_cursor_init(mp->m_ail, &cur); - lip = xfs_trans_first_ail(mp, &cur); + lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); while (lip != NULL) { if (lip->li_type == XFS_LI_EFI) { efip = (xfs_efi_log_item_t *)lip; @@ -2744,7 +2743,7 @@ xlog_recover_do_efd_trans( break; } } - lip = xfs_trans_next_ail(mp, &cur); + lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); } xfs_trans_ail_cursor_done(mp->m_ail, &cur); spin_unlock(&mp->m_ail_lock); @@ -3061,8 +3060,7 @@ xlog_recover_process_efis( mp = log->l_mp; spin_lock(&mp->m_ail_lock); - xfs_trans_ail_cursor_init(mp->m_ail, &cur); - lip = xfs_trans_first_ail(mp, &cur); + lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); while (lip != NULL) { /* * We're done when we see something other than an EFI. 
@@ -3070,7 +3068,8 @@ xlog_recover_process_efis( */ if (lip->li_type != XFS_LI_EFI) { #ifdef DEBUG - for (; lip; lip = xfs_trans_next_ail(mp, &cur)) + for (; lip; + lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur)) ASSERT(lip->li_type != XFS_LI_EFI); #endif break; @@ -3081,7 +3080,7 @@ xlog_recover_process_efis( */ efip = (xfs_efi_log_item_t *)lip; if (efip->efi_flags & XFS_EFI_RECOVERED) { - lip = xfs_trans_next_ail(mp, &cur); + lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); continue; } @@ -3090,7 +3089,7 @@ xlog_recover_process_efis( spin_lock(&mp->m_ail_lock); if (error) goto out; - lip = xfs_trans_next_ail(mp, &cur); + lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); } out: xfs_trans_ail_cursor_done(mp->m_ail, &cur); diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 1d89d50a5b9..ae2ae3e020d 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -971,7 +971,6 @@ void xfs_trans_cancel(xfs_trans_t *, int); int xfs_trans_ail_init(struct xfs_mount *); void xfs_trans_ail_destroy(struct xfs_mount *); void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); -xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); void xfs_trans_unlocked_item(struct xfs_mount *, xfs_log_item_t *); xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 7b8bfcf1d3d..286934d56ec 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -50,20 +50,20 @@ STATIC void xfs_ail_check(struct xfs_ail *, xfs_log_item_t *); * lsn of the last item in the AIL. */ xfs_lsn_t -xfs_trans_tail_ail( - xfs_mount_t *mp) +xfs_trans_ail_tail( + struct xfs_ail *ailp) { xfs_lsn_t lsn; xfs_log_item_t *lip; - spin_lock(&mp->m_ail_lock); - lip = xfs_ail_min(mp->m_ail); + spin_lock(&ailp->xa_mount->m_ail_lock); + lip = xfs_ail_min(ailp); if (lip == NULL) { lsn = (xfs_lsn_t)0; } else { lsn = lip->li_lsn; } - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_mount->m_ail_lock); return lsn; } @@ -111,7 +111,7 @@ xfs_trans_push_ail( * We don't link the push cursor because it is embedded in the struct * xfs_ail and hence easily findable. */ -void +STATIC void xfs_trans_ail_cursor_init( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) @@ -143,7 +143,7 @@ xfs_trans_ail_cursor_set( * If the cursor was invalidated (inidicated by a lip of 1), * restart the traversal. */ -STATIC struct xfs_log_item * +struct xfs_log_item * xfs_trans_ail_cursor_next( struct xfs_ail *ailp, struct xfs_ail_cursor *cur) @@ -156,30 +156,6 @@ xfs_trans_ail_cursor_next( return lip; } -/* - * Invalidate any cursor that is pointing to this item. This is - * called when an item is removed from the AIL. Any cursor pointing - * to this object is now invalid and the traversal needs to be - * terminated so it doesn't reference a freed object. We set the - * cursor item to a value of 1 so we can distinguish between an - * invalidation and the end of the list when getting the next item - * from the cursor. - */ -STATIC void -xfs_trans_ail_cursor_clear( - struct xfs_ail *ailp, - struct xfs_log_item *lip) -{ - struct xfs_ail_cursor *cur; - - /* need to search all cursors */ - for (cur = &ailp->xa_cursors; cur; cur = cur->next) { - if (cur->item == lip) - cur->item = (struct xfs_log_item *) - ((__psint_t)cur->item | 1); - } -} - /* * Now that the traversal is complete, we need to remove the cursor * from the list of traversing cursors. Avoid removing the embedded @@ -207,32 +183,56 @@ xfs_trans_ail_cursor_done( ASSERT(cur); } +/* + * Invalidate any cursor that is pointing to this item. 
This is + * called when an item is removed from the AIL. Any cursor pointing + * to this object is now invalid and the traversal needs to be + * terminated so it doesn't reference a freed object. We set the + * cursor item to a value of 1 so we can distinguish between an + * invalidation and the end of the list when getting the next item + * from the cursor. + */ +STATIC void +xfs_trans_ail_cursor_clear( + struct xfs_ail *ailp, + struct xfs_log_item *lip) +{ + struct xfs_ail_cursor *cur; + + /* need to search all cursors */ + for (cur = &ailp->xa_cursors; cur; cur = cur->next) { + if (cur->item == lip) + cur->item = (struct xfs_log_item *) + ((__psint_t)cur->item | 1); + } +} + /* * Return the item in the AIL with the current lsn. * Return the current tree generation number for use * in calls to xfs_trans_next_ail(). */ -STATIC xfs_log_item_t * -xfs_trans_first_push_ail( +xfs_log_item_t * +xfs_trans_ail_cursor_first( struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn) { xfs_log_item_t *lip; + xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_ail_min(ailp); - xfs_trans_ail_cursor_set(ailp, cur, lip); if (lsn == 0) - return lip; + goto out; list_for_each_entry(lip, &ailp->xa_ail, li_ail) { - if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) { - xfs_trans_ail_cursor_set(ailp, cur, lip); - return lip; - } + if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) + break; } - - return NULL; + lip = NULL; +out: + xfs_trans_ail_cursor_set(ailp, cur, lip); + return lip; } /* @@ -254,7 +254,7 @@ xfsaild_push( spin_lock(&mp->m_ail_lock); xfs_trans_ail_cursor_init(ailp, cur); - lip = xfs_trans_first_push_ail(ailp, cur, *last_lsn); + lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { /* * AIL is empty or our push has reached the end. @@ -551,39 +551,6 @@ xfs_trans_delete_ail( -/* - * Return the item in the AIL with the smallest lsn. - * Return the current tree generation number for use - * in calls to xfs_trans_next_ail(). - */ -xfs_log_item_t * -xfs_trans_first_ail( - struct xfs_mount *mp, - struct xfs_ail_cursor *cur) -{ - xfs_log_item_t *lip; - struct xfs_ail *ailp = mp->m_ail; - - lip = xfs_ail_min(ailp); - xfs_trans_ail_cursor_set(ailp, cur, lip); - - return lip; -} - -/* - * Grab the next item in the AIL from the cursor passed in. - */ -xfs_log_item_t * -xfs_trans_next_ail( - struct xfs_mount *mp, - struct xfs_ail_cursor *cur) -{ - struct xfs_ail *ailp = mp->m_ail; - - return xfs_trans_ail_cursor_next(ailp, cur); -} - - /* * The active item list (AIL) is a doubly linked list of log * items sorted by ascending lsn. 
The base of the list is diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index f114d388570..aa585350252 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -90,14 +90,15 @@ void xfs_trans_update_ail(struct xfs_mount *mp, void xfs_trans_delete_ail(struct xfs_mount *mp, struct xfs_log_item *lip) __releases(mp->m_ail_lock); -struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *mp, - struct xfs_ail_cursor *cur); -struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *mp, - struct xfs_ail_cursor *cur); -void xfs_trans_ail_cursor_init(struct xfs_ail *ailp, +xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp); + +struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + xfs_lsn_t lsn); +struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp, struct xfs_ail_cursor *cur); -void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, +void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, struct xfs_ail_cursor *cur); long xfsaild_push(struct xfs_ail *, xfs_lsn_t *); -- cgit v1.2.3-70-g09d2 From 7b2e2a31f5c23b5f028af8c895137b4c512cc1c8 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:12 +1100 Subject: [XFS] Allow 64 bit machines to avoid the AIL lock during flushes When copying lsn's from the log item to the inode or dquot flush lsn, we currently grab the AIL lock. We do this because the LSN is a 64 bit quantity and it needs to be read atomically. The lock is used to guarantee atomicity for 32 bit platforms. Make the LSN copying a small function, and make the function used conditional on BITS_PER_LONG so that 64 bit machines don't need to take the AIL lock in these places. SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32349a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/quota/xfs_dquot.c | 6 ++---- fs/xfs/xfs_inode.c | 17 +++++++---------- fs/xfs/xfs_trans_priv.h | 23 +++++++++++++++++++++++ 3 files changed, 32 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 1e6bf392564..59c1081412e 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1272,10 +1272,8 @@ xfs_qm_dqflush( dqp->dq_flags &= ~(XFS_DQ_DIRTY); mp = dqp->q_mount; - /* lsn is 64 bits */ - spin_lock(&mp->m_ail_lock); - dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; - spin_unlock(&mp->m_ail_lock); + xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, + &dqp->q_logitem.qli_item.li_lsn); /* * Attach an iodone routine so that we can remove this dquot from the diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 4eb629f0513..2951ffd8306 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2214,9 +2214,9 @@ xfs_ifree_cluster( iip = (xfs_inode_log_item_t *)lip; ASSERT(iip->ili_logged == 1); lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; - spin_lock(&mp->m_ail_lock); - iip->ili_flush_lsn = iip->ili_item.li_lsn; - spin_unlock(&mp->m_ail_lock); + xfs_trans_ail_copy_lsn(mp->m_ail, + &iip->ili_flush_lsn, + &iip->ili_item.li_lsn); xfs_iflags_set(iip->ili_inode, XFS_ISTALE); pre_flushed++; } @@ -2237,9 +2237,8 @@ xfs_ifree_cluster( iip->ili_last_fields = iip->ili_format.ilf_fields; iip->ili_format.ilf_fields = 0; iip->ili_logged = 1; - spin_lock(&mp->m_ail_lock); - iip->ili_flush_lsn = iip->ili_item.li_lsn; - spin_unlock(&mp->m_ail_lock); + xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, + &iip->ili_item.li_lsn); 
xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*)) @@ -3476,10 +3475,8 @@ xfs_iflush_int( iip->ili_format.ilf_fields = 0; iip->ili_logged = 1; - ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ - spin_lock(&mp->m_ail_lock); - iip->ili_flush_lsn = iip->ili_item.li_lsn; - spin_unlock(&mp->m_ail_lock); + xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, + &iip->ili_item.li_lsn); /* * Attach the function xfs_iflush_done to the inode's diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index aa585350252..708cff72d20 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -106,4 +106,27 @@ void xfsaild_wakeup(struct xfs_ail *, xfs_lsn_t); int xfsaild_start(struct xfs_ail *); void xfsaild_stop(struct xfs_ail *); +#if BITS_PER_LONG != 64 +static inline void +xfs_trans_ail_copy_lsn( + struct xfs_ail *ailp, + xfs_lsn_t *dst, + xfs_lsn_t *src) +{ + ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ + spin_lock(&ailp->xa_mount->m_ail_lock); + *dst = *src; + spin_unlock(&ailp->xa_mount->m_ail_lock); +} +#else +static inline void +xfs_trans_ail_copy_lsn( + struct xfs_ail *ailp, + xfs_lsn_t *dst, + xfs_lsn_t *src) +{ + ASSERT(sizeof(xfs_lsn_t) == 8); + *dst = *src; +} +#endif #endif /* __XFS_TRANS_PRIV_H__ */ -- cgit v1.2.3-70-g09d2 From c7e8f268278a292d3823b4352182fa7755a71410 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:23 +1100 Subject: [XFS] Move the AIL lock into the struct xfs_ail Bring the ail lock inside the struct xfs_ail. This means the AIL can be entirely manipulated via the struct xfs_ail rather than needing both the struct xfs_mount and the struct xfs_ail. SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32350a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/quota/xfs_dquot.c | 4 ++-- fs/xfs/quota/xfs_dquot_item.c | 2 +- fs/xfs/xfs_buf_item.c | 4 ++-- fs/xfs/xfs_extfree_item.c | 12 +++++----- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_inode_item.c | 8 +++---- fs/xfs/xfs_log.c | 1 - fs/xfs/xfs_log_recover.c | 16 ++++++------- fs/xfs/xfs_mount.h | 1 - fs/xfs/xfs_trans.c | 4 ++-- fs/xfs/xfs_trans_ail.c | 56 ++++++++++++++++++++++--------------------- fs/xfs/xfs_trans_priv.h | 5 ++-- 12 files changed, 59 insertions(+), 58 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 59c1081412e..0d7a62bffee 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1333,7 +1333,7 @@ xfs_qm_dqflush_done( if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && qip->qli_item.li_lsn == qip->qli_flush_lsn) { - spin_lock(&dqp->q_mount->m_ail_lock); + spin_lock(&dqp->q_mount->m_ail->xa_lock); /* * xfs_trans_delete_ail() drops the AIL lock. */ @@ -1341,7 +1341,7 @@ xfs_qm_dqflush_done( xfs_trans_delete_ail(dqp->q_mount, (xfs_log_item_t*)qip); else - spin_unlock(&dqp->q_mount->m_ail_lock); + spin_unlock(&dqp->q_mount->m_ail->xa_lock); } /* diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 48f08109621..0e1fa517db0 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -555,7 +555,7 @@ xfs_qm_qoffend_logitem_committed( xfs_qoff_logitem_t *qfs; qfs = qfe->qql_start_lip; - spin_lock(&qfs->qql_item.li_mountp->m_ail_lock); + spin_lock(&qfs->qql_item.li_mountp->m_ail->xa_lock); /* * Delete the qoff-start logitem from the AIL. * xfs_trans_delete_ail() drops the AIL lock. 
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 002fc2617c8..c557fd68252 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -408,7 +408,7 @@ xfs_buf_item_unpin( XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); } else { - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); xfs_buf_item_relse(bp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); @@ -1138,7 +1138,7 @@ xfs_buf_iodone( * * Either way, AIL is useless if we're forcing a shutdown. */ - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); /* * xfs_trans_delete_ail() drops the AIL lock. */ diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 8aa28f751b2..f1dcd80cf06 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -111,7 +111,7 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) xfs_mount_t *mp; mp = efip->efi_item.li_mountp; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { /* * xfs_trans_delete_ail() drops the AIL lock. @@ -120,7 +120,7 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } } @@ -138,7 +138,7 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) xfs_log_item_desc_t *lidp; mp = efip->efi_item.li_mountp; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { /* * free the xaction descriptor pointing to this item @@ -153,7 +153,7 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } } @@ -352,7 +352,7 @@ xfs_efi_release(xfs_efi_log_item_t *efip, ASSERT(efip->efi_next_extent > 0); ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); ASSERT(efip->efi_next_extent >= nextents); efip->efi_next_extent -= nextents; extents_left = efip->efi_next_extent; @@ -363,7 +363,7 @@ xfs_efi_release(xfs_efi_log_item_t *efip, xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); xfs_efi_item_free(efip); } else { - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 2951ffd8306..6d82c23629e 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2715,11 +2715,11 @@ xfs_idestroy( ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || XFS_FORCED_SHUTDOWN(ip->i_mount)); if (lip->li_flags & XFS_LI_IN_AIL) { - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (lip->li_flags & XFS_LI_IN_AIL) xfs_trans_delete_ail(mp, lip); else - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } xfs_inode_item_destroy(ip); ip->i_itemp = NULL; diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 97c7452e262..291d30aded6 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -991,7 +991,7 @@ xfs_iflush_done( */ if (iip->ili_logged && (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { - spin_lock(&ip->i_mount->m_ail_lock); + spin_lock(&ip->i_mount->m_ail->xa_lock); if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { /* * xfs_trans_delete_ail() drops the AIL lock. 
@@ -999,7 +999,7 @@ xfs_iflush_done( xfs_trans_delete_ail(ip->i_mount, (xfs_log_item_t*)iip); } else { - spin_unlock(&ip->i_mount->m_ail_lock); + spin_unlock(&ip->i_mount->m_ail->xa_lock); } } @@ -1038,14 +1038,14 @@ xfs_iflush_abort( mp = ip->i_mount; if (iip) { if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { /* * xfs_trans_delete_ail() drops the AIL lock. */ xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip); } else - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } iip->ili_logged = 0; /* diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 31fbb2eea09..a2f7422a749 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -567,7 +567,6 @@ xfs_log_mount( /* * Initialize the AIL now we have a log. */ - spin_lock_init(&mp->m_ail_lock); error = xfs_trans_ail_init(mp); if (error) { cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 45ea0d95013..a484febb9ec 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2681,7 +2681,7 @@ xlog_recover_do_efi_trans( efip->efi_next_extent = efi_formatp->efi_nextents; efip->efi_flags |= XFS_EFI_COMMITTED; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); /* * xfs_trans_update_ail() drops the AIL lock. */ @@ -2727,7 +2727,7 @@ xlog_recover_do_efd_trans( * in the AIL. */ mp = log->l_mp; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); while (lip != NULL) { if (lip->li_type == XFS_LI_EFI) { @@ -2739,14 +2739,14 @@ xlog_recover_do_efd_trans( */ xfs_trans_delete_ail(mp, lip); xfs_efi_item_free(efip); - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); break; } } lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); } xfs_trans_ail_cursor_done(mp->m_ail, &cur); - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } /* @@ -3058,7 +3058,7 @@ xlog_recover_process_efis( struct xfs_ail_cursor cur; mp = log->l_mp; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); while (lip != NULL) { @@ -3084,16 +3084,16 @@ xlog_recover_process_efis( continue; } - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); error = xlog_recover_process_efi(mp, efip); - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (error) goto out; lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); } out: xfs_trans_ail_cursor_done(mp->m_ail, &cur); - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); return error; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 287f90011ed..d3b75257602 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -228,7 +228,6 @@ extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); typedef struct xfs_mount { struct super_block *m_super; xfs_tid_t m_tid; /* next unused tid for fs */ - spinlock_t m_ail_lock; /* fs AIL mutex */ struct xfs_ail *m_ail; /* fs active log item list */ xfs_sb_t m_sb; /* copy of fs superblock */ spinlock_t m_sb_lock; /* sb counter lock */ diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 4e1c22a23be..99ba0e2658b 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1425,7 +1425,7 @@ xfs_trans_chunk_committed( * the test below. 
*/ mp = lip->li_mountp; - spin_lock(&mp->m_ail_lock); + spin_lock(&mp->m_ail->xa_lock); if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { /* * This will set the item's lsn to item_lsn @@ -1436,7 +1436,7 @@ xfs_trans_chunk_committed( */ xfs_trans_update_ail(mp, lip, item_lsn); } else { - spin_unlock(&mp->m_ail_lock); + spin_unlock(&mp->m_ail->xa_lock); } /* diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 286934d56ec..0cd47a797d3 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -1,5 +1,6 @@ /* * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. + * Copyright (c) 2008 Dave Chinner * All Rights Reserved. * * This program is free software; you can redistribute it and/or @@ -56,14 +57,14 @@ xfs_trans_ail_tail( xfs_lsn_t lsn; xfs_log_item_t *lip; - spin_lock(&ailp->xa_mount->m_ail_lock); + spin_lock(&ailp->xa_lock); lip = xfs_ail_min(ailp); if (lip == NULL) { lsn = (xfs_lsn_t)0; } else { lsn = lip->li_lsn; } - spin_unlock(&ailp->xa_mount->m_ail_lock); + spin_unlock(&ailp->xa_lock); return lsn; } @@ -252,7 +253,7 @@ xfsaild_push( xfs_mount_t *mp = ailp->xa_mount; struct xfs_ail_cursor *cur = &ailp->xa_cursors; - spin_lock(&mp->m_ail_lock); + spin_lock(&ailp->xa_lock); xfs_trans_ail_cursor_init(ailp, cur); lip = xfs_trans_ail_cursor_first(ailp, cur, *last_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { @@ -260,7 +261,7 @@ xfsaild_push( * AIL is empty or our push has reached the end. */ xfs_trans_ail_cursor_done(ailp, cur); - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_lock); last_pushed_lsn = 0; return tout; } @@ -295,7 +296,7 @@ xfsaild_push( * skip to the next item in the list. */ lock_result = IOP_TRYLOCK(lip); - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_lock); switch (lock_result) { case XFS_ITEM_SUCCESS: XFS_STATS_INC(xs_push_ail_success); @@ -332,7 +333,7 @@ xfsaild_push( break; } - spin_lock(&mp->m_ail_lock); + spin_lock(&ailp->xa_lock); /* should we bother continuing? 
*/ if (XFS_FORCED_SHUTDOWN(mp)) break; @@ -361,7 +362,7 @@ xfsaild_push( lsn = lip->li_lsn; } xfs_trans_ail_cursor_done(ailp, cur); - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_lock); if (flush_log) { /* @@ -462,30 +463,31 @@ void xfs_trans_update_ail( xfs_mount_t *mp, xfs_log_item_t *lip, - xfs_lsn_t lsn) __releases(mp->m_ail_lock) + xfs_lsn_t lsn) __releases(ailp->xa_lock) { - xfs_log_item_t *dlip=NULL; + struct xfs_ail *ailp = mp->m_ail; + xfs_log_item_t *dlip = NULL; xfs_log_item_t *mlip; /* ptr to minimum lip */ - mlip = xfs_ail_min(mp->m_ail); + mlip = xfs_ail_min(ailp); if (lip->li_flags & XFS_LI_IN_AIL) { - dlip = xfs_ail_delete(mp->m_ail, lip); + dlip = xfs_ail_delete(ailp, lip); ASSERT(dlip == lip); - xfs_trans_ail_cursor_clear(mp->m_ail, dlip); + xfs_trans_ail_cursor_clear(ailp, dlip); } else { lip->li_flags |= XFS_LI_IN_AIL; } lip->li_lsn = lsn; - xfs_ail_insert(mp->m_ail, lip); + xfs_ail_insert(ailp, lip); if (mlip == dlip) { - mlip = xfs_ail_min(mp->m_ail); - spin_unlock(&mp->m_ail_lock); + mlip = xfs_ail_min(ailp); + spin_unlock(&ailp->xa_lock); xfs_log_move_tail(mp, mlip->li_lsn); } else { - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_lock); } @@ -509,27 +511,28 @@ xfs_trans_update_ail( void xfs_trans_delete_ail( xfs_mount_t *mp, - xfs_log_item_t *lip) __releases(mp->m_ail_lock) + xfs_log_item_t *lip) __releases(ailp->xa_lock) { + struct xfs_ail *ailp = mp->m_ail; xfs_log_item_t *dlip; xfs_log_item_t *mlip; if (lip->li_flags & XFS_LI_IN_AIL) { - mlip = xfs_ail_min(mp->m_ail); - dlip = xfs_ail_delete(mp->m_ail, lip); + mlip = xfs_ail_min(ailp); + dlip = xfs_ail_delete(ailp, lip); ASSERT(dlip == lip); - xfs_trans_ail_cursor_clear(mp->m_ail, dlip); + xfs_trans_ail_cursor_clear(ailp, dlip); lip->li_flags &= ~XFS_LI_IN_AIL; lip->li_lsn = 0; if (mlip == dlip) { - mlip = xfs_ail_min(mp->m_ail); - spin_unlock(&mp->m_ail_lock); + mlip = xfs_ail_min(ailp); + spin_unlock(&ailp->xa_lock); xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); } else { - spin_unlock(&mp->m_ail_lock); + spin_unlock(&ailp->xa_lock); } } else { @@ -537,13 +540,11 @@ xfs_trans_delete_ail( * If the file system is not being shutdown, we are in * serious trouble if we get to this stage. 
*/ - if (XFS_FORCED_SHUTDOWN(mp)) - spin_unlock(&mp->m_ail_lock); - else { + spin_unlock(&ailp->xa_lock); + if (!XFS_FORCED_SHUTDOWN(mp)) { xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, "%s: attempting to delete a log item that is not in the AIL", __func__); - spin_unlock(&mp->m_ail_lock); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); } } @@ -578,6 +579,7 @@ xfs_trans_ail_init( ailp->xa_mount = mp; INIT_LIST_HEAD(&ailp->xa_ail); + spin_lock_init(&ailp->xa_lock); error = xfsaild_start(ailp); if (error) goto out_free_ailp; diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 708cff72d20..6ca0a7a7e3d 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -79,6 +79,7 @@ struct xfs_ail { struct task_struct *xa_task; xfs_lsn_t xa_target; struct xfs_ail_cursor xa_cursors; + spinlock_t xa_lock; }; /* @@ -114,9 +115,9 @@ xfs_trans_ail_copy_lsn( xfs_lsn_t *src) { ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ - spin_lock(&ailp->xa_mount->m_ail_lock); + spin_lock(&ailp->xa_lock); *dst = *src; - spin_unlock(&ailp->xa_mount->m_ail_lock); + spin_unlock(&ailp->xa_lock); } #else static inline void -- cgit v1.2.3-70-g09d2 From a9c21c1b9deaced836034e77fe25fe0b55c21f02 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:35 +1100 Subject: [XFS] Give the log a pointer to the AIL When we need to go from the log to the AIL, we have to go via the xfs_mount. Add an xfs_ail pointer to the log so we can go directly to the AIL associated with the log. SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32351a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 3 ++- fs/xfs/xfs_log_priv.h | 1 + fs/xfs/xfs_log_recover.c | 42 +++++++++++++++++++++--------------------- 3 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a2f7422a749..405a41ab685 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -572,6 +572,7 @@ xfs_log_mount( cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); goto error; } + mp->m_log->l_ailp = mp->m_ail; /* * skip log recovery on a norecovery mount. pretend it all @@ -908,7 +909,7 @@ xfs_log_need_covered(xfs_mount_t *mp) spin_lock(&log->l_icloglock); if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || (log->l_covered_state == XLOG_STATE_COVER_NEED2)) - && !xfs_trans_ail_tail(mp->m_ail) + && !xfs_trans_ail_tail(log->l_ailp) && xlog_iclogs_empty(log)) { if (log->l_covered_state == XLOG_STATE_COVER_NEED) log->l_covered_state = XLOG_STATE_COVER_DONE; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index e7d8f84443f..de7ef6ca920 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -404,6 +404,7 @@ typedef struct xlog_in_core { typedef struct log { /* The following fields don't need locking */ struct xfs_mount *l_mp; /* mount point */ + struct xfs_ail *l_ailp; /* AIL log is working with */ struct xfs_buf *l_xbuf; /* extra buffer for log * wrapping */ struct xfs_buftarg *l_targ; /* buftarg of log */ diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a484febb9ec..0bbde7b84fc 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2681,7 +2681,7 @@ xlog_recover_do_efi_trans( efip->efi_next_extent = efi_formatp->efi_nextents; efip->efi_flags |= XFS_EFI_COMMITTED; - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&log->l_ailp->xa_lock); /* * xfs_trans_update_ail() drops the AIL lock.
*/ @@ -2710,6 +2710,7 @@ xlog_recover_do_efd_trans( xfs_log_item_t *lip; __uint64_t efi_id; struct xfs_ail_cursor cur; + struct xfs_ail *ailp; if (pass == XLOG_RECOVER_PASS1) { return; @@ -2727,8 +2728,9 @@ xlog_recover_do_efd_trans( * in the AIL. */ mp = log->l_mp; - spin_lock(&mp->m_ail->xa_lock); - lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); + ailp = log->l_ailp; + spin_lock(&ailp->xa_lock); + lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); while (lip != NULL) { if (lip->li_type == XFS_LI_EFI) { efip = (xfs_efi_log_item_t *)lip; @@ -2739,14 +2741,14 @@ xlog_recover_do_efd_trans( */ xfs_trans_delete_ail(mp, lip); xfs_efi_item_free(efip); - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); break; } } - lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); + lip = xfs_trans_ail_cursor_next(ailp, &cur); } - xfs_trans_ail_cursor_done(mp->m_ail, &cur); - spin_unlock(&mp->m_ail->xa_lock); + xfs_trans_ail_cursor_done(ailp, &cur); + spin_unlock(&ailp->xa_lock); } /* @@ -3053,14 +3055,13 @@ xlog_recover_process_efis( { xfs_log_item_t *lip; xfs_efi_log_item_t *efip; - xfs_mount_t *mp; int error = 0; struct xfs_ail_cursor cur; + struct xfs_ail *ailp; - mp = log->l_mp; - spin_lock(&mp->m_ail->xa_lock); - - lip = xfs_trans_ail_cursor_first(mp->m_ail, &cur, 0); + ailp = log->l_ailp; + spin_lock(&ailp->xa_lock); + lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); while (lip != NULL) { /* * We're done when we see something other than an EFI. @@ -3068,8 +3069,7 @@ xlog_recover_process_efis( */ if (lip->li_type != XFS_LI_EFI) { #ifdef DEBUG - for (; lip; - lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur)) + for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur)) ASSERT(lip->li_type != XFS_LI_EFI); #endif break; @@ -3080,20 +3080,20 @@ xlog_recover_process_efis( */ efip = (xfs_efi_log_item_t *)lip; if (efip->efi_flags & XFS_EFI_RECOVERED) { - lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); + lip = xfs_trans_ail_cursor_next(ailp, &cur); continue; } - spin_unlock(&mp->m_ail->xa_lock); - error = xlog_recover_process_efi(mp, efip); - spin_lock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); + error = xlog_recover_process_efi(log->l_mp, efip); + spin_lock(&ailp->xa_lock); if (error) goto out; - lip = xfs_trans_ail_cursor_next(mp->m_ail, &cur); + lip = xfs_trans_ail_cursor_next(ailp, &cur); } out: - xfs_trans_ail_cursor_done(mp->m_ail, &cur); - spin_unlock(&mp->m_ail->xa_lock); + xfs_trans_ail_cursor_done(ailp, &cur); + spin_unlock(&ailp->xa_lock); return error; } -- cgit v1.2.3-70-g09d2 From fc1829f34d30899701dfd5890030d39e13e1f47d Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:46 +1100 Subject: [XFS] Add ail pointer into log items Add an xfs_ail pointer to log items so that the log items can reference the AIL directly during callbacks without needing a struct xfs_mount.
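The shape this gives the IO-done and unpin callbacks can be sketched as below; the item type and function name are invented for illustration, but the li_ailp dereference and the lock/delete pattern mirror the conversions in the diff that follows. Note that xfs_trans_delete_ail() still takes the mount here; the next patch finishes that conversion.

STATIC void
xfs_example_item_done(
	xfs_log_item_t	*lip)
{
	struct xfs_ail	*ailp = lip->li_ailp;	/* AIL reached via the item */

	spin_lock(&ailp->xa_lock);
	if (lip->li_flags & XFS_LI_IN_AIL) {
		/* xfs_trans_delete_ail() drops the AIL lock */
		xfs_trans_delete_ail(lip->li_mountp, lip);
	} else {
		spin_unlock(&ailp->xa_lock);
	}
}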
SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32352a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_buf_item.c | 5 ++++- fs/xfs/xfs_extfree_item.c | 28 ++++++++++++++++++---------- fs/xfs/xfs_inode_item.c | 1 + fs/xfs/xfs_trans.c | 9 ++++++--- fs/xfs/xfs_trans.h | 1 + fs/xfs/xfs_trans_item.c | 10 ++++++++++ 6 files changed, 40 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index c557fd68252..793e53c01dc 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -731,6 +731,7 @@ xfs_buf_item_init( bip->bli_item.li_type = XFS_LI_BUF; bip->bli_item.li_ops = &xfs_buf_item_ops; bip->bli_item.li_mountp = mp; + bip->bli_item.li_ailp = mp->m_ail; bip->bli_buf = bp; xfs_buf_hold(bp); bip->bli_format.blf_type = XFS_LI_BUF; @@ -1123,11 +1124,13 @@ xfs_buf_iodone( xfs_buf_log_item_t *bip) { struct xfs_mount *mp; + struct xfs_ail *ailp; ASSERT(bip->bli_buf == bp); xfs_buf_rele(bp); mp = bip->bli_item.li_mountp; + ailp = bip->bli_item.li_ailp; /* * If we are forcibly shutting down, this may well be @@ -1138,7 +1141,7 @@ xfs_buf_iodone( * * Either way, AIL is useless if we're forcing a shutdown. */ - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); /* * xfs_trans_delete_ail() drops the AIL lock. */ diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index f1dcd80cf06..dab57374e1f 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -108,10 +108,12 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip) STATIC void xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) { - xfs_mount_t *mp; + xfs_mount_t *mp; + struct xfs_ail *ailp; mp = efip->efi_item.li_mountp; - spin_lock(&mp->m_ail->xa_lock); + ailp = efip->efi_item.li_ailp; + spin_lock(&ailp->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { /* * xfs_trans_delete_ail() drops the AIL lock. 
@@ -120,7 +122,7 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } } @@ -134,11 +136,13 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) STATIC void xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) { - xfs_mount_t *mp; + xfs_mount_t *mp; + struct xfs_ail *ailp; xfs_log_item_desc_t *lidp; mp = efip->efi_item.li_mountp; - spin_lock(&mp->m_ail->xa_lock); + ailp = efip->efi_item.li_ailp; + spin_lock(&ailp->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { /* * free the xaction descriptor pointing to this item @@ -153,7 +157,7 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } } @@ -268,6 +272,7 @@ xfs_efi_init(xfs_mount_t *mp, efip->efi_item.li_type = XFS_LI_EFI; efip->efi_item.li_ops = &xfs_efi_item_ops; efip->efi_item.li_mountp = mp; + efip->efi_item.li_ailp = mp->m_ail; efip->efi_format.efi_nextents = nextents; efip->efi_format.efi_id = (__psint_t)(void*)efip; @@ -345,14 +350,16 @@ void xfs_efi_release(xfs_efi_log_item_t *efip, uint nextents) { - xfs_mount_t *mp; - int extents_left; + xfs_mount_t *mp; + struct xfs_ail *ailp; + int extents_left; mp = efip->efi_item.li_mountp; + ailp = efip->efi_item.li_ailp; ASSERT(efip->efi_next_extent > 0); ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); ASSERT(efip->efi_next_extent >= nextents); efip->efi_next_extent -= nextents; extents_left = efip->efi_next_extent; @@ -363,7 +370,7 @@ xfs_efi_release(xfs_efi_log_item_t *efip, xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); xfs_efi_item_free(efip); } else { - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } } @@ -565,6 +572,7 @@ xfs_efd_init(xfs_mount_t *mp, efdp->efd_item.li_type = XFS_LI_EFD; efdp->efd_item.li_ops = &xfs_efd_item_ops; efdp->efd_item.li_mountp = mp; + efdp->efd_item.li_ailp = mp->m_ail; efdp->efd_efip = efip; efdp->efd_format.efd_nextents = nextents; efdp->efd_format.efd_efi_id = efip->efi_format.efi_id; diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 291d30aded6..47594f4b51d 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -932,6 +932,7 @@ xfs_inode_item_init( iip->ili_item.li_type = XFS_LI_INODE; iip->ili_item.li_ops = &xfs_inode_item_ops; iip->ili_item.li_mountp = mp; + iip->ili_item.li_ailp = mp->m_ail; iip->ili_inode = ip; /* diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 99ba0e2658b..5163e1216c8 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1383,11 +1383,13 @@ xfs_trans_chunk_committed( xfs_log_item_desc_t *lidp; xfs_log_item_t *lip; xfs_lsn_t item_lsn; - struct xfs_mount *mp; int i; lidp = licp->lic_descs; for (i = 0; i < licp->lic_unused; i++, lidp++) { + struct xfs_mount *mp; + struct xfs_ail *ailp; + if (xfs_lic_isfree(licp, i)) { continue; } @@ -1425,7 +1427,8 @@ xfs_trans_chunk_committed( * the test below. 
*/ mp = lip->li_mountp; - spin_lock(&mp->m_ail->xa_lock); + ailp = lip->li_ailp; + spin_lock(&ailp->xa_lock); if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { /* * This will set the item's lsn to item_lsn @@ -1436,7 +1439,7 @@ xfs_trans_chunk_committed( */ xfs_trans_update_ail(mp, lip, item_lsn); } else { - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } /* diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index ae2ae3e020d..0df51547757 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -768,6 +768,7 @@ typedef struct xfs_log_item { xfs_lsn_t li_lsn; /* last on-disk lsn */ struct xfs_log_item_desc *li_desc; /* ptr to current desc*/ struct xfs_mount *li_mountp; /* ptr to fs mount */ + struct xfs_ail *li_ailp; /* ptr to AIL */ uint li_type; /* item type */ uint li_flags; /* misc flags */ struct xfs_log_item *li_bio_list; /* buffer item list */ diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c index 3c666e8317f..e110bf57d7f 100644 --- a/fs/xfs/xfs_trans_item.c +++ b/fs/xfs/xfs_trans_item.c @@ -22,6 +22,14 @@ #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" +/* XXX: from here down needed until struct xfs_trans has it's own ailp */ +#include "xfs_bit.h" +#include "xfs_buf_item.h" +#include "xfs_sb.h" +#include "xfs_ag.h" +#include "xfs_dir2.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, int, int, xfs_lsn_t); @@ -79,6 +87,7 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) lidp->lid_size = 0; lip->li_desc = lidp; lip->li_mountp = tp->t_mountp; + lip->li_ailp = tp->t_mountp->m_ail; return lidp; } @@ -120,6 +129,7 @@ xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip) lidp->lid_size = 0; lip->li_desc = lidp; lip->li_mountp = tp->t_mountp; + lip->li_ailp = tp->t_mountp->m_ail; return lidp; } -- cgit v1.2.3-70-g09d2 From 783a2f656f9674c31d4019708a94af93fa1d1c22 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:39:58 +1100 Subject: [XFS] Finish removing the mount pointer from the AIL API Change all the remaining AIL API functions that are passed struct xfs_mount pointers to pass pointers directly to the struct xfs_ail being used. With this conversion, all external access to the AIL is via the struct xfs_ail. Hence the operation and referencing of the AIL is almost entirely independent of the xfs_mount that is using it - it is now much more tightly tied to the log and the items it is tracking in the log than it is tied to the xfs_mount. 
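To make the shape of this conversion concrete, the call-site pattern changes roughly as sketched below (an abbreviated illustration pieced together from the hunks in this patch, not a verbatim excerpt; locking context and error handling are omitted):

	/* before: callers reach the AIL through the mount */
	spin_lock(&mp->m_ail->xa_lock);
	xfs_trans_delete_ail(mp, lip);		/* drops the AIL lock */

	/* after: callers use the xfs_ail cached in the log item */
	struct xfs_ail	*ailp = lip->li_ailp;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip);	/* drops the AIL lock */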
SGI-PV: 988143 SGI-Modid: xfs-linux-melb:xfs-kern:32353a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/quota/xfs_dquot.c | 15 +++++++-------- fs/xfs/quota/xfs_dquot_item.c | 8 +++++--- fs/xfs/xfs_buf_item.c | 24 +++++++++--------------- fs/xfs/xfs_extfree_item.c | 35 ++++++++++------------------------- fs/xfs/xfs_iget.c | 4 +++- fs/xfs/xfs_inode.c | 8 ++++---- fs/xfs/xfs_inode_item.c | 29 ++++++++++++----------------- fs/xfs/xfs_log.c | 2 +- fs/xfs/xfs_log_recover.c | 13 +++++-------- fs/xfs/xfs_trans.c | 6 ++---- fs/xfs/xfs_trans.h | 3 --- fs/xfs/xfs_trans_ail.c | 41 +++++++++++++++++++++-------------------- fs/xfs/xfs_trans_buf.c | 7 +++---- fs/xfs/xfs_trans_priv.h | 15 +++++++++------ 14 files changed, 91 insertions(+), 119 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 0d7a62bffee..591ca6602bf 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -1319,8 +1319,10 @@ xfs_qm_dqflush_done( xfs_dq_logitem_t *qip) { xfs_dquot_t *dqp; + struct xfs_ail *ailp; dqp = qip->qli_dquot; + ailp = qip->qli_item.li_ailp; /* * We only want to pull the item from the AIL if its @@ -1333,15 +1335,12 @@ xfs_qm_dqflush_done( if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && qip->qli_item.li_lsn == qip->qli_flush_lsn) { - spin_lock(&dqp->q_mount->m_ail->xa_lock); - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ + /* xfs_trans_ail_delete() drops the AIL lock. */ + spin_lock(&ailp->xa_lock); if (qip->qli_item.li_lsn == qip->qli_flush_lsn) - xfs_trans_delete_ail(dqp->q_mount, - (xfs_log_item_t*)qip); + xfs_trans_ail_delete(ailp, (xfs_log_item_t*)qip); else - spin_unlock(&dqp->q_mount->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } /* @@ -1371,7 +1370,7 @@ xfs_dqunlock( mutex_unlock(&(dqp->q_qlock)); if (dqp->q_logitem.qli_dquot == dqp) { /* Once was dqp->q_mount, but might just have been cleared */ - xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_mountp, + xfs_trans_unlocked_item(dqp->q_logitem.qli_item.li_ailp, (xfs_log_item_t*)&(dqp->q_logitem)); } } diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c index 0e1fa517db0..1728f6a7c4f 100644 --- a/fs/xfs/quota/xfs_dquot_item.c +++ b/fs/xfs/quota/xfs_dquot_item.c @@ -553,14 +553,16 @@ xfs_qm_qoffend_logitem_committed( xfs_lsn_t lsn) { xfs_qoff_logitem_t *qfs; + struct xfs_ail *ailp; qfs = qfe->qql_start_lip; - spin_lock(&qfs->qql_item.li_mountp->m_ail->xa_lock); + ailp = qfs->qql_item.li_ailp; + spin_lock(&ailp->xa_lock); /* * Delete the qoff-start logitem from the AIL. - * xfs_trans_delete_ail() drops the AIL lock. + * xfs_trans_ail_delete() drops the AIL lock. 
*/ - xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs); + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs); kmem_free(qfs); kmem_free(qfe); return (xfs_lsn_t)-1; diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 793e53c01dc..d245d04e10c 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -375,7 +375,7 @@ xfs_buf_item_unpin( xfs_buf_log_item_t *bip, int stale) { - xfs_mount_t *mp; + struct xfs_ail *ailp; xfs_buf_t *bp; int freed; @@ -387,7 +387,7 @@ xfs_buf_item_unpin( xfs_buftrace("XFS_UNPIN", bp); freed = atomic_dec_and_test(&bip->bli_refcount); - mp = bip->bli_item.li_mountp; + ailp = bip->bli_item.li_ailp; xfs_bunpin(bp); if (freed && stale) { ASSERT(bip->bli_flags & XFS_BLI_STALE); @@ -399,17 +399,17 @@ xfs_buf_item_unpin( xfs_buftrace("XFS_UNPIN STALE", bp); /* * If we get called here because of an IO error, we may - * or may not have the item on the AIL. xfs_trans_delete_ail() + * or may not have the item on the AIL. xfs_trans_ail_delete() * will take care of that situation. - * xfs_trans_delete_ail() drops the AIL lock. + * xfs_trans_ail_delete() drops the AIL lock. */ if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); } else { - spin_lock(&mp->m_ail->xa_lock); - xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); + spin_lock(&ailp->xa_lock); + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_buf_item_relse(bp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); } @@ -1123,29 +1123,23 @@ xfs_buf_iodone( xfs_buf_t *bp, xfs_buf_log_item_t *bip) { - struct xfs_mount *mp; - struct xfs_ail *ailp; + struct xfs_ail *ailp = bip->bli_item.li_ailp; ASSERT(bip->bli_buf == bp); xfs_buf_rele(bp); - mp = bip->bli_item.li_mountp; - ailp = bip->bli_item.li_ailp; /* * If we are forcibly shutting down, this may well be * off the AIL already. That's because we simulate the * log-committed callbacks to unpin these buffers. Or we may never * have put this item on AIL because of the transaction was - * aborted forcibly. xfs_trans_delete_ail() takes care of these. + * aborted forcibly. xfs_trans_ail_delete() takes care of these. * * Either way, AIL is useless if we're forcing a shutdown. */ spin_lock(&ailp->xa_lock); - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_buf_item_free(bip); } diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index dab57374e1f..05a4bdd4be3 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -108,17 +108,12 @@ xfs_efi_item_pin(xfs_efi_log_item_t *efip) STATIC void xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) { - xfs_mount_t *mp; - struct xfs_ail *ailp; + struct xfs_ail *ailp = efip->efi_item.li_ailp; - mp = efip->efi_item.li_mountp; - ailp = efip->efi_item.li_ailp; spin_lock(&ailp->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); + /* xfs_trans_ail_delete() drops the AIL lock. 
*/ + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; @@ -136,12 +131,9 @@ xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) STATIC void xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) { - xfs_mount_t *mp; - struct xfs_ail *ailp; + struct xfs_ail *ailp = efip->efi_item.li_ailp; xfs_log_item_desc_t *lidp; - mp = efip->efi_item.li_mountp; - ailp = efip->efi_item.li_ailp; spin_lock(&ailp->xa_lock); if (efip->efi_flags & XFS_EFI_CANCELED) { /* @@ -149,11 +141,9 @@ xfs_efi_item_unpin_remove(xfs_efi_log_item_t *efip, xfs_trans_t *tp) */ lidp = xfs_trans_find_item(tp, (xfs_log_item_t *) efip); xfs_trans_free_item(tp, lidp); - /* - * pull the item off the AIL. - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); + + /* xfs_trans_ail_delete() drops the AIL lock. */ + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; @@ -350,12 +340,9 @@ void xfs_efi_release(xfs_efi_log_item_t *efip, uint nextents) { - xfs_mount_t *mp; - struct xfs_ail *ailp; + struct xfs_ail *ailp = efip->efi_item.li_ailp; int extents_left; - mp = efip->efi_item.li_mountp; - ailp = efip->efi_item.li_ailp; ASSERT(efip->efi_next_extent > 0); ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); @@ -364,10 +351,8 @@ xfs_efi_release(xfs_efi_log_item_t *efip, efip->efi_next_extent -= nextents; extents_left = efip->efi_next_extent; if (extents_left == 0) { - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); + /* xfs_trans_ail_delete() drops the AIL lock. */ + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); xfs_efi_item_free(efip); } else { spin_unlock(&ailp->xa_lock); diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 800133805ca..a1f209b0596 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -38,6 +38,8 @@ #include "xfs_ialloc.h" #include "xfs_quota.h" #include "xfs_utils.h" +#include "xfs_trans_priv.h" +#include "xfs_inode_item.h" /* * Check the validity of the inode we just found it the cache @@ -616,7 +618,7 @@ xfs_iunlock( * it is in the AIL and anyone is waiting on it. Don't do * this if the caller has asked us not to. */ - xfs_trans_unlocked_item(ip->i_mount, + xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, (xfs_log_item_t*)(ip->i_itemp)); } xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 6d82c23629e..c83f6998f95 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2709,17 +2709,17 @@ xfs_idestroy( * inode still in the AIL. If it is there, we should remove * it to prevent a use-after-free from occurring. 
*/ - xfs_mount_t *mp = ip->i_mount; xfs_log_item_t *lip = &ip->i_itemp->ili_item; + struct xfs_ail *ailp = lip->li_ailp; ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || XFS_FORCED_SHUTDOWN(ip->i_mount)); if (lip->li_flags & XFS_LI_IN_AIL) { - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); if (lip->li_flags & XFS_LI_IN_AIL) - xfs_trans_delete_ail(mp, lip); + xfs_trans_ail_delete(ailp, lip); else - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } xfs_inode_item_destroy(ip); ip->i_itemp = NULL; diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 47594f4b51d..aa9bf05060c 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -977,9 +977,8 @@ xfs_iflush_done( xfs_buf_t *bp, xfs_inode_log_item_t *iip) { - xfs_inode_t *ip; - - ip = iip->ili_inode; + xfs_inode_t *ip = iip->ili_inode; + struct xfs_ail *ailp = iip->ili_item.li_ailp; /* * We only want to pull the item from the AIL if it is @@ -992,15 +991,12 @@ xfs_iflush_done( */ if (iip->ili_logged && (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { - spin_lock(&ip->i_mount->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(ip->i_mount, - (xfs_log_item_t*)iip); + /* xfs_trans_ail_delete() drops the AIL lock. */ + xfs_trans_ail_delete(ailp, (xfs_log_item_t*)iip); } else { - spin_unlock(&ip->i_mount->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } } @@ -1032,21 +1028,20 @@ void xfs_iflush_abort( xfs_inode_t *ip) { - xfs_inode_log_item_t *iip; + xfs_inode_log_item_t *iip = ip->i_itemp; xfs_mount_t *mp; iip = ip->i_itemp; mp = ip->i_mount; if (iip) { + struct xfs_ail *ailp = iip->ili_item.li_ailp; if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { - spin_lock(&mp->m_ail->xa_lock); + spin_lock(&ailp->xa_lock); if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { - /* - * xfs_trans_delete_ail() drops the AIL lock. - */ - xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip); + /* xfs_trans_ail_delete() drops the AIL lock. */ + xfs_trans_ail_delete(ailp, (xfs_log_item_t *)iip); } else - spin_unlock(&mp->m_ail->xa_lock); + spin_unlock(&ailp->xa_lock); } iip->ili_logged = 0; /* diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 405a41ab685..51840170b16 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1413,7 +1413,7 @@ xlog_grant_push_ail(xfs_mount_t *mp, */ if (threshold_lsn && !XLOG_FORCED_SHUTDOWN(log)) - xfs_trans_push_ail(mp, threshold_lsn); + xfs_trans_ail_push(log->l_ailp, threshold_lsn); } /* xlog_grant_push_ail */ diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 0bbde7b84fc..cff901efc24 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2683,9 +2683,9 @@ xlog_recover_do_efi_trans( spin_lock(&log->l_ailp->xa_lock); /* - * xfs_trans_update_ail() drops the AIL lock. + * xfs_trans_ail_update() drops the AIL lock. */ - xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn); + xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn); return 0; } @@ -2704,13 +2704,12 @@ xlog_recover_do_efd_trans( xlog_recover_item_t *item, int pass) { - xfs_mount_t *mp; xfs_efd_log_format_t *efd_formatp; xfs_efi_log_item_t *efip = NULL; xfs_log_item_t *lip; __uint64_t efi_id; struct xfs_ail_cursor cur; - struct xfs_ail *ailp; + struct xfs_ail *ailp = log->l_ailp; if (pass == XLOG_RECOVER_PASS1) { return; @@ -2727,8 +2726,6 @@ xlog_recover_do_efd_trans( * Search for the efi with the id in the efd format structure * in the AIL. 
*/ - mp = log->l_mp; - ailp = log->l_ailp; spin_lock(&ailp->xa_lock); lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); while (lip != NULL) { @@ -2736,10 +2733,10 @@ xlog_recover_do_efd_trans( efip = (xfs_efi_log_item_t *)lip; if (efip->efi_format.efi_id == efi_id) { /* - * xfs_trans_delete_ail() drops the + * xfs_trans_ail_delete() drops the * AIL lock. */ - xfs_trans_delete_ail(mp, lip); + xfs_trans_ail_delete(ailp, lip); xfs_efi_item_free(efip); spin_lock(&ailp->xa_lock); break; diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 5163e1216c8..ad137efc870 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1387,7 +1387,6 @@ xfs_trans_chunk_committed( lidp = licp->lic_descs; for (i = 0; i < licp->lic_unused; i++, lidp++) { - struct xfs_mount *mp; struct xfs_ail *ailp; if (xfs_lic_isfree(licp, i)) { @@ -1426,7 +1425,6 @@ xfs_trans_chunk_committed( * This would cause the earlier transaction to fail * the test below. */ - mp = lip->li_mountp; ailp = lip->li_ailp; spin_lock(&ailp->xa_lock); if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { @@ -1435,9 +1433,9 @@ xfs_trans_chunk_committed( * and update the position of the item in * the AIL. * - * xfs_trans_update_ail() drops the AIL lock. + * xfs_trans_ail_update() drops the AIL lock. */ - xfs_trans_update_ail(mp, lip, item_lsn); + xfs_trans_ail_update(ailp, lip, item_lsn); } else { spin_unlock(&ailp->xa_lock); } diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 0df51547757..d6fe4a88d79 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -971,9 +971,6 @@ int _xfs_trans_commit(xfs_trans_t *, void xfs_trans_cancel(xfs_trans_t *, int); int xfs_trans_ail_init(struct xfs_mount *); void xfs_trans_ail_destroy(struct xfs_mount *); -void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); -void xfs_trans_unlocked_item(struct xfs_mount *, - xfs_log_item_t *); xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx); diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 0cd47a797d3..67ee4663336 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -86,16 +86,16 @@ xfs_trans_ail_tail( * any of the objects, so the lock is not needed. */ void -xfs_trans_push_ail( - xfs_mount_t *mp, - xfs_lsn_t threshold_lsn) +xfs_trans_ail_push( + struct xfs_ail *ailp, + xfs_lsn_t threshold_lsn) { - xfs_log_item_t *lip; + xfs_log_item_t *lip; - lip = xfs_ail_min(mp->m_ail); - if (lip && !XFS_FORCED_SHUTDOWN(mp)) { - if (XFS_LSN_CMP(threshold_lsn, mp->m_ail->xa_target) > 0) - xfsaild_wakeup(mp->m_ail, threshold_lsn); + lip = xfs_ail_min(ailp); + if (lip && !XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { + if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) + xfsaild_wakeup(ailp, threshold_lsn); } } @@ -412,7 +412,7 @@ xfsaild_push( */ void xfs_trans_unlocked_item( - xfs_mount_t *mp, + struct xfs_ail *ailp, xfs_log_item_t *lip) { xfs_log_item_t *min_lip; @@ -424,7 +424,7 @@ xfs_trans_unlocked_item( * over some potentially valid data. */ if (!(lip->li_flags & XFS_LI_IN_AIL) || - XFS_FORCED_SHUTDOWN(mp)) { + XFS_FORCED_SHUTDOWN(ailp->xa_mount)) { return; } @@ -440,10 +440,10 @@ xfs_trans_unlocked_item( * the call to xfs_log_move_tail() doesn't do anything if there's * not enough free space to wake people up so we're safe calling it. */ - min_lip = xfs_ail_min(mp->m_ail); + min_lip = xfs_ail_min(ailp); if (min_lip == lip) - xfs_log_move_tail(mp, 1); + xfs_log_move_tail(ailp->xa_mount, 1); } /* xfs_trans_unlocked_item */ @@ -460,12 +460,11 @@ xfs_trans_unlocked_item( * is dropped before returning. 
*/ void -xfs_trans_update_ail( - xfs_mount_t *mp, +xfs_trans_ail_update( + struct xfs_ail *ailp, xfs_log_item_t *lip, xfs_lsn_t lsn) __releases(ailp->xa_lock) { - struct xfs_ail *ailp = mp->m_ail; xfs_log_item_t *dlip = NULL; xfs_log_item_t *mlip; /* ptr to minimum lip */ @@ -485,7 +484,7 @@ xfs_trans_update_ail( if (mlip == dlip) { mlip = xfs_ail_min(ailp); spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(mp, mlip->li_lsn); + xfs_log_move_tail(ailp->xa_mount, mlip->li_lsn); } else { spin_unlock(&ailp->xa_lock); } @@ -509,11 +508,10 @@ xfs_trans_update_ail( * is dropped before returning. */ void -xfs_trans_delete_ail( - xfs_mount_t *mp, +xfs_trans_ail_delete( + struct xfs_ail *ailp, xfs_log_item_t *lip) __releases(ailp->xa_lock) { - struct xfs_ail *ailp = mp->m_ail; xfs_log_item_t *dlip; xfs_log_item_t *mlip; @@ -530,7 +528,8 @@ xfs_trans_delete_ail( if (mlip == dlip) { mlip = xfs_ail_min(ailp); spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0)); + xfs_log_move_tail(ailp->xa_mount, + (mlip ? mlip->li_lsn : 0)); } else { spin_unlock(&ailp->xa_lock); } @@ -540,6 +539,8 @@ xfs_trans_delete_ail( * If the file system is not being shutdown, we are in * serious trouble if we get to this stage. */ + struct xfs_mount *mp = ailp->xa_mount; + spin_unlock(&ailp->xa_lock); if (!XFS_FORCED_SHUTDOWN(mp)) { xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 4e855b5ced6..8ee2f8c8b0a 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -527,9 +527,8 @@ xfs_trans_brelse(xfs_trans_t *tp, lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); if (lip->li_type == XFS_LI_BUF) { bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); - xfs_trans_unlocked_item( - bip->bli_item.li_mountp, - lip); + xfs_trans_unlocked_item(bip->bli_item.li_ailp, + lip); } } xfs_buf_relse(bp); @@ -626,7 +625,7 @@ xfs_trans_brelse(xfs_trans_t *tp, * tell the AIL that the buffer is being unlocked. */ if (bip != NULL) { - xfs_trans_unlocked_item(bip->bli_item.li_mountp, + xfs_trans_unlocked_item(bip->bli_item.li_ailp, (xfs_log_item_t*)bip); } diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 6ca0a7a7e3d..73e2ad39743 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -85,12 +85,15 @@ struct xfs_ail { /* * From xfs_trans_ail.c */ -void xfs_trans_update_ail(struct xfs_mount *mp, - struct xfs_log_item *lip, xfs_lsn_t lsn) - __releases(mp->m_ail_lock); -void xfs_trans_delete_ail(struct xfs_mount *mp, - struct xfs_log_item *lip) - __releases(mp->m_ail_lock); +void xfs_trans_ail_update(struct xfs_ail *ailp, + struct xfs_log_item *lip, xfs_lsn_t lsn) + __releases(ailp->xa_lock); +void xfs_trans_ail_delete(struct xfs_ail *ailp, + struct xfs_log_item *lip) + __releases(ailp->xa_lock); +void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); +void xfs_trans_unlocked_item(struct xfs_ail *, + xfs_log_item_t *); xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp); -- cgit v1.2.3-70-g09d2 From 5a792c4579af8466246408e38fd4eff45d8493b8 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:40:09 +1100 Subject: [XFS] XFS: Check for valid transaction headers in recovery When we are about to add a new item to a transaction in recovery, we need to check that it is valid first. Currently we just assert that header magic number matches, but in production systems that is not present and we add a corrupted transaction to the list to be processed. 
This results in a kernel oops later when processing the corrupted transaction. Instead, if we detect a corrupted transaction, abort recovery and leave the user to clean up the mess that has occurred. SGI-PV: 988145 SGI-Modid: xfs-linux-melb:xfs-kern:32356a Signed-off-by: David Chinner Signed-off-by: Tim Shimmin Signed-off-by: Eric Sandeen Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_log_recover.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index cff901efc24..b411d494731 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1417,7 +1417,13 @@ xlog_recover_add_to_trans( return 0; item = trans->r_itemq; if (item == NULL) { - ASSERT(*(uint *)dp == XFS_TRANS_HEADER_MAGIC); + /* we need to catch log corruptions here */ + if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { + xlog_warn("XFS: xlog_recover_add_to_trans: " + "bad header magic number"); + ASSERT(0); + return XFS_ERROR(EIO); + } if (len == sizeof(xfs_trans_header_t)) xlog_recover_add_item(&trans->r_itemq); memcpy(&trans->r_theader, dp, len); /* d, s, l */ -- cgit v1.2.3-70-g09d2 From 9d565ffa33d574c2a22442f9d95ca2bd0be7cc42 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:53:24 +1100 Subject: [XFS] kill struct xfs_mount_args No need to parse the mount options into a structure before applying them to struct xfs_mount. The content of xfs_start_flags gets merged into xfs_parseargs. Calls in between don't care and can use mount members instead of the args struct. This patch uncovered that the mount option for shared filesystems wasn't ever exposed on Linux. The code to handle it is #if 0'ed in this patch pending a decision on this feature. I'll send a writeup about it to the list soon.
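As a minimal sketch of what that merge means for a single mount option (abbreviated and reflowed for illustration only, not the complete parser; the option and flag names are the ones used in this patch):

	/* before: xfs_parseargs() filled struct xfs_mount_args ... */
	} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
		args->flags |= XFSMNT_WSYNC;

	/* ... and xfs_start_flags() later copied it into the mount */
	if (ap->flags & XFSMNT_WSYNC)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/* after: the option is applied directly to struct xfs_mount */
	} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
		mp->m_flags |= XFS_MOUNT_WSYNC;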
SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32371a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_super.c | 499 ++++++++++++++++--------------------------- fs/xfs/quota/xfs_qm.c | 1 - fs/xfs/quota/xfs_qm_bhv.c | 1 - fs/xfs/xfs_clnt.h | 105 --------- fs/xfs/xfs_dmops.c | 5 +- fs/xfs/xfs_mount.h | 4 +- fs/xfs/xfs_qmops.c | 5 +- fs/xfs/xfs_vfsops.c | 1 - 8 files changed, 195 insertions(+), 426 deletions(-) delete mode 100644 fs/xfs/xfs_clnt.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 655508ce2b7..5638a99fb74 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -18,7 +18,6 @@ #include "xfs.h" #include "xfs_bit.h" #include "xfs_log.h" -#include "xfs_clnt.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" @@ -75,32 +74,6 @@ static struct super_operations xfs_super_operations; static kmem_zone_t *xfs_ioend_zone; mempool_t *xfs_ioend_pool; -STATIC struct xfs_mount_args * -xfs_args_allocate( - struct super_block *sb, - int silent) -{ - struct xfs_mount_args *args; - - args = kzalloc(sizeof(struct xfs_mount_args), GFP_KERNEL); - if (!args) - return NULL; - - args->logbufs = args->logbufsize = -1; - strncpy(args->fsname, sb->s_id, MAXNAMELEN); - - /* Copy the already-parsed mount(2) flags we're interested in */ - if (sb->s_flags & MS_DIRSYNC) - args->flags |= XFSMNT_DIRSYNC; - if (sb->s_flags & MS_SYNCHRONOUS) - args->flags |= XFSMNT_WSYNC; - if (silent) - args->flags |= XFSMNT_QUIET; - args->flags |= XFSMNT_32BITINODES; - - return args; -} - #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ #define MNTOPT_LOGDEV "logdev" /* log device */ @@ -189,26 +162,54 @@ suffix_strtoul(char *s, char **endp, unsigned int base) return simple_strtoul((const char *)s, endp, base) << shift_left_factor; } +/* + * This function fills in xfs_mount_t fields based on mount args. + * Note: the superblock has _not_ yet been read in. + * + * Note that this function leaks the various device name allocations on + * failure. The caller takes care of them. + */ STATIC int xfs_parseargs( struct xfs_mount *mp, char *options, - struct xfs_mount_args *args, - int update) + char **mtpt) { + struct super_block *sb = mp->m_super; char *this_char, *value, *eov; - int dsunit, dswidth, vol_dsunit, vol_dswidth; - int iosize; + int dsunit = 0; + int dswidth = 0; + int iosize = 0; int dmapi_implies_ikeep = 1; + uchar_t iosizelog = 0; + + /* + * Copy binary VFS mount flags we are interested in. + */ + if (sb->s_flags & MS_RDONLY) + mp->m_flags |= XFS_MOUNT_RDONLY; + if (sb->s_flags & MS_DIRSYNC) + mp->m_flags |= XFS_MOUNT_DIRSYNC; + if (sb->s_flags & MS_SYNCHRONOUS) + mp->m_flags |= XFS_MOUNT_WSYNC; + + /* + * Set some default flags that could be cleared by the mount option + * parsing. + */ + mp->m_flags |= XFS_MOUNT_BARRIER; + mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; + mp->m_flags |= XFS_MOUNT_SMALL_INUMS; - args->flags |= XFSMNT_BARRIER; - args->flags2 |= XFSMNT2_COMPAT_IOSIZE; + /* + * These can be overridden by the mount option parsing. 
+ */ + mp->m_logbufs = -1; + mp->m_logbsize = -1; if (!options) goto done; - iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; - while ((this_char = strsep(&options, ",")) != NULL) { if (!*this_char) continue; @@ -222,7 +223,7 @@ xfs_parseargs( this_char); return EINVAL; } - args->logbufs = simple_strtoul(value, &eov, 10); + mp->m_logbufs = simple_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -230,7 +231,7 @@ xfs_parseargs( this_char); return EINVAL; } - args->logbufsize = suffix_strtoul(value, &eov, 10); + mp->m_logbsize = suffix_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -238,7 +239,9 @@ xfs_parseargs( this_char); return EINVAL; } - strncpy(args->logname, value, MAXNAMELEN); + mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_logname) + return ENOMEM; } else if (!strcmp(this_char, MNTOPT_MTPT)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -246,7 +249,9 @@ xfs_parseargs( this_char); return EINVAL; } - strncpy(args->mtpt, value, MAXNAMELEN); + *mtpt = kstrndup(value, MAXNAMELEN, GFP_KERNEL); + if (!*mtpt) + return ENOMEM; } else if (!strcmp(this_char, MNTOPT_RTDEV)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -254,7 +259,9 @@ xfs_parseargs( this_char); return EINVAL; } - strncpy(args->rtname, value, MAXNAMELEN); + mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_rtname) + return ENOMEM; } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -263,8 +270,7 @@ xfs_parseargs( return EINVAL; } iosize = simple_strtoul(value, &eov, 10); - args->flags |= XFSMNT_IOSIZE; - args->iosizelog = (uint8_t) iosize; + iosizelog = (uint8_t) iosize; } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -273,8 +279,7 @@ xfs_parseargs( return EINVAL; } iosize = suffix_strtoul(value, &eov, 10); - args->flags |= XFSMNT_IOSIZE; - args->iosizelog = ffs(iosize) - 1; + iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_GRPID) || !strcmp(this_char, MNTOPT_BSDGROUPS)) { mp->m_flags |= XFS_MOUNT_GRPID; @@ -282,23 +287,25 @@ xfs_parseargs( !strcmp(this_char, MNTOPT_SYSVGROUPS)) { mp->m_flags &= ~XFS_MOUNT_GRPID; } else if (!strcmp(this_char, MNTOPT_WSYNC)) { - args->flags |= XFSMNT_WSYNC; + mp->m_flags |= XFS_MOUNT_WSYNC; } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { - args->flags |= XFSMNT_OSYNCISOSYNC; + mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { - args->flags |= XFSMNT_NORECOVERY; + mp->m_flags |= XFS_MOUNT_NORECOVERY; } else if (!strcmp(this_char, MNTOPT_INO64)) { - args->flags |= XFSMNT_INO64; -#if !XFS_BIG_INUMS +#if XFS_BIG_INUMS + mp->m_flags |= XFS_MOUNT_INO64; + mp->m_inoadd = XFS_INO64_OFFSET; +#else cmn_err(CE_WARN, "XFS: %s option not allowed on this system", this_char); return EINVAL; #endif } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { - args->flags |= XFSMNT_NOALIGN; + mp->m_flags |= XFS_MOUNT_NOALIGN; } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { - args->flags |= XFSMNT_SWALLOC; + mp->m_flags |= XFS_MOUNT_SWALLOC; } else if (!strcmp(this_char, MNTOPT_SUNIT)) { if (!value || !*value) { cmn_err(CE_WARN, @@ -316,7 +323,7 @@ xfs_parseargs( } dswidth = simple_strtoul(value, &eov, 10); } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { - args->flags &= ~XFSMNT_32BITINODES; + mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; #if !XFS_BIG_INUMS cmn_err(CE_WARN, "XFS: 
%s option not allowed on this system", @@ -324,56 +331,60 @@ xfs_parseargs( return EINVAL; #endif } else if (!strcmp(this_char, MNTOPT_NOUUID)) { - args->flags |= XFSMNT_NOUUID; + mp->m_flags |= XFS_MOUNT_NOUUID; } else if (!strcmp(this_char, MNTOPT_BARRIER)) { - args->flags |= XFSMNT_BARRIER; + mp->m_flags |= XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { - args->flags &= ~XFSMNT_BARRIER; + mp->m_flags &= ~XFS_MOUNT_BARRIER; } else if (!strcmp(this_char, MNTOPT_IKEEP)) { - args->flags |= XFSMNT_IKEEP; + mp->m_flags |= XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { dmapi_implies_ikeep = 0; - args->flags &= ~XFSMNT_IKEEP; + mp->m_flags &= ~XFS_MOUNT_IKEEP; } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { - args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; + mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { - args->flags2 |= XFSMNT2_COMPAT_IOSIZE; + mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; } else if (!strcmp(this_char, MNTOPT_ATTR2)) { - args->flags |= XFSMNT_ATTR2; + mp->m_flags |= XFS_MOUNT_ATTR2; } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { - args->flags &= ~XFSMNT_ATTR2; - args->flags |= XFSMNT_NOATTR2; + mp->m_flags &= ~XFS_MOUNT_ATTR2; + mp->m_flags |= XFS_MOUNT_NOATTR2; } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { - args->flags2 |= XFSMNT2_FILESTREAMS; + mp->m_flags |= XFS_MOUNT_FILESTREAMS; } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { - args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); - args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); + mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTA) || !strcmp(this_char, MNTOPT_UQUOTA) || !strcmp(this_char, MNTOPT_USRQUOTA)) { - args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | + XFS_UQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || !strcmp(this_char, MNTOPT_UQUOTANOENF)) { - args->flags |= XFSMNT_UQUOTA; - args->flags &= ~XFSMNT_UQUOTAENF; + mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_UQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_PQUOTA) || !strcmp(this_char, MNTOPT_PRJQUOTA)) { - args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF; + mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | + XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { - args->flags |= XFSMNT_PQUOTA; - args->flags &= ~XFSMNT_PQUOTAENF; + mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_GQUOTA) || !strcmp(this_char, MNTOPT_GRPQUOTA)) { - args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { - args->flags |= XFSMNT_GQUOTA; - args->flags &= ~XFSMNT_GQUOTAENF; + mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); + mp->m_qflags &= ~XFS_OQUOTA_ENFD; } else if (!strcmp(this_char, MNTOPT_DMAPI)) { - args->flags |= XFSMNT_DMAPI; + mp->m_flags |= XFS_MOUNT_DMAPI; } else if (!strcmp(this_char, MNTOPT_XDSM)) { - args->flags |= XFSMNT_DMAPI; + mp->m_flags |= XFS_MOUNT_DMAPI; } else if (!strcmp(this_char, MNTOPT_DMI)) { - args->flags |= XFSMNT_DMAPI; + mp->m_flags |= XFS_MOUNT_DMAPI; } else if (!strcmp(this_char, "ihashsize")) { cmn_err(CE_WARN, "XFS: ihashsize no longer used, option is deprecated."); @@ -391,27 +402,29 @@ xfs_parseargs( } } - if 
(args->flags & XFSMNT_NORECOVERY) { - if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) { - cmn_err(CE_WARN, - "XFS: no-recovery mounts must be read-only."); - return EINVAL; - } + /* + * no recovery flag requires a read-only mount + */ + if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && + !(mp->m_flags & XFS_MOUNT_RDONLY)) { + cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only."); + return EINVAL; } - if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { + if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { cmn_err(CE_WARN, "XFS: sunit and swidth options incompatible with the noalign option"); return EINVAL; } - if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) { + if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && + (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { cmn_err(CE_WARN, "XFS: cannot mount with both project and group quota"); return EINVAL; } - if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') { + if ((mp->m_flags & XFS_MOUNT_DMAPI) && (!*mtpt || *mtpt[0] == '\0')) { printk("XFS: %s option needs the mount point option as well\n", MNTOPT_DMAPI); return EINVAL; @@ -439,27 +452,66 @@ xfs_parseargs( * Note that if "ikeep" or "noikeep" mount options are * supplied, then they are honored. */ - if ((args->flags & XFSMNT_DMAPI) && dmapi_implies_ikeep) - args->flags |= XFSMNT_IKEEP; + if ((mp->m_flags & XFS_MOUNT_DMAPI) && dmapi_implies_ikeep) + mp->m_flags |= XFS_MOUNT_IKEEP; - if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { +done: + if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) { + /* + * At this point the superblock has not been read + * in, therefore we do not know the block size. + * Before the mount call ends we will convert + * these to FSBs. + */ if (dsunit) { - args->sunit = dsunit; - args->flags |= XFSMNT_RETERR; - } else { - args->sunit = vol_dsunit; + mp->m_dalign = dsunit; + mp->m_flags |= XFS_MOUNT_RETERR; } - dswidth ? 
(args->swidth = dswidth) : - (args->swidth = vol_dswidth); - } else { - args->sunit = args->swidth = 0; + + if (dswidth) + mp->m_swidth = dswidth; + } + + if (mp->m_logbufs != -1 && + mp->m_logbufs != 0 && + (mp->m_logbufs < XLOG_MIN_ICLOGS || + mp->m_logbufs > XLOG_MAX_ICLOGS)) { + cmn_err(CE_WARN, + "XFS: invalid logbufs value: %d [not %d-%d]", + mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); + return XFS_ERROR(EINVAL); + } + if (mp->m_logbsize != -1 && + mp->m_logbsize != 0 && + (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || + mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || + !is_power_of_2(mp->m_logbsize))) { + cmn_err(CE_WARN, + "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", + mp->m_logbsize); + return XFS_ERROR(EINVAL); + } + + mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); + if (!mp->m_fsname) + return ENOMEM; + mp->m_fsname_len = strlen(mp->m_fsname) + 1; + + if (iosizelog) { + if (iosizelog > XFS_MAX_IO_LOG || + iosizelog < XFS_MIN_IO_LOG) { + cmn_err(CE_WARN, + "XFS: invalid log iosize: %d [not %d-%d]", + iosizelog, XFS_MIN_IO_LOG, + XFS_MAX_IO_LOG); + return XFS_ERROR(EINVAL); + } + + mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; + mp->m_readio_log = iosizelog; + mp->m_writeio_log = iosizelog; } -done: - if (args->flags & XFSMNT_32BITINODES) - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; - if (args->flags2) - args->flags |= XFSMNT_FLAGS2; return 0; } @@ -705,8 +757,7 @@ xfs_close_devices( */ STATIC int xfs_open_devices( - struct xfs_mount *mp, - struct xfs_mount_args *args) + struct xfs_mount *mp) { struct block_device *ddev = mp->m_super->s_bdev; struct block_device *logdev = NULL, *rtdev = NULL; @@ -715,14 +766,14 @@ xfs_open_devices( /* * Open real time and log devices - order is important. */ - if (args->logname[0]) { - error = xfs_blkdev_get(mp, args->logname, &logdev); + if (mp->m_logname) { + error = xfs_blkdev_get(mp, mp->m_logname, &logdev); if (error) goto out; } - if (args->rtname[0]) { - error = xfs_blkdev_get(mp, args->rtname, &rtdev); + if (mp->m_rtname) { + error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); if (error) goto out_close_logdev; @@ -1286,177 +1337,30 @@ xfs_fs_setxquota( Q_XSETPQLIM), id, (caddr_t)fdq); } -/* - * This function fills in xfs_mount_t fields based on mount args. - * Note: the superblock has _not_ yet been read in. - */ -STATIC int -xfs_start_flags( - struct xfs_mount_args *ap, - struct xfs_mount *mp) -{ - int error; - - /* Values are in BBs */ - if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { - /* - * At this point the superblock has not been read - * in, therefore we do not know the block size. - * Before the mount call ends we will convert - * these to FSBs. 
- */ - mp->m_dalign = ap->sunit; - mp->m_swidth = ap->swidth; - } - - if (ap->logbufs != -1 && - ap->logbufs != 0 && - (ap->logbufs < XLOG_MIN_ICLOGS || - ap->logbufs > XLOG_MAX_ICLOGS)) { - cmn_err(CE_WARN, - "XFS: invalid logbufs value: %d [not %d-%d]", - ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); - return XFS_ERROR(EINVAL); - } - mp->m_logbufs = ap->logbufs; - if (ap->logbufsize != -1 && - ap->logbufsize != 0 && - (ap->logbufsize < XLOG_MIN_RECORD_BSIZE || - ap->logbufsize > XLOG_MAX_RECORD_BSIZE || - !is_power_of_2(ap->logbufsize))) { - cmn_err(CE_WARN, - "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", - ap->logbufsize); - return XFS_ERROR(EINVAL); - } - - error = ENOMEM; - - mp->m_logbsize = ap->logbufsize; - mp->m_fsname_len = strlen(ap->fsname) + 1; - - mp->m_fsname = kstrdup(ap->fsname, GFP_KERNEL); - if (!mp->m_fsname) - goto out; - - if (ap->rtname[0]) { - mp->m_rtname = kstrdup(ap->rtname, GFP_KERNEL); - if (!mp->m_rtname) - goto out_free_fsname; - - } - - if (ap->logname[0]) { - mp->m_logname = kstrdup(ap->logname, GFP_KERNEL); - if (!mp->m_logname) - goto out_free_rtname; - } - - if (ap->flags & XFSMNT_WSYNC) - mp->m_flags |= XFS_MOUNT_WSYNC; -#if XFS_BIG_INUMS - if (ap->flags & XFSMNT_INO64) { - mp->m_flags |= XFS_MOUNT_INO64; - mp->m_inoadd = XFS_INO64_OFFSET; - } -#endif - if (ap->flags & XFSMNT_RETERR) - mp->m_flags |= XFS_MOUNT_RETERR; - if (ap->flags & XFSMNT_NOALIGN) - mp->m_flags |= XFS_MOUNT_NOALIGN; - if (ap->flags & XFSMNT_SWALLOC) - mp->m_flags |= XFS_MOUNT_SWALLOC; - if (ap->flags & XFSMNT_OSYNCISOSYNC) - mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC; - if (ap->flags & XFSMNT_32BITINODES) - mp->m_flags |= XFS_MOUNT_32BITINODES; - - if (ap->flags & XFSMNT_IOSIZE) { - if (ap->iosizelog > XFS_MAX_IO_LOG || - ap->iosizelog < XFS_MIN_IO_LOG) { - cmn_err(CE_WARN, - "XFS: invalid log iosize: %d [not %d-%d]", - ap->iosizelog, XFS_MIN_IO_LOG, - XFS_MAX_IO_LOG); - return XFS_ERROR(EINVAL); - } - - mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; - mp->m_readio_log = mp->m_writeio_log = ap->iosizelog; - } - - if (ap->flags & XFSMNT_IKEEP) - mp->m_flags |= XFS_MOUNT_IKEEP; - if (ap->flags & XFSMNT_DIRSYNC) - mp->m_flags |= XFS_MOUNT_DIRSYNC; - if (ap->flags & XFSMNT_ATTR2) - mp->m_flags |= XFS_MOUNT_ATTR2; - if (ap->flags & XFSMNT_NOATTR2) - mp->m_flags |= XFS_MOUNT_NOATTR2; - - if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE) - mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; - - /* - * no recovery flag requires a read-only mount - */ - if (ap->flags & XFSMNT_NORECOVERY) { - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { - cmn_err(CE_WARN, - "XFS: tried to mount a FS read-write without recovery!"); - return XFS_ERROR(EINVAL); - } - mp->m_flags |= XFS_MOUNT_NORECOVERY; - } - - if (ap->flags & XFSMNT_NOUUID) - mp->m_flags |= XFS_MOUNT_NOUUID; - if (ap->flags & XFSMNT_BARRIER) - mp->m_flags |= XFS_MOUNT_BARRIER; - else - mp->m_flags &= ~XFS_MOUNT_BARRIER; - - if (ap->flags2 & XFSMNT2_FILESTREAMS) - mp->m_flags |= XFS_MOUNT_FILESTREAMS; - - if (ap->flags & XFSMNT_DMAPI) - mp->m_flags |= XFS_MOUNT_DMAPI; - return 0; - - - out_free_rtname: - kfree(mp->m_rtname); - out_free_fsname: - kfree(mp->m_fsname); - out: - return error; -} - /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock _has_ now been read in. 
*/ STATIC int xfs_finish_flags( - struct xfs_mount_args *ap, struct xfs_mount *mp) { int ronly = (mp->m_flags & XFS_MOUNT_RDONLY); /* Fail a mount where the logbuf is smaller then the log stripe */ if (xfs_sb_version_haslogv2(&mp->m_sb)) { - if ((ap->logbufsize <= 0) && - (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) { + if (mp->m_logbsize <= 0 && + mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { mp->m_logbsize = mp->m_sb.sb_logsunit; - } else if (ap->logbufsize > 0 && - ap->logbufsize < mp->m_sb.sb_logsunit) { + } else if (mp->m_logbsize > 0 && + mp->m_logbsize < mp->m_sb.sb_logsunit) { cmn_err(CE_WARN, "XFS: logbuf size must be greater than or equal to log stripe size"); return XFS_ERROR(EINVAL); } } else { /* Fail a mount if the logbuf is larger than 32K */ - if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) { + if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { cmn_err(CE_WARN, "XFS: logbuf size for version 1 logs must be 16K or 32K"); return XFS_ERROR(EINVAL); @@ -1468,7 +1372,7 @@ xfs_finish_flags( * told by noattr2 to turn it off */ if (xfs_sb_version_hasattr2(&mp->m_sb) && - !(ap->flags & XFSMNT_NOATTR2)) + !(mp->m_flags & XFS_MOUNT_NOATTR2)) mp->m_flags |= XFS_MOUNT_ATTR2; /* @@ -1480,6 +1384,7 @@ xfs_finish_flags( return XFS_ERROR(EROFS); } +#if 0 /* shared mounts were never supported on Linux */ /* * check for shared mount. */ @@ -1502,25 +1407,11 @@ xfs_finish_flags( /* * Shared XFS V0 can't deal with DMI. Return EINVAL. */ - if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI)) + if (mp->m_sb.sb_shared_vn == 0 && + (mp->m_flags & XFS_MOUNT_DMAPI)) return XFS_ERROR(EINVAL); } - - if (ap->flags & XFSMNT_UQUOTA) { - mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE); - if (ap->flags & XFSMNT_UQUOTAENF) - mp->m_qflags |= XFS_UQUOTA_ENFD; - } - - if (ap->flags & XFSMNT_GQUOTA) { - mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); - if (ap->flags & XFSMNT_GQUOTAENF) - mp->m_qflags |= XFS_OQUOTA_ENFD; - } else if (ap->flags & XFSMNT_PQUOTA) { - mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE); - if (ap->flags & XFSMNT_PQUOTAENF) - mp->m_qflags |= XFS_OQUOTA_ENFD; - } +#endif return 0; } @@ -1533,16 +1424,12 @@ xfs_fs_fill_super( { struct inode *root; struct xfs_mount *mp = NULL; - struct xfs_mount_args *args; int flags = 0, error = ENOMEM; - - args = xfs_args_allocate(sb, silent); - if (!args) - return -ENOMEM; + char *mtpt = NULL; mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); if (!mp) - goto out_free_args; + goto out; spin_lock_init(&mp->m_sb_lock); mutex_init(&mp->m_growlock); @@ -1554,12 +1441,9 @@ xfs_fs_fill_super( mp->m_super = sb; sb->s_fs_info = mp; - if (sb->s_flags & MS_RDONLY) - mp->m_flags |= XFS_MOUNT_RDONLY; - - error = xfs_parseargs(mp, (char *)data, args, 0); + error = xfs_parseargs(mp, (char *)data, &mtpt); if (error) - goto out_free_mp; + goto out_free_fsname; sb_min_blocksize(sb, BBSIZE); sb->s_xattr = xfs_xattr_handlers; @@ -1567,33 +1451,28 @@ xfs_fs_fill_super( sb->s_qcop = &xfs_quotactl_operations; sb->s_op = &xfs_super_operations; - error = xfs_dmops_get(mp, args); + error = xfs_dmops_get(mp); if (error) - goto out_free_mp; - error = xfs_qmops_get(mp, args); + goto out_free_fsname; + error = xfs_qmops_get(mp); if (error) goto out_put_dmops; - if (args->flags & XFSMNT_QUIET) + if (silent) flags |= XFS_MFSI_QUIET; - error = xfs_open_devices(mp, args); + error = xfs_open_devices(mp); if (error) goto out_put_qmops; if (xfs_icsb_init_counters(mp)) mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; - /* - * Setup flags based on mount(2) options and then the 
superblock - */ - error = xfs_start_flags(args, mp); - if (error) - goto out_free_fsname; error = xfs_readsb(mp, flags); if (error) - goto out_free_fsname; - error = xfs_finish_flags(args, mp); + goto out_destroy_counters; + + error = xfs_finish_flags(mp); if (error) goto out_free_sb; @@ -1612,7 +1491,7 @@ xfs_fs_fill_super( if (error) goto out_filestream_unmount; - XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname); + XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, mtpt, mp->m_fsname); sb->s_dirt = 1; sb->s_magic = XFS_SB_MAGIC; @@ -1641,27 +1520,27 @@ xfs_fs_fill_super( if (error) goto fail_vnrele; - xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); + kfree(mtpt); - kfree(args); + xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); return 0; out_filestream_unmount: xfs_filestream_unmount(mp); out_free_sb: xfs_freesb(mp); - out_free_fsname: - xfs_free_fsname(mp); + out_destroy_counters: xfs_icsb_destroy_counters(mp); xfs_close_devices(mp); out_put_qmops: xfs_qmops_put(mp); out_put_dmops: xfs_dmops_put(mp); - out_free_mp: + out_free_fsname: + xfs_free_fsname(mp); + kfree(mtpt); kfree(mp); - out_free_args: - kfree(args); + out: return -error; fail_vnrele: diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index db1986a205a..5b198d15e76 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -20,7 +20,6 @@ #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" -#include "xfs_clnt.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index eea2e60b456..9556df9f7da 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -20,7 +20,6 @@ #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" -#include "xfs_clnt.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h deleted file mode 100644 index d2ce5dd70d8..00000000000 --- a/fs/xfs/xfs_clnt.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_CLNT_H__ -#define __XFS_CLNT_H__ - -/* - * XFS arguments structure, constructed from the arguments we - * are passed via the mount system call. - * - * NOTE: The mount system call is handled differently between - * Linux and IRIX. In IRIX we worked work with a binary data - * structure coming in across the syscall interface from user - * space (the mount userspace knows about each filesystem type - * and the set of valid options for it, and converts the users - * argument string into a binary structure _before_ making the - * system call), and the ABI issues that this implies. - * - * In Linux, we are passed a comma separated set of options; - * ie. a NULL terminated string of characters. 
Userspace mount - * code does not have any knowledge of mount options expected by - * each filesystem type and so each filesystem parses its mount - * options in kernel space. - * - * For the Linux port, we kept this structure pretty much intact - * and use it internally (because the existing code groks it). - */ -struct xfs_mount_args { - int flags; /* flags -> see XFSMNT_... macros below */ - int flags2; /* flags -> see XFSMNT2_... macros below */ - int logbufs; /* Number of log buffers, -1 to default */ - int logbufsize; /* Size of log buffers, -1 to default */ - char fsname[MAXNAMELEN+1]; /* data device name */ - char rtname[MAXNAMELEN+1]; /* realtime device filename */ - char logname[MAXNAMELEN+1]; /* journal device filename */ - char mtpt[MAXNAMELEN+1]; /* filesystem mount point */ - int sunit; /* stripe unit (BBs) */ - int swidth; /* stripe width (BBs), multiple of sunit */ - uchar_t iosizelog; /* log2 of the preferred I/O size */ - int ihashsize; /* inode hash table size (buckets) */ -}; - -/* - * XFS mount option flags -- args->flags1 - */ -#define XFSMNT_ATTR2 0x00000001 /* allow ATTR2 EA format */ -#define XFSMNT_WSYNC 0x00000002 /* safe mode nfs mount - * compatible */ -#define XFSMNT_INO64 0x00000004 /* move inode numbers up - * past 2^32 */ -#define XFSMNT_UQUOTA 0x00000008 /* user quota accounting */ -#define XFSMNT_PQUOTA 0x00000010 /* IRIX prj quota accounting */ -#define XFSMNT_UQUOTAENF 0x00000020 /* user quota limit - * enforcement */ -#define XFSMNT_PQUOTAENF 0x00000040 /* IRIX project quota limit - * enforcement */ -#define XFSMNT_QUIET 0x00000080 /* don't report mount errors */ -#define XFSMNT_NOALIGN 0x00000200 /* don't allocate at - * stripe boundaries*/ -#define XFSMNT_RETERR 0x00000400 /* return error to user */ -#define XFSMNT_NORECOVERY 0x00000800 /* no recovery, implies - * read-only mount */ -#define XFSMNT_SHARED 0x00001000 /* shared XFS mount */ -#define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */ -#define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */ - /* (osyncisdsync is default) */ -#define XFSMNT_NOATTR2 0x00008000 /* turn off ATTR2 EA format */ -#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32 - * bits of address space */ -#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */ -#define XFSMNT_GQUOTAENF 0x00800000 /* group quota limit - * enforcement */ -#define XFSMNT_NOUUID 0x01000000 /* Ignore fs uuid */ -#define XFSMNT_DMAPI 0x02000000 /* enable dmapi/xdsm */ -#define XFSMNT_BARRIER 0x04000000 /* use write barriers */ -#define XFSMNT_IKEEP 0x08000000 /* inode cluster delete */ -#define XFSMNT_SWALLOC 0x10000000 /* turn on stripe width - * allocation */ -#define XFSMNT_DIRSYNC 0x40000000 /* sync creat,link,unlink,rename - * symlink,mkdir,rmdir,mknod */ -#define XFSMNT_FLAGS2 0x80000000 /* more flags set in flags2 */ - -/* - * XFS mount option flags -- args->flags2 - */ -#define XFSMNT2_COMPAT_IOSIZE 0x00000001 /* don't report large preferred - * I/O size in stat(2) */ -#define XFSMNT2_FILESTREAMS 0x00000002 /* enable the filestreams - * allocator */ - -#endif /* __XFS_CLNT_H__ */ diff --git a/fs/xfs/xfs_dmops.c b/fs/xfs/xfs_dmops.c index a1e55fb9d5d..e71e2581c0c 100644 --- a/fs/xfs/xfs_dmops.c +++ b/fs/xfs/xfs_dmops.c @@ -25,7 +25,6 @@ #include "xfs_inum.h" #include "xfs_ag.h" #include "xfs_mount.h" -#include "xfs_clnt.h" static struct xfs_dmops xfs_dmcore_stub = { @@ -38,9 +37,9 @@ static struct xfs_dmops xfs_dmcore_stub = { }; int -xfs_dmops_get(struct xfs_mount *mp, struct xfs_mount_args *args) 
+xfs_dmops_get(struct xfs_mount *mp) { - if (args->flags & XFSMNT_DMAPI) { + if (mp->m_flags & XFS_MOUNT_DMAPI) { cmn_err(CE_WARN, "XFS: dmapi support not available in this kernel."); return EINVAL; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d3b75257602..e3f618c84e4 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -515,9 +515,9 @@ extern void xfs_freesb(xfs_mount_t *); extern int xfs_fs_writable(xfs_mount_t *); extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); -extern int xfs_dmops_get(struct xfs_mount *, struct xfs_mount_args *); +extern int xfs_dmops_get(struct xfs_mount *); extern void xfs_dmops_put(struct xfs_mount *); -extern int xfs_qmops_get(struct xfs_mount *, struct xfs_mount_args *); +extern int xfs_qmops_get(struct xfs_mount *); extern void xfs_qmops_put(struct xfs_mount *); extern struct xfs_dmops xfs_dmcore_xfs; diff --git a/fs/xfs/xfs_qmops.c b/fs/xfs/xfs_qmops.c index a294e58db8d..27f80581520 100644 --- a/fs/xfs/xfs_qmops.c +++ b/fs/xfs/xfs_qmops.c @@ -28,7 +28,6 @@ #include "xfs_mount.h" #include "xfs_quota.h" #include "xfs_error.h" -#include "xfs_clnt.h" STATIC struct xfs_dquot * @@ -131,9 +130,9 @@ static struct xfs_qmops xfs_qmcore_stub = { }; int -xfs_qmops_get(struct xfs_mount *mp, struct xfs_mount_args *args) +xfs_qmops_get(struct xfs_mount *mp) { - if (args->flags & (XFSMNT_UQUOTA | XFSMNT_PQUOTA | XFSMNT_GQUOTA)) { + if (XFS_IS_QUOTA_RUNNING(mp)) { #ifdef CONFIG_XFS_QUOTA mp->m_qm_ops = &xfs_qmcore_xfs; #else diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 883dd0f68e9..305d9f3948e 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -49,7 +49,6 @@ #include "xfs_extfree_item.h" #include "xfs_acl.h" #include "xfs_attr.h" -#include "xfs_clnt.h" #include "xfs_mru_cache.h" #include "xfs_filestream.h" #include "xfs_fsops.h" -- cgit v1.2.3-70-g09d2 From 469fc23d5dd172665c69099db776c39bb1b70f92 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:54:57 +1100 Subject: [XFS] fix the noquota mount option Noquota should clear all mount options, and not just user and group quota. Probably doesn't matter very much in real life. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32372a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 5638a99fb74..62d48588849 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -355,6 +355,7 @@ xfs_parseargs( } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE | XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE | + XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE | XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD); } else if (!strcmp(this_char, MNTOPT_QUOTA) || !strcmp(this_char, MNTOPT_UQUOTA) || -- cgit v1.2.3-70-g09d2 From 1ec7944beb6f3c29f1e58a66422130133727e9e1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:55:08 +1100 Subject: [XFS] fix biosize option iosizelog shouldn't be the same as iosize but the logarithm of it. Then again the current biosize option doesn't make much sense to me as it doesn't set the preferred I/O size as mentioned in the comment next to it but rather the allocation size and thus is identical to the allocsize option (except for the missing logarithm). It's also not documented in Documentation/filesystems/xfs.txt or the mount manpage. 
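A quick worked example of the fix (the size value is hypothetical, chosen only to show the arithmetic; ffs() returns the 1-based index of the lowest set bit):

	iosizelog = (uint8_t) 4096;	/* old: truncates to 0 */
	iosizelog = ffs(4096) - 1;	/* new: 12, i.e. log2(4096), as allocsize already computed */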
SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32373a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 62d48588849..c3d004bc462 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -270,7 +270,7 @@ xfs_parseargs( return EINVAL; } iosize = simple_strtoul(value, &eov, 10); - iosizelog = (uint8_t) iosize; + iosizelog = ffs(iosize) - 1; } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { if (!value || !*value) { cmn_err(CE_WARN, -- cgit v1.2.3-70-g09d2 From 2b7035fd7473c799ca3372092d72c768c7db329d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 17:55:18 +1100 Subject: [XFS] Trivial xfs_remove comment fixup The dp to ip comment should be for the unconditional xfs_droplink call, and the "." link obviously only exists for directories, so it should be in the is_dir conditional. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32374a Signed-off-by: Christoph Hellwig Signed-off-by: Donald Douwsma Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_vnodeops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 1d15a320b9a..1c890113ab3 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -2009,7 +2009,7 @@ xfs_remove( goto out_bmap_cancel; /* - * Drop the link from dp to ip. + * Drop the "." link from ip to self. */ error = xfs_droplink(tp, ip); if (error) @@ -2024,7 +2024,7 @@ xfs_remove( } /* - * Drop the "." link from ip to self. + * Drop the link from dp to ip. */ error = xfs_droplink(tp, ip); if (error) -- cgit v1.2.3-70-g09d2 From 56e73ec47d749047f441e6b9d60d964535d31c3b Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 17:55:27 +1100 Subject: [XFS] Can't lock inodes in radix tree preload region When we are inside a radix tree preload region, we cannot sleep. Recently we moved the inode locking inside the preload region for the inode radix tree. Fix that, and fix a missed unlock in another error path in the same code at the same time. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32385a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Christoph Hellwig --- fs/xfs/xfs_iget.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index a1f209b0596..377c0cd1499 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -159,18 +159,19 @@ xfs_iget_cache_miss( goto out_destroy; } + if (lock_flags) + xfs_ilock(ip, lock_flags); + /* * Preload the radix tree so we can insert safely under the - * write spinlock. + * write spinlock. Note that we cannot sleep inside the preload + * region. */ if (radix_tree_preload(GFP_KERNEL)) { error = EAGAIN; - goto out_destroy; + goto out_unlock; } - if (lock_flags) - xfs_ilock(ip, lock_flags); - mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = agino & mask; write_lock(&pag->pag_ici_lock); @@ -181,7 +182,7 @@ xfs_iget_cache_miss( WARN_ON(error != -EEXIST); XFS_STATS_INC(xs_ig_dup); error = EAGAIN; - goto out_unlock; + goto out_preload_end; } /* These values _must_ be set before releasing the radix tree lock! 
*/ @@ -193,9 +194,12 @@ xfs_iget_cache_miss( *ipp = ip; return 0; -out_unlock: +out_preload_end: write_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); +out_unlock: + if (lock_flags) + xfs_iunlock(ip, lock_flags); out_destroy: xfs_idestroy(ip); return error; -- cgit v1.2.3-70-g09d2 From 455486b9ccdd0a1d7432a03302f549b1c917c181 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 18:03:14 +1100 Subject: [XFS] avoid all reclaimable inodes in xfs_sync_inodes_ag If we are syncing data in xfs_sync_inodes_ag(), the VFS inode must still be referencable as the dirty data state is carried on the VFS inode. hence if we can't get a reference via igrab(), the inode must be in reclaim which implies that it has no dirty data attached. Leave such inodes to the reclaim code to flush the dirty inode state to disk and so avoid attempting to access the VFS inode when it may not exist in xfs_sync_inodes_ag(). Version 4: o don't reference linux inode until after igrab() succeeds Version 3: o converted unlock/rele to an xfs_iput() call. Version 2: o change igrab logic to be more linear o remove initial reclaimable inode check now that we are using igrab() failure to find reclaimable inodes o assert that igrab failure occurs only on reclaimable inodes o clean up inode locking - only grab the iolock if we are doing a SYNC_DELWRI call and we have a dirty inode. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32391a Signed-off-by: David Chinner Signed-off-by: Christoph Hellwig Signed-off-by: Peter Leckie Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_sync.c | 75 +++++++++++---------------------------------- 1 file changed, 18 insertions(+), 57 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index ee1648b179f..fb5cca3df84 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -63,25 +63,16 @@ xfs_sync_inodes_ag( int error = 0; int last_error = 0; int fflag = XFS_B_ASYNC; - int lock_flags = XFS_ILOCK_SHARED; if (flags & SYNC_DELWRI) fflag = XFS_B_DELWRI; if (flags & SYNC_WAIT) fflag = 0; /* synchronous overrides all */ - if (flags & SYNC_DELWRI) { - /* - * We need the I/O lock if we're going to call any of - * the flush/inval routines. - */ - lock_flags |= XFS_IOLOCK_SHARED; - } - do { struct inode *inode; - boolean_t inode_refed; xfs_inode_t *ip = NULL; + int lock_flags = XFS_ILOCK_SHARED; /* * use a gang lookup to find the next inode in the tree @@ -109,22 +100,6 @@ xfs_sync_inodes_ag( break; } - /* - * skip inodes in reclaim. Let xfs_syncsub do that for - * us so we don't need to worry. - */ - if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) { - read_unlock(&pag->pag_ici_lock); - continue; - } - - /* bad inodes are dealt with elsewhere */ - inode = VFS_I(ip); - if (is_bad_inode(inode)) { - read_unlock(&pag->pag_ici_lock); - continue; - } - /* nothing to sync during shutdown */ if (XFS_FORCED_SHUTDOWN(mp)) { read_unlock(&pag->pag_ici_lock); @@ -132,42 +107,34 @@ xfs_sync_inodes_ag( } /* - * If we can't get a reference on the VFS_I, the inode must be - * in reclaim. If we can get the inode lock without blocking, - * it is safe to flush the inode because we hold the tree lock - * and xfs_iextract will block right now. Hence if we lock the - * inode while holding the tree lock, xfs_ireclaim() is - * guaranteed to block on the inode lock we now hold and hence - * it is safe to reference the inode until we drop the inode - * locks completely. 
+ * If we can't get a reference on the inode, it must be + * in reclaim. Leave it for the reclaim code to flush. */ - inode_refed = B_FALSE; - if (igrab(inode)) { - read_unlock(&pag->pag_ici_lock); - xfs_ilock(ip, lock_flags); - inode_refed = B_TRUE; - } else { - if (!xfs_ilock_nowait(ip, lock_flags)) { - /* leave it to reclaim */ - read_unlock(&pag->pag_ici_lock); - continue; - } + inode = VFS_I(ip); + if (!igrab(inode)) { read_unlock(&pag->pag_ici_lock); + continue; + } + read_unlock(&pag->pag_ici_lock); + + /* bad inodes are dealt with elsewhere */ + if (is_bad_inode(inode)) { + IRELE(ip); + continue; } /* * If we have to flush data or wait for I/O completion - * we need to drop the ilock that we currently hold. - * If we need to drop the lock, insert a marker if we - * have not already done so. + * we need to hold the iolock. */ if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) { - xfs_iunlock(ip, XFS_ILOCK_SHARED); + xfs_ilock(ip, XFS_IOLOCK_SHARED); + lock_flags |= XFS_IOLOCK_SHARED; error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); if (flags & SYNC_IOWAIT) vn_iowait(ip); - xfs_ilock(ip, XFS_ILOCK_SHARED); } + xfs_ilock(ip, XFS_ILOCK_SHARED); if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) { if (flags & SYNC_WAIT) { @@ -183,13 +150,7 @@ xfs_sync_inodes_ag( xfs_ifunlock(ip); } } - - if (lock_flags) - xfs_iunlock(ip, lock_flags); - - if (inode_refed) { - IRELE(ip); - } + xfs_iput(ip, lock_flags); if (error) last_error = error; -- cgit v1.2.3-70-g09d2 From c679eef0520eb3c2c731fce505e61b8ef9469aac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 18:04:13 +1100 Subject: [XFS] stop using xfs_itobp in xfs_bulkstat xfs_bulkstat only wants the dinode, offset and buffer from a given inode number. Instead of using xfs_itobp on a fake inode which is complicated and currently leads to leaks of the security data just use xfs_inotobp which is designed to do exactly the kind of lookup xfs_bulkstat wants. The only thing that's missing in xfs_inotobp is a flags paramter that let's us pass down XFS_IMAP_BULKSTAT, but that can easily added. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32397a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: David Chinner --- fs/xfs/xfs_inode.c | 13 +++++++------ fs/xfs/xfs_inode.h | 6 ++++-- fs/xfs/xfs_itable.c | 21 ++++++++------------- 3 files changed, 19 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c83f6998f95..35e419191ab 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -222,25 +222,26 @@ xfs_imap_to_bp( * Use xfs_imap() to determine the size and location of the * buffer to read from disk. */ -STATIC int +int xfs_inotobp( xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, xfs_dinode_t **dipp, xfs_buf_t **bpp, - int *offset) + int *offset, + uint imap_flags) { xfs_imap_t imap; xfs_buf_t *bp; int error; imap.im_blkno = 0; - error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP); + error = xfs_imap(mp, tp, ino, &imap, imap_flags | XFS_IMAP_LOOKUP); if (error) return error; - error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0); + error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags); if (error) return error; @@ -792,7 +793,7 @@ xfs_dic2xflags( /* * Allocate and initialise an xfs_inode. 
*/ -struct xfs_inode * +STATIC struct xfs_inode * xfs_inode_alloc( struct xfs_mount *mp, xfs_ino_t ino) @@ -2046,7 +2047,7 @@ xfs_iunlink_remove( } next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino); error = xfs_inotobp(mp, tp, next_ino, &last_dip, - &last_ibp, &last_offset); + &last_ibp, &last_offset, 0); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index a5aeb9cfeae..5d12cfeb43c 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -158,7 +158,7 @@ typedef struct xfs_icdinode { #define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ /* - * Flags for xfs_itobp(), xfs_imap() and xfs_dilocate(). + * Flags for xfs_inotobp, xfs_itobp(), xfs_imap() and xfs_dilocate(). */ #define XFS_IMAP_LOOKUP 0x1 #define XFS_IMAP_BULKSTAT 0x2 @@ -514,7 +514,6 @@ int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, xfs_fsize_t, int, int); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); -struct xfs_inode * xfs_inode_alloc(struct xfs_mount *, xfs_ino_t); void xfs_idestroy(xfs_inode_t *); void xfs_iextract(xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); @@ -531,6 +530,9 @@ void xfs_mark_inode_dirty_sync(xfs_inode_t *); #endif /* __KERNEL__ */ +int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, + xfs_ino_t, struct xfs_dinode **, + struct xfs_buf **, int *, uint); int xfs_itobp(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, struct xfs_dinode **, struct xfs_buf **, xfs_daddr_t, uint, uint); diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 42a214b8df9..35118032a5d 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -359,7 +359,6 @@ xfs_bulkstat( int ubused; /* bytes used by formatter */ xfs_buf_t *bp; /* ptr to on-disk inode cluster buf */ xfs_dinode_t *dip; /* ptr into bp for specific inode */ - xfs_inode_t *ip; /* ptr to in-core inode struct */ /* * Get the last inode value, see if there's nothing to do. @@ -585,6 +584,8 @@ xfs_bulkstat( if (flags & (BULKSTAT_FG_QUICK | BULKSTAT_FG_INLINE)) { + int offset; + ino = XFS_AGINO_TO_INO(mp, agno, agino); bno = XFS_AGB_TO_DADDR(mp, agno, @@ -595,19 +596,13 @@ xfs_bulkstat( */ if (bp) xfs_buf_relse(bp); - ip = xfs_inode_alloc(mp, ino); - if (!ip) { - bp = NULL; - rval = ENOMEM; - break; - } - error = xfs_itobp(mp, NULL, ip, - &dip, &bp, bno, - XFS_IMAP_BULKSTAT, - XFS_BUF_LOCK); + + error = xfs_inotobp(mp, NULL, ino, &dip, + &bp, &offset, + XFS_IMAP_BULKSTAT); + if (!error) - clustidx = ip->i_boffset / mp->m_sb.sb_inodesize; - xfs_idestroy(ip); + clustidx = offset / mp->m_sb.sb_inodesize; if (XFS_TEST_ERROR(error != 0, mp, XFS_ERRTAG_BULKSTAT_READ_CHUNK, XFS_RANDOM_BULKSTAT_READ_CHUNK)) { -- cgit v1.2.3-70-g09d2 From 087e3b0460c367d0f4a5b71d7b013968ae23b588 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 18:24:37 +1100 Subject: Inode: export symbol destroy_inode To make sure we free the security data inodes need to be freed using the proper VFS helper (which we also need to export for this). We mark these inodes bad so we can skip the flush path for them. 
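To make the ordering concrete, a compact sketch of the error-path pattern this
export enables (make_bad_inode() and destroy_inode() are the real VFS helpers;
the surrounding function is hypothetical, and the actual XFS wrapper built on
top of this export appears in the following patch):

	#include <linux/fs.h>

	/* Hypothetical error path for an inode that failed to initialise. */
	static void example_teardown_partial_inode(struct inode *inode)
	{
		/* Mark it bad first so reclaim/flush code takes the short
		 * cut for an inode that never became usable... */
		make_bad_inode(inode);

		/* ...then free it through the VFS helper, which also undoes
		 * the init-time allocations (such as the security data) that
		 * a raw kmem_cache_free() would leak. */
		destroy_inode(inode);
	}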
Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: David Chinner --- fs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index fbcf6c5e760..f84ba338faf 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -212,6 +212,7 @@ void destroy_inode(struct inode *inode) else kmem_cache_free(inode_cachep, (inode)); } +EXPORT_SYMBOL(destroy_inode); /* -- cgit v1.2.3-70-g09d2 From 9ed0451ee0a13469f7b38e4ced8974036f6d114f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 18:26:04 +1100 Subject: [XFS] free partially initialized inodes using destroy_inode To make sure we free the security data inodes need to be freed using the proper VFS helper (which we also need to export for this). We mark these inodes bad so we can skip the flush path for them. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32398a Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy Signed-off-by: David Chinner --- fs/xfs/xfs_iget.c | 2 +- fs/xfs/xfs_inode.c | 21 +++++++++++---------- fs/xfs/xfs_inode.h | 17 +++++++++++++++++ 3 files changed, 29 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 377c0cd1499..837cae78153 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -201,7 +201,7 @@ out_unlock: if (lock_flags) xfs_iunlock(ip, lock_flags); out_destroy: - xfs_idestroy(ip); + xfs_destroy_inode(ip); return error; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 35e419191ab..cd522827f99 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -898,18 +898,14 @@ xfs_iread( * know that this is a new incore inode. */ error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK); - if (error) { - xfs_idestroy(ip); - return error; - } + if (error) + goto out_destroy_inode; /* * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. */ if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) { - xfs_idestroy(ip); - xfs_trans_brelse(tp, bp); #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " "dip->di_core.di_magic (0x%x) != " @@ -917,7 +913,8 @@ xfs_iread( be16_to_cpu(dip->di_core.di_magic), XFS_DINODE_MAGIC); #endif /* DEBUG */ - return XFS_ERROR(EINVAL); + error = XFS_ERROR(EINVAL); + goto out_brelse; } /* @@ -931,14 +928,12 @@ xfs_iread( xfs_dinode_from_disk(&ip->i_d, &dip->di_core); error = xfs_iformat(ip, dip); if (error) { - xfs_idestroy(ip); - xfs_trans_brelse(tp, bp); #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " "xfs_iformat() returned error %d", error); #endif /* DEBUG */ - return error; + goto out_brelse; } } else { ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic); @@ -1004,6 +999,12 @@ xfs_iread( xfs_trans_brelse(tp, bp); *ipp = ip; return 0; + + out_brelse: + xfs_trans_brelse(tp, bp); + out_destroy_inode: + xfs_destroy_inode(ip); + return error; } /* diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 5d12cfeb43c..7f007ef4bbb 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -309,6 +309,23 @@ static inline struct inode *VFS_I(struct xfs_inode *ip) return &ip->i_vnode; } +/* + * Get rid of a partially initialized inode. + * + * We have to go through destroy_inode to make sure allocations + * from init_inode_always like the security data are undone. + * + * We mark the inode bad so that it takes the short cut in + * the reclaim path instead of going through the flush path + * which doesn't make sense for an inode that has never seen the + * light of day. 
+ */ +static inline void xfs_destroy_inode(struct xfs_inode *ip) +{ + make_bad_inode(VFS_I(ip)); + return destroy_inode(VFS_I(ip)); +} + /* * i_flags helper functions */ -- cgit v1.2.3-70-g09d2 From 7ee49acfe54883f16d28d9486b789431a5804d18 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 18:26:51 +1100 Subject: [XFS] correctly select first log item to push Under heavy metadata load we are seeing log hangs. The AIL has items in it ready to be pushed, and they are within the push target window. However, we are not pushing them when the last pushed LSN is less than the LSN of the first log item on the AIL. This is a regression introduced by the AIL push cursor modifications. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32409a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy Signed-off-by: Tim Shimmin --- fs/xfs/xfs_trans_ail.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 67ee4663336..2d47f10f8be 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -228,7 +228,7 @@ xfs_trans_ail_cursor_first( list_for_each_entry(lip, &ailp->xa_ail, li_ail) { if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) - break; + goto out; } lip = NULL; out: -- cgit v1.2.3-70-g09d2 From ea5a3dc8356bf1cf27bab9a5a0da5dfbbb82013d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 30 Oct 2008 18:27:48 +1100 Subject: [XFS] kill sys_cred capable_cred has been unused for a while so we can kill it and sys_cred. That also means the cred argument to xfs_setattr and xfs_change_file_space can be removed now. SGI-PV: 988918 SGI-Modid: xfs-linux-melb:xfs-kern:32412a Signed-off-by: Christoph Hellwig Signed-off-by: Tim Shimmin Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_cred.h | 8 -------- fs/xfs/linux-2.6/xfs_globals.c | 7 ------- fs/xfs/linux-2.6/xfs_globals.h | 1 - fs/xfs/linux-2.6/xfs_ioctl.c | 3 +-- fs/xfs/linux-2.6/xfs_iops.c | 6 +++--- fs/xfs/xfs_acl.c | 2 +- fs/xfs/xfs_vnodeops.c | 6 ++---- fs/xfs/xfs_vnodeops.h | 6 ++---- 8 files changed, 9 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h index 652721ce0ea..98da2199bc2 100644 --- a/fs/xfs/linux-2.6/xfs_cred.h +++ b/fs/xfs/linux-2.6/xfs_cred.h @@ -27,12 +27,4 @@ typedef struct cred { /* EMPTY */ } cred_t; -extern struct cred *sys_cred; - -/* this is a hack.. (assumes sys_cred is the only cred_t in the system) */ -static inline int capable_cred(cred_t *cr, int cid) -{ - return (cr == sys_cred) ? 1 : capable(cid); -} - #endif /* __XFS_CRED_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c index ef90e64641e..46e862b004e 100644 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ b/fs/xfs/linux-2.6/xfs_globals.c @@ -43,10 +43,3 @@ xfs_param_t xfs_params = { .inherit_nodfrg = { 0, 1, 1 }, .fstrm_timer = { 1, 30*100, 3600*100}, }; - -/* - * Global system credential structure. 
- */ -static cred_t sys_cred_val; -cred_t *sys_cred = &sys_cred_val; - diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h index 2770b0085ee..69f71caf061 100644 --- a/fs/xfs/linux-2.6/xfs_globals.h +++ b/fs/xfs/linux-2.6/xfs_globals.h @@ -19,6 +19,5 @@ #define __XFS_GLOBALS_H__ extern uint64_t xfs_panic_mask; /* set to cause more panics */ -extern struct cred *sys_cred; #endif /* __XFS_GLOBALS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index d3438c72dca..b5ea3f2afdc 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -691,8 +691,7 @@ xfs_ioc_space( if (ioflags & IO_INVIS) attr_flags |= XFS_ATTR_DMI; - error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, - NULL, attr_flags); + error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, attr_flags); return -error; } diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 37bb1012aff..f78bc221576 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -601,7 +601,7 @@ xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { - return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0, NULL); + return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); } /* @@ -642,7 +642,7 @@ xfs_vn_fallocate( xfs_ilock(ip, XFS_IOLOCK_EXCL); error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, - 0, NULL, XFS_ATTR_NOLOCK); + 0, XFS_ATTR_NOLOCK); if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) new_size = offset + len; @@ -653,7 +653,7 @@ xfs_vn_fallocate( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; - error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK, NULL); + error = xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); } xfs_iunlock(ip, XFS_IOLOCK_EXCL); diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index b2f639a1416..8b3d1bdeb44 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -758,7 +758,7 @@ xfs_acl_setmode( if (gap && nomask) iattr.ia_mode |= gap->ae_perm << 3; - return xfs_setattr(XFS_I(vp), &iattr, 0, sys_cred); + return xfs_setattr(XFS_I(vp), &iattr, 0); } /* diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 1c890113ab3..34a1982ed6d 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -79,8 +79,7 @@ int xfs_setattr( struct xfs_inode *ip, struct iattr *iattr, - int flags, - cred_t *credp) + int flags) { xfs_mount_t *mp = ip->i_mount; struct inode *inode = VFS_I(ip); @@ -3362,7 +3361,6 @@ xfs_change_file_space( int cmd, xfs_flock64_t *bf, xfs_off_t offset, - cred_t *credp, int attr_flags) { xfs_mount_t *mp = ip->i_mount; @@ -3450,7 +3448,7 @@ xfs_change_file_space( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = startoffset; - error = xfs_setattr(ip, &iattr, attr_flags, credp); + error = xfs_setattr(ip, &iattr, attr_flags); if (error) return error; diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index e932a96bec5..b1ae8e3f404 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -15,8 +15,7 @@ struct xfs_iomap; int xfs_open(struct xfs_inode *ip); -int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags, - struct cred *credp); +int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags); #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ @@ -44,8 +43,7 @@ int xfs_inode_flush(struct xfs_inode *ip, int flags); int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, 
u_int16_t state); int xfs_reclaim(struct xfs_inode *ip); int xfs_change_file_space(struct xfs_inode *ip, int cmd, - xfs_flock64_t *bf, xfs_off_t offset, - struct cred *credp, int attr_flags); + xfs_flock64_t *bf, xfs_off_t offset, int attr_flags); int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, struct xfs_inode *src_ip, struct xfs_inode *target_dp, struct xfs_name *target_name, struct xfs_inode *target_ip); -- cgit v1.2.3-70-g09d2 From e0b8e8b65d578f5d5538465dff8392cf02e1cc5d Mon Sep 17 00:00:00 2001 From: Tim Shimmin Date: Thu, 30 Oct 2008 18:30:48 +1100 Subject: [XFS] remove restricted chown parameter from xfs linux On Linux all filesystems are supposed to be operating under Posix' restricted chown. Restricted chown means it restricts chown to the owner unless you have CAP_FOWNER. NOTE: that 2 files outside of fs/xfs have been modified too for this change. Reviewed-by: Dave Chinner SGI-PV: 988919 SGI-Modid: xfs-linux-melb:xfs-kern:32413a Signed-off-by: Tim Shimmin Signed-off-by: Christoph Hellwig Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_globals.c | 1 - fs/xfs/linux-2.6/xfs_ioctl.c | 4 ---- fs/xfs/linux-2.6/xfs_linux.h | 1 - fs/xfs/linux-2.6/xfs_sysctl.c | 11 ----------- fs/xfs/linux-2.6/xfs_sysctl.h | 3 +-- fs/xfs/xfs_vnodeops.c | 13 ++----------- 6 files changed, 3 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c index 46e862b004e..2ae8b1ccb02 100644 --- a/fs/xfs/linux-2.6/xfs_globals.c +++ b/fs/xfs/linux-2.6/xfs_globals.c @@ -26,7 +26,6 @@ */ xfs_param_t xfs_params = { /* MIN DFLT MAX */ - .restrict_chown = { 0, 1, 1 }, .sgid_inherit = { 0, 0, 1 }, .symlink_mode = { 0, 0, 1 }, .panic_mask = { 0, 0, 255 }, diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index b5ea3f2afdc..d25694e8cd6 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -1103,10 +1103,6 @@ xfs_ioctl_setattr( /* * Change file ownership. Must be the owner or privileged. - * If the system was configured with the "restricted_chown" - * option, the owner is not permitted to give away the file, - * and can change the group id only to a group of which he - * or she is a member. 
*/ if (mask & FSX_PROJID) { /* diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index cc0f7b3a979..214717650b2 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -107,7 +107,6 @@ #undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ #endif -#define restricted_chown xfs_params.restrict_chown.val #define irix_sgid_inherit xfs_params.sgid_inherit.val #define irix_symlink_mode xfs_params.symlink_mode.val #define xfs_panic_mask xfs_params.panic_mask.val diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index 7dacb5bbde3..916c0ffb608 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -55,17 +55,6 @@ xfs_stats_clear_proc_handler( #endif /* CONFIG_PROC_FS */ static ctl_table xfs_table[] = { - { - .ctl_name = XFS_RESTRICT_CHOWN, - .procname = "restrict_chown", - .data = &xfs_params.restrict_chown.val, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &xfs_params.restrict_chown.min, - .extra2 = &xfs_params.restrict_chown.max - }, { .ctl_name = XFS_SGID_INHERIT, .procname = "irix_sgid_inherit", diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h index 4aadb8056c3..b9937d450f8 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.h +++ b/fs/xfs/linux-2.6/xfs_sysctl.h @@ -31,7 +31,6 @@ typedef struct xfs_sysctl_val { } xfs_sysctl_val_t; typedef struct xfs_param { - xfs_sysctl_val_t restrict_chown;/* Root/non-root can give away files.*/ xfs_sysctl_val_t sgid_inherit; /* Inherit S_ISGID if process' GID is * not a member of parent dir GID. */ xfs_sysctl_val_t symlink_mode; /* Link creat mode affected by umask */ @@ -68,7 +67,7 @@ typedef struct xfs_param { enum { /* XFS_REFCACHE_SIZE = 1 */ /* XFS_REFCACHE_PURGE = 2 */ - XFS_RESTRICT_CHOWN = 3, + /* XFS_RESTRICT_CHOWN = 3 */ XFS_SGID_INHERIT = 4, XFS_SYMLINK_MODE = 5, XFS_PANIC_MASK = 6, diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 34a1982ed6d..c45ea278ef4 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -232,10 +232,6 @@ xfs_setattr( /* * Change file ownership. Must be the owner or privileged. - * If the system was configured with the "restricted_chown" - * option, the owner is not permitted to give away the file, - * and can change the group id only to a group of which he - * or she is a member. */ if (mask & (ATTR_UID|ATTR_GID)) { /* @@ -259,9 +255,8 @@ xfs_setattr( * shall be equal to either the group ID or one of the * supplementary group IDs of the calling process. */ - if (restricted_chown && - (iuid != uid || (igid != gid && - !in_group_p((gid_t)gid))) && + if ((iuid != uid || + (igid != gid && !in_group_p((gid_t)gid))) && !capable(CAP_CHOWN)) { code = XFS_ERROR(EPERM); goto error_return; @@ -455,10 +450,6 @@ xfs_setattr( /* * Change file ownership. Must be the owner or privileged. - * If the system was configured with the "restricted_chown" - * option, the owner is not permitted to give away the file, - * and can change the group id only to a group of which he - * or she is a member. */ if (mask & (ATTR_UID|ATTR_GID)) { /* -- cgit v1.2.3-70-g09d2 From 6bfb3d065f4c498c17a3a07f3dc08cedff53aff4 Mon Sep 17 00:00:00 2001 From: David Chinner Date: Thu, 30 Oct 2008 18:32:43 +1100 Subject: [XFS] Fix race when looking up reclaimable inodes If we get a race looking up a reclaimable inode, we can end up with the winner proceeding to use the inode before it has been completely re-initialised. This is a Bad Thing. 
Fix the race by checking whether we are still initialising the inod eonce we have a reference to it, and if so wait for the initialisation to complete before continuing. While there, fix a leaked reference count in the same code when encountering an unlinked inode and we are not doing a lookup for a create operation. SGI-PV: 987246 SGI-Modid: xfs-linux-melb:xfs-kern:32429a Signed-off-by: David Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_linux.h | 1 + fs/xfs/xfs_iget.c | 32 ++++++++++++++++++++++---------- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 214717650b2..77d6ddcaf54 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -77,6 +77,7 @@ #include #include #include +#include #include #include diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 837cae78153..bf4dc5eb4cf 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -52,7 +52,7 @@ xfs_iget_cache_hit( int lock_flags) __releases(pag->pag_ici_lock) { struct xfs_mount *mp = ip->i_mount; - int error = 0; + int error = EAGAIN; /* * If INEW is set this inode is being set up @@ -60,7 +60,6 @@ xfs_iget_cache_hit( * Pause and try again. */ if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) { - error = EAGAIN; XFS_STATS_INC(xs_ig_frecycle); goto out_error; } @@ -73,7 +72,6 @@ xfs_iget_cache_hit( * error immediately so we don't remove it from the reclaim * list and potentially leak the inode. */ - if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { error = ENOENT; goto out_error; @@ -91,27 +89,42 @@ xfs_iget_cache_hit( error = ENOMEM; goto out_error; } + + /* + * We must set the XFS_INEW flag before clearing the + * XFS_IRECLAIMABLE flag so that if a racing lookup does + * not find the XFS_IRECLAIMABLE above but has the igrab() + * below succeed we can safely check XFS_INEW to detect + * that this inode is still being initialised. + */ xfs_iflags_set(ip, XFS_INEW); xfs_iflags_clear(ip, XFS_IRECLAIMABLE); /* clear the radix tree reclaim flag as well. */ __xfs_inode_clear_reclaim_tag(mp, pag, ip); - read_unlock(&pag->pag_ici_lock); } else if (!igrab(VFS_I(ip))) { /* If the VFS inode is being torn down, pause and try again. */ - error = EAGAIN; XFS_STATS_INC(xs_ig_frecycle); goto out_error; - } else { - /* we've got a live one */ - read_unlock(&pag->pag_ici_lock); + } else if (xfs_iflags_test(ip, XFS_INEW)) { + /* + * We are racing with another cache hit that is + * currently recycling this inode out of the XFS_IRECLAIMABLE + * state. Wait for the initialisation to complete before + * continuing. + */ + wait_on_inode(VFS_I(ip)); } if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { error = ENOENT; - goto out; + iput(VFS_I(ip)); + goto out_error; } + /* We've got a live one. */ + read_unlock(&pag->pag_ici_lock); + if (lock_flags != 0) xfs_ilock(ip, lock_flags); @@ -122,7 +135,6 @@ xfs_iget_cache_hit( out_error: read_unlock(&pag->pag_ici_lock); -out: return error; } -- cgit v1.2.3-70-g09d2 From 91b777125175077fb74025608dba87f100586c62 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 31 Oct 2008 15:50:04 +1100 Subject: CRED: Wrap task credential accesses in the XFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). 
In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn --- fs/xfs/linux-2.6/xfs_cred.h | 2 +- fs/xfs/linux-2.6/xfs_ioctl.c | 2 +- fs/xfs/xfs_acl.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h index 98da2199bc2..e279d00779f 100644 --- a/fs/xfs/linux-2.6/xfs_cred.h +++ b/fs/xfs/linux-2.6/xfs_cred.h @@ -24,7 +24,7 @@ * Credentials */ typedef struct cred { - /* EMPTY */ + /* EMPTY */ } cred_t; #endif /* __XFS_CRED_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index d25694e8cd6..f1bd6c36e6f 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -1006,7 +1006,7 @@ xfs_ioctl_setattr( * to the file owner ID, except in cases where the * CAP_FSETID capability is applicable. */ - if (current->fsuid != ip->i_d.di_uid && !capable(CAP_FOWNER)) { + if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) { code = XFS_ERROR(EPERM); goto error_return; } diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 8b3d1bdeb44..a8cdd73999a 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -366,7 +366,7 @@ xfs_acl_allow_set( return ENOTDIR; if (vp->i_sb->s_flags & MS_RDONLY) return EROFS; - if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) + if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER)) return EPERM; return 0; } @@ -413,13 +413,13 @@ xfs_acl_access( switch (fap->acl_entry[i].ae_tag) { case ACL_USER_OBJ: seen_userobj = 1; - if (fuid != current->fsuid) + if (fuid != current_fsuid()) continue; matched.ae_tag = ACL_USER_OBJ; matched.ae_perm = allows; break; case ACL_USER: - if (fap->acl_entry[i].ae_id != current->fsuid) + if (fap->acl_entry[i].ae_id != current_fsuid()) continue; matched.ae_tag = ACL_USER; matched.ae_perm = allows; -- cgit v1.2.3-70-g09d2 From be859405487324ed548f1ba11dc949b8230ab991 Mon Sep 17 00:00:00 2001 From: Harvey Harrison Date: Fri, 31 Oct 2008 00:56:28 -0700 Subject: fs: replace NIPQUAD() Using NIPQUAD() with NIPQUAD_FMT, %d.%d.%d.%d or %u.%u.%u.%u can be replaced with %pI4 Signed-off-by: Harvey Harrison Signed-off-by: David S. 
Miller --- fs/afs/proc.c | 4 ++-- fs/afs/server.c | 9 ++++----- fs/cifs/cifs_spnego.c | 3 +-- fs/cifs/connect.c | 4 ++-- fs/lockd/host.c | 6 +++--- fs/lockd/mon.c | 2 +- fs/nfs/nfsroot.c | 6 +++--- fs/nfs/super.c | 3 +-- fs/nfsd/nfs4state.c | 4 ++-- fs/nfsd/nfsctl.c | 2 +- fs/ocfs2/cluster/netdebug.c | 8 ++++---- fs/ocfs2/cluster/nodemanager.c | 2 +- fs/ocfs2/cluster/tcp.c | 29 ++++++++++++++--------------- 13 files changed, 39 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 9f7d1ae7026..7578c1ab9e0 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -646,7 +646,7 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) } /* display one cell per line on subsequent lines */ - seq_printf(m, "%u.%u.%u.%u\n", NIPQUAD(addr->s_addr)); + seq_printf(m, "%pI4\n", &addr->s_addr); return 0; } @@ -737,7 +737,7 @@ static int afs_proc_cell_servers_show(struct seq_file *m, void *v) } /* display one cell per line on subsequent lines */ - sprintf(ipaddr, "%u.%u.%u.%u", NIPQUAD(server->addr)); + sprintf(ipaddr, "%pI4", &server->addr); seq_printf(m, "%3d %-15.15s %5d\n", atomic_read(&server->usage), ipaddr, server->fs_state); diff --git a/fs/afs/server.c b/fs/afs/server.c index 28f2451419e..f4909951667 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -105,7 +105,7 @@ struct afs_server *afs_lookup_server(struct afs_cell *cell, { struct afs_server *server, *candidate; - _enter("%p,"NIPQUAD_FMT, cell, NIPQUAD(addr->s_addr)); + _enter("%p,%pI4", cell, &addr->s_addr); /* quick scan of the list to see if we already have the server */ read_lock(&cell->servers_lock); @@ -168,9 +168,8 @@ found_server: server_in_two_cells: write_unlock(&cell->servers_lock); kfree(candidate); - printk(KERN_NOTICE "kAFS:" - " Server "NIPQUAD_FMT" appears to be in two cells\n", - NIPQUAD(*addr)); + printk(KERN_NOTICE "kAFS: Server %pI4 appears to be in two cells\n", + addr); _leave(" = -EEXIST"); return ERR_PTR(-EEXIST); } @@ -184,7 +183,7 @@ struct afs_server *afs_find_server(const struct in_addr *_addr) struct rb_node *p; struct in_addr addr = *_addr; - _enter(NIPQUAD_FMT, NIPQUAD(addr.s_addr)); + _enter("%pI4", &addr.s_addr); read_lock(&afs_servers_lock); diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 0e186cd20ad..359144c510b 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -121,8 +121,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) /* add the server address */ if (server->addr.sockAddr.sin_family == AF_INET) - sprintf(dp, "ip4=" NIPQUAD_FMT, - NIPQUAD(server->addr.sockAddr.sin_addr)); + sprintf(dp, "ip4=%pI4", &server->addr.sockAddr.sin_addr); else if (server->addr.sockAddr.sin_family == AF_INET6) sprintf(dp, "ip6=%pi6", &server->addr.sockAddr6.sin6_addr); else diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 71b7661e226..cd54fee3f05 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2058,8 +2058,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, rc = -ENOMEM; else { pSesInfo->server = srvTcp; - sprintf(pSesInfo->serverName, "%u.%u.%u.%u", - NIPQUAD(sin_server.sin_addr.s_addr)); + sprintf(pSesInfo->serverName, "%pI4", + &sin_server.sin_addr.s_addr); } if (!rc) { diff --git a/fs/lockd/host.c b/fs/lockd/host.c index c8ab7d70390..c48c2638077 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -115,12 +115,12 @@ static void nlm_display_address(const struct sockaddr *sap, snprintf(buf, len, "unspecified"); break; case AF_INET: - snprintf(buf, len, NIPQUAD_FMT, NIPQUAD(sin->sin_addr.s_addr)); + 
snprintf(buf, len, "%pI4", &sin->sin_addr.s_addr); break; case AF_INET6: if (ipv6_addr_v4mapped(&sin6->sin6_addr)) - snprintf(buf, len, NIPQUAD_FMT, - NIPQUAD(sin6->sin6_addr.s6_addr32[3])); + snprintf(buf, len, "%pI4", + &sin6->sin6_addr.s6_addr32[3]); else snprintf(buf, len, "%pI6", &sin6->sin6_addr); break; diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 4e7e958e8f6..ffd3461f75e 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -179,7 +179,7 @@ static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp) if (!nsm_use_hostnames) { snprintf(buffer, XDR_ADDRBUF_LEN, - NIPQUAD_FMT, NIPQUAD(argp->addr)); + "%pI4", &argp->addr); name = buffer; } diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 8478fc25dae..d74d16ce0d4 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -329,7 +329,7 @@ static int __init root_nfs_addr(void) } snprintf(nfs_data.hostname, sizeof(nfs_data.hostname), - "%u.%u.%u.%u", NIPQUAD(servaddr)); + "%pI4", &servaddr); return 0; } @@ -421,8 +421,8 @@ static int __init root_nfs_getport(int program, int version, int proto) { struct sockaddr_in sin; - printk(KERN_NOTICE "Looking up port of RPC %d/%d on %u.%u.%u.%u\n", - program, version, NIPQUAD(servaddr)); + printk(KERN_NOTICE "Looking up port of RPC %d/%d on %pI4\n", + program, version, &servaddr); set_sockaddr(&sin, servaddr, 0); return rpcb_getport_sync(&sin, program, version, proto); } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index eb391d8d70b..bb0313ac9e1 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -462,8 +462,7 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, switch (sap->sa_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)sap; - seq_printf(m, ",mountaddr=" NIPQUAD_FMT, - NIPQUAD(sin->sin_addr.s_addr)); + seq_printf(m, ",mountaddr=%pI4", &sin->sin_addr.s_addr); break; } case AF_INET6: { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b0bebc552a1..16efb8f8e06 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -719,8 +719,8 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = nfserr_clid_inuse; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred) || conf->cl_addr != sin->sin_addr.s_addr) { - dprintk("NFSD: setclientid: string in use by client" - "at %u.%u.%u.%u\n", NIPQUAD(conf->cl_addr)); + dprintk("NFSD: setclientid: string in use by clientat %pI4\n", + &conf->cl_addr); goto out; } } diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index e3f9783fdcf..77d7b8c531a 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -330,7 +330,7 @@ static ssize_t failover_unlock_ip(struct file *file, char *buf, size_t size) return -EINVAL; /* get ipv4 address */ - if (sscanf(fo_path, NIPQUAD_FMT "%c", &b1, &b2, &b3, &b4, &c) != 4) + if (sscanf(fo_path, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4) return -EINVAL; if (b1 > 255 || b2 > 255 || b3 > 255 || b4 > 255) return -EINVAL; diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 52276c02f71..f8424874fa0 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -304,8 +304,8 @@ static int sc_seq_show(struct seq_file *seq, void *v) * use of it here generates a warning with -Wbitwise */ seq_printf(seq, "%p:\n" " krefs: %d\n" - " sock: %u.%u.%u.%u:%u -> " - "%u.%u.%u.%u:%u\n" + " sock: %pI4:%u -> " + "%pI4:%u\n" " remote node: %s\n" " page off: %zu\n" " handshake ok: %u\n" @@ -319,8 +319,8 @@ static int sc_seq_show(struct seq_file *seq, void *v) " func type: %u\n", sc, 
atomic_read(&sc->sc_kref.refcount), - NIPQUAD(saddr), inet ? ntohs(sport) : 0, - NIPQUAD(daddr), inet ? ntohs(dport) : 0, + &saddr, inet ? ntohs(sport) : 0, + &daddr, inet ? ntohs(dport) : 0, sc->sc_node->nd_name, sc->sc_page_off, sc->sc_handshake_ok, diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c index 816a3f61330..70e8fa9e253 100644 --- a/fs/ocfs2/cluster/nodemanager.c +++ b/fs/ocfs2/cluster/nodemanager.c @@ -250,7 +250,7 @@ static ssize_t o2nm_node_ipv4_port_write(struct o2nm_node *node, static ssize_t o2nm_node_ipv4_address_read(struct o2nm_node *node, char *page) { - return sprintf(page, "%u.%u.%u.%u\n", NIPQUAD(node->nd_ipv4_address)); + return sprintf(page, "%pI4\n", &node->nd_ipv4_address); } static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node, diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2bcf706d9dd..9fbe849f634 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1597,8 +1597,8 @@ static void o2net_start_connect(struct work_struct *work) ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr, sizeof(myaddr)); if (ret) { - mlog(ML_ERROR, "bind failed with %d at address %u.%u.%u.%u\n", - ret, NIPQUAD(mynode->nd_ipv4_address)); + mlog(ML_ERROR, "bind failed with %d at address %pI4\n", + ret, &mynode->nd_ipv4_address); goto out; } @@ -1790,17 +1790,16 @@ static int o2net_accept_one(struct socket *sock) node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); if (node == NULL) { - mlog(ML_NOTICE, "attempt to connect from unknown node at " - "%u.%u.%u.%u:%d\n", NIPQUAD(sin.sin_addr.s_addr), - ntohs(sin.sin_port)); + mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", + &sin.sin_addr.s_addr, ntohs(sin.sin_port)); ret = -EINVAL; goto out; } if (o2nm_this_node() > node->nd_num) { mlog(ML_NOTICE, "unexpected connect attempted from a lower " - "numbered node '%s' at " "%u.%u.%u.%u:%d with num %u\n", - node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + "numbered node '%s' at " "%pI4:%d with num %u\n", + node->nd_name, &sin.sin_addr.s_addr, ntohs(sin.sin_port), node->nd_num); ret = -EINVAL; goto out; @@ -1810,8 +1809,8 @@ static int o2net_accept_one(struct socket *sock) * and tries to connect before we see their heartbeat */ if (!o2hb_check_node_heartbeating_from_callback(node->nd_num)) { mlog(ML_CONN, "attempt to connect from node '%s' at " - "%u.%u.%u.%u:%d but it isn't heartbeating\n", - node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + "%pI4:%d but it isn't heartbeating\n", + node->nd_name, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); ret = -EINVAL; goto out; @@ -1827,8 +1826,8 @@ static int o2net_accept_one(struct socket *sock) spin_unlock(&nn->nn_lock); if (ret) { mlog(ML_NOTICE, "attempt to connect from node '%s' at " - "%u.%u.%u.%u:%d but it already has an open connection\n", - node->nd_name, NIPQUAD(sin.sin_addr.s_addr), + "%pI4:%d but it already has an open connection\n", + node->nd_name, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); goto out; } @@ -1924,15 +1923,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) sock->sk->sk_reuse = 1; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); if (ret < 0) { - mlog(ML_ERROR, "unable to bind socket at %u.%u.%u.%u:%u, " - "ret=%d\n", NIPQUAD(addr), ntohs(port), ret); + mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " + "ret=%d\n", &addr, ntohs(port), ret); goto out; } ret = sock->ops->listen(sock, 64); if (ret < 0) { - mlog(ML_ERROR, "unable to listen on %u.%u.%u.%u:%u, ret=%d\n", - NIPQUAD(addr), ntohs(port), ret); + 
mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", + &addr, ntohs(port), ret); } out: -- cgit v1.2.3-70-g09d2 From 644c3567d16b7e53cf52ae98c4150d601c9eacfe Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 10 Nov 2008 16:50:24 +1100 Subject: [XFS] handle memory allocation failures during log initialisation When there is no memory left in the system, xfs_buf_get_noaddr() can fail. If this happens at mount time during xlog_alloc_log() we fail to catch the error and oops. Catch the error from xfs_buf_get_noaddr(), and allow other memory allocations to fail and catch those errors too. Report the error to the console and fail the mount with ENOMEM. Tested by manually injecting errors into xfs_buf_get_noaddr() and xlog_alloc_log(). Version 2: o remove unnecessary casts of the returned pointer from kmem_zalloc() SGI-PV: 987246 Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_log.c | 39 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 51840170b16..92c20a8d9e6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -563,6 +563,11 @@ xfs_log_mount( } mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); + if (!mp->m_log) { + cmn_err(CE_WARN, "XFS: Log allocation failed: No memory!"); + error = ENOMEM; + goto out; + } /* * Initialize the AIL now we have a log. @@ -601,6 +606,7 @@ xfs_log_mount( return 0; error: xfs_log_unmount_dealloc(mp); +out: return error; } /* xfs_log_mount */ @@ -1217,7 +1223,9 @@ xlog_alloc_log(xfs_mount_t *mp, int i; int iclogsize; - log = (xlog_t *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP); + log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL); + if (!log) + return NULL; log->l_mp = mp; log->l_targ = log_target; @@ -1249,6 +1257,8 @@ xlog_alloc_log(xfs_mount_t *mp, xlog_get_iclog_buffer_size(mp, log); bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp); + if (!bp) + goto out_free_log; XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); @@ -1275,13 +1285,17 @@ xlog_alloc_log(xfs_mount_t *mp, iclogsize = log->l_iclog_size; ASSERT(log->l_iclog_size >= 4096); for (i=0; i < log->l_iclog_bufs; i++) { - *iclogp = (xlog_in_core_t *) - kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); + *iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL); + if (!*iclogp) + goto out_free_iclog; + iclog = *iclogp; iclog->ic_prev = prev_iclog; prev_iclog = iclog; bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp); + if (!bp) + goto out_free_iclog; if (!XFS_BUF_CPSEMA(bp)) ASSERT(0); XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); @@ -1323,6 +1337,25 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ return log; + +out_free_iclog: + for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { + prev_iclog = iclog->ic_next; + if (iclog->ic_bp) { + sv_destroy(&iclog->ic_force_wait); + sv_destroy(&iclog->ic_write_wait); + xfs_buf_free(iclog->ic_bp); + xlog_trace_iclog_dealloc(iclog); + } + kmem_free(iclog); + } + spinlock_destroy(&log->l_icloglock); + spinlock_destroy(&log->l_grant_lock); + xlog_trace_loggrant_dealloc(log); + xfs_buf_free(log->l_xbuf); +out_free_log: + kmem_free(log); + return NULL; } /* xlog_alloc_log */ -- cgit v1.2.3-70-g09d2 From d44dab8d1cde8aeba1faf44a7654f90800feb7fc Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Mon, 10 Nov 2008 17:06:05 +1100 Subject: fs: 
xfs needs inode_wait to be exported Since wait_on_inode() references it. Signed-off-by: Stephen Rothwell Reviewed-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index f84ba338faf..098a2443196 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1334,6 +1334,7 @@ int inode_wait(void *word) schedule(); return 0; } +EXPORT_SYMBOL(inode_wait); /* * If we try to find an inode in the inode hash while it is being -- cgit v1.2.3-70-g09d2 From cb4f0d1d4229f609f43c68acec69c7618ed72397 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 10 Nov 2008 17:11:18 +1100 Subject: [XFS] fix uninitialised variable bug in dquot release gcc on ARM warns about an using an uninitialised variable in xfs_qm_dqrele_all_inodes(). This is a real bug, but gcc on x86_64 is not reporting this warning so it went unnoticed. Fix the bug by bring the inode radix tree walk code up to date with xfs_sync_inodes_ag(). SGI-PV: 987246 Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/quota/xfs_qm_syscalls.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 9ff28e6c5b8..2c57a6eaff3 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -1036,9 +1036,6 @@ xfs_qm_dqrele_inodes_ag( int nr_found; do { - boolean_t inode_refed; - struct inode *inode; - /* * use a gang lookup to find the next inode in the tree * as the tree is sparse and a gang lookup walks to find @@ -1053,27 +1050,37 @@ xfs_qm_dqrele_inodes_ag( break; } - /* update the index for the next lookup */ + /* + * Update the index for the next lookup. Catch overflows + * into the next AG range which can occur if we have inodes + * in the last block of the AG and we are currently + * pointing to the last inode. + */ first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); + if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) { + read_unlock(&pag->pag_ici_lock); + break; + } - /* skip quota inodes and those in reclaim */ - inode = VFS_I(ip); - if (!inode || ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { + /* skip quota inodes */ + if (ip == XFS_QI_UQIP(mp) || ip == XFS_QI_GQIP(mp)) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); read_unlock(&pag->pag_ici_lock); continue; } - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) { - inode = igrab(inode); - read_unlock(&pag->pag_ici_lock); - if (!inode) - continue; - inode_refed = B_TRUE; - xfs_ilock(ip, XFS_ILOCK_EXCL); - } else { + + /* + * If we can't get a reference on the inode, it must be + * in reclaim. Leave it for the reclaim code to flush. 
+ */ + if (!igrab(VFS_I(ip))) { read_unlock(&pag->pag_ici_lock); + continue; } + read_unlock(&pag->pag_ici_lock); + + xfs_ilock(ip, XFS_ILOCK_EXCL); if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; @@ -1083,9 +1090,8 @@ xfs_qm_dqrele_inodes_ag( xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } - xfs_iunlock(ip, XFS_ILOCK_EXCL); - if (inode_refed) - IRELE(ip); + xfs_iput(ip, XFS_ILOCK_EXCL); + } while (nr_found); } -- cgit v1.2.3-70-g09d2 From 6307091fe69ae74747298bdcaf43119ad67bda3a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 10 Nov 2008 17:13:23 +1100 Subject: [XFS] Avoid using inodes that haven't been completely initialised The radix tree walks in xfs_sync_inodes_ag and xfs_qm_dqrele_all_inodes() can find inodes that are still undergoing initialisation. Avoid them by checking for the the XFS_INEW() flag once we have a reference on the inode. This flag is cleared once the inode is properly initialised. SGI-PV: 987246 Signed-off-by: Dave Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_iops.c | 1 - fs/xfs/linux-2.6/xfs_sync.c | 5 +++-- fs/xfs/quota/xfs_qm_syscalls.c | 6 ++++++ 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index f78bc221576..69c98cf3233 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -780,7 +780,6 @@ xfs_setup_inode( inode->i_ino = ip->i_ino; inode->i_state = I_NEW|I_LOCK; inode_add_to_lists(ip->i_mount->m_super, inode); - ASSERT(atomic_read(&inode->i_count) == 1); inode->i_mode = ip->i_d.di_mode; inode->i_nlink = ip->i_d.di_nlink; diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index fb5cca3df84..d12d31b86fa 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -117,8 +117,9 @@ xfs_sync_inodes_ag( } read_unlock(&pag->pag_ici_lock); - /* bad inodes are dealt with elsewhere */ - if (is_bad_inode(inode)) { + /* avoid new or bad inodes */ + if (is_bad_inode(inode) || + xfs_iflags_test(ip, XFS_INEW)) { IRELE(ip); continue; } diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 2c57a6eaff3..68139b38aed 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -1080,6 +1080,12 @@ xfs_qm_dqrele_inodes_ag( } read_unlock(&pag->pag_ici_lock); + /* avoid new inodes though we shouldn't find any here */ + if (xfs_iflags_test(ip, XFS_INEW)) { + IRELE(ip); + continue; + } + xfs_ilock(ip, XFS_ILOCK_EXCL); if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); -- cgit v1.2.3-70-g09d2 From ebeb0406f153db51ab2d4771faf2342bd6ca14dd Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 12 Nov 2008 07:48:00 +0900 Subject: fat: drop negative dentry on rename() path Drop the negative dentry on rename() path, in order to make sure to use the case sensitive name which is specified by user if this is for creation. For it, this uses newly added LOOKUP_RENAME_TARGET like LOOKUP_CREATE. Signed-off-by: OGAWA Hirofumi --- fs/fat/namei_vfat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index bf326d4356a..8ae32e37673 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -78,7 +78,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) * for creation. 
*/ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { - if (nd->flags & LOOKUP_CREATE) + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; } -- cgit v1.2.3-70-g09d2 From 985eafcc5480b0d98419b96869f2560abb2764c7 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Wed, 12 Nov 2008 07:48:01 +0900 Subject: fat: fix duplicate addition of ->llseek handler Signed-off-by: OGAWA Hirofumi --- fs/fat/dir.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 67e05835709..3a7f603b698 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -841,7 +841,6 @@ const struct file_operations fat_dir_operations = { .compat_ioctl = fat_compat_dir_ioctl, #endif .fsync = file_fsync, - .llseek = generic_file_llseek, }; static int fat_get_short_entry(struct inode *dir, loff_t *pos, -- cgit v1.2.3-70-g09d2 From 5a6bb10393eb9a1985e97af12f0cb2906bcbf1af Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 12 Nov 2008 07:48:01 +0900 Subject: fat: make sure to set d_ops in fat_get_parent fat_get_parent needs to setup the dentry operations, otherwise we might lose them when the NFS server needs to reconnect out of cache inodes. Signed-off-by: Christoph Hellwig Signed-off-by: OGAWA Hirofumi --- fs/fat/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/fat/inode.c b/fs/fat/inode.c index bdd8fb7be2c..37a8af159a1 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -749,6 +749,8 @@ static struct dentry *fat_get_parent(struct dentry *child) brelse(bh); parent = d_obtain_alias(inode); + if (!IS_ERR(parent)) + parent->d_op = sb->s_root->d_op; out: unlock_super(sb); -- cgit v1.2.3-70-g09d2 From f8b9d53a31dca2c1185232c5fe2731d99cc963c8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:44 +1100 Subject: CRED: Wrap task credential accesses in 9P2000 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
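Since the same mechanical conversion recurs across these credential-wrapping
patches, a minimal before/after sketch of the pattern (the helper function is
hypothetical; current_fsuid()/current_fsgid() are the accessors this series
relies on, declared by the credential patches elsewhere in the series):

	#include <linux/fs.h>	/* struct inode */

	/* Hypothetical helper stamping ownership on a newly created inode. */
	static void example_stamp_owner(struct inode *inode)
	{
		/* Before: reaching into task_struct directly.
		 *	inode->i_uid = current->fsuid;
		 *	inode->i_gid = current->fsgid;
		 */

		/* After: go through the accessors, so the storage behind
		 * them can be moved out of task_struct when copy-on-write
		 * credentials land later in the series. */
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
	}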
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Reviewed-by: Eric Van Hensbergen Cc: Ron Minnich Cc: Latchesar Ionkov Cc: v9fs-developer@lists.sourceforge.net Signed-off-by: James Morris --- fs/9p/fid.c | 2 +- fs/9p/vfs_inode.c | 4 ++-- fs/9p/vfs_super.c | 4 ++-- net/9p/client.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 3031e3233dd..a43e4ab7c6c 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -120,7 +120,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) switch (access) { case V9FS_ACCESS_SINGLE: case V9FS_ACCESS_USER: - uid = current->fsuid; + uid = current_fsuid(); any = 0; break; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 8314d3f43b7..8fddfe86a0d 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -215,8 +215,8 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode) inode = new_inode(sb); if (inode) { inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_rdev = 0; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index d6cb1a0ca72..93212e40221 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -113,8 +113,8 @@ static int v9fs_get_sb(struct file_system_type *fs_type, int flags, struct v9fs_session_info *v9ses = NULL; struct p9_wstat *st = NULL; int mode = S_IRWXUGO | S_ISVTX; - uid_t uid = current->fsuid; - gid_t gid = current->fsgid; + uid_t uid = current_fsuid(); + gid_t gid = current_fsgid(); struct p9_fid *fid; int retval = 0; diff --git a/net/9p/client.c b/net/9p/client.c index 67717f69412..c3fb6f8bfa9 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -628,7 +628,7 @@ static struct p9_fid *p9_fid_create(struct p9_client *clnt) memset(&fid->qid, 0, sizeof(struct p9_qid)); fid->mode = -1; fid->rdir_fpos = 0; - fid->uid = current->fsuid; + fid->uid = current_fsuid(); fid->clnt = clnt; fid->aux = NULL; -- cgit v1.2.3-70-g09d2 From 215599815d8977a4338fbd27d6fe2c1721200197 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:45 +1100 Subject: CRED: Wrap task credential accesses in the AFFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Roman Zippel Signed-off-by: James Morris --- fs/affs/inode.c | 4 ++-- fs/affs/super.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a13b334a391..415d9c67ac1 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -293,8 +293,8 @@ affs_new_inode(struct inode *dir) mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_ino = block; inode->i_nlink = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; diff --git a/fs/affs/super.c b/fs/affs/super.c index 8989c93193e..a19d64b582a 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -163,8 +163,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s /* Fill in defaults */ - *uid = current->uid; - *gid = current->gid; + *uid = current_uid(); + *gid = current_gid(); *reserved = 2; *root = -1; *blocksize = -1; -- cgit v1.2.3-70-g09d2 From 73c646e4afb658e601a46bf1b57925450307a119 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:45 +1100 Subject: CRED: Wrap task credential accesses in the autofs filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: H. Peter Anvin Cc: autofs@linux.kernel.org Signed-off-by: James Morris --- fs/autofs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index b70eea1e8c5..c773680d5c6 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c @@ -76,8 +76,8 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, substring_t args[MAX_OPT_ARGS]; int option; - *uid = current->uid; - *gid = current->gid; + *uid = current_uid(); + *gid = current_gid(); *pgrp = task_pgrp_nr(current); *minproto = *maxproto = AUTOFS_PROTO_VERSION; -- cgit v1.2.3-70-g09d2 From 0eb790e3a2d872192af8ceff2cabff8594c56440 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:46 +1100 Subject: CRED: Wrap task credential accesses in the autofs4 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
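The autofs4 hunks that follow convert the wait-queue entry describing a pending mount request. The triggering task's identity is snapshotted once, when the wait is created, using the new accessors; a condensed sketch with the field names visible in the diff (this entry is presumably what the automount daemon is later told about):

	wq->uid  = current_uid();	/* capture the caller's identity once */
	wq->gid  = current_gid();
	wq->pid  = current->pid;
	wq->tgid = current->tgid;
	wq->status = -EINTR;		/* status returned if interrupted */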
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Ian Kent Cc: autofs@linux.kernel.org Signed-off-by: James Morris --- fs/autofs4/inode.c | 4 ++-- fs/autofs4/waitq.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index c7e65bb30ba..7b19802cfef 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -235,8 +235,8 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, substring_t args[MAX_OPT_ARGS]; int option; - *uid = current->uid; - *gid = current->gid; + *uid = current_uid(); + *gid = current_gid(); *pgrp = task_pgrp_nr(current); *minproto = AUTOFS_MIN_PROTO_VERSION; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 4b67c2a2d77..e02cc8ae5eb 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -391,8 +391,8 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, memcpy(&wq->name, &qstr, sizeof(struct qstr)); wq->dev = autofs4_get_dev(sbi); wq->ino = autofs4_get_ino(sbi); - wq->uid = current->uid; - wq->gid = current->gid; + wq->uid = current_uid(); + wq->gid = current_gid(); wq->pid = current->pid; wq->tgid = current->tgid; wq->status = -EINTR; /* Status return if interrupted */ -- cgit v1.2.3-70-g09d2 From 1109b07b7dcb938de7a0d65efc1b4739dc4e9787 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:47 +1100 Subject: CRED: Wrap task credential accesses in the BFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Tigran A. Aivazian Signed-off-by: James Morris --- fs/bfs/dir.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index daae463068e..4dd1b623f93 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -106,8 +106,8 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, int mode, } set_bit(ino, info->si_imap); info->si_freei--; - inode->i_uid = current->fsuid; - inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; inode->i_op = &bfs_file_inops; -- cgit v1.2.3-70-g09d2 From a001e5b558f25eb1e588522d73ac949b643b7a37 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:47 +1100 Subject: CRED: Wrap task credential accesses in the CIFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
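The cifs hunks below repeat one pattern across create, mknod and mkdir: ownership is only stamped with the caller's credentials when the superblock was mounted with the option that sets CIFS_MOUNT_SET_UID; otherwise the NO_CHANGE_64 sentinel tells the server call to leave ownership alone. Condensed sketch (names as they appear in the diff; the setgid-directory branch only exists on the create and mkdir paths):

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
		args.uid = (__u64) current_fsuid();
		if (inode->i_mode & S_ISGID)
			args.gid = (__u64) inode->i_gid;	/* inherit from setgid dir */
		else
			args.gid = (__u64) current_fsgid();
	} else {
		args.uid = NO_CHANGE_64;	/* do not override ownership */
		args.gid = NO_CHANGE_64;
	}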
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Steve French Cc: linux-cifs-client@lists.samba.org Signed-off-by: James Morris --- fs/cifs/cifs_fs_sb.h | 2 +- fs/cifs/cifsproto.h | 2 +- fs/cifs/connect.c | 4 ++-- fs/cifs/dir.c | 12 ++++++------ fs/cifs/inode.c | 8 ++++---- fs/cifs/ioctl.c | 2 +- fs/cifs/misc.c | 4 ++-- 7 files changed, 17 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 877c85409f1..1e7b87497f2 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -19,7 +19,7 @@ #define _CIFS_FS_SB_H #define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */ -#define CIFS_MOUNT_SET_UID 2 /* set current->euid in create etc. */ +#define CIFS_MOUNT_SET_UID 2 /* set current's euid in create etc. */ #define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */ #define CIFS_MOUNT_DIRECT_IO 8 /* do not write nor read through page cache */ #define CIFS_MOUNT_NO_XATTR 0x10 /* if set - disable xattr support */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6f21ecb85ce..9d8b978137a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -39,7 +39,7 @@ extern int smb_send(struct socket *, struct smb_hdr *, unsigned int /* length */ , struct sockaddr *, bool); extern unsigned int _GetXid(void); extern void _FreeXid(unsigned int); -#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current->fsuid)); +#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid())); #define FreeXid(curr_xid) {_FreeXid(curr_xid); cFYI(1,("CIFS VFS: leaving %s (xid = %d) rc = %d",__func__,curr_xid,(int)rc));} extern char *build_path_from_dentry(struct dentry *); extern char *build_wildcard_path_from_dentry(struct dentry *direntry); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e9f9248cb3f..e5dca9a48d9 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -833,8 +833,8 @@ cifs_parse_mount_options(char *options, const char *devname, /* null target name indicates to use *SMBSERVR default called name if we end up sending RFC1001 session initialize */ vol->target_rfc1001_name[0] = 0; - vol->linux_uid = current->uid; /* current->euid instead? */ - vol->linux_gid = current->gid; + vol->linux_uid = current_uid(); /* use current_euid() instead? 
*/ + vol->linux_gid = current_gid(); vol->dir_mode = S_IRWXUGO; /* 2767 perms indicate mandatory locking support */ vol->file_mode = (S_IRWXUGO | S_ISGID) & (~S_IXGRP); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index e962e75e6f7..2f02c52db66 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -235,11 +235,11 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { - args.uid = (__u64) current->fsuid; + args.uid = (__u64) current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = (__u64) inode->i_gid; else - args.gid = (__u64) current->fsgid; + args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; @@ -271,13 +271,13 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, if ((oplock & CIFS_CREATE_ACTION) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) { - newinode->i_uid = current->fsuid; + newinode->i_uid = current_fsuid(); if (inode->i_mode & S_ISGID) newinode->i_gid = inode->i_gid; else newinode->i_gid = - current->fsgid; + current_fsgid(); } } } @@ -375,8 +375,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, .device = device_number, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { - args.uid = (__u64) current->fsuid; - args.gid = (__u64) current->fsgid; + args.uid = (__u64) current_fsuid(); + args.gid = (__u64) current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ff8c68de4a9..8b7305e73d7 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1143,11 +1143,11 @@ mkdir_get_info: .device = 0, }; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { - args.uid = (__u64)current->fsuid; + args.uid = (__u64)current_fsuid(); if (inode->i_mode & S_ISGID) args.gid = (__u64)inode->i_gid; else - args.gid = (__u64)current->fsgid; + args.gid = (__u64)current_fsgid(); } else { args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; @@ -1184,13 +1184,13 @@ mkdir_get_info: if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) { direntry->d_inode->i_uid = - current->fsuid; + current_fsuid(); if (inode->i_mode & S_ISGID) direntry->d_inode->i_gid = inode->i_gid; else direntry->d_inode->i_gid = - current->fsgid; + current_fsgid(); } } } diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 0088a5b5256..f94650683a0 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -65,7 +65,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) switch (command) { case CIFS_IOC_CHECKUMOUNT: cFYI(1, ("User unmount attempted")); - if (cifs_sb->mnt_uid == current->uid) + if (cifs_sb->mnt_uid == current_uid()) rc = 0; else { rc = -EACCES; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 88786ba02d2..ec36410a912 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -347,13 +347,13 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , /* BB Add support for establishing new tCon and SMB Session */ /* with userid/password pairs found on the smb session */ /* for other target tcp/ip addresses BB */ - if (current->fsuid != treeCon->ses->linux_uid) { + if (current_fsuid() != treeCon->ses->linux_uid) { cFYI(1, ("Multiuser mode and UID " "did not match tcon uid")); read_lock(&GlobalSMBSeslock); list_for_each(temp_item, &GlobalSMBSessionList) { ses = list_entry(temp_item, struct cifsSesInfo, cifsSessionList); - if (ses->linux_uid == current->fsuid) { + if (ses->linux_uid == current_fsuid()) { if (ses->server == treeCon->ses->server) { cFYI(1, ("found matching uid 
substitute right smb_uid")); buffer->Uid = ses->Suid; -- cgit v1.2.3-70-g09d2 From 97b7702cd1bdb8e89bde6c70aa983e7b82a52ec6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:48 +1100 Subject: CRED: Wrap task credential accesses in the Coda filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Jan Harkes Cc: codalist@coda.cs.cmu.edu Signed-off-by: James Morris --- fs/coda/cache.c | 6 +++--- fs/coda/upcall.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 8a2370341c7..a5bf5771a22 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -32,8 +32,8 @@ void coda_cache_enter(struct inode *inode, int mask) struct coda_inode_info *cii = ITOC(inode); cii->c_cached_epoch = atomic_read(&permission_epoch); - if (cii->c_uid != current->fsuid) { - cii->c_uid = current->fsuid; + if (cii->c_uid != current_fsuid()) { + cii->c_uid = current_fsuid(); cii->c_cached_perm = mask; } else cii->c_cached_perm |= mask; @@ -60,7 +60,7 @@ int coda_cache_check(struct inode *inode, int mask) int hit; hit = (mask & cii->c_cached_perm) == mask && - cii->c_uid == current->fsuid && + cii->c_uid == current_fsuid() && cii->c_cached_epoch == atomic_read(&permission_epoch); return hit; diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index ce432bca95d..c274d949179 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -52,7 +52,7 @@ static void *alloc_upcall(int opcode, int size) inp->ih.opcode = opcode; inp->ih.pid = current->pid; inp->ih.pgid = task_pgrp_nr(current); - inp->ih.uid = current->fsuid; + inp->ih.uid = current_fsuid(); return (void*)inp; } -- cgit v1.2.3-70-g09d2 From ec4c2aacd16672febca053109eb9ddf672108ca1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:49 +1100 Subject: CRED: Wrap task credential accesses in the devpts filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- fs/devpts/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 4a714f6c1be..5d61b7c06e1 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -222,8 +222,8 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) return -ENOMEM; inode->i_ino = number+2; - inode->i_uid = config.setuid ? config.uid : current->fsuid; - inode->i_gid = config.setgid ? config.gid : current->fsgid; + inode->i_uid = config.setuid ? config.uid : current_fsuid(); + inode->i_gid = config.setgid ? 
config.gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|config.mode, device); inode->i_private = tty; -- cgit v1.2.3-70-g09d2 From 4eea03539d9a8e3f5056aed690efde1f75535e7b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:49 +1100 Subject: CRED: Wrap task credential accesses in the eCryptFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Mike Halcrow Cc: Phillip Hellewell Cc: ecryptfs-devel@lists.sourceforge.net Signed-off-by: James Morris --- fs/ecryptfs/messaging.c | 18 ++++++++++-------- fs/ecryptfs/miscdev.c | 20 ++++++++++++-------- 2 files changed, 22 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index c6983978a31..e0b0a4e28b9 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -361,6 +361,7 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, size_t msg_size; struct nsproxy *nsproxy; struct user_namespace *current_user_ns; + uid_t ctx_euid; int rc; if (msg->index >= ecryptfs_message_buf_len) { @@ -385,8 +386,8 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, goto wake_up; } current_user_ns = nsproxy->user_ns; - rc = ecryptfs_find_daemon_by_euid(&daemon, msg_ctx->task->euid, - current_user_ns); + ctx_euid = task_euid(msg_ctx->task); + rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, current_user_ns); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { @@ -394,14 +395,14 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, printk(KERN_WARNING "%s: User [%d] received a " "message response from process [0x%p] but does " "not have a registered daemon\n", __func__, - msg_ctx->task->euid, pid); + ctx_euid, pid); goto wake_up; } - if (msg_ctx->task->euid != euid) { + if (ctx_euid != euid) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user " "[%d]; expected message from user [%d]\n", __func__, - euid, msg_ctx->task->euid); + euid, ctx_euid); goto unlock; } if (current_user_ns != user_ns) { @@ -415,7 +416,7 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, rc = -EBADMSG; printk(KERN_ERR "%s: User [%d] sent a message response " "from an unrecognized process [0x%p]\n", - __func__, msg_ctx->task->euid, pid); + __func__, ctx_euid, pid); goto unlock; } if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { @@ -464,14 +465,15 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) { struct ecryptfs_daemon *daemon; + uid_t euid = current_euid(); int rc; - rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current->nsproxy->user_ns); if (rc || !daemon) { rc = -ENOTCONN; printk(KERN_ERR "%s: User [%d] does not have a daemon " - "registered\n", __func__, current->euid); + "registered\n", __func__, euid); goto out; } mutex_lock(&ecryptfs_msg_ctx_lists_mux); diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index b484792a099..047ac609695 100644 --- 
a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -42,11 +42,12 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt) { struct ecryptfs_daemon *daemon; unsigned int mask = 0; + uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current->nsproxy->user_ns); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); @@ -83,6 +84,7 @@ static int ecryptfs_miscdev_open(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = NULL; + uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); @@ -93,10 +95,10 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) "count; rc = [%d]\n", __func__, rc); goto out_unlock_daemon_list; } - rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current->nsproxy->user_ns); if (rc || !daemon) { - rc = ecryptfs_spawn_daemon(&daemon, current->euid, + rc = ecryptfs_spawn_daemon(&daemon, euid, current->nsproxy->user_ns, task_pid(current)); if (rc) { @@ -147,10 +149,11 @@ static int ecryptfs_miscdev_release(struct inode *inode, struct file *file) { struct ecryptfs_daemon *daemon = NULL; + uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current->nsproxy->user_ns); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); @@ -246,11 +249,12 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, char packet_length[3]; size_t i; size_t total_length; + uid_t euid = current_euid(); int rc; mutex_lock(&ecryptfs_daemon_hash_mux); /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current->nsproxy->user_ns); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); @@ -290,7 +294,7 @@ check_list: * message from the queue; try again */ goto check_list; } - BUG_ON(current->euid != daemon->euid); + BUG_ON(euid != daemon->euid); BUG_ON(current->nsproxy->user_ns != daemon->user_ns); BUG_ON(task_pid(current) != daemon->pid); msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue, @@ -414,6 +418,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, size_t packet_size, packet_size_length, i; ssize_t sz = 0; char *data; + uid_t euid = current_euid(); int rc; if (count == 0) @@ -463,8 +468,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, goto out_free; } rc = ecryptfs_miscdev_response(&data[i], packet_size, - current->euid, - current->nsproxy->user_ns, + euid, current->nsproxy->user_ns, task_pid(current), seq); if (rc) printk(KERN_WARNING "%s: Failed to deliver miscdev " -- cgit v1.2.3-70-g09d2 From a8dd4d67bdf9c48c5ebfa366592fde7e2d318b4f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:50 +1100 Subject: CRED: Wrap task credential accesses in the Ext2 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
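The ext2 allocation hunks that follow (and the near-identical ext3 and ext4 ones later in the series) apply the usual ownership rule for freshly allocated inodes. Condensed sketch, as it reads after conversion:

	inode->i_uid = current_fsuid();
	if (test_opt(sb, GRPID))
		inode->i_gid = dir->i_gid;	/* "grpid"/bsdgroups behaviour */
	else if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;	/* inherit from setgid directory */
		if (S_ISDIR(mode))
			mode |= S_ISGID;	/* new subdirectories stay setgid */
	} else
		inode->i_gid = current_fsgid();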
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: linux-ext4@vger.kernel.org Signed-off-by: James Morris --- fs/ext2/balloc.c | 2 +- fs/ext2/ialloc.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c index 6dac7ba2d22..4a29d637608 100644 --- a/fs/ext2/balloc.c +++ b/fs/ext2/balloc.c @@ -1193,7 +1193,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi) free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && - sbi->s_resuid != current->fsuid && + sbi->s_resuid != current_fsuid() && (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { return 0; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index f5974134676..8d0add62587 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -550,7 +550,7 @@ got: sb->s_dirt = 1; mark_buffer_dirty(bh2); - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (test_opt (sb, GRPID)) inode->i_gid = dir->i_gid; else if (dir->i_mode & S_ISGID) { @@ -558,7 +558,7 @@ got: if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); inode->i_mode = mode; inode->i_ino = ino; -- cgit v1.2.3-70-g09d2 From 6a2f90e9fae0824e8b6b123f1ea7d9fff9079ef3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:51 +1100 Subject: CRED: Wrap task credential accesses in the Ext3 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
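Besides inode ownership, the ext3 balloc hunk below converts the reserved-blocks policy check; in sketch form, allocation is refused once the filesystem is down to the root-reserved blocks unless the caller is the configured reserved-blocks owner, belongs to the reserved group, or holds CAP_SYS_RESOURCE:

	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
	    sbi->s_resuid != current_fsuid() &&
	    (sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
		return 0;	/* treat the reserve as unavailable */
	return 1;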
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Stephen Tweedie Cc: Andrew Morton Cc: adilger@sun.com Cc: linux-ext4@vger.kernel.org Signed-off-by: James Morris --- fs/ext3/balloc.c | 2 +- fs/ext3/ialloc.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index f5b57a2ca35..0dbf1c04847 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -1422,7 +1422,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi) free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && - sbi->s_resuid != current->fsuid && + sbi->s_resuid != current_fsuid() && (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { return 0; } diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 47b678d73e7..490bd0ed789 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -539,7 +539,7 @@ got: percpu_counter_inc(&sbi->s_dirs_counter); sb->s_dirt = 1; - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (test_opt (sb, GRPID)) inode->i_gid = dir->i_gid; else if (dir->i_mode & S_ISGID) { @@ -547,7 +547,7 @@ got: if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); inode->i_mode = mode; inode->i_ino = ino; -- cgit v1.2.3-70-g09d2 From 4c9c544e4987efe6643ad5692af47a0abfd4e0d1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:51 +1100 Subject: CRED: Wrap task credential accesses in the Ext4 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Stephen Tweedie Cc: Andrew Morton Cc: adilger@sun.com Cc: linux-ext4@vger.kernel.org Signed-off-by: James Morris --- fs/ext4/balloc.c | 2 +- fs/ext4/ialloc.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d2003cdc36a..a932b9a2924 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -624,7 +624,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) return 1; /* Hm, nope. Are (enough) root reserved blocks available? 
*/ - if (sbi->s_resuid == current->fsuid || + if (sbi->s_resuid == current_fsuid() || ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || capable(CAP_SYS_RESOURCE)) { if (free_blocks >= (nblocks + dirty_blocks)) diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index fe34d74cfb1..c8ea50ed023 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -785,7 +785,7 @@ got: spin_unlock(sb_bgl_lock(sbi, flex_group)); } - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (test_opt(sb, GRPID)) inode->i_gid = dir->i_gid; else if (dir->i_mode & S_ISGID) { @@ -793,7 +793,7 @@ got: if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); inode->i_mode = mode; inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb); -- cgit v1.2.3-70-g09d2 From f0ce7ee3a8ec1e80b6f460983ef1f26e603026f5 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:52 +1100 Subject: CRED: Wrap task credential accesses in the FAT filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Acked-by: OGAWA Hirofumi Signed-off-by: James Morris --- fs/fat/file.c | 2 +- fs/fat/inode.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/fat/file.c b/fs/fat/file.c index ddde37025ca..81e20328834 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -302,7 +302,7 @@ static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode) { mode_t allow_utime = sbi->options.allow_utime; - if (current->fsuid != inode->i_uid) { + if (current_fsuid() != inode->i_uid) { if (in_group_p(inode->i_gid)) allow_utime >>= 3; if (allow_utime & MAY_WRITE) diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 2b2eec1283b..cf621acd9e9 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -921,8 +921,8 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, opts->isvfat = is_vfat; - opts->fs_uid = current->uid; - opts->fs_gid = current->gid; + opts->fs_uid = current_uid(); + opts->fs_gid = current_gid(); opts->fs_fmask = opts->fs_dmask = current->fs->umask; opts->allow_utime = -1; opts->codepage = fat_default_codepage; -- cgit v1.2.3-70-g09d2 From 2186a71cbcddda946dd3cfccd5285e210ec3af10 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:53 +1100 Subject: CRED: Wrap task credential accesses in the FUSE filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
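In the fuse hunk that follows, the caller's identity is captured into the request header when a request context is initialised, so each request carries its own snapshot of the requester's credentials in the header that is handed to the userspace filesystem. Sketch of the converted helper, names from the diff:

static void fuse_req_init_context(struct fuse_req *req)
{
	/* snapshot the requester's credentials into the request header */
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}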
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Miklos Szeredi Cc: fuse-devel@lists.sourceforge.net Signed-off-by: James Morris --- fs/fuse/dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index b72361479be..fba571648a8 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -87,8 +87,8 @@ static void __fuse_put_request(struct fuse_req *req) static void fuse_req_init_context(struct fuse_req *req) { - req->in.h.uid = current->fsuid; - req->in.h.gid = current->fsgid; + req->in.h.uid = current_fsuid(); + req->in.h.gid = current_fsgid(); req->in.h.pid = current->pid; } -- cgit v1.2.3-70-g09d2 From 3de7be335593859bc84a122b1338ec358bf70e9b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:53 +1100 Subject: CRED: Wrap task credential accesses in the GFS2 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Steven Whitehouse Cc: cluster-devel@redhat.com Signed-off-by: James Morris --- fs/gfs2/inode.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 7cee695fa44..d57616840e8 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -705,18 +705,18 @@ static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode, (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) { if (S_ISDIR(*mode)) *mode |= S_ISUID; - else if (dip->i_inode.i_uid != current->fsuid) + else if (dip->i_inode.i_uid != current_fsuid()) *mode &= ~07111; *uid = dip->i_inode.i_uid; } else - *uid = current->fsuid; + *uid = current_fsuid(); if (dip->i_inode.i_mode & S_ISGID) { if (S_ISDIR(*mode)) *mode |= S_ISGID; *gid = dip->i_inode.i_gid; } else - *gid = current->fsgid; + *gid = current_fsgid(); } static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation) @@ -1124,8 +1124,8 @@ int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, return -EPERM; if ((dip->i_inode.i_mode & S_ISVTX) && - dip->i_inode.i_uid != current->fsuid && - ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER)) + dip->i_inode.i_uid != current_fsuid() && + ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER)) return -EPERM; if (IS_APPEND(&dip->i_inode)) -- cgit v1.2.3-70-g09d2 From 94c9a5ee4cc7fb7eee2a9f9e47f7ccea6b1869ff Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:54 +1100 Subject: CRED: Wrap task credential accesses in the HFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Roman Zippel Signed-off-by: James Morris --- fs/hfs/inode.c | 4 ++-- fs/hfs/super.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index c69b7ac75bf..9435dda8f1e 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -155,8 +155,8 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, int mode) hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name); inode->i_ino = HFS_SB(sb)->next_id++; inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_nlink = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; HFS_I(inode)->flags = 0; diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 3c7c7637719..c8b5acf4b0b 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -210,8 +210,8 @@ static int parse_options(char *options, struct hfs_sb_info *hsb) int tmp, token; /* initialize the sb with defaults */ - hsb->s_uid = current->uid; - hsb->s_gid = current->gid; + hsb->s_uid = current_uid(); + hsb->s_gid = current_gid(); hsb->s_file_umask = 0133; hsb->s_dir_umask = 0022; hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f); /* == '????' */ -- cgit v1.2.3-70-g09d2 From 4ac8489a7294dcf92127825d74f2d981143e825d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:54 +1100 Subject: CRED: Wrap task credential accesses in the HFSplus filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Roman Zippel Signed-off-by: James Morris --- fs/hfsplus/inode.c | 4 ++-- fs/hfsplus/options.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index b207f0e6fc2..f105ee9e1cc 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -296,8 +296,8 @@ struct inode *hfsplus_new_inode(struct super_block *sb, int mode) inode->i_ino = HFSPLUS_SB(sb).next_cnid++; inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_nlink = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list); diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 9699c56d323..bab7f8d1bdf 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c @@ -49,8 +49,8 @@ void hfsplus_fill_defaults(struct hfsplus_sb_info *opts) opts->creator = HFSPLUS_DEF_CR_TYPE; opts->type = HFSPLUS_DEF_CR_TYPE; opts->umask = current->fs->umask; - opts->uid = current->uid; - opts->gid = current->gid; + opts->uid = current_uid(); + opts->gid = current_gid(); opts->part = -1; opts->session = -1; } -- cgit v1.2.3-70-g09d2 From de395b8ac25da56893d83cd5da67cf927dfa7e4d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:55 +1100 Subject: CRED: Wrap task credential accesses in the HPFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Mikulas Patocka Signed-off-by: James Morris --- fs/hpfs/namei.c | 24 ++++++++++++------------ fs/hpfs/super.c | 4 ++-- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 10783f3d265..b649232dde9 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -92,11 +92,11 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inc_nlink(dir); insert_inode_hash(result); - if (result->i_uid != current->fsuid || - result->i_gid != current->fsgid || + if (result->i_uid != current_fsuid() || + result->i_gid != current_fsgid() || result->i_mode != (mode | S_IFDIR)) { - result->i_uid = current->fsuid; - result->i_gid = current->fsgid; + result->i_uid = current_fsuid(); + result->i_gid = current_fsgid(); result->i_mode = mode | S_IFDIR; hpfs_write_inode_nolock(result); } @@ -184,11 +184,11 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc insert_inode_hash(result); - if (result->i_uid != current->fsuid || - result->i_gid != current->fsgid || + if (result->i_uid != current_fsuid() || + result->i_gid != current_fsgid() || result->i_mode != (mode | S_IFREG)) { - result->i_uid = current->fsuid; - result->i_gid = current->fsgid; + result->i_uid = current_fsuid(); + result->i_gid = current_fsgid(); result->i_mode = mode | S_IFREG; hpfs_write_inode_nolock(result); } @@ -247,8 +247,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t result->i_mtime.tv_nsec = 0; result->i_atime.tv_nsec = 0; hpfs_i(result)->i_ea_size = 0; - result->i_uid = current->fsuid; - result->i_gid = current->fsgid; + result->i_uid = current_fsuid(); + result->i_gid = current_fsgid(); result->i_nlink = 1; result->i_size = 0; result->i_blocks = 1; @@ -325,8 +325,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy result->i_atime.tv_nsec = 0; hpfs_i(result)->i_ea_size = 0; result->i_mode = S_IFLNK | 0777; - result->i_uid = current->fsuid; - result->i_gid = current->fsgid; + result->i_uid = current_fsuid(); + result->i_gid = current_fsgid(); result->i_blocks = 1; result->i_nlink = 1; result->i_size = strlen(symlink); diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 29ad461d568..0d049b8919c 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -475,8 +475,8 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) init_MUTEX(&sbi->hpfs_creation_de); - uid = current->uid; - gid = current->gid; + uid = current_uid(); + gid = current_gid(); umask = current->fs->umask; lowercase = 0; conv = CONV_BINARY; -- cgit v1.2.3-70-g09d2 From 77c70de15a74801f427ee5fb85ddfdde48ed84f2 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:56 +1100 Subject: CRED: Wrap task credential accesses in the hugetlbfs filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: William Irwin Signed-off-by: James Morris --- fs/hugetlbfs/inode.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 61edc701b0e..08ad76c79b4 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -551,9 +551,9 @@ static int hugetlbfs_mknod(struct inode *dir, if (S_ISDIR(mode)) mode |= S_ISGID; } else { - gid = current->fsgid; + gid = current_fsgid(); } - inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, gid, mode, dev); + inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, mode, dev); if (inode) { dir->i_ctime = dir->i_mtime = CURRENT_TIME; d_instantiate(dentry, inode); @@ -586,9 +586,9 @@ static int hugetlbfs_symlink(struct inode *dir, if (dir->i_mode & S_ISGID) gid = dir->i_gid; else - gid = current->fsgid; + gid = current_fsgid(); - inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, + inode = hugetlbfs_get_inode(dir->i_sb, current_fsuid(), gid, S_IFLNK|S_IRWXUGO, 0); if (inode) { int l = strlen(symname)+1; @@ -854,8 +854,8 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent) config.nr_blocks = -1; /* No limit on size by default */ config.nr_inodes = -1; /* No limit on number of inodes by default */ - config.uid = current->fsuid; - config.gid = current->fsgid; + config.uid = current_fsuid(); + config.gid = current_fsgid(); config.mode = 0755; config.hstate = &default_hstate; ret = hugetlbfs_parse_options(data, &config); @@ -970,8 +970,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size) goto out_shm_unlock; error = -ENOSPC; - inode = hugetlbfs_get_inode(root->d_sb, current->fsuid, - current->fsgid, S_IFREG | S_IRWXUGO, 0); + inode = hugetlbfs_get_inode(root->d_sb, current_fsuid(), + current_fsgid(), S_IFREG | S_IRWXUGO, 0); if (!inode) goto out_dentry; -- cgit v1.2.3-70-g09d2 From 8f659adfa246a1e57d0cd3f109accfe071f2d0f1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:56 +1100 Subject: CRED: Wrap task credential accesses in the JFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Acked-by: Dave Kleikamp Cc: jfs-discussion@lists.sourceforge.net Signed-off-by: James Morris --- fs/jfs/jfs_inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index ed6574bee51..70022fd1c53 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -93,13 +93,13 @@ struct inode *ialloc(struct inode *parent, umode_t mode) return ERR_PTR(rc); } - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (parent->i_mode & S_ISGID) { inode->i_gid = parent->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); /* * New inodes need to save sane values on disk when -- cgit v1.2.3-70-g09d2 From 922c030f260df9d256fecea3164210e7cb2ce407 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:57 +1100 Subject: CRED: Wrap task credential accesses in the Minix filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- fs/minix/bitmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c index 703cc35e04b..3aebe322271 100644 --- a/fs/minix/bitmap.c +++ b/fs/minix/bitmap.c @@ -262,8 +262,8 @@ struct inode * minix_new_inode(const struct inode * dir, int * error) iput(inode); return NULL; } - inode->i_uid = current->fsuid; - inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = (dir->i_mode & S_ISGID) ? dir->i_gid : current_fsgid(); inode->i_ino = j; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; -- cgit v1.2.3-70-g09d2 From 48937024c65db88abafd0fb14021db31f09cd4ec Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:58 +1100 Subject: CRED: Wrap task credential accesses in the NCPFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
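The ncpfs ioctl conversion below is larger than most because, besides switching accessors, it hoists the credential read into a local at the top of __ncp_ioctl() and compares that single snapshot against the mount owner at every permission check. The resulting shape, condensed:

	uid_t uid = current_uid();	/* read the caller's uid once */

	if (file_permission(filp, MAY_WRITE) != 0
	    && uid != server->m.mounted_uid)
		return -EACCES;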
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Acked-by: Petr Vandrovec Signed-off-by: James Morris --- fs/ncpfs/ioctl.c | 91 +++++++++++++++++++++++++++----------------------------- 1 file changed, 44 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 3a97c95e1ca..6d04e050c74 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -40,10 +40,10 @@ ncp_get_fs_info(struct ncp_server * server, struct file *file, struct inode *inode = file->f_path.dentry->d_inode; struct ncp_fs_info info; - if ((file_permission(file, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) { + if (file_permission(file, MAY_WRITE) != 0 + && current_uid() != server->m.mounted_uid) return -EACCES; - } + if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; @@ -70,10 +70,10 @@ ncp_get_fs_info_v2(struct ncp_server * server, struct file *file, struct inode *inode = file->f_path.dentry->d_inode; struct ncp_fs_info_v2 info2; - if ((file_permission(file, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) { + if (file_permission(file, MAY_WRITE) != 0 + && current_uid() != server->m.mounted_uid) return -EACCES; - } + if (copy_from_user(&info2, arg, sizeof(info2))) return -EFAULT; @@ -141,10 +141,10 @@ ncp_get_compat_fs_info_v2(struct ncp_server * server, struct file *file, struct inode *inode = file->f_path.dentry->d_inode; struct compat_ncp_fs_info_v2 info2; - if ((file_permission(file, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) { + if (file_permission(file, MAY_WRITE) != 0 + && current_uid() != server->m.mounted_uid) return -EACCES; - } + if (copy_from_user(&info2, arg, sizeof(info2))) return -EFAULT; @@ -270,16 +270,17 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, struct ncp_ioctl_request request; char* bouncebuffer; void __user *argp = (void __user *)arg; + uid_t uid = current_uid(); switch (cmd) { #ifdef CONFIG_COMPAT case NCP_IOC_NCPREQUEST_32: #endif case NCP_IOC_NCPREQUEST: - if ((file_permission(filp, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) { + if (file_permission(filp, MAY_WRITE) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + #ifdef CONFIG_COMPAT if (cmd == NCP_IOC_NCPREQUEST_32) { struct compat_ncp_ioctl_request request32; @@ -356,10 +357,10 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, case NCP_IOC_GETMOUNTUID16: case NCP_IOC_GETMOUNTUID32: case NCP_IOC_GETMOUNTUID64: - if ((file_permission(filp, MAY_READ) != 0) - && (current->uid != server->m.mounted_uid)) { + if (file_permission(filp, MAY_READ) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + if (cmd == NCP_IOC_GETMOUNTUID16) { u16 uid; SET_UID(uid, server->m.mounted_uid); @@ -380,11 +381,10 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, { struct ncp_setroot_ioctl sr; - if ((file_permission(filp, MAY_READ) != 0) - && (current->uid != server->m.mounted_uid)) - { + if (file_permission(filp, MAY_READ) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + if (server->m.mounted_vol[0]) { struct dentry* dentry = inode->i_sb->s_root; @@ -408,6 +408,7 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, return -EFAULT; return 0; } + case NCP_IOC_SETROOT: { struct ncp_setroot_ioctl sr; @@ -455,11 +456,10 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, #ifdef CONFIG_NCPFS_PACKET_SIGNING case NCP_IOC_SIGN_INIT: - if ((file_permission(filp, MAY_WRITE) != 0) - && (current->uid != 
server->m.mounted_uid)) - { + if (file_permission(filp, MAY_WRITE) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + if (argp) { if (server->sign_wanted) { @@ -478,24 +478,22 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, return 0; case NCP_IOC_SIGN_WANTED: - if ((file_permission(filp, MAY_READ) != 0) - && (current->uid != server->m.mounted_uid)) - { + if (file_permission(filp, MAY_READ) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } if (put_user(server->sign_wanted, (int __user *)argp)) return -EFAULT; return 0; + case NCP_IOC_SET_SIGN_WANTED: { int newstate; - if ((file_permission(filp, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) - { + if (file_permission(filp, MAY_WRITE) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + /* get only low 8 bits... */ if (get_user(newstate, (unsigned char __user *)argp)) return -EFAULT; @@ -512,11 +510,10 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp, #ifdef CONFIG_NCPFS_IOCTL_LOCKING case NCP_IOC_LOCKUNLOCK: - if ((file_permission(filp, MAY_WRITE) != 0) - && (current->uid != server->m.mounted_uid)) - { + if (file_permission(filp, MAY_WRITE) != 0 + && uid != server->m.mounted_uid) return -EACCES; - } + { struct ncp_lock_ioctl rqdata; @@ -585,9 +582,8 @@ outrel: #ifdef CONFIG_COMPAT case NCP_IOC_GETOBJECTNAME_32: - if (current->uid != server->m.mounted_uid) { + if (uid != server->m.mounted_uid) return -EACCES; - } { struct compat_ncp_objectname_ioctl user; size_t outl; @@ -609,10 +605,10 @@ outrel: return 0; } #endif + case NCP_IOC_GETOBJECTNAME: - if (current->uid != server->m.mounted_uid) { + if (uid != server->m.mounted_uid) return -EACCES; - } { struct ncp_objectname_ioctl user; size_t outl; @@ -633,13 +629,13 @@ outrel: return -EFAULT; return 0; } + #ifdef CONFIG_COMPAT case NCP_IOC_SETOBJECTNAME_32: #endif case NCP_IOC_SETOBJECTNAME: - if (current->uid != server->m.mounted_uid) { + if (uid != server->m.mounted_uid) return -EACCES; - } { struct ncp_objectname_ioctl user; void* newname; @@ -691,13 +687,13 @@ outrel: kfree(oldname); return 0; } + #ifdef CONFIG_COMPAT case NCP_IOC_GETPRIVATEDATA_32: #endif case NCP_IOC_GETPRIVATEDATA: - if (current->uid != server->m.mounted_uid) { + if (uid != server->m.mounted_uid) return -EACCES; - } { struct ncp_privatedata_ioctl user; size_t outl; @@ -736,13 +732,13 @@ outrel: return 0; } + #ifdef CONFIG_COMPAT case NCP_IOC_SETPRIVATEDATA_32: #endif case NCP_IOC_SETPRIVATEDATA: - if (current->uid != server->m.mounted_uid) { + if (uid != server->m.mounted_uid) return -EACCES; - } { struct ncp_privatedata_ioctl user; void* new; @@ -794,9 +790,10 @@ outrel: #endif /* CONFIG_NCPFS_NLS */ case NCP_IOC_SETDENTRYTTL: - if ((file_permission(filp, MAY_WRITE) != 0) && - (current->uid != server->m.mounted_uid)) + if (file_permission(filp, MAY_WRITE) != 0 && + uid != server->m.mounted_uid) return -EACCES; + { u_int32_t user; -- cgit v1.2.3-70-g09d2 From 5cc0a84076e172c18fb927781f44c6e47d6c5a06 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:58 +1100 Subject: CRED: Wrap task credential accesses in the NFS daemon Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
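Two of the nfsd hunks that follow are worth calling out: nfsd_create_setattr() drops client-supplied owner attributes unless the request is running with fsuid 0, and nfsd_permission() lets the file owner bypass the ordinary mode check when NFSD_MAY_OWNER_OVERRIDE is requested (which the in-tree comment ties to NFSv3 behaviour). Condensed:

	/* only an fsuid-0 request may carry explicit owner/group attributes */
	if (current_fsuid() != 0)
		iap->ia_valid &= ~(ATTR_UID | ATTR_GID);

	/* the owner may override the mode bits when the caller asks for it */
	if ((acc & NFSD_MAY_OWNER_OVERRIDE) && inode->i_uid == current_fsuid())
		return 0;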
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: J. Bruce Fields Cc: Neil Brown Cc: linux-nfs@vger.kernel.org Signed-off-by: James Morris --- fs/nfsd/vfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 848a03e83a4..890d9a68c85 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1169,7 +1169,7 @@ nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp, * send along the gid on create when it tries to implement * setgid directories via NFS: */ - if (current->fsuid != 0) + if (current_fsuid() != 0) iap->ia_valid &= ~(ATTR_UID|ATTR_GID); if (iap->ia_valid) return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0); @@ -2004,7 +2004,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, IS_APPEND(inode)? " append" : "", __mnt_is_readonly(exp->ex_path.mnt)? " ro" : ""); dprintk(" owner %d/%d user %d/%d\n", - inode->i_uid, inode->i_gid, current->fsuid, current->fsgid); + inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid()); #endif /* Normally we reject any write/sattr etc access on a read-only file @@ -2047,7 +2047,7 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp, * with NFSv3. */ if ((acc & NFSD_MAY_OWNER_OVERRIDE) && - inode->i_uid == current->fsuid) + inode->i_uid == current_fsuid()) return 0; /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */ -- cgit v1.2.3-70-g09d2 From b19c2a3b839b9dfb3f258e8943dc3784ae20c7b0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:59 +1100 Subject: CRED: Wrap task credential accesses in the OCFS2 filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
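In the ocfs2 hunk below the credentials are written straight into the on-disk dinode, so they are converted to the on-disk little-endian layout at the same point; condensed:

	fe->i_uid = cpu_to_le32(current_fsuid());
	if (dir->i_mode & S_ISGID) {
		fe->i_gid = cpu_to_le32(dir->i_gid);
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		fe->i_gid = cpu_to_le32(current_fsgid());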
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Acked-by: Mark Fasheh Cc: Joel Becker Cc: ocfs2-devel@oss.oracle.com Signed-off-by: James Morris --- fs/ocfs2/dlm/dlmfs.c | 8 ++++---- fs/ocfs2/namei.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c index 533a789c3ef..3516d8a4166 100644 --- a/fs/ocfs2/dlm/dlmfs.c +++ b/fs/ocfs2/dlm/dlmfs.c @@ -339,8 +339,8 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) ip = DLMFS_I(inode); inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -365,8 +365,8 @@ static struct inode *dlmfs_get_inode(struct inode *parent, return NULL; inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 485a6aa0ad3..f95f3654ee6 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -421,13 +421,13 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, fe->i_blkno = cpu_to_le64(fe_blkno); fe->i_suballoc_bit = cpu_to_le16(suballoc_bit); fe->i_suballoc_slot = cpu_to_le16(inode_ac->ac_alloc_slot); - fe->i_uid = cpu_to_le32(current->fsuid); + fe->i_uid = cpu_to_le32(current_fsuid()); if (dir->i_mode & S_ISGID) { fe->i_gid = cpu_to_le32(dir->i_gid); if (S_ISDIR(mode)) mode |= S_ISGID; } else - fe->i_gid = cpu_to_le32(current->fsgid); + fe->i_gid = cpu_to_le32(current_fsgid()); fe->i_mode = cpu_to_le16(mode); if (S_ISCHR(mode) || S_ISBLK(mode)) fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev)); -- cgit v1.2.3-70-g09d2 From c222d53eb32ea0c9516261268a24c6f162423acd Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:38:59 +1100 Subject: CRED: Wrap task credential accesses in the OMFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Bob Copeland Cc: linux-karma-devel@lists.sourceforge.net Signed-off-by: James Morris --- fs/omfs/inode.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c index cbf047a847c..6afe57c84f8 100644 --- a/fs/omfs/inode.c +++ b/fs/omfs/inode.c @@ -37,8 +37,8 @@ struct inode *omfs_new_inode(struct inode *dir, int mode) inode->i_ino = new_block; inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_mapping->a_ops = &omfs_aops; @@ -420,8 +420,8 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_fs_info = sbi; - sbi->s_uid = current->uid; - sbi->s_gid = current->gid; + sbi->s_uid = current_uid(); + sbi->s_gid = current_gid(); sbi->s_dmask = sbi->s_fmask = current->fs->umask; if (!parse_options((char *) data, sbi)) -- cgit v1.2.3-70-g09d2 From 0785f4dad0f7173f3b7133e3c274fe130e8fdbc6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:00 +1100 Subject: CRED: Wrap task credential accesses in the RAMFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- fs/ramfs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index f031d1c925f..a83a3518ae3 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -55,8 +55,8 @@ struct inode *ramfs_get_inode(struct super_block *sb, int mode, dev_t dev) if (inode) { inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_mapping->a_ops = &ramfs_aops; inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; -- cgit v1.2.3-70-g09d2 From 414cb209eaca52b708debc014a8085b7fbb15f14 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:01 +1100 Subject: CRED: Wrap task credential accesses in the ReiserFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: reiserfs-devel@vger.kernel.org Signed-off-by: James Morris --- fs/reiserfs/namei.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index f89ebb943f3..4f322e5ed84 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -573,7 +573,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) /* the quota init calls have to know who to charge the quota to, so ** we have to set uid and gid here */ - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); inode->i_mode = mode; /* Make inode invalid - just in case we are going to drop it before * the initialization happens */ @@ -584,7 +584,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, int mode) if (S_ISDIR(mode)) inode->i_mode |= S_ISGID; } else { - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); } DQUOT_INIT(inode); return 0; -- cgit v1.2.3-70-g09d2 From e2950b178e8fa1cb0b122c212b6cd48e75e2b41f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:01 +1100 Subject: CRED: Wrap task credential accesses in the SMBFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Steven French Signed-off-by: James Morris --- fs/smbfs/dir.c | 4 ++-- fs/smbfs/inode.c | 2 +- fs/smbfs/proc.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 48da4fa6b7d..9e9bb0db4f6 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c @@ -667,8 +667,8 @@ smb_make_node(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) attr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID; attr.ia_mode = mode; - attr.ia_uid = current->euid; - attr.ia_gid = current->egid; + attr.ia_uid = current_euid(); + attr.ia_gid = current_egid(); if (!new_valid_dev(dev)) return -EINVAL; diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c index 3528f40ffb0..fc27fbfc539 100644 --- a/fs/smbfs/inode.c +++ b/fs/smbfs/inode.c @@ -586,7 +586,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) if (parse_options(mnt, raw_data)) goto out_bad_option; } - mnt->mounted_uid = current->uid; + mnt->mounted_uid = current_uid(); smb_setcodepage(server, &mnt->codepage); /* diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c index ee536e8a649..9468168b9af 100644 --- a/fs/smbfs/proc.c +++ b/fs/smbfs/proc.c @@ -864,7 +864,7 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt) goto out; error = -EACCES; - if (current->uid != server->mnt->mounted_uid && + if (current_uid() != server->mnt->mounted_uid && !capable(CAP_SYS_ADMIN)) goto out; -- cgit v1.2.3-70-g09d2 From fc7333deb741da8aafbda9ff905d3ff2c5e28a66 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:02 +1100 Subject: CRED: Wrap task credential accesses in the SYSV filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). 
Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Christoph Hellwig Signed-off-by: James Morris --- fs/sysv/ialloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c index 115ab0d6f4b..241e9765cfa 100644 --- a/fs/sysv/ialloc.c +++ b/fs/sysv/ialloc.c @@ -165,9 +165,9 @@ struct inode * sysv_new_inode(const struct inode * dir, mode_t mode) if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); inode->i_ino = fs16_to_cpu(sbi, ino); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; inode->i_blocks = 0; -- cgit v1.2.3-70-g09d2 From 26bf1946e69abf9528beda7adb4a783c439a5f7b Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:03 +1100 Subject: CRED: Wrap task credential accesses in the UBIFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Artem Bityutskiy Cc: Adrian Hunter Cc: linux-mtd@lists.infradead.org Signed-off-by: James Morris --- fs/ubifs/budget.c | 2 +- fs/ubifs/dir.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 1a4973e1066..4a18f084cc4 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -363,7 +363,7 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) */ static int can_use_rp(struct ubifs_info *c) { - if (current->fsuid == c->rp_uid || capable(CAP_SYS_RESOURCE) || + if (current_fsuid() == c->rp_uid || capable(CAP_SYS_RESOURCE) || (c->rp_gid != 0 && in_group_p(c->rp_gid))) return 1; return 0; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 526c01ec800..856189014a3 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -104,13 +104,13 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, */ inode->i_flags |= (S_NOCMTIME); - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); inode->i_mode = mode; inode->i_mtime = inode->i_atime = inode->i_ctime = ubifs_current_time(inode); -- cgit v1.2.3-70-g09d2 From 7706bb39adff18a48ff98f203ffcbb00878d8589 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:03 +1100 Subject: CRED: Wrap task credential accesses in the UDF filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
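The SYSV and UBIFS hunks above and the UDF hunks below all touch the same new-inode ownership idiom; condensed into one place it looks roughly like this (the helper name is invented for illustration and is not a function added by the series):

static void foofs_apply_owner(struct inode *inode, const struct inode *dir,
			      int *mode)
{
	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		/* setgid directory: the new inode inherits the parent's
		 * group, and a new directory inherits the setgid bit too */
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(*mode))
			*mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}
}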
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Acked-by: Jan Kara Signed-off-by: James Morris --- fs/udf/ialloc.c | 4 ++-- fs/udf/namei.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index a4f2b3ce45b..31fc84297dd 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -126,13 +126,13 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) } mutex_unlock(&sbi->s_alloc_mutex); inode->i_mode = mode; - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) mode |= S_ISGID; } else { - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); } iinfo->i_location.logicalBlockNum = block; diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 082409cd4b8..f84bfaa8d94 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -604,7 +604,7 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, goto out; iinfo = UDF_I(inode); - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); init_special_inode(inode, mode, rdev); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { -- cgit v1.2.3-70-g09d2 From a5f773a65928a10eb5e5534fe6da6b427ac5b646 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:04 +1100 Subject: CRED: Wrap task credential accesses in the UFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Evgeniy Dushistov Signed-off-by: James Morris --- fs/ufs/ialloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index ac181f6806a..6f5dcf00609 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c @@ -304,13 +304,13 @@ cg_found: inode->i_ino = cg * uspi->s_ipg + bit; inode->i_mode = mode; - inode->i_uid = current->fsuid; + inode->i_uid = current_fsuid(); if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; if (S_ISDIR(mode)) inode->i_mode |= S_ISGID; } else - inode->i_gid = current->fsgid; + inode->i_gid = current_fsgid(); inode->i_blocks = 0; inode->i_generation = 0; -- cgit v1.2.3-70-g09d2 From 82ab8deda7fef36f067ccdeacc3b3caefc970f89 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:04 +1100 Subject: CRED: Wrap task credential accesses in the XFS filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
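Most of the XFS hunks below convert an "owner or CAP_FOWNER" test; reduced to its core, the check is roughly the following (hypothetical helper, error convention simplified to a plain negative errno):

static int foofs_may_change_attrs(const struct inode *inode)
{
	/* Only the file owner, or a task with CAP_FOWNER, may proceed. */
	if (current_fsuid() != inode->i_uid && !capable(CAP_FOWNER))
		return -EPERM;
	return 0;
}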
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: xfs@oss.sgi.com Signed-off-by: James Morris --- fs/xfs/linux-2.6/xfs_cred.h | 2 +- fs/xfs/linux-2.6/xfs_ioctl.c | 2 +- fs/xfs/xfs_acl.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h index 652721ce0ea..293043a5573 100644 --- a/fs/xfs/linux-2.6/xfs_cred.h +++ b/fs/xfs/linux-2.6/xfs_cred.h @@ -24,7 +24,7 @@ * Credentials */ typedef struct cred { - /* EMPTY */ + /* EMPTY */ } cred_t; extern struct cred *sys_cred; diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index d3438c72dca..67c72aec97e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -1007,7 +1007,7 @@ xfs_ioctl_setattr( * to the file owner ID, except in cases where the * CAP_FSETID capability is applicable. */ - if (current->fsuid != ip->i_d.di_uid && !capable(CAP_FOWNER)) { + if (current_fsuid() != ip->i_d.di_uid && !capable(CAP_FOWNER)) { code = XFS_ERROR(EPERM); goto error_return; } diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index b2f639a1416..91d69338d3b 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -366,7 +366,7 @@ xfs_acl_allow_set( return ENOTDIR; if (vp->i_sb->s_flags & MS_RDONLY) return EROFS; - if (XFS_I(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) + if (XFS_I(vp)->i_d.di_uid != current_fsuid() && !capable(CAP_FOWNER)) return EPERM; return 0; } @@ -413,13 +413,13 @@ xfs_acl_access( switch (fap->acl_entry[i].ae_tag) { case ACL_USER_OBJ: seen_userobj = 1; - if (fuid != current->fsuid) + if (fuid != current_fsuid()) continue; matched.ae_tag = ACL_USER_OBJ; matched.ae_perm = allows; break; case ACL_USER: - if (fap->acl_entry[i].ae_id != current->fsuid) + if (fap->acl_entry[i].ae_id != current_fsuid()) continue; matched.ae_tag = ACL_USER; matched.ae_perm = allows; -- cgit v1.2.3-70-g09d2 From da9592edebceeba1b9301beafe80ec8b9c2db0ce Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:05 +1100 Subject: CRED: Wrap task credential accesses in the filesystem subsystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. 
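The wrappers themselves come from include/linux/cred.h; at this stage of the series they can be assumed to be thin macros over the existing task_struct fields, roughly as sketched here (an assumption shown for orientation, not the authoritative definitions):

/* Assumed interim shape of the accessors; the real definitions live in
 * include/linux/cred.h and change again once struct cred is split out
 * of task_struct later in the series. */
#define current_uid()	(current->uid)
#define current_gid()	(current->gid)
#define current_euid()	(current->euid)
#define current_egid()	(current->egid)
#define current_fsuid()	(current->fsuid)
#define current_fsgid()	(current->fsgid)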
Signed-off-by: David Howells Reviewed-by: James Morris Acked-by: Serge Hallyn Cc: Al Viro Signed-off-by: James Morris --- fs/anon_inodes.c | 4 ++-- fs/attr.c | 4 ++-- fs/binfmt_elf_fdpic.c | 8 ++++---- fs/dquot.c | 4 ++-- fs/exec.c | 18 +++++++++--------- fs/fcntl.c | 2 +- fs/inotify_user.c | 2 +- fs/ioprio.c | 4 ++-- fs/locks.c | 2 +- fs/namei.c | 10 ++++++---- fs/namespace.c | 2 +- fs/pipe.c | 4 ++-- fs/posix_acl.c | 4 ++-- fs/quota.c | 4 ++-- include/linux/fs.h | 2 +- 15 files changed, 38 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 3662dd44896..c16d9be1b01 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -154,8 +154,8 @@ static struct inode *anon_inode_mkinode(void) */ inode->i_state = I_DIRTY; inode->i_mode = S_IRUSR | S_IWUSR; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; return inode; } diff --git a/fs/attr.c b/fs/attr.c index 7a83819f6ba..f4360192a93 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -29,13 +29,13 @@ int inode_change_ok(struct inode *inode, struct iattr *attr) /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && - (current->fsuid != inode->i_uid || + (current_fsuid() != inode->i_uid || attr->ia_uid != inode->i_uid) && !capable(CAP_CHOWN)) goto error; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && - (current->fsuid != inode->i_uid || + (current_fsuid() != inode->i_uid || (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid)) && !capable(CAP_CHOWN)) goto error; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 5b5424cb339..488584c8751 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -623,10 +623,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr); - NEW_AUX_ENT(AT_UID, (elf_addr_t) current->uid); - NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->euid); - NEW_AUX_ENT(AT_GID, (elf_addr_t) current->gid); - NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->egid); + NEW_AUX_ENT(AT_UID, (elf_addr_t) current_uid()); + NEW_AUX_ENT(AT_EUID, (elf_addr_t) current_euid()); + NEW_AUX_ENT(AT_GID, (elf_addr_t) current_gid()); + NEW_AUX_ENT(AT_EGID, (elf_addr_t) current_egid()); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); NEW_AUX_ENT(AT_EXECFN, bprm->exec); diff --git a/fs/dquot.c b/fs/dquot.c index 5e95261005b..c237ccc8581 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -874,7 +874,7 @@ static inline int need_print_warning(struct dquot *dquot) switch (dquot->dq_type) { case USRQUOTA: - return current->fsuid == dquot->dq_id; + return current_fsuid() == dquot->dq_id; case GRPQUOTA: return in_group_p(dquot->dq_id); } @@ -981,7 +981,7 @@ static void send_warning(const struct dquot *dquot, const char warntype) MINOR(dquot->dq_sb->s_dev)); if (ret) goto attr_err_out; - ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current->user->uid); + ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); if (ret) goto attr_err_out; genlmsg_end(skb, msg_head); diff --git a/fs/exec.c b/fs/exec.c index 4e834f16d9d..604834f3b20 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -980,7 +980,7 @@ int flush_old_exec(struct linux_binprm * bprm) /* This is the point of no return */ current->sas_ss_sp = current->sas_ss_size = 0; - if (current->euid == current->uid && current->egid == current->gid) + if 
(current_euid() == current_uid() && current_egid() == current_gid()) set_dumpable(current->mm, 1); else set_dumpable(current->mm, suid_dumpable); @@ -1007,7 +1007,7 @@ int flush_old_exec(struct linux_binprm * bprm) */ current->mm->task_size = TASK_SIZE; - if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) { + if (bprm->e_uid != current_euid() || bprm->e_gid != current_egid()) { suid_keys(current); set_dumpable(current->mm, suid_dumpable); current->pdeath_signal = 0; @@ -1047,8 +1047,8 @@ int prepare_binprm(struct linux_binprm *bprm) if (bprm->file->f_op == NULL) return -EACCES; - bprm->e_uid = current->euid; - bprm->e_gid = current->egid; + bprm->e_uid = current_euid(); + bprm->e_gid = current_egid(); if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { /* Set-uid? */ @@ -1096,7 +1096,7 @@ void compute_creds(struct linux_binprm *bprm) { int unsafe; - if (bprm->e_uid != current->uid) { + if (bprm->e_uid != current_uid()) { suid_keys(current); current->pdeath_signal = 0; } @@ -1424,7 +1424,7 @@ static int format_corename(char *corename, long signr) /* uid */ case 'u': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current->uid); + "%d", current_uid()); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1432,7 +1432,7 @@ static int format_corename(char *corename, long signr) /* gid */ case 'g': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current->gid); + "%d", current_gid()); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1709,7 +1709,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) struct inode * inode; struct file * file; int retval = 0; - int fsuid = current->fsuid; + int fsuid = current_fsuid(); int flag = 0; int ispipe = 0; unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; @@ -1815,7 +1815,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) * Dont allow local users get cute and trick others to coredump * into their pre-created files: */ - if (inode->i_uid != current->fsuid) + if (inode->i_uid != current_fsuid()) goto close_fail; if (!file->f_op) goto close_fail; diff --git a/fs/fcntl.c b/fs/fcntl.c index ac4f7db9f13..bf049a805e5 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -211,7 +211,7 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, if (err) return err; - f_modown(filp, pid, type, current->uid, current->euid, force); + f_modown(filp, pid, type, current_uid(), current_euid(), force); return 0; } EXPORT_SYMBOL(__f_setown); diff --git a/fs/inotify_user.c b/fs/inotify_user.c index d367e9b9286..e2425bbd871 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -601,7 +601,7 @@ asmlinkage long sys_inotify_init1(int flags) goto out_put_fd; } - user = get_uid(current->user); + user = get_current_user(); if (unlikely(atomic_read(&user->inotify_devs) >= inotify_max_user_instances)) { ret = -EMFILE; diff --git a/fs/ioprio.c b/fs/ioprio.c index da3cc460d4d..68d2cd80711 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -32,8 +32,8 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) int err; struct io_context *ioc; - if (task->uid != current->euid && - task->uid != current->uid && !capable(CAP_SYS_NICE)) + if (task->uid != current_euid() && + task->uid != current_uid() && !capable(CAP_SYS_NICE)) return -EPERM; err = security_task_setioprio(task, ioprio); diff --git a/fs/locks.c b/fs/locks.c index 09062e3ff10..46a2e12f7d4 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1349,7 +1349,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) 
struct inode *inode = dentry->d_inode; int error, rdlease_count = 0, wrlease_count = 0; - if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE)) + if ((current_fsuid() != inode->i_uid) && !capable(CAP_LEASE)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; diff --git a/fs/namei.c b/fs/namei.c index 09ce58e49e7..42d7b760693 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -186,7 +186,7 @@ int generic_permission(struct inode *inode, int mask, mask &= MAY_READ | MAY_WRITE | MAY_EXEC; - if (current->fsuid == inode->i_uid) + if (current_fsuid() == inode->i_uid) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { @@ -441,7 +441,7 @@ static int exec_permission_lite(struct inode *inode) if (inode->i_op && inode->i_op->permission) return -EAGAIN; - if (current->fsuid == inode->i_uid) + if (current_fsuid() == inode->i_uid) mode >>= 6; else if (in_group_p(inode->i_gid)) mode >>= 3; @@ -1334,11 +1334,13 @@ static int user_path_parent(int dfd, const char __user *path, */ static inline int check_sticky(struct inode *dir, struct inode *inode) { + uid_t fsuid = current_fsuid(); + if (!(dir->i_mode & S_ISVTX)) return 0; - if (inode->i_uid == current->fsuid) + if (inode->i_uid == fsuid) return 0; - if (dir->i_uid == current->fsuid) + if (dir->i_uid == fsuid) return 0; return !capable(CAP_FOWNER); } diff --git a/fs/namespace.c b/fs/namespace.c index cce46702d33..d8bc2c4704a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1176,7 +1176,7 @@ static int mount_is_safe(struct path *path) if (S_ISLNK(path->dentry->d_inode->i_mode)) return -EPERM; if (path->dentry->d_inode->i_mode & S_ISVTX) { - if (current->uid != path->dentry->d_inode->i_uid) + if (current_uid() != path->dentry->d_inode->i_uid) return -EPERM; } if (inode_permission(path->dentry->d_inode, MAY_WRITE)) diff --git a/fs/pipe.c b/fs/pipe.c index 7aea8b89baa..aaf797bd57b 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -899,8 +899,8 @@ static struct inode * get_pipe_inode(void) */ inode->i_state = I_DIRTY; inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; return inode; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index aec931e0997..39df95a0ec2 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -217,11 +217,11 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want) switch(pa->e_tag) { case ACL_USER_OBJ: /* (May have been checked already) */ - if (inode->i_uid == current->fsuid) + if (inode->i_uid == current_fsuid()) goto check_perm; break; case ACL_USER: - if (pa->e_id == current->fsuid) + if (pa->e_id == current_fsuid()) goto mask; break; case ACL_GROUP_OBJ: diff --git a/fs/quota.c b/fs/quota.c index 7f4386ebc23..b7fe44e0161 100644 --- a/fs/quota.c +++ b/fs/quota.c @@ -79,7 +79,7 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid /* Check privileges */ if (cmd == Q_GETQUOTA) { - if (((type == USRQUOTA && current->euid != id) || + if (((type == USRQUOTA && current_euid() != id) || (type == GRPQUOTA && !in_egroup_p(id))) && !capable(CAP_SYS_ADMIN)) return -EPERM; @@ -130,7 +130,7 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i /* Check privileges */ if (cmd == Q_XGETQUOTA) { - if (((type == XQM_USRQUOTA && current->euid != id) || + if (((type == XQM_USRQUOTA && current_euid() != id) || (type == XQM_GRPQUOTA && !in_egroup_p(id))) && 
!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/include/linux/fs.h b/include/linux/fs.h index 0dcdd9458f4..b3d404aaabe 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1193,7 +1193,7 @@ enum { #define has_fs_excl() atomic_read(¤t->fs_excl) #define is_owner_or_cap(inode) \ - ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) + ((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER)) /* not quite ready to be deprecated, but... */ extern void lock_super(struct super_block *); -- cgit v1.2.3-70-g09d2 From 1cdcbec1a3372c0c49c59d292e708fd07b509f18 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:14 +1100 Subject: CRED: Neuter sys_capset() Take away the ability for sys_capset() to affect processes other than current. This means that current will not need to lock its own credentials when reading them against interference by other processes. This has effectively been the case for a while anyway, since: (1) Without LSM enabled, sys_capset() is disallowed. (2) With file-based capabilities, sys_capset() is neutered. Signed-off-by: David Howells Acked-by: Serge Hallyn Acked-by: Andrew G. Morgan Acked-by: James Morris Signed-off-by: James Morris --- fs/open.c | 12 +-- include/linux/security.h | 48 ++++------ kernel/capability.c | 227 +++++------------------------------------------ security/commoncap.c | 29 ++---- security/security.c | 18 ++-- security/selinux/hooks.c | 10 +-- 6 files changed, 61 insertions(+), 283 deletions(-) (limited to 'fs') diff --git a/fs/open.c b/fs/open.c index 83cdb9dee0c..500cc0c5476 100644 --- a/fs/open.c +++ b/fs/open.c @@ -441,17 +441,7 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) current->fsgid = current->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { - /* - * Clear the capabilities if we switch to a non-root user - */ -#ifndef CONFIG_SECURITY_FILE_CAPABILITIES - /* - * FIXME: There is a race here against sys_capset. The - * capabilities can change yet we will restore the old - * value below. We should hold task_capabilities_lock, - * but we cannot because user_path_at can sleep. 
- */ -#endif /* ndef CONFIG_SECURITY_FILE_CAPABILITIES */ + /* Clear the capabilities if we switch to a non-root user */ if (current->uid) old_cap = cap_set_effective(__cap_empty_set); else diff --git a/include/linux/security.h b/include/linux/security.h index 5fe28a671cd..d1ce8beddbd 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -53,8 +53,8 @@ extern int cap_settime(struct timespec *ts, struct timezone *tz); extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern int cap_capset_check(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); +extern void cap_capset_set(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); extern int cap_bprm_set_security(struct linux_binprm *bprm); extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); extern int cap_bprm_secureexec(struct linux_binprm *bprm); @@ -1191,24 +1191,14 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Return 0 if the capability sets were successfully obtained. * @capset_check: * Check permission before setting the @effective, @inheritable, and - * @permitted capability sets for the @target process. - * Caveat: @target is also set to current if a set of processes is - * specified (i.e. all processes other than current and init or a - * particular process group). Hence, the capset_set hook may need to - * revalidate permission to the actual target process. - * @target contains the task_struct structure for target process. + * @permitted capability sets for the current process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. * Return 0 if permission is granted. * @capset_set: * Set the @effective, @inheritable, and @permitted capability sets for - * the @target process. Since capset_check cannot always check permission - * to the real @target process, this hook may also perform permission - * checking to determine if the current process is allowed to set the - * capability sets of the @target process. However, this hook has no way - * of returning an error due to the structure of the sys_capset code. - * @target contains the task_struct structure for target process. + * the current process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. 
@@ -1303,12 +1293,10 @@ struct security_operations { int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); - int (*capset_check) (struct task_struct *target, - kernel_cap_t *effective, + int (*capset_check) (kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); - void (*capset_set) (struct task_struct *target, - kernel_cap_t *effective, + void (*capset_set) (kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); int (*capable) (struct task_struct *tsk, int cap, int audit); @@ -1572,12 +1560,10 @@ int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, +int security_capset_check(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -void security_capset_set(struct task_struct *target, - kernel_cap_t *effective, +void security_capset_set(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); int security_capable(struct task_struct *tsk, int cap); @@ -1769,20 +1755,18 @@ static inline int security_capget(struct task_struct *target, return cap_capget(target, effective, inheritable, permitted); } -static inline int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +static inline int security_capset_check(kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted) { - return cap_capset_check(target, effective, inheritable, permitted); + return cap_capset_check(effective, inheritable, permitted); } -static inline void security_capset_set(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +static inline void security_capset_set(kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted) { - cap_capset_set(target, effective, inheritable, permitted); + cap_capset_set(effective, inheritable, permitted); } static inline int security_capable(struct task_struct *tsk, int cap) diff --git a/kernel/capability.c b/kernel/capability.c index adb262f83de..58b00519624 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -127,160 +127,6 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) return 0; } -#ifndef CONFIG_SECURITY_FILE_CAPABILITIES - -/* - * Without filesystem capability support, we nominally support one process - * setting the capabilities of another - */ -static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, - kernel_cap_t *pIp, kernel_cap_t *pPp) -{ - struct task_struct *target; - int ret; - - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); - - if (pid && pid != task_pid_vnr(current)) { - target = find_task_by_vpid(pid); - if (!target) { - ret = -ESRCH; - goto out; - } - } else - target = current; - - ret = security_capget(target, pEp, pIp, pPp); - -out: - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); - - return ret; -} - -/* - * cap_set_pg - set capabilities for all processes in a given process - * group. We call this holding task_capability_lock and tasklist_lock. 
- */ -static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) -{ - struct task_struct *g, *target; - int ret = -EPERM; - int found = 0; - struct pid *pgrp; - - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); - - pgrp = find_vpid(pgrp_nr); - do_each_pid_task(pgrp, PIDTYPE_PGID, g) { - target = g; - while_each_thread(g, target) { - if (!security_capset_check(target, effective, - inheritable, permitted)) { - security_capset_set(target, effective, - inheritable, permitted); - ret = 0; - } - found = 1; - } - } while_each_pid_task(pgrp, PIDTYPE_PGID, g); - - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); - - if (!found) - ret = 0; - return ret; -} - -/* - * cap_set_all - set capabilities for all processes other than init - * and self. We call this holding task_capability_lock and tasklist_lock. - */ -static inline int cap_set_all(kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) -{ - struct task_struct *g, *target; - int ret = -EPERM; - int found = 0; - - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); - - do_each_thread(g, target) { - if (target == current - || is_container_init(target->group_leader)) - continue; - found = 1; - if (security_capset_check(target, effective, inheritable, - permitted)) - continue; - ret = 0; - security_capset_set(target, effective, inheritable, permitted); - } while_each_thread(g, target); - - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); - - if (!found) - ret = 0; - - return ret; -} - -/* - * Given the target pid does not refer to the current process we - * need more elaborate support... (This support is not present when - * filesystem capabilities are configured.) - */ -static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) -{ - struct task_struct *target; - int ret; - - if (!capable(CAP_SETPCAP)) - return -EPERM; - - if (pid == -1) /* all procs other than current and init */ - return cap_set_all(effective, inheritable, permitted); - - else if (pid < 0) /* all procs in process group */ - return cap_set_pg(-pid, effective, inheritable, permitted); - - /* target != current */ - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); - - target = find_task_by_vpid(pid); - if (!target) - ret = -ESRCH; - else { - ret = security_capset_check(target, effective, inheritable, - permitted); - - /* having verified that the proposed changes are legal, - we now put them into effect. */ - if (!ret) - security_capset_set(target, effective, inheritable, - permitted); - } - - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); - - return ret; -} - -#else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */ - /* * If we have configured with filesystem capability support, then the * only thing that can change the capabilities of the current process @@ -314,22 +160,6 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, return ret; } -/* - * With filesystem capability support configured, the kernel does not - * permit the changing of capabilities in one process by another - * process. (CAP_SETPCAP has much less broad semantics when configured - * this way.) 
- */ -static inline int do_sys_capset_other_tasks(pid_t pid, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) -{ - return -EPERM; -} - -#endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */ - /* * Atomically modify the effective capabilities returning the original * value. No permission check is performed here - it is assumed that the @@ -424,16 +254,14 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) * @data: pointer to struct that contains the effective, permitted, * and inheritable capabilities * - * Set capabilities for a given process, all processes, or all - * processes in a given process group. + * Set capabilities for the current process only. The ability to any other + * process(es) has been deprecated and removed. * * The restrictions on setting capabilities are specified as: * - * [pid is for the 'target' task. 'current' is the calling task.] - * - * I: any raised capabilities must be a subset of the (old current) permitted - * P: any raised capabilities must be a subset of the (old current) permitted - * E: must be set to a subset of (new target) permitted + * I: any raised capabilities must be a subset of the old permitted + * P: any raised capabilities must be a subset of the old permitted + * E: must be set to a subset of new permitted * * Returns 0 on success and < 0 on error. */ @@ -452,10 +280,13 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) if (get_user(pid, &header->pid)) return -EFAULT; + /* may only affect current now */ + if (pid != 0 && pid != task_pid_vnr(current)) + return -EPERM; + if (copy_from_user(&kdata, data, tocopy - * sizeof(struct __user_cap_data_struct))) { + * sizeof(struct __user_cap_data_struct))) return -EFAULT; - } for (i = 0; i < tocopy; i++) { effective.cap[i] = kdata[i].effective; @@ -473,32 +304,20 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) if (ret) return ret; - if (pid && (pid != task_pid_vnr(current))) - ret = do_sys_capset_other_tasks(pid, &effective, &inheritable, - &permitted); - else { - /* - * This lock is required even when filesystem - * capability support is configured - it protects the - * sys_capget() call from returning incorrect data in - * the case that the targeted process is not the - * current one. - */ - spin_lock(&task_capability_lock); - - ret = security_capset_check(current, &effective, &inheritable, - &permitted); - /* - * Having verified that the proposed changes are - * legal, we now put them into effect. - */ - if (!ret) - security_capset_set(current, &effective, &inheritable, - &permitted); - spin_unlock(&task_capability_lock); - } - + /* This lock is required even when filesystem capability support is + * configured - it protects the sys_capget() call from returning + * incorrect data in the case that the targeted process is not the + * current one. + */ + spin_lock(&task_capability_lock); + ret = security_capset_check(&effective, &inheritable, &permitted); + /* Having verified that the proposed changes are legal, we now put them + * into effect. 
+ */ + if (!ret) + security_capset_set(&effective, &inheritable, &permitted); + spin_unlock(&task_capability_lock); return ret; } diff --git a/security/commoncap.c b/security/commoncap.c index 8283271f076..e3f36ef629f 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -96,15 +96,6 @@ int cap_capget (struct task_struct *target, kernel_cap_t *effective, #ifdef CONFIG_SECURITY_FILE_CAPABILITIES -static inline int cap_block_setpcap(struct task_struct *target) -{ - /* - * No support for remote process capability manipulation with - * filesystem capability support. - */ - return (target != current); -} - static inline int cap_inh_is_capped(void) { /* @@ -119,7 +110,6 @@ static inline int cap_limit_ptraced_target(void) { return 1; } #else /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */ -static inline int cap_block_setpcap(struct task_struct *t) { return 0; } static inline int cap_inh_is_capped(void) { return 1; } static inline int cap_limit_ptraced_target(void) { @@ -128,21 +118,18 @@ static inline int cap_limit_ptraced_target(void) #endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */ -int cap_capset_check (struct task_struct *target, kernel_cap_t *effective, +int cap_capset_check (kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - if (cap_block_setpcap(target)) { - return -EPERM; - } if (cap_inh_is_capped() && !cap_issubset(*inheritable, - cap_combine(target->cap_inheritable, + cap_combine(current->cap_inheritable, current->cap_permitted))) { /* incapable of using this inheritable set */ return -EPERM; } if (!cap_issubset(*inheritable, - cap_combine(target->cap_inheritable, + cap_combine(current->cap_inheritable, current->cap_bset))) { /* no new pI capabilities outside bounding set */ return -EPERM; @@ -150,7 +137,7 @@ int cap_capset_check (struct task_struct *target, kernel_cap_t *effective, /* verify restrictions on target's new Permitted set */ if (!cap_issubset (*permitted, - cap_combine (target->cap_permitted, + cap_combine (current->cap_permitted, current->cap_permitted))) { return -EPERM; } @@ -163,12 +150,12 @@ int cap_capset_check (struct task_struct *target, kernel_cap_t *effective, return 0; } -void cap_capset_set (struct task_struct *target, kernel_cap_t *effective, +void cap_capset_set (kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - target->cap_effective = *effective; - target->cap_inheritable = *inheritable; - target->cap_permitted = *permitted; + current->cap_effective = *effective; + current->cap_inheritable = *inheritable; + current->cap_permitted = *permitted; } static inline void bprm_clear_caps(struct linux_binprm *bprm) diff --git a/security/security.c b/security/security.c index 346f21e0ec2..dca37381e2a 100644 --- a/security/security.c +++ b/security/security.c @@ -145,20 +145,18 @@ int security_capget(struct task_struct *target, return security_ops->capget(target, effective, inheritable, permitted); } -int security_capset_check(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +int security_capset_check(kernel_cap_t *effective, + kernel_cap_t *inheritable, + kernel_cap_t *permitted) { - return security_ops->capset_check(target, effective, inheritable, permitted); + return security_ops->capset_check(effective, inheritable, permitted); } -void security_capset_set(struct task_struct *target, - kernel_cap_t *effective, - kernel_cap_t *inheritable, - kernel_cap_t *permitted) +void security_capset_set(kernel_cap_t *effective, + kernel_cap_t 
*inheritable, + kernel_cap_t *permitted) { - security_ops->capset_set(target, effective, inheritable, permitted); + security_ops->capset_set(effective, inheritable, permitted); } int security_capable(struct task_struct *tsk, int cap) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 378dc53c08e..df9986940e9 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1790,22 +1790,22 @@ static int selinux_capget(struct task_struct *target, kernel_cap_t *effective, return secondary_ops->capget(target, effective, inheritable, permitted); } -static int selinux_capset_check(struct task_struct *target, kernel_cap_t *effective, +static int selinux_capset_check(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { int error; - error = secondary_ops->capset_check(target, effective, inheritable, permitted); + error = secondary_ops->capset_check(effective, inheritable, permitted); if (error) return error; - return task_has_perm(current, target, PROCESS__SETCAP); + return task_has_perm(current, current, PROCESS__SETCAP); } -static void selinux_capset_set(struct task_struct *target, kernel_cap_t *effective, +static void selinux_capset_set(kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - secondary_ops->capset_set(target, effective, inheritable, permitted); + secondary_ops->capset_set(effective, inheritable, permitted); } static int selinux_capable(struct task_struct *tsk, int cap, int audit) -- cgit v1.2.3-70-g09d2 From b6dff3ec5e116e3af6f537d4caedcad6b9e5082a Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:16 +1100 Subject: CRED: Separate task security context from task_struct Separate the task security context from task_struct. At this point, the security data is temporarily embedded in the task_struct with two pointers pointing to it. Note that the Alpha arch is altered as it refers to (E)UID and (E)GID in entry.S via asm-offsets. 
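The patch below reaches the point the earlier accessor conversions were preparing for: the credential fields move into a structure of their own, reached from task_struct (and, per the commit message, still temporarily embedded in it during the transition). A heavily simplified sketch of the arrangement, with the field selection chosen for illustration rather than copied from the kernel headers:

/* Illustrative subset only; the real struct cred carries more state
 * (keyrings, LSM security pointers, usage counts, ...). */
struct cred {
	uid_t			uid, euid, suid, fsuid;
	gid_t			gid, egid, sgid, fsgid;
	kernel_cap_t		cap_effective;
	kernel_cap_t		cap_inheritable;
	kernel_cap_t		cap_permitted;
	struct user_struct	*user;
	struct group_info	*group_info;
};

struct task_struct {
	/* ... */
	struct cred *cred;	/* reached as current->cred->fsuid etc.
				 * in the hunks that follow */
	/* ... */
};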
With comment fixes Signed-off-by: Marc Dionne Signed-off-by: David Howells Acked-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- arch/alpha/kernel/asm-offsets.c | 11 +- arch/alpha/kernel/entry.S | 10 +- arch/ia64/ia32/sys_ia32.c | 8 +- arch/mips/kernel/kspd.c | 4 +- arch/s390/kernel/compat_linux.c | 28 ++--- drivers/connector/cn_proc.c | 8 +- fs/binfmt_elf.c | 12 +- fs/binfmt_elf_fdpic.c | 12 +- fs/exec.c | 4 +- fs/fcntl.c | 4 +- fs/file_table.c | 4 +- fs/fuse/dir.c | 12 +- fs/hugetlbfs/inode.c | 4 +- fs/ioprio.c | 12 +- fs/nfsd/auth.c | 22 ++-- fs/nfsd/nfs4recover.c | 12 +- fs/nfsd/nfsfh.c | 6 +- fs/open.c | 17 +-- fs/proc/array.c | 18 +-- fs/proc/base.c | 16 +-- fs/xfs/linux-2.6/xfs_cred.h | 6 +- fs/xfs/linux-2.6/xfs_globals.h | 2 +- fs/xfs/xfs_inode.h | 2 +- fs/xfs/xfs_vnodeops.h | 10 +- include/linux/cred.h | 155 +++++++++++++++++++---- include/linux/init_task.h | 24 ++-- include/linux/sched.h | 52 +------- include/linux/securebits.h | 2 +- ipc/mqueue.c | 2 +- ipc/shm.c | 4 +- kernel/auditsc.c | 52 ++++---- kernel/capability.c | 4 +- kernel/cgroup.c | 4 +- kernel/exit.c | 10 +- kernel/fork.c | 24 ++-- kernel/futex.c | 6 +- kernel/futex_compat.c | 5 +- kernel/ptrace.c | 19 +-- kernel/sched.c | 10 +- kernel/signal.c | 16 +-- kernel/sys.c | 266 ++++++++++++++++++++++----------------- kernel/trace/trace.c | 2 +- kernel/tsacct.c | 4 +- kernel/uid16.c | 28 ++--- kernel/user.c | 4 +- mm/mempolicy.c | 10 +- mm/migrate.c | 10 +- mm/oom_kill.c | 2 +- net/core/scm.c | 10 +- net/sunrpc/auth.c | 2 +- security/commoncap.c | 161 +++++++++++++----------- security/keys/keyctl.c | 25 ++-- security/keys/permission.c | 11 +- security/keys/process_keys.c | 98 ++++++++------- security/keys/request_key.c | 18 +-- security/keys/request_key_auth.c | 12 +- security/selinux/exports.c | 2 +- security/selinux/hooks.c | 116 ++++++++--------- security/selinux/selinuxfs.c | 2 +- security/selinux/xfrm.c | 6 +- security/smack/smack_access.c | 4 +- security/smack/smack_lsm.c | 77 ++++++------ security/smack/smackfs.c | 6 +- 63 files changed, 832 insertions(+), 677 deletions(-) (limited to 'fs') diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c index 4b18cd94d59..6ff8886e7e2 100644 --- a/arch/alpha/kernel/asm-offsets.c +++ b/arch/alpha/kernel/asm-offsets.c @@ -19,15 +19,18 @@ void foo(void) BLANK(); DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); - DEFINE(TASK_UID, offsetof(struct task_struct, uid)); - DEFINE(TASK_EUID, offsetof(struct task_struct, euid)); - DEFINE(TASK_GID, offsetof(struct task_struct, gid)); - DEFINE(TASK_EGID, offsetof(struct task_struct, egid)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index 5fc61e281ac..f77345bc66a 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -850,8 +850,9 @@ osf_getpriority: sys_getxuid: .prologue 0 ldq $2, TI_TASK($8) - ldl $0, TASK_UID($2) - ldl $1, TASK_EUID($2) + ldq $3, TASK_CRED($2) + ldl 
$0, CRED_UID($3) + ldl $1, CRED_EUID($3) stq $1, 80($sp) ret .end sys_getxuid @@ -862,8 +863,9 @@ sys_getxuid: sys_getxgid: .prologue 0 ldq $2, TI_TASK($8) - ldl $0, TASK_GID($2) - ldl $1, TASK_EGID($2) + ldq $3, TASK_CRED($2) + ldl $0, CRED_GID($3) + ldl $1, CRED_EGID($3) stq $1, 80($sp) ret .end sys_getxgid diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 5e92ae00bdb..2445a9d3488 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1772,20 +1772,20 @@ sys32_getgroups16 (int gidsetsize, short __user *grouplist) if (gidsetsize < 0) return -EINVAL; - get_group_info(current->group_info); - i = current->group_info->ngroups; + get_group_info(current->cred->group_info); + i = current->cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->group_info)) { + if (groups16_to_user(grouplist, current->cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->group_info); + put_group_info(current->cred->group_info); return i; } diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index b0591ae0ce5..fd6e5122403 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -174,8 +174,8 @@ static unsigned int translate_open_flags(int flags) static void sp_setfsuidgid( uid_t uid, gid_t gid) { - current->fsuid = uid; - current->fsgid = gid; + current->cred->fsuid = uid; + current->cred->fsgid = gid; key_fsuid_changed(current); key_fsgid_changed(current); diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 4646382af34..6cc87d8c868 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -148,9 +148,9 @@ asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user { int retval; - if (!(retval = put_user(high2lowuid(current->uid), ruid)) && - !(retval = put_user(high2lowuid(current->euid), euid))) - retval = put_user(high2lowuid(current->suid), suid); + if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && + !(retval = put_user(high2lowuid(current->cred->euid), euid))) + retval = put_user(high2lowuid(current->cred->suid), suid); return retval; } @@ -165,9 +165,9 @@ asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user { int retval; - if (!(retval = put_user(high2lowgid(current->gid), rgid)) && - !(retval = put_user(high2lowgid(current->egid), egid))) - retval = put_user(high2lowgid(current->sgid), sgid); + if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && + !(retval = put_user(high2lowgid(current->cred->egid), egid))) + retval = put_user(high2lowgid(current->cred->sgid), sgid); return retval; } @@ -217,20 +217,20 @@ asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist) if (gidsetsize < 0) return -EINVAL; - get_group_info(current->group_info); - i = current->group_info->ngroups; + get_group_info(current->cred->group_info); + i = current->cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->group_info)) { + if (groups16_to_user(grouplist, current->cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->group_info); + put_group_info(current->cred->group_info); return i; } @@ -261,22 +261,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) asmlinkage long sys32_getuid16(void) { - return high2lowuid(current->uid); + return high2lowuid(current->cred->uid); } 
asmlinkage long sys32_geteuid16(void) { - return high2lowuid(current->euid); + return high2lowuid(current->cred->euid); } asmlinkage long sys32_getgid16(void) { - return high2lowgid(current->gid); + return high2lowgid(current->cred->gid); } asmlinkage long sys32_getegid16(void) { - return high2lowgid(current->egid); + return high2lowgid(current->cred->egid); } /* diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 5c9f67f98d1..354c1ff1715 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -116,11 +116,11 @@ void proc_id_connector(struct task_struct *task, int which_id) ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_tgid = task->tgid; if (which_id == PROC_EVENT_UID) { - ev->event_data.id.r.ruid = task->uid; - ev->event_data.id.e.euid = task->euid; + ev->event_data.id.r.ruid = task->cred->uid; + ev->event_data.id.e.euid = task->cred->euid; } else if (which_id == PROC_EVENT_GID) { - ev->event_data.id.r.rgid = task->gid; - ev->event_data.id.e.egid = task->egid; + ev->event_data.id.r.rgid = task->cred->gid; + ev->event_data.id.e.egid = task->cred->egid; } else return; get_seq(&msg->seq, &ev->cpu); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 8fcfa398d35..7a52477ce49 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -223,10 +223,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, NEW_AUX_ENT(AT_BASE, interp_load_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec->e_entry); - NEW_AUX_ENT(AT_UID, tsk->uid); - NEW_AUX_ENT(AT_EUID, tsk->euid); - NEW_AUX_ENT(AT_GID, tsk->gid); - NEW_AUX_ENT(AT_EGID, tsk->egid); + NEW_AUX_ENT(AT_UID, tsk->cred->uid); + NEW_AUX_ENT(AT_EUID, tsk->cred->euid); + NEW_AUX_ENT(AT_GID, tsk->cred->gid); + NEW_AUX_ENT(AT_EGID, tsk->cred->egid); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); NEW_AUX_ENT(AT_EXECFN, bprm->exec); if (k_platform) { @@ -1388,8 +1388,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_zomb = psinfo->pr_sname == 'Z'; psinfo->pr_nice = task_nice(p); psinfo->pr_flag = p->flags; - SET_UID(psinfo->pr_uid, p->uid); - SET_GID(psinfo->pr_gid, p->gid); + SET_UID(psinfo->pr_uid, p->cred->uid); + SET_GID(psinfo->pr_gid, p->cred->gid); strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); return 0; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 488584c8751..9f67054c2c4 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -623,10 +623,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr); - NEW_AUX_ENT(AT_UID, (elf_addr_t) current_uid()); - NEW_AUX_ENT(AT_EUID, (elf_addr_t) current_euid()); - NEW_AUX_ENT(AT_GID, (elf_addr_t) current_gid()); - NEW_AUX_ENT(AT_EGID, (elf_addr_t) current_egid()); + NEW_AUX_ENT(AT_UID, (elf_addr_t) current->cred->uid); + NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->cred->euid); + NEW_AUX_ENT(AT_GID, (elf_addr_t) current->cred->gid); + NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->cred->egid); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); NEW_AUX_ENT(AT_EXECFN, bprm->exec); @@ -1440,8 +1440,8 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_zomb = psinfo->pr_sname == 'Z'; psinfo->pr_nice = task_nice(p); psinfo->pr_flag = p->flags; - SET_UID(psinfo->pr_uid, p->uid); - SET_GID(psinfo->pr_gid, p->gid); + SET_UID(psinfo->pr_uid, p->cred->uid); + 
SET_GID(psinfo->pr_gid, p->cred->gid); strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); return 0; diff --git a/fs/exec.c b/fs/exec.c index 604834f3b20..31149e430a8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1738,7 +1738,7 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) */ if (get_dumpable(mm) == 2) { /* Setuid core dump mode */ flag = O_EXCL; /* Stop rewrite attacks */ - current->fsuid = 0; /* Dump root private */ + current->cred->fsuid = 0; /* Dump root private */ } retval = coredump_wait(exit_code, &core_state); @@ -1834,7 +1834,7 @@ fail_unlock: if (helper_argv) argv_free(helper_argv); - current->fsuid = fsuid; + current->cred->fsuid = fsuid; coredump_finish(mm); fail: return retval; diff --git a/fs/fcntl.c b/fs/fcntl.c index bf049a805e5..63964d863ad 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -401,8 +401,8 @@ static inline int sigio_perm(struct task_struct *p, struct fown_struct *fown, int sig) { return (((fown->euid == 0) || - (fown->euid == p->suid) || (fown->euid == p->uid) || - (fown->uid == p->suid) || (fown->uid == p->uid)) && + (fown->euid == p->cred->suid) || (fown->euid == p->cred->uid) || + (fown->uid == p->cred->suid) || (fown->uid == p->cred->uid)) && !security_file_send_sigiotask(p, fown, sig)); } diff --git a/fs/file_table.c b/fs/file_table.c index 5ad0eca6eea..3152b53cfab 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -122,8 +122,8 @@ struct file *get_empty_filp(void) INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_uid = tsk->fsuid; - f->f_gid = tsk->fsgid; + f->f_uid = tsk->cred->fsuid; + f->f_gid = tsk->cred->fsgid; eventpoll_init_file(f); /* f->f_version: 0 */ return f; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index fd03330cade..e97a9898186 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -872,12 +872,12 @@ int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task) if (fc->flags & FUSE_ALLOW_OTHER) return 1; - if (task->euid == fc->user_id && - task->suid == fc->user_id && - task->uid == fc->user_id && - task->egid == fc->group_id && - task->sgid == fc->group_id && - task->gid == fc->group_id) + if (task->cred->euid == fc->user_id && + task->cred->suid == fc->user_id && + task->cred->uid == fc->user_id && + task->cred->egid == fc->group_id && + task->cred->sgid == fc->group_id && + task->cred->gid == fc->group_id) return 1; return 0; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 08ad76c79b4..870a721b8bd 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -958,7 +958,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size) if (!can_do_hugetlb_shm()) return ERR_PTR(-EPERM); - if (!user_shm_lock(size, current->user)) + if (!user_shm_lock(size, current->cred->user)) return ERR_PTR(-ENOMEM); root = hugetlbfs_vfsmount->mnt_root; @@ -998,7 +998,7 @@ out_inode: out_dentry: dput(dentry); out_shm_unlock: - user_shm_unlock(size, current->user); + user_shm_unlock(size, current->cred->user); return ERR_PTR(error); } diff --git a/fs/ioprio.c b/fs/ioprio.c index 68d2cd80711..bb5210af77c 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -32,8 +32,8 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) int err; struct io_context *ioc; - if (task->uid != current_euid() && - task->uid != current_uid() && !capable(CAP_SYS_NICE)) + if (task->cred->uid != current_euid() && + task->cred->uid != current_uid() && !capable(CAP_SYS_NICE)) return -EPERM; err = security_task_setioprio(task, ioprio); @@ -123,7 +123,7 @@ asmlinkage long 
sys_ioprio_set(int which, int who, int ioprio) break; case IOPRIO_WHO_USER: if (!who) - user = current->user; + user = current->cred->user; else user = find_user(who); @@ -131,7 +131,7 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) break; do_each_thread(g, p) { - if (p->uid != who) + if (p->cred->uid != who) continue; ret = set_task_ioprio(p, ioprio); if (ret) @@ -216,7 +216,7 @@ asmlinkage long sys_ioprio_get(int which, int who) break; case IOPRIO_WHO_USER: if (!who) - user = current->user; + user = current->cred->user; else user = find_user(who); @@ -224,7 +224,7 @@ asmlinkage long sys_ioprio_get(int which, int who) break; do_each_thread(g, p) { - if (p->uid != user->uid) + if (p->cred->uid != user->uid) continue; tmpio = get_task_ioprio(p); if (tmpio < 0) diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 294992e9bf6..808fc03a6fb 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -27,6 +27,7 @@ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) { + struct cred *act_as = current->cred ; struct svc_cred cred = rqstp->rq_cred; int i; int flags = nfsexp_flags(rqstp, exp); @@ -55,25 +56,26 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) get_group_info(cred.cr_group_info); if (cred.cr_uid != (uid_t) -1) - current->fsuid = cred.cr_uid; + act_as->fsuid = cred.cr_uid; else - current->fsuid = exp->ex_anon_uid; + act_as->fsuid = exp->ex_anon_uid; if (cred.cr_gid != (gid_t) -1) - current->fsgid = cred.cr_gid; + act_as->fsgid = cred.cr_gid; else - current->fsgid = exp->ex_anon_gid; + act_as->fsgid = exp->ex_anon_gid; if (!cred.cr_group_info) return -ENOMEM; - ret = set_current_groups(cred.cr_group_info); + ret = set_groups(act_as, cred.cr_group_info); put_group_info(cred.cr_group_info); if ((cred.cr_uid)) { - current->cap_effective = - cap_drop_nfsd_set(current->cap_effective); + act_as->cap_effective = + cap_drop_nfsd_set(act_as->cap_effective); } else { - current->cap_effective = - cap_raise_nfsd_set(current->cap_effective, - current->cap_permitted); + act_as->cap_effective = + cap_raise_nfsd_set(act_as->cap_effective, + act_as->cap_permitted); } return ret; } + diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index bb93946ace2..a5e14e8695e 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -57,17 +57,17 @@ static int rec_dir_init = 0; static void nfs4_save_user(uid_t *saveuid, gid_t *savegid) { - *saveuid = current->fsuid; - *savegid = current->fsgid; - current->fsuid = 0; - current->fsgid = 0; + *saveuid = current->cred->fsuid; + *savegid = current->cred->fsgid; + current->cred->fsuid = 0; + current->cred->fsgid = 0; } static void nfs4_reset_user(uid_t saveuid, gid_t savegid) { - current->fsuid = saveuid; - current->fsgid = savegid; + current->cred->fsuid = saveuid; + current->cred->fsgid = savegid; } static void diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index cd25d91895a..e67cfaea086 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -186,9 +186,9 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) * access control settings being in effect, we cannot * fix that case easily. 
*/ - current->cap_effective = - cap_raise_nfsd_set(current->cap_effective, - current->cap_permitted); + current->cred->cap_effective = + cap_raise_nfsd_set(current->cred->cap_effective, + current->cred->cap_permitted); } else { error = nfsd_setuser_and_check_port(rqstp, exp); if (error) diff --git a/fs/open.c b/fs/open.c index 500cc0c5476..b1238e195e7 100644 --- a/fs/open.c +++ b/fs/open.c @@ -425,6 +425,7 @@ out: */ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) { + struct cred *cred = current->cred; struct path path; struct inode *inode; int old_fsuid, old_fsgid; @@ -434,18 +435,18 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; - old_fsuid = current->fsuid; - old_fsgid = current->fsgid; + old_fsuid = cred->fsuid; + old_fsgid = cred->fsgid; - current->fsuid = current->uid; - current->fsgid = current->gid; + cred->fsuid = cred->uid; + cred->fsgid = cred->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { /* Clear the capabilities if we switch to a non-root user */ - if (current->uid) + if (current->cred->uid) old_cap = cap_set_effective(__cap_empty_set); else - old_cap = cap_set_effective(current->cap_permitted); + old_cap = cap_set_effective(cred->cap_permitted); } res = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path); @@ -484,8 +485,8 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) out_path_release: path_put(&path); out: - current->fsuid = old_fsuid; - current->fsgid = old_fsgid; + cred->fsuid = old_fsuid; + cred->fsgid = old_fsgid; if (!issecure(SECURE_NO_SETUID_FIXUP)) cap_set_effective(old_cap); diff --git a/fs/proc/array.c b/fs/proc/array.c index 6af7fba7abb..62fe9b2009b 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -182,8 +182,8 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, task_tgid_nr_ns(p, ns), pid_nr_ns(pid, ns), ppid, tpid, - p->uid, p->euid, p->suid, p->fsuid, - p->gid, p->egid, p->sgid, p->fsgid); + p->cred->uid, p->cred->euid, p->cred->suid, p->cred->fsuid, + p->cred->gid, p->cred->egid, p->cred->sgid, p->cred->fsgid); task_lock(p); if (p->files) @@ -194,7 +194,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, fdt ? 
fdt->max_fds : 0); rcu_read_unlock(); - group_info = p->group_info; + group_info = p->cred->group_info; get_group_info(group_info); task_unlock(p); @@ -262,7 +262,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p) blocked = p->blocked; collect_sigign_sigcatch(p, &ignored, &caught); num_threads = atomic_read(&p->signal->count); - qsize = atomic_read(&p->user->sigpending); + qsize = atomic_read(&p->cred->user->sigpending); qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur; unlock_task_sighand(p, &flags); } @@ -293,10 +293,12 @@ static void render_cap_t(struct seq_file *m, const char *header, static inline void task_cap(struct seq_file *m, struct task_struct *p) { - render_cap_t(m, "CapInh:\t", &p->cap_inheritable); - render_cap_t(m, "CapPrm:\t", &p->cap_permitted); - render_cap_t(m, "CapEff:\t", &p->cap_effective); - render_cap_t(m, "CapBnd:\t", &p->cap_bset); + struct cred *cred = p->cred; + + render_cap_t(m, "CapInh:\t", &cred->cap_inheritable); + render_cap_t(m, "CapPrm:\t", &cred->cap_permitted); + render_cap_t(m, "CapEff:\t", &cred->cap_effective); + render_cap_t(m, "CapBnd:\t", &cred->cap_bset); } static inline void task_context_switch_counts(struct seq_file *m, diff --git a/fs/proc/base.c b/fs/proc/base.c index 486cf3fe713..6862b360c36 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1428,8 +1428,8 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st inode->i_uid = 0; inode->i_gid = 0; if (task_dumpable(task)) { - inode->i_uid = task->euid; - inode->i_gid = task->egid; + inode->i_uid = task->cred->euid; + inode->i_gid = task->cred->egid; } security_task_to_inode(task, inode); @@ -1454,8 +1454,8 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat if (task) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { - stat->uid = task->euid; - stat->gid = task->egid; + stat->uid = task->cred->euid; + stat->gid = task->cred->egid; } } rcu_read_unlock(); @@ -1486,8 +1486,8 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) if (task) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { - inode->i_uid = task->euid; - inode->i_gid = task->egid; + inode->i_uid = task->cred->euid; + inode->i_gid = task->cred->egid; } else { inode->i_uid = 0; inode->i_gid = 0; @@ -1658,8 +1658,8 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) rcu_read_unlock(); put_files_struct(files); if (task_dumpable(task)) { - inode->i_uid = task->euid; - inode->i_gid = task->egid; + inode->i_uid = task->cred->euid; + inode->i_gid = task->cred->egid; } else { inode->i_uid = 0; inode->i_gid = 0; diff --git a/fs/xfs/linux-2.6/xfs_cred.h b/fs/xfs/linux-2.6/xfs_cred.h index 293043a5573..8c022cd0ad6 100644 --- a/fs/xfs/linux-2.6/xfs_cred.h +++ b/fs/xfs/linux-2.6/xfs_cred.h @@ -23,11 +23,9 @@ /* * Credentials */ -typedef struct cred { - /* EMPTY */ -} cred_t; +typedef const struct cred cred_t; -extern struct cred *sys_cred; +extern cred_t *sys_cred; /* this is a hack.. 
(assumes sys_cred is the only cred_t in the system) */ static inline int capable_cred(cred_t *cr, int cid) diff --git a/fs/xfs/linux-2.6/xfs_globals.h b/fs/xfs/linux-2.6/xfs_globals.h index 2770b0085ee..6eda8a3eb6f 100644 --- a/fs/xfs/linux-2.6/xfs_globals.h +++ b/fs/xfs/linux-2.6/xfs_globals.h @@ -19,6 +19,6 @@ #define __XFS_GLOBALS_H__ extern uint64_t xfs_panic_mask; /* set to cause more panics */ -extern struct cred *sys_cred; +extern cred_t *sys_cred; #endif /* __XFS_GLOBALS_H__ */ diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1420c49674d..6be310d41da 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -497,7 +497,7 @@ int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, xfs_inode_t **, xfs_daddr_t, uint); int xfs_iread_extents(struct xfs_trans *, xfs_inode_t *, int); int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, - xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t, + xfs_nlink_t, xfs_dev_t, cred_t *, xfs_prid_t, int, struct xfs_buf **, boolean_t *, xfs_inode_t **); void xfs_dinode_from_disk(struct xfs_icdinode *, struct xfs_dinode_core *); diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index e932a96bec5..7b0c2ab8833 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -16,7 +16,7 @@ struct xfs_iomap; int xfs_open(struct xfs_inode *ip); int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags, - struct cred *credp); + cred_t *credp); #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ @@ -28,24 +28,24 @@ int xfs_inactive(struct xfs_inode *ip); int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name, struct xfs_inode **ipp, struct xfs_name *ci_name); int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode, - xfs_dev_t rdev, struct xfs_inode **ipp, struct cred *credp); + xfs_dev_t rdev, struct xfs_inode **ipp, cred_t *credp); int xfs_remove(struct xfs_inode *dp, struct xfs_name *name, struct xfs_inode *ip); int xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip, struct xfs_name *target_name); int xfs_mkdir(struct xfs_inode *dp, struct xfs_name *dir_name, - mode_t mode, struct xfs_inode **ipp, struct cred *credp); + mode_t mode, struct xfs_inode **ipp, cred_t *credp); int xfs_readdir(struct xfs_inode *dp, void *dirent, size_t bufsize, xfs_off_t *offset, filldir_t filldir); int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name, const char *target_path, mode_t mode, struct xfs_inode **ipp, - struct cred *credp); + cred_t *credp); int xfs_inode_flush(struct xfs_inode *ip, int flags); int xfs_set_dmattrs(struct xfs_inode *ip, u_int evmask, u_int16_t state); int xfs_reclaim(struct xfs_inode *ip); int xfs_change_file_space(struct xfs_inode *ip, int cmd, xfs_flock64_t *bf, xfs_off_t offset, - struct cred *credp, int attr_flags); + cred_t *credp, int attr_flags); int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, struct xfs_inode *src_ip, struct xfs_inode *target_dp, struct xfs_name *target_name, struct xfs_inode *target_ip); diff --git a/include/linux/cred.h b/include/linux/cred.h index b69222cc1fd..3e65587a72e 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -12,39 +12,150 @@ #ifndef _LINUX_CRED_H #define _LINUX_CRED_H -#define get_current_user() (get_uid(current->user)) - -#define task_uid(task) ((task)->uid) -#define task_gid(task) ((task)->gid) -#define task_euid(task) ((task)->euid) -#define 
task_egid(task) ((task)->egid) - -#define current_uid() (current->uid) -#define current_gid() (current->gid) -#define current_euid() (current->euid) -#define current_egid() (current->egid) -#define current_suid() (current->suid) -#define current_sgid() (current->sgid) -#define current_fsuid() (current->fsuid) -#define current_fsgid() (current->fsgid) -#define current_cap() (current->cap_effective) +#include +#include +#include + +struct user_struct; +struct cred; + +/* + * COW Supplementary groups list + */ +#define NGROUPS_SMALL 32 +#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) + +struct group_info { + atomic_t usage; + int ngroups; + int nblocks; + gid_t small_block[NGROUPS_SMALL]; + gid_t *blocks[0]; +}; + +/** + * get_group_info - Get a reference to a group info structure + * @group_info: The group info to reference + * + * This must be called with the owning task locked (via task_lock()) when task + * != current. The reason being that the vast majority of callers are looking + * at current->group_info, which can not be changed except by the current task. + * Changing current->group_info requires the task lock, too. + */ +#define get_group_info(group_info) \ +do { \ + atomic_inc(&(group_info)->usage); \ +} while (0) + +/** + * put_group_info - Release a reference to a group info structure + * @group_info: The group info to release + */ +#define put_group_info(group_info) \ +do { \ + if (atomic_dec_and_test(&(group_info)->usage)) \ + groups_free(group_info); \ +} while (0) + +extern struct group_info *groups_alloc(int); +extern void groups_free(struct group_info *); +extern int set_current_groups(struct group_info *); +extern int set_groups(struct cred *, struct group_info *); +extern int groups_search(struct group_info *, gid_t); + +/* access the groups "array" with this macro */ +#define GROUP_AT(gi, i) \ + ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK]) + +extern int in_group_p(gid_t); +extern int in_egroup_p(gid_t); + +/* + * The security context of a task + * + * The parts of the context break down into two categories: + * + * (1) The objective context of a task. These parts are used when some other + * task is attempting to affect this one. + * + * (2) The subjective context. These details are used when the task is acting + * upon another object, be that a file, a task, a key or whatever. + * + * Note that some members of this structure belong to both categories - the + * LSM security pointer for instance. + * + * A task has two security pointers. task->real_cred points to the objective + * context that defines that task's actual details. The objective part of this + * context is used whenever that task is acted upon. + * + * task->cred points to the subjective context that defines the details of how + * that task is going to act upon another object. This may be overridden + * temporarily to point to another security context, but normally points to the + * same context as task->real_cred. 
+ */ +struct cred { + atomic_t usage; + uid_t uid; /* real UID of the task */ + gid_t gid; /* real GID of the task */ + uid_t suid; /* saved UID of the task */ + gid_t sgid; /* saved GID of the task */ + uid_t euid; /* effective UID of the task */ + gid_t egid; /* effective GID of the task */ + uid_t fsuid; /* UID for VFS ops */ + gid_t fsgid; /* GID for VFS ops */ + unsigned securebits; /* SUID-less security management */ + kernel_cap_t cap_inheritable; /* caps our children can inherit */ + kernel_cap_t cap_permitted; /* caps we're permitted */ + kernel_cap_t cap_effective; /* caps we can actually use */ + kernel_cap_t cap_bset; /* capability bounding set */ +#ifdef CONFIG_KEYS + unsigned char jit_keyring; /* default keyring to attach requested + * keys to */ + struct key *thread_keyring; /* keyring private to this thread */ + struct key *request_key_auth; /* assumed request_key authority */ +#endif +#ifdef CONFIG_SECURITY + void *security; /* subjective LSM security */ +#endif + struct user_struct *user; /* real user ID subscription */ + struct group_info *group_info; /* supplementary groups for euid/fsgid */ + struct rcu_head rcu; /* RCU deletion hook */ + spinlock_t lock; /* lock for pointer changes */ +}; + +#define get_current_user() (get_uid(current->cred->user)) + +#define task_uid(task) ((task)->cred->uid) +#define task_gid(task) ((task)->cred->gid) +#define task_euid(task) ((task)->cred->euid) +#define task_egid(task) ((task)->cred->egid) + +#define current_uid() (current->cred->uid) +#define current_gid() (current->cred->gid) +#define current_euid() (current->cred->euid) +#define current_egid() (current->cred->egid) +#define current_suid() (current->cred->suid) +#define current_sgid() (current->cred->sgid) +#define current_fsuid() (current->cred->fsuid) +#define current_fsgid() (current->cred->fsgid) +#define current_cap() (current->cred->cap_effective) #define current_uid_gid(_uid, _gid) \ do { \ - *(_uid) = current->uid; \ - *(_gid) = current->gid; \ + *(_uid) = current->cred->uid; \ + *(_gid) = current->cred->gid; \ } while(0) #define current_euid_egid(_uid, _gid) \ do { \ - *(_uid) = current->euid; \ - *(_gid) = current->egid; \ + *(_uid) = current->cred->euid; \ + *(_gid) = current->cred->egid; \ } while(0) #define current_fsuid_fsgid(_uid, _gid) \ do { \ - *(_uid) = current->fsuid; \ - *(_gid) = current->fsgid; \ + *(_uid) = current->cred->fsuid; \ + *(_gid) = current->cred->fsgid; \ } while(0) #endif /* _LINUX_CRED_H */ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 23fd8909b9e..9de41ccd67b 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -113,6 +113,21 @@ extern struct group_info init_groups; # define CAP_INIT_BSET CAP_INIT_EFF_SET #endif +extern struct cred init_cred; + +#define INIT_CRED(p) \ +{ \ + .usage = ATOMIC_INIT(3), \ + .securebits = SECUREBITS_DEFAULT, \ + .cap_inheritable = CAP_INIT_INH_SET, \ + .cap_permitted = CAP_FULL_SET, \ + .cap_effective = CAP_INIT_EFF_SET, \ + .cap_bset = CAP_INIT_BSET, \ + .user = INIT_USER, \ + .group_info = &init_groups, \ + .lock = __SPIN_LOCK_UNLOCKED(p.lock), \ +} + /* * INIT_TASK is used to set up the first task table, touch at * your own risk!. 
Base=0, limit=0x1fffff (=2MB) @@ -147,13 +162,8 @@ extern struct group_info init_groups; .children = LIST_HEAD_INIT(tsk.children), \ .sibling = LIST_HEAD_INIT(tsk.sibling), \ .group_leader = &tsk, \ - .group_info = &init_groups, \ - .cap_effective = CAP_INIT_EFF_SET, \ - .cap_inheritable = CAP_INIT_INH_SET, \ - .cap_permitted = CAP_FULL_SET, \ - .cap_bset = CAP_INIT_BSET, \ - .securebits = SECUREBITS_DEFAULT, \ - .user = INIT_USER, \ + .__temp_cred = INIT_CRED(tsk.__temp_cred), \ + .cred = &tsk.__temp_cred, \ .comm = "swapper", \ .thread = INIT_THREAD, \ .fs = &init_fs, \ diff --git a/include/linux/sched.h b/include/linux/sched.h index b483f39a711..c8b92502354 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -660,6 +660,7 @@ extern struct user_struct *find_user(uid_t); extern struct user_struct root_user; #define INIT_USER (&root_user) + struct backing_dev_info; struct reclaim_state; @@ -883,38 +884,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new, #endif /* !CONFIG_SMP */ struct io_context; /* See blkdev.h */ -#define NGROUPS_SMALL 32 -#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t))) -struct group_info { - int ngroups; - atomic_t usage; - gid_t small_block[NGROUPS_SMALL]; - int nblocks; - gid_t *blocks[0]; -}; - -/* - * get_group_info() must be called with the owning task locked (via task_lock()) - * when task != current. The reason being that the vast majority of callers are - * looking at current->group_info, which can not be changed except by the - * current task. Changing current->group_info requires the task lock, too. - */ -#define get_group_info(group_info) do { \ - atomic_inc(&(group_info)->usage); \ -} while (0) -#define put_group_info(group_info) do { \ - if (atomic_dec_and_test(&(group_info)->usage)) \ - groups_free(group_info); \ -} while (0) - -extern struct group_info *groups_alloc(int gidsetsize); -extern void groups_free(struct group_info *group_info); -extern int set_current_groups(struct group_info *group_info); -extern int groups_search(struct group_info *group_info, gid_t grp); -/* access the groups "array" with this macro */ -#define GROUP_AT(gi, i) \ - ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK]) #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK extern void prefetch_stack(struct task_struct *t); @@ -1181,17 +1151,9 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ - uid_t uid,euid,suid,fsuid; - gid_t gid,egid,sgid,fsgid; - struct group_info *group_info; - kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; - struct user_struct *user; - unsigned securebits; -#ifdef CONFIG_KEYS - unsigned char jit_keyring; /* default keyring to attach requested keys to */ - struct key *request_key_auth; /* assumed request_key authority */ - struct key *thread_keyring; /* keyring private to this thread */ -#endif + struct cred __temp_cred __deprecated; /* temporary credentials to be removed */ + struct cred *cred; /* actual/objective task credentials */ + char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) @@ -1228,9 +1190,6 @@ struct task_struct { int (*notifier)(void *priv); void *notifier_data; sigset_t *notifier_mask; -#ifdef CONFIG_SECURITY - void *security; -#endif struct audit_context *audit_context; #ifdef CONFIG_AUDITSYSCALL uid_t loginuid; @@ -1787,9 +1746,6 @@ extern void wake_up_new_task(struct task_struct *tsk, extern void sched_fork(struct task_struct *p, int clone_flags); extern void 
sched_dead(struct task_struct *p); -extern int in_group_p(gid_t); -extern int in_egroup_p(gid_t); - extern void proc_caches_init(void); extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); diff --git a/include/linux/securebits.h b/include/linux/securebits.h index 92f09bdf117..6d389491bfa 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h @@ -32,7 +32,7 @@ setting is locked or not. A setting which is locked cannot be changed from user-level. */ #define issecure_mask(X) (1 << (X)) -#define issecure(X) (issecure_mask(X) & current->securebits) +#define issecure(X) (issecure_mask(X) & current->cred->securebits) #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ issecure_mask(SECURE_NO_SETUID_FIXUP) | \ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index abda5991d7e..e1885b494ba 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -126,7 +126,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb, int mode, if (S_ISREG(mode)) { struct mqueue_inode_info *info; struct task_struct *p = current; - struct user_struct *u = p->user; + struct user_struct *u = p->cred->user; unsigned long mq_bytes, mq_msg_tblsz; inode->i_fop = &mqueue_file_operations; diff --git a/ipc/shm.c b/ipc/shm.c index 0c3debbe32d..264a9d33c5d 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -366,7 +366,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (shmflg & SHM_HUGETLB) { /* hugetlb_file_setup takes care of mlock user accounting */ file = hugetlb_file_setup(name, size); - shp->mlock_user = current->user; + shp->mlock_user = current->cred->user; } else { int acctflag = VM_ACCOUNT; /* @@ -767,7 +767,7 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) goto out_unlock; if(cmd==SHM_LOCK) { - struct user_struct * user = current->user; + struct user_struct *user = current->cred->user; if (!is_file_hugepages(shp->shm_file)) { err = shmem_lock(shp->shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){ diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 9c7e47ae457..2febf5165fa 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -447,6 +447,7 @@ static int audit_filter_rules(struct task_struct *tsk, struct audit_names *name, enum audit_state *state) { + struct cred *cred = tsk->cred; int i, j, need_sid = 1; u32 sid; @@ -466,28 +467,28 @@ static int audit_filter_rules(struct task_struct *tsk, } break; case AUDIT_UID: - result = audit_comparator(tsk->uid, f->op, f->val); + result = audit_comparator(cred->uid, f->op, f->val); break; case AUDIT_EUID: - result = audit_comparator(tsk->euid, f->op, f->val); + result = audit_comparator(cred->euid, f->op, f->val); break; case AUDIT_SUID: - result = audit_comparator(tsk->suid, f->op, f->val); + result = audit_comparator(cred->suid, f->op, f->val); break; case AUDIT_FSUID: - result = audit_comparator(tsk->fsuid, f->op, f->val); + result = audit_comparator(cred->fsuid, f->op, f->val); break; case AUDIT_GID: - result = audit_comparator(tsk->gid, f->op, f->val); + result = audit_comparator(cred->gid, f->op, f->val); break; case AUDIT_EGID: - result = audit_comparator(tsk->egid, f->op, f->val); + result = audit_comparator(cred->egid, f->op, f->val); break; case AUDIT_SGID: - result = audit_comparator(tsk->sgid, f->op, f->val); + result = audit_comparator(cred->sgid, f->op, f->val); break; case AUDIT_FSGID: - result = audit_comparator(tsk->fsgid, f->op, f->val); + result = audit_comparator(cred->fsgid, f->op, f->val); break; case AUDIT_PERS: result = 
audit_comparator(tsk->personality, f->op, f->val); @@ -1228,6 +1229,7 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { + struct cred *cred = tsk->cred; int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; @@ -1237,14 +1239,14 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts context->pid = tsk->pid; if (!context->ppid) context->ppid = sys_getppid(); - context->uid = tsk->uid; - context->gid = tsk->gid; - context->euid = tsk->euid; - context->suid = tsk->suid; - context->fsuid = tsk->fsuid; - context->egid = tsk->egid; - context->sgid = tsk->sgid; - context->fsgid = tsk->fsgid; + context->uid = cred->uid; + context->gid = cred->gid; + context->euid = cred->euid; + context->suid = cred->suid; + context->fsuid = cred->fsuid; + context->egid = cred->egid; + context->sgid = cred->sgid; + context->fsgid = cred->fsgid; context->personality = tsk->personality; ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL); @@ -2086,7 +2088,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid) audit_log_format(ab, "login pid=%d uid=%u " "old auid=%u new auid=%u" " old ses=%u new ses=%u", - task->pid, task->uid, + task->pid, task->cred->uid, task->loginuid, loginuid, task->sessionid, sessionid); audit_log_end(ab); @@ -2469,7 +2471,7 @@ void __audit_ptrace(struct task_struct *t) context->target_pid = t->pid; context->target_auid = audit_get_loginuid(t); - context->target_uid = t->uid; + context->target_uid = t->cred->uid; context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); @@ -2495,7 +2497,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (tsk->loginuid != -1) audit_sig_uid = tsk->loginuid; else - audit_sig_uid = tsk->uid; + audit_sig_uid = tsk->cred->uid; security_task_getsecid(tsk, &audit_sig_sid); } if (!audit_signals || audit_dummy_context()) @@ -2507,7 +2509,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (!ctx->target_pid) { ctx->target_pid = t->tgid; ctx->target_auid = audit_get_loginuid(t); - ctx->target_uid = t->uid; + ctx->target_uid = t->cred->uid; ctx->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &ctx->target_sid); memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); @@ -2528,7 +2530,7 @@ int __audit_signal_info(int sig, struct task_struct *t) axp->target_pid[axp->pid_count] = t->tgid; axp->target_auid[axp->pid_count] = audit_get_loginuid(t); - axp->target_uid[axp->pid_count] = t->uid; + axp->target_uid[axp->pid_count] = t->cred->uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); security_task_getsecid(t, &axp->target_sid[axp->pid_count]); memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); @@ -2575,12 +2577,12 @@ void __audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_ ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; ax->old_pcap.permitted = *pP; - ax->old_pcap.inheritable = current->cap_inheritable; + ax->old_pcap.inheritable = current->cred->cap_inheritable; ax->old_pcap.effective = *pE; - ax->new_pcap.permitted = current->cap_permitted; - ax->new_pcap.inheritable = current->cap_inheritable; - ax->new_pcap.effective = current->cap_effective; + ax->new_pcap.permitted = current->cred->cap_permitted; + ax->new_pcap.inheritable = current->cred->cap_inheritable; + 
ax->new_pcap.effective = current->cred->cap_effective; } /** diff --git a/kernel/capability.c b/kernel/capability.c index 58b00519624..a404b980b1b 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -171,8 +171,8 @@ kernel_cap_t cap_set_effective(const kernel_cap_t pE_new) spin_lock(&task_capability_lock); - pE_old = current->cap_effective; - current->cap_effective = pE_new; + pE_old = current->cred->cap_effective; + current->cred->cap_effective = pE_new; spin_unlock(&task_capability_lock); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 78f9b310c4f..e210526e640 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1293,7 +1293,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) rcu_read_unlock(); euid = current_euid(); - if (euid && euid != tsk->uid && euid != tsk->suid) { + if (euid && + euid != tsk->cred->uid && + euid != tsk->cred->suid) { put_task_struct(tsk); return -EACCES; } diff --git a/kernel/exit.c b/kernel/exit.c index 80137a5d946..e0f6e1892fb 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -160,7 +160,7 @@ void release_task(struct task_struct * p) int zap_leader; repeat: tracehook_prepare_release_task(p); - atomic_dec(&p->user->processes); + atomic_dec(&p->cred->user->processes); proc_flush_task(p); write_lock_irq(&tasklist_lock); tracehook_finish_release_task(p); @@ -1272,7 +1272,7 @@ static int wait_task_zombie(struct task_struct *p, int options, return 0; if (unlikely(options & WNOWAIT)) { - uid_t uid = p->uid; + uid_t uid = p->cred->uid; int exit_code = p->exit_code; int why, status; @@ -1393,7 +1393,7 @@ static int wait_task_zombie(struct task_struct *p, int options, if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) - retval = put_user(p->uid, &infop->si_uid); + retval = put_user(p->cred->uid, &infop->si_uid); if (!retval) retval = pid; @@ -1458,7 +1458,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, if (!unlikely(options & WNOWAIT)) p->exit_code = 0; - uid = p->uid; + uid = p->cred->uid; unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) @@ -1535,7 +1535,7 @@ static int wait_task_continued(struct task_struct *p, int options, spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); - uid = p->uid; + uid = p->cred->uid; get_task_struct(p); read_unlock(&tasklist_lock); diff --git a/kernel/fork.c b/kernel/fork.c index f6083561dfe..81fdc773390 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -147,8 +147,8 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(tsk == current); security_task_free(tsk); - free_uid(tsk->user); - put_group_info(tsk->group_info); + free_uid(tsk->__temp_cred.user); + put_group_info(tsk->__temp_cred.group_info); delayacct_tsk_free(tsk); if (!profile_handoff_task(tsk)) @@ -969,17 +969,18 @@ static struct task_struct *copy_process(unsigned long clone_flags, DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif + p->cred = &p->__temp_cred; retval = -EAGAIN; - if (atomic_read(&p->user->processes) >= + if (atomic_read(&p->cred->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && - p->user != current->nsproxy->user_ns->root_user) + p->cred->user != current->nsproxy->user_ns->root_user) goto bad_fork_free; } - atomic_inc(&p->user->__count); - atomic_inc(&p->user->processes); - get_group_info(p->group_info); + atomic_inc(&p->cred->user->__count); + atomic_inc(&p->cred->user->processes); + get_group_info(p->cred->group_info); /* * If 
multiple threads are within copy_process(), then this check @@ -1035,9 +1036,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->real_start_time = p->start_time; monotonic_to_bootbased(&p->real_start_time); #ifdef CONFIG_SECURITY - p->security = NULL; + p->cred->security = NULL; #endif - p->cap_bset = current->cap_bset; p->io_context = NULL; p->audit_context = NULL; cgroup_fork(p); @@ -1298,9 +1298,9 @@ bad_fork_cleanup_cgroup: bad_fork_cleanup_put_domain: module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: - put_group_info(p->group_info); - atomic_dec(&p->user->processes); - free_uid(p->user); + put_group_info(p->cred->group_info); + atomic_dec(&p->cred->user->processes); + free_uid(p->cred->user); bad_fork_free: free_task(p); fork_out: diff --git a/kernel/futex.c b/kernel/futex.c index e06962132aa..28421d8210b 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -443,7 +443,8 @@ static struct task_struct * futex_find_get_task(pid_t pid) rcu_read_lock(); p = find_task_by_vpid(pid); - if (!p || (euid != p->euid && euid != p->uid)) + if (!p || (euid != p->cred->euid && + euid != p->cred->uid)) p = ERR_PTR(-ESRCH); else get_task_struct(p); @@ -1846,7 +1847,8 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, if (!p) goto err_unlock; ret = -EPERM; - if (euid != p->euid && euid != p->uid && + if (euid != p->cred->euid && + euid != p->cred->uid && !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->robust_list; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 3254d4e41e8..2c3fd5ed34f 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -151,8 +151,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, if (!p) goto err_unlock; ret = -EPERM; - if (euid != p->euid && euid != p->uid && - !capable(CAP_SYS_PTRACE)) + if (euid != p->cred->euid && + euid != p->cred->uid && + !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->compat_robust_list; read_unlock(&tasklist_lock); diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 937f6b5b200..49849d12dd1 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -115,6 +115,8 @@ int ptrace_check_attach(struct task_struct *child, int kill) int __ptrace_may_access(struct task_struct *task, unsigned int mode) { + struct cred *cred = current->cred, *tcred = task->cred; + /* May we inspect the given task? * This check is used both for attaching with ptrace * and for allowing access to sensitive information in /proc. @@ -123,19 +125,18 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) * because setting up the necessary parent/child relationship * or halting the specified task is impossible. 
*/ - uid_t uid; - gid_t gid; + uid_t uid = cred->uid; + gid_t gid = cred->gid; int dumpable = 0; /* Don't let security modules deny introspection */ if (task == current) return 0; - current_uid_gid(&uid, &gid); - if ((uid != task->euid || - uid != task->suid || - uid != task->uid || - gid != task->egid || - gid != task->sgid || - gid != task->gid) && !capable(CAP_SYS_PTRACE)) + if ((uid != tcred->euid || + uid != tcred->suid || + uid != tcred->uid || + gid != tcred->egid || + gid != tcred->sgid || + gid != tcred->gid) && !capable(CAP_SYS_PTRACE)) return -EPERM; smp_rmb(); if (task->mm) diff --git a/kernel/sched.c b/kernel/sched.c index c3b8b1fcde0..733c59e645a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -345,7 +345,7 @@ static inline struct task_group *task_group(struct task_struct *p) struct task_group *tg; #ifdef CONFIG_USER_SCHED - tg = p->user->tg; + tg = p->cred->user->tg; #elif defined(CONFIG_CGROUP_SCHED) tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), struct task_group, css); @@ -5182,8 +5182,8 @@ recheck: /* can't change other user's priorities */ euid = current_euid(); - if (euid != p->euid && - euid != p->uid) + if (euid != p->cred->euid && + euid != p->cred->uid) return -EPERM; } @@ -5417,7 +5417,9 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) euid = current_euid(); retval = -EPERM; - if (euid != p->euid && euid != p->uid && !capable(CAP_SYS_NICE)) + if (euid != p->cred->euid && + euid != p->cred->uid && + !capable(CAP_SYS_NICE)) goto out_unlock; retval = security_task_setscheduler(p, 0, NULL); diff --git a/kernel/signal.c b/kernel/signal.c index 167b535fe1a..80e8a6489f9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -187,7 +187,7 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, * In order to avoid problems with "switch_user()", we want to make * sure that the compiler doesn't re-load "t->user" */ - user = t->user; + user = t->cred->user; barrier(); atomic_inc(&user->sigpending); if (override_rlimit || @@ -582,8 +582,8 @@ static int check_kill_permission(int sig, struct siginfo *info, uid = current_uid(); euid = current_euid(); - if ((euid ^ t->suid) && (euid ^ t->uid) && - (uid ^ t->suid) && (uid ^ t->uid) && + if ((euid ^ t->cred->suid) && (euid ^ t->cred->uid) && + (uid ^ t->cred->suid) && (uid ^ t->cred->uid) && !capable(CAP_KILL)) { switch (sig) { case SIGCONT: @@ -1100,8 +1100,8 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, goto out_unlock; } if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) - && (euid != p->suid) && (euid != p->uid) - && (uid != p->suid) && (uid != p->uid)) { + && (euid != p->cred->suid) && (euid != p->cred->uid) + && (uid != p->cred->suid) && (uid != p->cred->uid)) { ret = -EPERM; goto out_unlock; } @@ -1374,7 +1374,7 @@ int do_notify_parent(struct task_struct *tsk, int sig) info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); rcu_read_unlock(); - info.si_uid = tsk->uid; + info.si_uid = tsk->cred->uid; thread_group_cputime(tsk, &cputime); info.si_utime = cputime_to_jiffies(cputime.utime); @@ -1445,7 +1445,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); rcu_read_unlock(); - info.si_uid = tsk->uid; + info.si_uid = tsk->cred->uid; info.si_utime = cputime_to_clock_t(tsk->utime); info.si_stime = cputime_to_clock_t(tsk->stime); @@ -1713,7 +1713,7 @@ static int ptrace_signal(int signr, siginfo_t *info, info->si_errno = 0; 
info->si_code = SI_USER; info->si_pid = task_pid_vnr(current->parent); - info->si_uid = current->parent->uid; + info->si_uid = current->parent->cred->uid; } /* If the (new) signal is now blocked, requeue it. */ diff --git a/kernel/sys.c b/kernel/sys.c index ed5c29c748a..5d81f07c015 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -117,7 +117,9 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) uid_t euid = current_euid(); int no_nice; - if (p->uid != euid && p->euid != euid && !capable(CAP_SYS_NICE)) { + if (p->cred->uid != euid && + p->cred->euid != euid && + !capable(CAP_SYS_NICE)) { error = -EPERM; goto out; } @@ -174,7 +176,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case PRIO_USER: - user = current->user; + user = current->cred->user; if (!who) who = current_uid(); else @@ -182,7 +184,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) goto out_unlock; /* No processes for this user */ do_each_thread(g, p) - if (p->uid == who) + if (p->cred->uid == who) error = set_one_prio(p, niceval, error); while_each_thread(g, p); if (who != current_uid()) @@ -236,7 +238,7 @@ asmlinkage long sys_getpriority(int which, int who) } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case PRIO_USER: - user = current->user; + user = current->cred->user; if (!who) who = current_uid(); else @@ -244,7 +246,7 @@ asmlinkage long sys_getpriority(int which, int who) goto out_unlock; /* No processes for this user */ do_each_thread(g, p) - if (p->uid == who) { + if (p->cred->uid == who) { niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; @@ -472,8 +474,9 @@ void ctrl_alt_del(void) */ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) { - int old_rgid = current->gid; - int old_egid = current->egid; + struct cred *cred = current->cred; + int old_rgid = cred->gid; + int old_egid = cred->egid; int new_rgid = old_rgid; int new_egid = old_egid; int retval; @@ -484,7 +487,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) if (rgid != (gid_t) -1) { if ((old_rgid == rgid) || - (current->egid==rgid) || + (cred->egid == rgid) || capable(CAP_SETGID)) new_rgid = rgid; else @@ -492,8 +495,8 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) } if (egid != (gid_t) -1) { if ((old_rgid == egid) || - (current->egid == egid) || - (current->sgid == egid) || + (cred->egid == egid) || + (cred->sgid == egid) || capable(CAP_SETGID)) new_egid = egid; else @@ -505,10 +508,10 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) } if (rgid != (gid_t) -1 || (egid != (gid_t) -1 && egid != old_rgid)) - current->sgid = new_egid; - current->fsgid = new_egid; - current->egid = new_egid; - current->gid = new_rgid; + cred->sgid = new_egid; + cred->fsgid = new_egid; + cred->egid = new_egid; + cred->gid = new_rgid; key_fsgid_changed(current); proc_id_connector(current, PROC_EVENT_GID); return 0; @@ -521,7 +524,8 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) */ asmlinkage long sys_setgid(gid_t gid) { - int old_egid = current->egid; + struct cred *cred = current->cred; + int old_egid = cred->egid; int retval; retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); @@ -533,13 +537,13 @@ asmlinkage long sys_setgid(gid_t gid) set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->gid = current->egid = current->sgid = current->fsgid = gid; - } else if ((gid == current->gid) || (gid == current->sgid)) { + cred->gid = cred->egid = cred->sgid = cred->fsgid = gid; + } 
else if ((gid == cred->gid) || (gid == cred->sgid)) { if (old_egid != gid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->egid = current->fsgid = gid; + cred->egid = cred->fsgid = gid; } else return -EPERM; @@ -570,7 +574,7 @@ static int set_user(uid_t new_ruid, int dumpclear) set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->uid = new_ruid; + current->cred->uid = new_ruid; return 0; } @@ -591,6 +595,7 @@ static int set_user(uid_t new_ruid, int dumpclear) */ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) { + struct cred *cred = current->cred; int old_ruid, old_euid, old_suid, new_ruid, new_euid; int retval; @@ -598,14 +603,14 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) if (retval) return retval; - new_ruid = old_ruid = current->uid; - new_euid = old_euid = current->euid; - old_suid = current->suid; + new_ruid = old_ruid = cred->uid; + new_euid = old_euid = cred->euid; + old_suid = cred->suid; if (ruid != (uid_t) -1) { new_ruid = ruid; if ((old_ruid != ruid) && - (current->euid != ruid) && + (cred->euid != ruid) && !capable(CAP_SETUID)) return -EPERM; } @@ -613,8 +618,8 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) if (euid != (uid_t) -1) { new_euid = euid; if ((old_ruid != euid) && - (current->euid != euid) && - (current->suid != euid) && + (cred->euid != euid) && + (cred->suid != euid) && !capable(CAP_SETUID)) return -EPERM; } @@ -626,11 +631,11 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->fsuid = current->euid = new_euid; + cred->fsuid = cred->euid = new_euid; if (ruid != (uid_t) -1 || (euid != (uid_t) -1 && euid != old_ruid)) - current->suid = current->euid; - current->fsuid = current->euid; + cred->suid = cred->euid; + cred->fsuid = cred->euid; key_fsuid_changed(current); proc_id_connector(current, PROC_EVENT_UID); @@ -653,7 +658,8 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) */ asmlinkage long sys_setuid(uid_t uid) { - int old_euid = current->euid; + struct cred *cred = current->cred; + int old_euid = cred->euid; int old_ruid, old_suid, new_suid; int retval; @@ -661,23 +667,23 @@ asmlinkage long sys_setuid(uid_t uid) if (retval) return retval; - old_ruid = current->uid; - old_suid = current->suid; + old_ruid = cred->uid; + old_suid = cred->suid; new_suid = old_suid; if (capable(CAP_SETUID)) { if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) return -EAGAIN; new_suid = uid; - } else if ((uid != current->uid) && (uid != new_suid)) + } else if ((uid != cred->uid) && (uid != new_suid)) return -EPERM; if (old_euid != uid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->fsuid = current->euid = uid; - current->suid = new_suid; + cred->fsuid = cred->euid = uid; + cred->suid = new_suid; key_fsuid_changed(current); proc_id_connector(current, PROC_EVENT_UID); @@ -692,9 +698,10 @@ asmlinkage long sys_setuid(uid_t uid) */ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) { - int old_ruid = current->uid; - int old_euid = current->euid; - int old_suid = current->suid; + struct cred *cred = current->cred; + int old_ruid = cred->uid; + int old_euid = cred->euid; + int old_suid = cred->suid; int retval; retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); @@ -702,30 +709,31 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) return retval; if (!capable(CAP_SETUID)) { - if ((ruid != (uid_t) -1) && (ruid != current->uid) && - (ruid != current->euid) && (ruid != current->suid)) + if 
((ruid != (uid_t) -1) && (ruid != cred->uid) && + (ruid != cred->euid) && (ruid != cred->suid)) return -EPERM; - if ((euid != (uid_t) -1) && (euid != current->uid) && - (euid != current->euid) && (euid != current->suid)) + if ((euid != (uid_t) -1) && (euid != cred->uid) && + (euid != cred->euid) && (euid != cred->suid)) return -EPERM; - if ((suid != (uid_t) -1) && (suid != current->uid) && - (suid != current->euid) && (suid != current->suid)) + if ((suid != (uid_t) -1) && (suid != cred->uid) && + (suid != cred->euid) && (suid != cred->suid)) return -EPERM; } if (ruid != (uid_t) -1) { - if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) + if (ruid != cred->uid && + set_user(ruid, euid != cred->euid) < 0) return -EAGAIN; } if (euid != (uid_t) -1) { - if (euid != current->euid) { + if (euid != cred->euid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->euid = euid; + cred->euid = euid; } - current->fsuid = current->euid; + cred->fsuid = cred->euid; if (suid != (uid_t) -1) - current->suid = suid; + cred->suid = suid; key_fsuid_changed(current); proc_id_connector(current, PROC_EVENT_UID); @@ -735,11 +743,12 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) { + struct cred *cred = current->cred; int retval; - if (!(retval = put_user(current->uid, ruid)) && - !(retval = put_user(current->euid, euid))) - retval = put_user(current->suid, suid); + if (!(retval = put_user(cred->uid, ruid)) && + !(retval = put_user(cred->euid, euid))) + retval = put_user(cred->suid, suid); return retval; } @@ -749,6 +758,7 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us */ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) { + struct cred *cred = current->cred; int retval; retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); @@ -756,28 +766,28 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) return retval; if (!capable(CAP_SETGID)) { - if ((rgid != (gid_t) -1) && (rgid != current->gid) && - (rgid != current->egid) && (rgid != current->sgid)) + if ((rgid != (gid_t) -1) && (rgid != cred->gid) && + (rgid != cred->egid) && (rgid != cred->sgid)) return -EPERM; - if ((egid != (gid_t) -1) && (egid != current->gid) && - (egid != current->egid) && (egid != current->sgid)) + if ((egid != (gid_t) -1) && (egid != cred->gid) && + (egid != cred->egid) && (egid != cred->sgid)) return -EPERM; - if ((sgid != (gid_t) -1) && (sgid != current->gid) && - (sgid != current->egid) && (sgid != current->sgid)) + if ((sgid != (gid_t) -1) && (sgid != cred->gid) && + (sgid != cred->egid) && (sgid != cred->sgid)) return -EPERM; } if (egid != (gid_t) -1) { - if (egid != current->egid) { + if (egid != cred->egid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->egid = egid; + cred->egid = egid; } - current->fsgid = current->egid; + cred->fsgid = cred->egid; if (rgid != (gid_t) -1) - current->gid = rgid; + cred->gid = rgid; if (sgid != (gid_t) -1) - current->sgid = sgid; + cred->sgid = sgid; key_fsgid_changed(current); proc_id_connector(current, PROC_EVENT_GID); @@ -786,11 +796,12 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) { + struct cred *cred = current->cred; int retval; - if (!(retval = put_user(current->gid, rgid)) && - !(retval = put_user(current->egid, egid))) - retval = 
put_user(current->sgid, sgid); + if (!(retval = put_user(cred->gid, rgid)) && + !(retval = put_user(cred->egid, egid))) + retval = put_user(cred->sgid, sgid); return retval; } @@ -804,20 +815,21 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us */ asmlinkage long sys_setfsuid(uid_t uid) { + struct cred *cred = current->cred; int old_fsuid; - old_fsuid = current->fsuid; + old_fsuid = cred->fsuid; if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) return old_fsuid; - if (uid == current->uid || uid == current->euid || - uid == current->suid || uid == current->fsuid || + if (uid == cred->uid || uid == cred->euid || + uid == cred->suid || uid == cred->fsuid || capable(CAP_SETUID)) { if (uid != old_fsuid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->fsuid = uid; + cred->fsuid = uid; } key_fsuid_changed(current); @@ -833,20 +845,21 @@ asmlinkage long sys_setfsuid(uid_t uid) */ asmlinkage long sys_setfsgid(gid_t gid) { + struct cred *cred = current->cred; int old_fsgid; - old_fsgid = current->fsgid; + old_fsgid = cred->fsgid; if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) return old_fsgid; - if (gid == current->gid || gid == current->egid || - gid == current->sgid || gid == current->fsgid || + if (gid == cred->gid || gid == cred->egid || + gid == cred->sgid || gid == cred->fsgid || capable(CAP_SETGID)) { if (gid != old_fsgid) { set_dumpable(current->mm, suid_dumpable); smp_wmb(); } - current->fsgid = gid; + cred->fsgid = gid; key_fsgid_changed(current); proc_id_connector(current, PROC_EVENT_GID); } @@ -1208,8 +1221,15 @@ int groups_search(struct group_info *group_info, gid_t grp) return 0; } -/* validate and set current->group_info */ -int set_current_groups(struct group_info *group_info) +/** + * set_groups - Change a group subscription in a security record + * @sec: The security record to alter + * @group_info: The group list to impose + * + * Validate a group subscription and, if valid, impose it upon a task security + * record. + */ +int set_groups(struct cred *cred, struct group_info *group_info) { int retval; struct group_info *old_info; @@ -1221,20 +1241,34 @@ int set_current_groups(struct group_info *group_info) groups_sort(group_info); get_group_info(group_info); - task_lock(current); - old_info = current->group_info; - current->group_info = group_info; - task_unlock(current); + spin_lock(&cred->lock); + old_info = cred->group_info; + cred->group_info = group_info; + spin_unlock(&cred->lock); put_group_info(old_info); - return 0; } +EXPORT_SYMBOL(set_groups); + +/** + * set_current_groups - Change current's group subscription + * @group_info: The group list to impose + * + * Validate a group subscription and, if valid, impose it upon current's task + * security record. 
+ */ +int set_current_groups(struct group_info *group_info) +{ + return set_groups(current->cred, group_info); +} + EXPORT_SYMBOL(set_current_groups); asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) { + struct cred *cred = current->cred; int i = 0; /* @@ -1246,13 +1280,13 @@ asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) return -EINVAL; /* no need to grab task_lock here; it cannot change */ - i = current->group_info->ngroups; + i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups_to_user(grouplist, current->group_info)) { + if (groups_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } @@ -1296,9 +1330,10 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) */ int in_group_p(gid_t grp) { + struct cred *cred = current->cred; int retval = 1; - if (grp != current->fsgid) - retval = groups_search(current->group_info, grp); + if (grp != cred->fsgid) + retval = groups_search(cred->group_info, grp); return retval; } @@ -1306,9 +1341,10 @@ EXPORT_SYMBOL(in_group_p); int in_egroup_p(gid_t grp) { + struct cred *cred = current->cred; int retval = 1; - if (grp != current->egid) - retval = groups_search(current->group_info, grp); + if (grp != cred->egid) + retval = groups_search(cred->group_info, grp); return retval; } @@ -1624,7 +1660,9 @@ asmlinkage long sys_umask(int mask) asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { - long error = 0; + struct task_struct *me = current; + unsigned char comm[sizeof(me->comm)]; + long error; if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) return error; @@ -1635,39 +1673,41 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, error = -EINVAL; break; } - current->pdeath_signal = arg2; + me->pdeath_signal = arg2; + error = 0; break; case PR_GET_PDEATHSIG: - error = put_user(current->pdeath_signal, (int __user *)arg2); + error = put_user(me->pdeath_signal, (int __user *)arg2); break; case PR_GET_DUMPABLE: - error = get_dumpable(current->mm); + error = get_dumpable(me->mm); break; case PR_SET_DUMPABLE: if (arg2 < 0 || arg2 > 1) { error = -EINVAL; break; } - set_dumpable(current->mm, arg2); + set_dumpable(me->mm, arg2); + error = 0; break; case PR_SET_UNALIGN: - error = SET_UNALIGN_CTL(current, arg2); + error = SET_UNALIGN_CTL(me, arg2); break; case PR_GET_UNALIGN: - error = GET_UNALIGN_CTL(current, arg2); + error = GET_UNALIGN_CTL(me, arg2); break; case PR_SET_FPEMU: - error = SET_FPEMU_CTL(current, arg2); + error = SET_FPEMU_CTL(me, arg2); break; case PR_GET_FPEMU: - error = GET_FPEMU_CTL(current, arg2); + error = GET_FPEMU_CTL(me, arg2); break; case PR_SET_FPEXC: - error = SET_FPEXC_CTL(current, arg2); + error = SET_FPEXC_CTL(me, arg2); break; case PR_GET_FPEXC: - error = GET_FPEXC_CTL(current, arg2); + error = GET_FPEXC_CTL(me, arg2); break; case PR_GET_TIMING: error = PR_TIMING_STATISTICAL; @@ -1675,33 +1715,28 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, case PR_SET_TIMING: if (arg2 != PR_TIMING_STATISTICAL) error = -EINVAL; + else + error = 0; break; - case PR_SET_NAME: { - struct task_struct *me = current; - unsigned char ncomm[sizeof(me->comm)]; - - ncomm[sizeof(me->comm)-1] = 0; - if (strncpy_from_user(ncomm, (char __user *)arg2, - sizeof(me->comm)-1) < 0) + case PR_SET_NAME: + comm[sizeof(me->comm)-1] = 0; + if (strncpy_from_user(comm, (char __user *)arg2, + sizeof(me->comm) 
- 1) < 0) return -EFAULT; - set_task_comm(me, ncomm); + set_task_comm(me, comm); return 0; - } - case PR_GET_NAME: { - struct task_struct *me = current; - unsigned char tcomm[sizeof(me->comm)]; - - get_task_comm(tcomm, me); - if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm))) + case PR_GET_NAME: + get_task_comm(comm, me); + if (copy_to_user((char __user *)arg2, comm, + sizeof(comm))) return -EFAULT; return 0; - } case PR_GET_ENDIAN: - error = GET_ENDIAN(current, arg2); + error = GET_ENDIAN(me, arg2); break; case PR_SET_ENDIAN: - error = SET_ENDIAN(current, arg2); + error = SET_ENDIAN(me, arg2); break; case PR_GET_SECCOMP: @@ -1725,6 +1760,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, current->default_timer_slack_ns; else current->timer_slack_ns = arg2; + error = 0; break; default: error = -EINVAL; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9f3b478f917..5c97c5b4ea8 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -246,7 +246,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) memcpy(data->comm, tsk->comm, TASK_COMM_LEN); data->pid = tsk->pid; - data->uid = tsk->uid; + data->uid = task_uid(tsk); data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; data->policy = tsk->policy; data->rt_priority = tsk->rt_priority; diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 8ebcd8532df..6d1ed07bf31 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -53,8 +53,8 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) stats->ac_flag |= AXSIG; stats->ac_nice = task_nice(tsk); stats->ac_sched = tsk->policy; - stats->ac_uid = tsk->uid; - stats->ac_gid = tsk->gid; + stats->ac_uid = tsk->cred->uid; + stats->ac_gid = tsk->cred->gid; stats->ac_pid = tsk->pid; rcu_read_lock(); stats->ac_ppid = pid_alive(tsk) ? 
diff --git a/kernel/uid16.c b/kernel/uid16.c index 3e41c1673e2..71f07fc39fe 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -86,9 +86,9 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, { int retval; - if (!(retval = put_user(high2lowuid(current->uid), ruid)) && - !(retval = put_user(high2lowuid(current->euid), euid))) - retval = put_user(high2lowuid(current->suid), suid); + if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && + !(retval = put_user(high2lowuid(current->cred->euid), euid))) + retval = put_user(high2lowuid(current->cred->suid), suid); return retval; } @@ -106,9 +106,9 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, { int retval; - if (!(retval = put_user(high2lowgid(current->gid), rgid)) && - !(retval = put_user(high2lowgid(current->egid), egid))) - retval = put_user(high2lowgid(current->sgid), sgid); + if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && + !(retval = put_user(high2lowgid(current->cred->egid), egid))) + retval = put_user(high2lowgid(current->cred->sgid), sgid); return retval; } @@ -166,20 +166,20 @@ asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) if (gidsetsize < 0) return -EINVAL; - get_group_info(current->group_info); - i = current->group_info->ngroups; + get_group_info(current->cred->group_info); + i = current->cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->group_info)) { + if (groups16_to_user(grouplist, current->cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->group_info); + put_group_info(current->cred->group_info); return i; } @@ -210,20 +210,20 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) asmlinkage long sys_getuid16(void) { - return high2lowuid(current->uid); + return high2lowuid(current->cred->uid); } asmlinkage long sys_geteuid16(void) { - return high2lowuid(current->euid); + return high2lowuid(current->cred->euid); } asmlinkage long sys_getgid16(void) { - return high2lowgid(current->gid); + return high2lowgid(current->cred->gid); } asmlinkage long sys_getegid16(void) { - return high2lowgid(current->egid); + return high2lowgid(current->cred->egid); } diff --git a/kernel/user.c b/kernel/user.c index 39d6159fae4..104d22ac84d 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -457,11 +457,11 @@ void switch_uid(struct user_struct *new_user) * cheaply with the new uid cache, so if it matters * we should be checking for it. -DaveM */ - old_user = current->user; + old_user = current->cred->user; atomic_inc(&new_user->processes); atomic_dec(&old_user->processes); switch_uid_keyring(new_user); - current->user = new_user; + current->cred->user = new_user; sched_switch_user(current); /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 07a96474077..b23492ee3e5 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1110,12 +1110,12 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *old_nodes, const unsigned long __user *new_nodes) { + struct cred *cred, *tcred; struct mm_struct *mm; struct task_struct *task; nodemask_t old; nodemask_t new; nodemask_t task_nodes; - uid_t uid, euid; int err; err = get_nodes(&old, old_nodes, maxnode); @@ -1145,10 +1145,10 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, * capabilities, superuser privileges or the same * userid as the target process. 
*/ - uid = current_uid(); - euid = current_euid(); - if (euid != task->suid && euid != task->uid && - uid != task->suid && uid != task->uid && + cred = current->cred; + tcred = task->cred; + if (cred->euid != tcred->suid && cred->euid != tcred->uid && + cred->uid != tcred->suid && cred->uid != tcred->uid && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out; diff --git a/mm/migrate.c b/mm/migrate.c index 6263c24c4af..794443da1b4 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1045,10 +1045,10 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, const int __user *nodes, int __user *status, int flags) { + struct cred *cred, *tcred; struct task_struct *task; struct mm_struct *mm; int err; - uid_t uid, euid; /* Check flags */ if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL)) @@ -1076,10 +1076,10 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, * capabilities, superuser privileges or the same * userid as the target process. */ - uid = current_uid(); - euid = current_euid(); - if (euid != task->suid && euid != task->uid && - uid != task->suid && uid != task->uid && + cred = current->cred; + tcred = task->cred; + if (cred->euid != tcred->suid && cred->euid != tcred->uid && + cred->uid != tcred->suid && cred->uid != tcred->uid && !capable(CAP_SYS_NICE)) { err = -EPERM; goto out; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 34a458aa799..3af787ba207 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -298,7 +298,7 @@ static void dump_tasks(const struct mem_cgroup *mem) task_lock(p); printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", - p->pid, p->uid, p->tgid, p->mm->total_vm, + p->pid, p->cred->uid, p->tgid, p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj, p->comm); task_unlock(p); diff --git a/net/core/scm.c b/net/core/scm.c index 4681d8f9b45..c28ca32a7d9 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -44,11 +44,13 @@ static __inline__ int scm_check_creds(struct ucred *creds) { + struct cred *cred = current->cred; + if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && - ((creds->uid == current_uid() || creds->uid == current_euid() || - creds->uid == current_suid()) || capable(CAP_SETUID)) && - ((creds->gid == current_gid() || creds->gid == current_egid() || - creds->gid == current_sgid()) || capable(CAP_SETGID))) { + ((creds->uid == cred->uid || creds->uid == cred->euid || + creds->uid == cred->suid) || capable(CAP_SETUID)) && + ((creds->gid == cred->gid || creds->gid == cred->egid || + creds->gid == cred->sgid) || capable(CAP_SETGID))) { return 0; } return -EPERM; diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 8fc38057880..c7954321260 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -353,7 +353,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags) struct auth_cred acred = { .uid = current_fsuid(), .gid = current_fsgid(), - .group_info = current->group_info, + .group_info = current->cred->group_info, }; struct rpc_cred *ret; diff --git a/security/commoncap.c b/security/commoncap.c index fb4e240720d..fa61679f8c7 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -30,7 +30,7 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb) { - NETLINK_CB(skb).eff_cap = current->cap_effective; + NETLINK_CB(skb).eff_cap = current_cap(); return 0; } @@ -52,7 +52,7 @@ EXPORT_SYMBOL(cap_netlink_recv); int cap_capable(struct task_struct *tsk, int cap, int audit) { /* Derived from include/linux/sched.h:capable. 
*/ - if (cap_raised(tsk->cap_effective, cap)) + if (cap_raised(tsk->cred->cap_effective, cap)) return 0; return -EPERM; } @@ -67,7 +67,8 @@ int cap_settime(struct timespec *ts, struct timezone *tz) int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) { /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */ - if (cap_issubset(child->cap_permitted, current->cap_permitted)) + if (cap_issubset(child->cred->cap_permitted, + current->cred->cap_permitted)) return 0; if (capable(CAP_SYS_PTRACE)) return 0; @@ -76,8 +77,8 @@ int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) int cap_ptrace_traceme(struct task_struct *parent) { - /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */ - if (cap_issubset(current->cap_permitted, parent->cap_permitted)) + if (cap_issubset(current->cred->cap_permitted, + parent->cred->cap_permitted)) return 0; if (has_capability(parent, CAP_SYS_PTRACE)) return 0; @@ -87,10 +88,12 @@ int cap_ptrace_traceme(struct task_struct *parent) int cap_capget (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { + struct cred *cred = target->cred; + /* Derived from kernel/capability.c:sys_capget. */ - *effective = target->cap_effective; - *inheritable = target->cap_inheritable; - *permitted = target->cap_permitted; + *effective = cred->cap_effective; + *inheritable = cred->cap_inheritable; + *permitted = cred->cap_permitted; return 0; } @@ -122,24 +125,26 @@ int cap_capset_check(const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { + const struct cred *cred = current->cred; + if (cap_inh_is_capped() && !cap_issubset(*inheritable, - cap_combine(current->cap_inheritable, - current->cap_permitted))) { + cap_combine(cred->cap_inheritable, + cred->cap_permitted))) { /* incapable of using this inheritable set */ return -EPERM; } if (!cap_issubset(*inheritable, - cap_combine(current->cap_inheritable, - current->cap_bset))) { + cap_combine(cred->cap_inheritable, + cred->cap_bset))) { /* no new pI capabilities outside bounding set */ return -EPERM; } /* verify restrictions on target's new Permitted set */ if (!cap_issubset (*permitted, - cap_combine (current->cap_permitted, - current->cap_permitted))) { + cap_combine (cred->cap_permitted, + cred->cap_permitted))) { return -EPERM; } @@ -155,9 +160,11 @@ void cap_capset_set(const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) { - current->cap_effective = *effective; - current->cap_inheritable = *inheritable; - current->cap_permitted = *permitted; + struct cred *cred = current->cred; + + cred->cap_effective = *effective; + cred->cap_inheritable = *inheritable; + cred->cap_permitted = *permitted; } static inline void bprm_clear_caps(struct linux_binprm *bprm) @@ -211,8 +218,8 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, * pP' = (X & fP) | (pI & fI) */ bprm->cap_post_exec_permitted.cap[i] = - (current->cap_bset.cap[i] & permitted) | - (current->cap_inheritable.cap[i] & inheritable); + (current->cred->cap_bset.cap[i] & permitted) | + (current->cred->cap_inheritable.cap[i] & inheritable); if (permitted & ~bprm->cap_post_exec_permitted.cap[i]) { /* @@ -354,8 +361,8 @@ int cap_bprm_set_security (struct linux_binprm *bprm) if (bprm->e_uid == 0 || current_uid() == 0) { /* pP' = (cap_bset & ~0) | (pI & ~0) */ bprm->cap_post_exec_permitted = cap_combine( - current->cap_bset, current->cap_inheritable - ); + current->cred->cap_bset, + 
current->cred->cap_inheritable); bprm->cap_effective = (bprm->e_uid == 0); ret = 0; } @@ -366,44 +373,39 @@ int cap_bprm_set_security (struct linux_binprm *bprm) void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) { - kernel_cap_t pP = current->cap_permitted; - kernel_cap_t pE = current->cap_effective; - uid_t uid; - gid_t gid; + struct cred *cred = current->cred; - current_uid_gid(&uid, &gid); - - if (bprm->e_uid != uid || bprm->e_gid != gid || + if (bprm->e_uid != cred->uid || bprm->e_gid != cred->gid || !cap_issubset(bprm->cap_post_exec_permitted, - current->cap_permitted)) { + cred->cap_permitted)) { set_dumpable(current->mm, suid_dumpable); current->pdeath_signal = 0; if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) { if (!capable(CAP_SETUID)) { - bprm->e_uid = uid; - bprm->e_gid = gid; + bprm->e_uid = cred->uid; + bprm->e_gid = cred->gid; } if (cap_limit_ptraced_target()) { bprm->cap_post_exec_permitted = cap_intersect( bprm->cap_post_exec_permitted, - current->cap_permitted); + cred->cap_permitted); } } } - current->suid = current->euid = current->fsuid = bprm->e_uid; - current->sgid = current->egid = current->fsgid = bprm->e_gid; + cred->suid = cred->euid = cred->fsuid = bprm->e_uid; + cred->sgid = cred->egid = cred->fsgid = bprm->e_gid; /* For init, we want to retain the capabilities set * in the init_task struct. Thus we skip the usual * capability rules */ if (!is_global_init(current)) { - current->cap_permitted = bprm->cap_post_exec_permitted; + cred->cap_permitted = bprm->cap_post_exec_permitted; if (bprm->cap_effective) - current->cap_effective = bprm->cap_post_exec_permitted; + cred->cap_effective = bprm->cap_post_exec_permitted; else - cap_clear(current->cap_effective); + cap_clear(cred->cap_effective); } /* @@ -418,27 +420,30 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) * Number 1 above might fail if you don't have a full bset, but I think * that is interesting information to audit. 
*/ - if (!cap_isclear(current->cap_effective)) { - if (!cap_issubset(CAP_FULL_SET, current->cap_effective) || - (bprm->e_uid != 0) || (current->uid != 0) || + if (!cap_isclear(cred->cap_effective)) { + if (!cap_issubset(CAP_FULL_SET, cred->cap_effective) || + (bprm->e_uid != 0) || (cred->uid != 0) || issecure(SECURE_NOROOT)) - audit_log_bprm_fcaps(bprm, &pP, &pE); + audit_log_bprm_fcaps(bprm, &cred->cap_permitted, + &cred->cap_effective); } - current->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); + cred->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); } int cap_bprm_secureexec (struct linux_binprm *bprm) { - if (current_uid() != 0) { + const struct cred *cred = current->cred; + + if (cred->uid != 0) { if (bprm->cap_effective) return 1; if (!cap_isclear(bprm->cap_post_exec_permitted)) return 1; } - return (current_euid() != current_uid() || - current_egid() != current_gid()); + return (cred->euid != cred->uid || + cred->egid != cred->gid); } int cap_inode_setxattr(struct dentry *dentry, const char *name, @@ -501,25 +506,27 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name) static inline void cap_emulate_setxuid (int old_ruid, int old_euid, int old_suid) { - uid_t euid = current_euid(); + struct cred *cred = current->cred; if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) && - (current_uid() != 0 && euid != 0 && current_suid() != 0) && + (cred->uid != 0 && cred->euid != 0 && cred->suid != 0) && !issecure(SECURE_KEEP_CAPS)) { - cap_clear (current->cap_permitted); - cap_clear (current->cap_effective); + cap_clear (cred->cap_permitted); + cap_clear (cred->cap_effective); } - if (old_euid == 0 && euid != 0) { - cap_clear (current->cap_effective); + if (old_euid == 0 && cred->euid != 0) { + cap_clear (cred->cap_effective); } - if (old_euid != 0 && euid == 0) { - current->cap_effective = current->cap_permitted; + if (old_euid != 0 && cred->euid == 0) { + cred->cap_effective = cred->cap_permitted; } } int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags) { + struct cred *cred = current->cred; + switch (flags) { case LSM_SETID_RE: case LSM_SETID_ID: @@ -541,16 +548,16 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, */ if (!issecure (SECURE_NO_SETUID_FIXUP)) { - if (old_fsuid == 0 && current_fsuid() != 0) { - current->cap_effective = + if (old_fsuid == 0 && cred->fsuid != 0) { + cred->cap_effective = cap_drop_fs_set( - current->cap_effective); + cred->cap_effective); } - if (old_fsuid != 0 && current_fsuid() == 0) { - current->cap_effective = + if (old_fsuid != 0 && cred->fsuid == 0) { + cred->cap_effective = cap_raise_fs_set( - current->cap_effective, - current->cap_permitted); + cred->cap_effective, + cred->cap_permitted); } } break; @@ -575,7 +582,8 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, */ static int cap_safe_nice(struct task_struct *p) { - if (!cap_issubset(p->cap_permitted, current->cap_permitted) && + if (!cap_issubset(p->cred->cap_permitted, + current->cred->cap_permitted) && !capable(CAP_SYS_NICE)) return -EPERM; return 0; @@ -610,7 +618,7 @@ static long cap_prctl_drop(unsigned long cap) return -EPERM; if (!cap_valid(cap)) return -EINVAL; - cap_lower(current->cap_bset, cap); + cap_lower(current->cred->cap_bset, cap); return 0; } @@ -633,6 +641,7 @@ int cap_task_setnice (struct task_struct *p, int nice) int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5, long *rc_p) { + struct cred *cred = current->cred; long 
error = 0; switch (option) { @@ -640,7 +649,7 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, if (!cap_valid(arg2)) error = -EINVAL; else - error = !!cap_raised(current->cap_bset, arg2); + error = !!cap_raised(cred->cap_bset, arg2); break; #ifdef CONFIG_SECURITY_FILE_CAPABILITIES case PR_CAPBSET_DROP: @@ -667,9 +676,9 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, * capability-based-privilege environment. */ case PR_SET_SECUREBITS: - if ((((current->securebits & SECURE_ALL_LOCKS) >> 1) - & (current->securebits ^ arg2)) /*[1]*/ - || ((current->securebits & SECURE_ALL_LOCKS + if ((((cred->securebits & SECURE_ALL_LOCKS) >> 1) + & (cred->securebits ^ arg2)) /*[1]*/ + || ((cred->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/ || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/ || (cap_capable(current, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0)) { /*[4]*/ @@ -682,11 +691,11 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, */ error = -EPERM; /* cannot change a locked bit */ } else { - current->securebits = arg2; + cred->securebits = arg2; } break; case PR_GET_SECUREBITS: - error = current->securebits; + error = cred->securebits; break; #endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */ @@ -701,10 +710,9 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, else if (issecure(SECURE_KEEP_CAPS_LOCKED)) error = -EPERM; else if (arg2) - current->securebits |= issecure_mask(SECURE_KEEP_CAPS); + cred->securebits |= issecure_mask(SECURE_KEEP_CAPS); else - current->securebits &= - ~issecure_mask(SECURE_KEEP_CAPS); + cred->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); break; default: @@ -719,11 +727,12 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, void cap_task_reparent_to_init (struct task_struct *p) { - cap_set_init_eff(p->cap_effective); - cap_clear(p->cap_inheritable); - cap_set_full(p->cap_permitted); - p->securebits = SECUREBITS_DEFAULT; - return; + struct cred *cred = p->cred; + + cap_set_init_eff(cred->cap_effective); + cap_clear(cred->cap_inheritable); + cap_set_full(cred->cap_permitted); + p->cred->securebits = SECUREBITS_DEFAULT; } int cap_syslog (int type) diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index fcce331eca7..8833b447ade 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -889,7 +889,7 @@ long keyctl_instantiate_key(key_serial_t id, /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; - instkey = current->request_key_auth; + instkey = current->cred->request_key_auth; if (!instkey) goto error; @@ -932,8 +932,8 @@ long keyctl_instantiate_key(key_serial_t id, /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) { - key_put(current->request_key_auth); - current->request_key_auth = NULL; + key_put(current->cred->request_key_auth); + current->cred->request_key_auth = NULL; } error2: @@ -960,7 +960,7 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; - instkey = current->request_key_auth; + instkey = current->cred->request_key_auth; if (!instkey) goto error; @@ -983,8 +983,8 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) /* discard the assumed authority if it's just been disabled by * instantiation of the key */ if (ret == 0) { - 
key_put(current->request_key_auth); - current->request_key_auth = NULL; + key_put(current->cred->request_key_auth); + current->cred->request_key_auth = NULL; } error: @@ -999,6 +999,7 @@ error: */ long keyctl_set_reqkey_keyring(int reqkey_defl) { + struct cred *cred = current->cred; int ret; switch (reqkey_defl) { @@ -1018,10 +1019,10 @@ long keyctl_set_reqkey_keyring(int reqkey_defl) case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: set: - current->jit_keyring = reqkey_defl; + cred->jit_keyring = reqkey_defl; case KEY_REQKEY_DEFL_NO_CHANGE: - return current->jit_keyring; + return cred->jit_keyring; case KEY_REQKEY_DEFL_GROUP_KEYRING: default: @@ -1086,8 +1087,8 @@ long keyctl_assume_authority(key_serial_t id) /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { - key_put(current->request_key_auth); - current->request_key_auth = NULL; + key_put(current->cred->request_key_auth); + current->cred->request_key_auth = NULL; ret = 0; goto error; } @@ -1103,8 +1104,8 @@ long keyctl_assume_authority(key_serial_t id) goto error; } - key_put(current->request_key_auth); - current->request_key_auth = authkey; + key_put(current->cred->request_key_auth); + current->cred->request_key_auth = authkey; ret = authkey->serial; error: diff --git a/security/keys/permission.c b/security/keys/permission.c index 3b41f9b5253..baf3d5f31e7 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -22,6 +22,7 @@ int key_task_permission(const key_ref_t key_ref, struct task_struct *context, key_perm_t perm) { + struct cred *cred = context->cred; struct key *key; key_perm_t kperm; int ret; @@ -29,7 +30,7 @@ int key_task_permission(const key_ref_t key_ref, key = key_ref_to_ptr(key_ref); /* use the second 8-bits of permissions for keys the caller owns */ - if (key->uid == context->fsuid) { + if (key->uid == cred->fsuid) { kperm = key->perm >> 16; goto use_these_perms; } @@ -37,14 +38,14 @@ int key_task_permission(const key_ref_t key_ref, /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ if (key->gid != -1 && key->perm & KEY_GRP_ALL) { - if (key->gid == context->fsgid) { + if (key->gid == cred->fsgid) { kperm = key->perm >> 8; goto use_these_perms; } - task_lock(context); - ret = groups_search(context->group_info, key->gid); - task_unlock(context); + spin_lock(&cred->lock); + ret = groups_search(cred->group_info, key->gid); + spin_unlock(&cred->lock); if (ret) { kperm = key->perm >> 8; diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 1c793b7090a..b0904cdda2e 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -42,7 +42,7 @@ struct key_user root_key_user = { */ int install_user_keyrings(void) { - struct user_struct *user = current->user; + struct user_struct *user = current->cred->user; struct key *uid_keyring, *session_keyring; char buf[20]; int ret; @@ -156,7 +156,7 @@ int install_thread_keyring(void) sprintf(buf, "_tid.%u", tsk->pid); - keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, + keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, tsk, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); @@ -164,8 +164,8 @@ int install_thread_keyring(void) } task_lock(tsk); - old = tsk->thread_keyring; - tsk->thread_keyring = keyring; + old = tsk->cred->thread_keyring; + tsk->cred->thread_keyring = keyring; task_unlock(tsk); ret = 0; @@ -192,7 +192,7 @@ int install_process_keyring(void) if 
(!tsk->signal->process_keyring) { sprintf(buf, "_pid.%u", tsk->tgid); - keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, + keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, tsk, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); @@ -238,7 +238,7 @@ static int install_session_keyring(struct key *keyring) if (tsk->signal->session_keyring) flags = KEY_ALLOC_IN_QUOTA; - keyring = keyring_alloc(buf, tsk->uid, tsk->gid, tsk, + keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, tsk, flags, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); @@ -292,14 +292,14 @@ int copy_thread_group_keys(struct task_struct *tsk) */ int copy_keys(unsigned long clone_flags, struct task_struct *tsk) { - key_check(tsk->thread_keyring); - key_check(tsk->request_key_auth); + key_check(tsk->cred->thread_keyring); + key_check(tsk->cred->request_key_auth); /* no thread keyring yet */ - tsk->thread_keyring = NULL; + tsk->cred->thread_keyring = NULL; /* copy the request_key() authorisation for this thread */ - key_get(tsk->request_key_auth); + key_get(tsk->cred->request_key_auth); return 0; @@ -322,8 +322,8 @@ void exit_thread_group_keys(struct signal_struct *tg) */ void exit_keys(struct task_struct *tsk) { - key_put(tsk->thread_keyring); - key_put(tsk->request_key_auth); + key_put(tsk->cred->thread_keyring); + key_put(tsk->cred->request_key_auth); } /* end exit_keys() */ @@ -337,8 +337,8 @@ int exec_keys(struct task_struct *tsk) /* newly exec'd tasks don't get a thread keyring */ task_lock(tsk); - old = tsk->thread_keyring; - tsk->thread_keyring = NULL; + old = tsk->cred->thread_keyring; + tsk->cred->thread_keyring = NULL; task_unlock(tsk); key_put(old); @@ -373,10 +373,11 @@ int suid_keys(struct task_struct *tsk) void key_fsuid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ - if (tsk->thread_keyring) { - down_write(&tsk->thread_keyring->sem); - tsk->thread_keyring->uid = tsk->fsuid; - up_write(&tsk->thread_keyring->sem); + BUG_ON(!tsk->cred); + if (tsk->cred->thread_keyring) { + down_write(&tsk->cred->thread_keyring->sem); + tsk->cred->thread_keyring->uid = tsk->cred->fsuid; + up_write(&tsk->cred->thread_keyring->sem); } } /* end key_fsuid_changed() */ @@ -388,10 +389,11 @@ void key_fsuid_changed(struct task_struct *tsk) void key_fsgid_changed(struct task_struct *tsk) { /* update the ownership of the thread keyring */ - if (tsk->thread_keyring) { - down_write(&tsk->thread_keyring->sem); - tsk->thread_keyring->gid = tsk->fsgid; - up_write(&tsk->thread_keyring->sem); + BUG_ON(!tsk->cred); + if (tsk->cred->thread_keyring) { + down_write(&tsk->cred->thread_keyring->sem); + tsk->cred->thread_keyring->gid = tsk->cred->fsgid; + up_write(&tsk->cred->thread_keyring->sem); } } /* end key_fsgid_changed() */ @@ -426,9 +428,9 @@ key_ref_t search_process_keyrings(struct key_type *type, err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ - if (context->thread_keyring) { + if (context->cred->thread_keyring) { key_ref = keyring_search_aux( - make_key_ref(context->thread_keyring, 1), + make_key_ref(context->cred->thread_keyring, 1), context, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -493,9 +495,9 @@ key_ref_t search_process_keyrings(struct key_type *type, } } /* or search the user-session keyring */ - else if (context->user->session_keyring) { + else if (context->cred->user->session_keyring) { key_ref = keyring_search_aux( - make_key_ref(context->user->session_keyring, 1), + 
make_key_ref(context->cred->user->session_keyring, 1), context, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -517,20 +519,20 @@ key_ref_t search_process_keyrings(struct key_type *type, * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ - if (context->request_key_auth && + if (context->cred->request_key_auth && context == current && type != &key_type_request_key_auth ) { /* defend against the auth key being revoked */ - down_read(&context->request_key_auth->sem); + down_read(&context->cred->request_key_auth->sem); - if (key_validate(context->request_key_auth) == 0) { - rka = context->request_key_auth->payload.data; + if (key_validate(context->cred->request_key_auth) == 0) { + rka = context->cred->request_key_auth->payload.data; key_ref = search_process_keyrings(type, description, match, rka->context); - up_read(&context->request_key_auth->sem); + up_read(&context->cred->request_key_auth->sem); if (!IS_ERR(key_ref)) goto found; @@ -547,7 +549,7 @@ key_ref_t search_process_keyrings(struct key_type *type, break; } } else { - up_read(&context->request_key_auth->sem); + up_read(&context->cred->request_key_auth->sem); } } @@ -580,15 +582,16 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, { struct request_key_auth *rka; struct task_struct *t = current; - key_ref_t key_ref, skey_ref; + struct cred *cred = t->cred; struct key *key; + key_ref_t key_ref, skey_ref; int ret; key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: - if (!t->thread_keyring) { + if (!cred->thread_keyring) { if (!create) goto error; @@ -599,7 +602,7 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, } } - key = t->thread_keyring; + key = cred->thread_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; @@ -628,7 +631,8 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, ret = install_user_keyrings(); if (ret < 0) goto error; - ret = install_session_keyring(t->user->session_keyring); + ret = install_session_keyring( + cred->user->session_keyring); if (ret < 0) goto error; } @@ -641,25 +645,25 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, break; case KEY_SPEC_USER_KEYRING: - if (!t->user->uid_keyring) { + if (!cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = t->user->uid_keyring; + key = cred->user->uid_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: - if (!t->user->session_keyring) { + if (!cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = t->user->session_keyring; + key = cred->user->session_keyring; atomic_inc(&key->usage); key_ref = make_key_ref(key, 1); break; @@ -670,7 +674,7 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, goto error; case KEY_SPEC_REQKEY_AUTH_KEY: - key = t->request_key_auth; + key = cred->request_key_auth; if (!key) goto error; @@ -679,19 +683,19 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, break; case KEY_SPEC_REQUESTOR_KEYRING: - if (!t->request_key_auth) + if (!cred->request_key_auth) goto error; - down_read(&t->request_key_auth->sem); - if (t->request_key_auth->flags & KEY_FLAG_REVOKED) { + down_read(&cred->request_key_auth->sem); + if (cred->request_key_auth->flags & KEY_FLAG_REVOKED) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { - rka = t->request_key_auth->payload.data; + rka 
= cred->request_key_auth->payload.data; key = rka->dest_keyring; atomic_inc(&key->usage); } - up_read(&t->request_key_auth->sem); + up_read(&cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); @@ -791,7 +795,7 @@ long join_session_keyring(const char *name) keyring = find_keyring_by_name(name, false); if (PTR_ERR(keyring) == -ENOKEY) { /* not found - try and create a new one */ - keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk, + keyring = keyring_alloc(name, tsk->cred->uid, tsk->cred->gid, tsk, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 8e9d93b4a40..3e9b9eb1dd2 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -104,7 +104,8 @@ static int call_sbin_request_key(struct key_construction *cons, /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", - tsk->thread_keyring ? tsk->thread_keyring->serial : 0); + tsk->cred->thread_keyring ? + tsk->cred->thread_keyring->serial : 0); prkey = 0; if (tsk->signal->process_keyring) @@ -117,7 +118,7 @@ static int call_sbin_request_key(struct key_construction *cons, sskey = rcu_dereference(tsk->signal->session_keyring)->serial; rcu_read_unlock(); } else { - sskey = tsk->user->session_keyring->serial; + sskey = tsk->cred->user->session_keyring->serial; } sprintf(keyring_str[2], "%d", sskey); @@ -232,11 +233,11 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) } else { /* use a default keyring; falling through the cases until we * find one that we actually have */ - switch (tsk->jit_keyring) { + switch (tsk->cred->jit_keyring) { case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: - if (tsk->request_key_auth) { - authkey = tsk->request_key_auth; + if (tsk->cred->request_key_auth) { + authkey = tsk->cred->request_key_auth; down_read(&authkey->sem); rka = authkey->payload.data; if (!test_bit(KEY_FLAG_REVOKED, @@ -249,7 +250,7 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) } case KEY_REQKEY_DEFL_THREAD_KEYRING: - dest_keyring = key_get(tsk->thread_keyring); + dest_keyring = key_get(tsk->cred->thread_keyring); if (dest_keyring) break; @@ -268,11 +269,12 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) break; case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: - dest_keyring = key_get(tsk->user->session_keyring); + dest_keyring = + key_get(tsk->cred->user->session_keyring); break; case KEY_REQKEY_DEFL_USER_KEYRING: - dest_keyring = key_get(tsk->user->uid_keyring); + dest_keyring = key_get(tsk->cred->user->uid_keyring); break; case KEY_REQKEY_DEFL_GROUP_KEYRING: diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 1762d44711d..2125579d5d7 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -164,22 +164,22 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, /* see if the calling process is already servicing the key request of * another process */ - if (current->request_key_auth) { + if (current->cred->request_key_auth) { /* it is - use that instantiation context here too */ - down_read(¤t->request_key_auth->sem); + down_read(¤t->cred->request_key_auth->sem); /* if the auth key has been revoked, then the key we're * servicing is already instantiated */ if (test_bit(KEY_FLAG_REVOKED, - ¤t->request_key_auth->flags)) + ¤t->cred->request_key_auth->flags)) goto auth_key_revoked; - irka = 
current->request_key_auth->payload.data; + irka = current->cred->request_key_auth->payload.data; rka->context = irka->context; rka->pid = irka->pid; get_task_struct(rka->context); - up_read(¤t->request_key_auth->sem); + up_read(¤t->cred->request_key_auth->sem); } else { /* it isn't - use this process as the context */ @@ -214,7 +214,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, return authkey; auth_key_revoked: - up_read(¤t->request_key_auth->sem); + up_read(¤t->cred->request_key_auth->sem); kfree(rka->callout_info); kfree(rka); kleave("= -EKEYREVOKED"); diff --git a/security/selinux/exports.c b/security/selinux/exports.c index 64af2d3409e..cf02490cd1e 100644 --- a/security/selinux/exports.c +++ b/security/selinux/exports.c @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(selinux_string_to_sid); int selinux_secmark_relabel_packet_permission(u32 sid) { if (selinux_enabled) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; return avc_has_perm(tsec->sid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 9f6da154cc8..328308f2882 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -167,21 +167,21 @@ static int task_alloc_security(struct task_struct *task) return -ENOMEM; tsec->osid = tsec->sid = SECINITSID_UNLABELED; - task->security = tsec; + task->cred->security = tsec; return 0; } static void task_free_security(struct task_struct *task) { - struct task_security_struct *tsec = task->security; - task->security = NULL; + struct task_security_struct *tsec = task->cred->security; + task->cred->security = NULL; kfree(tsec); } static int inode_alloc_security(struct inode *inode) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct inode_security_struct *isec; isec = kmem_cache_zalloc(sel_inode_cache, GFP_NOFS); @@ -215,7 +215,7 @@ static void inode_free_security(struct inode *inode) static int file_alloc_security(struct file *file) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct file_security_struct *fsec; fsec = kzalloc(sizeof(struct file_security_struct), GFP_KERNEL); @@ -554,7 +554,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts) { int rc = 0, i; - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct superblock_security_struct *sbsec = sb->s_security; const char *name = sb->s_type->name; struct inode *inode = sbsec->sb->s_root->d_inode; @@ -1353,8 +1353,8 @@ static int task_has_perm(struct task_struct *tsk1, { struct task_security_struct *tsec1, *tsec2; - tsec1 = tsk1->security; - tsec2 = tsk2->security; + tsec1 = tsk1->cred->security; + tsec2 = tsk2->cred->security; return avc_has_perm(tsec1->sid, tsec2->sid, SECCLASS_PROCESS, perms, NULL); } @@ -1374,7 +1374,7 @@ static int task_has_capability(struct task_struct *tsk, u32 av = CAP_TO_MASK(cap); int rc; - tsec = tsk->security; + tsec = tsk->cred->security; AVC_AUDIT_DATA_INIT(&ad, CAP); ad.tsk = tsk; @@ -1405,7 +1405,7 @@ static int task_has_system(struct task_struct *tsk, { struct task_security_struct *tsec; - tsec = tsk->security; + tsec = tsk->cred->security; return avc_has_perm(tsec->sid, SECINITSID_KERNEL, SECCLASS_SYSTEM, perms, NULL); @@ -1426,7 +1426,7 @@ static int 
inode_has_perm(struct task_struct *tsk, if (unlikely(IS_PRIVATE(inode))) return 0; - tsec = tsk->security; + tsec = tsk->cred->security; isec = inode->i_security; if (!adp) { @@ -1466,7 +1466,7 @@ static int file_has_perm(struct task_struct *tsk, struct file *file, u32 av) { - struct task_security_struct *tsec = tsk->security; + struct task_security_struct *tsec = tsk->cred->security; struct file_security_struct *fsec = file->f_security; struct inode *inode = file->f_path.dentry->d_inode; struct avc_audit_data ad; @@ -1503,7 +1503,7 @@ static int may_create(struct inode *dir, struct avc_audit_data ad; int rc; - tsec = current->security; + tsec = current->cred->security; dsec = dir->i_security; sbsec = dir->i_sb->s_security; @@ -1540,7 +1540,7 @@ static int may_create_key(u32 ksid, { struct task_security_struct *tsec; - tsec = ctx->security; + tsec = ctx->cred->security; return avc_has_perm(tsec->sid, ksid, SECCLASS_KEY, KEY__CREATE, NULL); } @@ -1561,7 +1561,7 @@ static int may_link(struct inode *dir, u32 av; int rc; - tsec = current->security; + tsec = current->cred->security; dsec = dir->i_security; isec = dentry->d_inode->i_security; @@ -1606,7 +1606,7 @@ static inline int may_rename(struct inode *old_dir, int old_is_dir, new_is_dir; int rc; - tsec = current->security; + tsec = current->cred->security; old_dsec = old_dir->i_security; old_isec = old_dentry->d_inode->i_security; old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); @@ -1659,7 +1659,7 @@ static int superblock_has_perm(struct task_struct *tsk, struct task_security_struct *tsec; struct superblock_security_struct *sbsec; - tsec = tsk->security; + tsec = tsk->cred->security; sbsec = sb->s_security; return avc_has_perm(tsec->sid, sbsec->sid, SECCLASS_FILESYSTEM, perms, ad); @@ -1758,8 +1758,8 @@ static int selinux_ptrace_may_access(struct task_struct *child, return rc; if (mode == PTRACE_MODE_READ) { - struct task_security_struct *tsec = current->security; - struct task_security_struct *csec = child->security; + struct task_security_struct *tsec = current->cred->security; + struct task_security_struct *csec = child->cred->security; return avc_has_perm(tsec->sid, csec->sid, SECCLASS_FILE, FILE__READ, NULL); } @@ -1874,7 +1874,7 @@ static int selinux_sysctl(ctl_table *table, int op) if (rc) return rc; - tsec = current->security; + tsec = current->cred->security; rc = selinux_sysctl_get_sid(table, (op == 0001) ? SECCLASS_DIR : SECCLASS_FILE, &tsid); @@ -2025,7 +2025,7 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm) if (bsec->set) return 0; - tsec = current->security; + tsec = current->cred->security; isec = inode->i_security; /* Default to the current task SID. 
*/ @@ -2090,7 +2090,7 @@ static int selinux_bprm_check_security(struct linux_binprm *bprm) static int selinux_bprm_secureexec(struct linux_binprm *bprm) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; int atsecure = 0; if (tsec->osid != tsec->sid) { @@ -2214,7 +2214,7 @@ static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) secondary_ops->bprm_apply_creds(bprm, unsafe); - tsec = current->security; + tsec = current->cred->security; bsec = bprm->security; sid = bsec->sid; @@ -2243,7 +2243,7 @@ static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) rcu_read_lock(); tracer = tracehook_tracer_task(current); if (likely(tracer != NULL)) { - sec = tracer->security; + sec = tracer->cred->security; ptsid = sec->sid; } rcu_read_unlock(); @@ -2274,7 +2274,7 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) int rc, i; unsigned long flags; - tsec = current->security; + tsec = current->cred->security; bsec = bprm->security; if (bsec->unsafe) { @@ -2521,7 +2521,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, int rc; char *namep = NULL, *context; - tsec = current->security; + tsec = current->cred->security; dsec = dir->i_security; sbsec = dir->i_sb->s_security; @@ -2706,7 +2706,7 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) static int selinux_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct inode *inode = dentry->d_inode; struct inode_security_struct *isec = inode->i_security; struct superblock_security_struct *sbsec; @@ -2918,7 +2918,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask) static int selinux_file_permission(struct file *file, int mask) { struct inode *inode = file->f_path.dentry->d_inode; - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct file_security_struct *fsec = file->f_security; struct inode_security_struct *isec = inode->i_security; @@ -2995,7 +2995,8 @@ static int selinux_file_mmap(struct file *file, unsigned long reqprot, unsigned long addr, unsigned long addr_only) { int rc = 0; - u32 sid = ((struct task_security_struct *)(current->security))->sid; + u32 sid = ((struct task_security_struct *) + (current->cred->security))->sid; if (addr < mmap_min_addr) rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT, @@ -3107,7 +3108,7 @@ static int selinux_file_set_fowner(struct file *file) struct task_security_struct *tsec; struct file_security_struct *fsec; - tsec = current->security; + tsec = current->cred->security; fsec = file->f_security; fsec->fown_sid = tsec->sid; @@ -3125,7 +3126,7 @@ static int selinux_file_send_sigiotask(struct task_struct *tsk, /* struct fown_struct is never outside the context of a struct file */ file = container_of(fown, struct file, f_owner); - tsec = tsk->security; + tsec = tsk->cred->security; fsec = file->f_security; if (!signum) @@ -3188,12 +3189,12 @@ static int selinux_task_alloc_security(struct task_struct *tsk) struct task_security_struct *tsec1, *tsec2; int rc; - tsec1 = current->security; + tsec1 = current->cred->security; rc = task_alloc_security(tsk); if (rc) return rc; - tsec2 = tsk->security; + tsec2 = tsk->cred->security; tsec2->osid = tsec1->osid; tsec2->sid = 
tsec1->sid; @@ -3251,7 +3252,7 @@ static int selinux_task_getsid(struct task_struct *p) static void selinux_task_getsecid(struct task_struct *p, u32 *secid) { - struct task_security_struct *tsec = p->security; + struct task_security_struct *tsec = p->cred->security; *secid = tsec->sid; } @@ -3343,7 +3344,7 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info, perm = PROCESS__SIGNULL; /* null signal; existence test */ else perm = signal_to_av(sig); - tsec = p->security; + tsec = p->cred->security; if (secid) rc = avc_has_perm(secid, tsec->sid, SECCLASS_PROCESS, perm, NULL); else @@ -3375,7 +3376,7 @@ static void selinux_task_reparent_to_init(struct task_struct *p) secondary_ops->task_reparent_to_init(p); - tsec = p->security; + tsec = p->cred->security; tsec->osid = tsec->sid; tsec->sid = SECINITSID_KERNEL; return; @@ -3384,7 +3385,7 @@ static void selinux_task_reparent_to_init(struct task_struct *p) static void selinux_task_to_inode(struct task_struct *p, struct inode *inode) { - struct task_security_struct *tsec = p->security; + struct task_security_struct *tsec = p->cred->security; struct inode_security_struct *isec = inode->i_security; isec->sid = tsec->sid; @@ -3632,7 +3633,7 @@ static int socket_has_perm(struct task_struct *task, struct socket *sock, struct avc_audit_data ad; int err = 0; - tsec = task->security; + tsec = task->cred->security; isec = SOCK_INODE(sock)->i_security; if (isec->sid == SECINITSID_KERNEL) @@ -3656,7 +3657,7 @@ static int selinux_socket_create(int family, int type, if (kern) goto out; - tsec = current->security; + tsec = current->cred->security; newsid = tsec->sockcreate_sid ? : tsec->sid; err = avc_has_perm(tsec->sid, newsid, socket_type_to_security_class(family, type, @@ -3677,7 +3678,7 @@ static int selinux_socket_post_create(struct socket *sock, int family, isec = SOCK_INODE(sock)->i_security; - tsec = current->security; + tsec = current->cred->security; newsid = tsec->sockcreate_sid ? : tsec->sid; isec->sclass = socket_type_to_security_class(family, type, protocol); isec->sid = kern ? 
SECINITSID_KERNEL : newsid; @@ -3723,7 +3724,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in struct sock *sk = sock->sk; u32 sid, node_perm; - tsec = current->security; + tsec = current->cred->security; isec = SOCK_INODE(sock)->i_security; if (family == PF_INET) { @@ -4764,7 +4765,7 @@ static int ipc_alloc_security(struct task_struct *task, struct kern_ipc_perm *perm, u16 sclass) { - struct task_security_struct *tsec = task->security; + struct task_security_struct *tsec = task->cred->security; struct ipc_security_struct *isec; isec = kzalloc(sizeof(struct ipc_security_struct), GFP_KERNEL); @@ -4814,7 +4815,7 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms, struct ipc_security_struct *isec; struct avc_audit_data ad; - tsec = current->security; + tsec = current->cred->security; isec = ipc_perms->security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -4845,7 +4846,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq) if (rc) return rc; - tsec = current->security; + tsec = current->cred->security; isec = msq->q_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -4871,7 +4872,7 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg) struct ipc_security_struct *isec; struct avc_audit_data ad; - tsec = current->security; + tsec = current->cred->security; isec = msq->q_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -4917,7 +4918,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, struct avc_audit_data ad; int rc; - tsec = current->security; + tsec = current->cred->security; isec = msq->q_perm.security; msec = msg->security; @@ -4965,7 +4966,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, struct avc_audit_data ad; int rc; - tsec = target->security; + tsec = target->cred->security; isec = msq->q_perm.security; msec = msg->security; @@ -4992,7 +4993,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp) if (rc) return rc; - tsec = current->security; + tsec = current->cred->security; isec = shp->shm_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -5018,7 +5019,7 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg) struct ipc_security_struct *isec; struct avc_audit_data ad; - tsec = current->security; + tsec = current->cred->security; isec = shp->shm_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -5091,7 +5092,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma) if (rc) return rc; - tsec = current->security; + tsec = current->cred->security; isec = sma->sem_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -5117,7 +5118,7 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg) struct ipc_security_struct *isec; struct avc_audit_data ad; - tsec = current->security; + tsec = current->cred->security; isec = sma->sem_perm.security; AVC_AUDIT_DATA_INIT(&ad, IPC); @@ -5224,7 +5225,7 @@ static int selinux_getprocattr(struct task_struct *p, return error; } - tsec = p->security; + tsec = p->cred->security; if (!strcmp(name, "current")) sid = tsec->sid; @@ -5308,7 +5309,7 @@ static int selinux_setprocattr(struct task_struct *p, operation. See selinux_bprm_set_security for the execve checks and may_create for the file creation checks. The operation will then fail if the context is not permitted. 
*/ - tsec = p->security; + tsec = p->cred->security; if (!strcmp(name, "exec")) tsec->exec_sid = sid; else if (!strcmp(name, "fscreate")) @@ -5361,7 +5362,8 @@ boundary_ok: rcu_read_lock(); tracer = tracehook_tracer_task(p); if (tracer != NULL) { - struct task_security_struct *ptsec = tracer->security; + struct task_security_struct *ptsec = + tracer->cred->security; u32 ptsid = ptsec->sid; rcu_read_unlock(); error = avc_has_perm_noaudit(ptsid, sid, @@ -5405,7 +5407,7 @@ static void selinux_release_secctx(char *secdata, u32 seclen) static int selinux_key_alloc(struct key *k, struct task_struct *tsk, unsigned long flags) { - struct task_security_struct *tsec = tsk->security; + struct task_security_struct *tsec = tsk->cred->security; struct key_security_struct *ksec; ksec = kzalloc(sizeof(struct key_security_struct), GFP_KERNEL); @@ -5439,7 +5441,7 @@ static int selinux_key_permission(key_ref_t key_ref, key = key_ref_to_ptr(key_ref); - tsec = ctx->security; + tsec = ctx->cred->security; ksec = key->security; /* if no specific permissions are requested, we skip the @@ -5683,7 +5685,7 @@ static __init int selinux_init(void) /* Set the security state for the initial task. */ if (task_alloc_security(current)) panic("SELinux: Failed to initialize initial task.\n"); - tsec = current->security; + tsec = current->cred->security; tsec->osid = tsec->sid = SECINITSID_KERNEL; sel_inode_cache = kmem_cache_create("selinux_inode_security", diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 69c9dccc8cf..10715d1330b 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -97,7 +97,7 @@ static int task_has_security(struct task_struct *tsk, { struct task_security_struct *tsec; - tsec = tsk->security; + tsec = tsk->cred->security; if (!tsec) return -EACCES; diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 8f17f542a11..d7db76617b0 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -197,7 +197,7 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *uctx, u32 sid) { int rc = 0; - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct xfrm_sec_ctx *ctx = NULL; char *ctx_str = NULL; u32 str_len; @@ -333,7 +333,7 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) */ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; int rc = 0; if (ctx) { @@ -378,7 +378,7 @@ void selinux_xfrm_state_free(struct xfrm_state *x) */ int selinux_xfrm_state_delete(struct xfrm_state *x) { - struct task_security_struct *tsec = current->security; + struct task_security_struct *tsec = current->cred->security; struct xfrm_sec_ctx *ctx = x->security; int rc = 0; diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index 79ff21ed4c3..b6dd4fc0fb0 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -164,7 +164,7 @@ int smk_curacc(char *obj_label, u32 mode) { int rc; - rc = smk_access(current->security, obj_label, mode); + rc = smk_access(current->cred->security, obj_label, mode); if (rc == 0) return 0; @@ -173,7 +173,7 @@ int smk_curacc(char *obj_label, u32 mode) * only one that gets privilege and current does not * have that label. 
*/ - if (smack_onlycap != NULL && smack_onlycap != current->security) + if (smack_onlycap != NULL && smack_onlycap != current->cred->security) return rc; if (capable(CAP_MAC_OVERRIDE)) diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 6e2dc0bab70..791da238d04 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -102,7 +102,8 @@ static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode) if (rc != 0) return rc; - rc = smk_access(current->security, ctp->security, MAY_READWRITE); + rc = smk_access(current->cred->security, ctp->cred->security, + MAY_READWRITE); if (rc != 0 && capable(CAP_MAC_OVERRIDE)) return 0; return rc; @@ -124,7 +125,8 @@ static int smack_ptrace_traceme(struct task_struct *ptp) if (rc != 0) return rc; - rc = smk_access(ptp->security, current->security, MAY_READWRITE); + rc = smk_access(ptp->cred->security, current->cred->security, + MAY_READWRITE); if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) return 0; return rc; @@ -141,7 +143,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp) static int smack_syslog(int type) { int rc; - char *sp = current->security; + char *sp = current->cred->security; rc = cap_syslog(type); if (rc != 0) @@ -373,7 +375,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags) */ static int smack_inode_alloc_security(struct inode *inode) { - inode->i_security = new_inode_smack(current->security); + inode->i_security = new_inode_smack(current->cred->security); if (inode->i_security == NULL) return -ENOMEM; return 0; @@ -818,7 +820,7 @@ static int smack_file_permission(struct file *file, int mask) */ static int smack_file_alloc_security(struct file *file) { - file->f_security = current->security; + file->f_security = current->cred->security; return 0; } @@ -916,7 +918,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd, */ static int smack_file_set_fowner(struct file *file) { - file->f_security = current->security; + file->f_security = current->cred->security; return 0; } @@ -941,7 +943,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk, * struct fown_struct is never outside the context of a struct file */ file = container_of(fown, struct file, f_owner); - rc = smk_access(file->f_security, tsk->security, MAY_WRITE); + rc = smk_access(file->f_security, tsk->cred->security, MAY_WRITE); if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE)) return 0; return rc; @@ -984,7 +986,7 @@ static int smack_file_receive(struct file *file) */ static int smack_task_alloc_security(struct task_struct *tsk) { - tsk->security = current->security; + tsk->cred->security = current->cred->security; return 0; } @@ -999,7 +1001,7 @@ static int smack_task_alloc_security(struct task_struct *tsk) */ static void smack_task_free_security(struct task_struct *task) { - task->security = NULL; + task->cred->security = NULL; } /** @@ -1011,7 +1013,7 @@ static void smack_task_free_security(struct task_struct *task) */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { - return smk_curacc(p->security, MAY_WRITE); + return smk_curacc(p->cred->security, MAY_WRITE); } /** @@ -1022,7 +1024,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) */ static int smack_task_getpgid(struct task_struct *p) { - return smk_curacc(p->security, MAY_READ); + return smk_curacc(p->cred->security, MAY_READ); } /** @@ -1033,7 +1035,7 @@ static int smack_task_getpgid(struct task_struct *p) */ static int smack_task_getsid(struct task_struct *p) { - return 
smk_curacc(p->security, MAY_READ); + return smk_curacc(p->cred->security, MAY_READ); } /** @@ -1045,7 +1047,7 @@ static int smack_task_getsid(struct task_struct *p) */ static void smack_task_getsecid(struct task_struct *p, u32 *secid) { - *secid = smack_to_secid(p->security); + *secid = smack_to_secid(p->cred->security); } /** @@ -1061,7 +1063,7 @@ static int smack_task_setnice(struct task_struct *p, int nice) rc = cap_task_setnice(p, nice); if (rc == 0) - rc = smk_curacc(p->security, MAY_WRITE); + rc = smk_curacc(p->cred->security, MAY_WRITE); return rc; } @@ -1078,7 +1080,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) rc = cap_task_setioprio(p, ioprio); if (rc == 0) - rc = smk_curacc(p->security, MAY_WRITE); + rc = smk_curacc(p->cred->security, MAY_WRITE); return rc; } @@ -1090,7 +1092,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) */ static int smack_task_getioprio(struct task_struct *p) { - return smk_curacc(p->security, MAY_READ); + return smk_curacc(p->cred->security, MAY_READ); } /** @@ -1108,7 +1110,7 @@ static int smack_task_setscheduler(struct task_struct *p, int policy, rc = cap_task_setscheduler(p, policy, lp); if (rc == 0) - rc = smk_curacc(p->security, MAY_WRITE); + rc = smk_curacc(p->cred->security, MAY_WRITE); return rc; } @@ -1120,7 +1122,7 @@ static int smack_task_setscheduler(struct task_struct *p, int policy, */ static int smack_task_getscheduler(struct task_struct *p) { - return smk_curacc(p->security, MAY_READ); + return smk_curacc(p->cred->security, MAY_READ); } /** @@ -1131,7 +1133,7 @@ static int smack_task_getscheduler(struct task_struct *p) */ static int smack_task_movememory(struct task_struct *p) { - return smk_curacc(p->security, MAY_WRITE); + return smk_curacc(p->cred->security, MAY_WRITE); } /** @@ -1154,13 +1156,13 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info, * can write the receiver. */ if (secid == 0) - return smk_curacc(p->security, MAY_WRITE); + return smk_curacc(p->cred->security, MAY_WRITE); /* * If the secid isn't 0 we're dealing with some USB IO * specific behavior. This is not clean. For one thing * we can't take privilege into account. 
*/ - return smk_access(smack_from_secid(secid), p->security, MAY_WRITE); + return smk_access(smack_from_secid(secid), p->cred->security, MAY_WRITE); } /** @@ -1173,7 +1175,7 @@ static int smack_task_wait(struct task_struct *p) { int rc; - rc = smk_access(current->security, p->security, MAY_WRITE); + rc = smk_access(current->cred->security, p->cred->security, MAY_WRITE); if (rc == 0) return 0; @@ -1204,7 +1206,7 @@ static int smack_task_wait(struct task_struct *p) static void smack_task_to_inode(struct task_struct *p, struct inode *inode) { struct inode_smack *isp = inode->i_security; - isp->smk_inode = p->security; + isp->smk_inode = p->cred->security; } /* @@ -1223,7 +1225,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) */ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { - char *csp = current->security; + char *csp = current->cred->security; struct socket_smack *ssp; ssp = kzalloc(sizeof(struct socket_smack), gfp_flags); @@ -1448,7 +1450,7 @@ static int smack_flags_to_may(int flags) */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { - msg->security = current->security; + msg->security = current->cred->security; return 0; } @@ -1484,7 +1486,7 @@ static int smack_shm_alloc_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; - isp->security = current->security; + isp->security = current->cred->security; return 0; } @@ -1593,7 +1595,7 @@ static int smack_sem_alloc_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; - isp->security = current->security; + isp->security = current->cred->security; return 0; } @@ -1697,7 +1699,7 @@ static int smack_msg_queue_alloc_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; - kisp->security = current->security; + kisp->security = current->cred->security; return 0; } @@ -1852,7 +1854,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) struct super_block *sbp; struct superblock_smack *sbsp; struct inode_smack *isp; - char *csp = current->security; + char *csp = current->cred->security; char *fetched; char *final; struct dentry *dp; @@ -2009,7 +2011,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value) if (strcmp(name, "current") != 0) return -EINVAL; - cp = kstrdup(p->security, GFP_KERNEL); + cp = kstrdup(p->cred->security, GFP_KERNEL); if (cp == NULL) return -ENOMEM; @@ -2055,7 +2057,7 @@ static int smack_setprocattr(struct task_struct *p, char *name, if (newsmack == NULL) return -EINVAL; - p->security = newsmack; + p->cred->security = newsmack; return size; } @@ -2288,8 +2290,8 @@ static void smack_sock_graft(struct sock *sk, struct socket *parent) return; ssp = sk->sk_security; - ssp->smk_in = current->security; - ssp->smk_out = current->security; + ssp->smk_in = current->cred->security; + ssp->smk_out = current->cred->security; ssp->smk_packet[0] = '\0'; rc = smack_netlabel(sk); @@ -2362,7 +2364,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb, static int smack_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags) { - key->security = tsk->security; + key->security = tsk->cred->security; return 0; } @@ -2403,10 +2405,11 @@ static int smack_key_permission(key_ref_t key_ref, /* * This should not occur */ - if (context->security == NULL) + if (context->cred->security == NULL) return -EACCES; - return smk_access(context->security, keyp->security, MAY_READWRITE); + return 
smk_access(context->cred->security, keyp->security, + MAY_READWRITE); } #endif /* CONFIG_KEYS */ @@ -2726,7 +2729,7 @@ static __init int smack_init(void) /* * Set the security state for the initial task. */ - current->security = &smack_known_floor.smk_known; + current->cred->security = &smack_known_floor.smk_known; /* * Initialize locks diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index c21d8c8bf0c..c5ca279e050 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -336,7 +336,7 @@ static void smk_cipso_doi(void) audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); - audit_info.secid = smack_to_secid(current->security); + audit_info.secid = smack_to_secid(current->cred->security); rc = netlbl_cfg_map_del(NULL, &audit_info); if (rc != 0) @@ -371,7 +371,7 @@ static void smk_unlbl_ambient(char *oldambient) audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); - audit_info.secid = smack_to_secid(current->security); + audit_info.secid = smack_to_secid(current->cred->security); if (oldambient != NULL) { rc = netlbl_cfg_map_del(oldambient, &audit_info); @@ -843,7 +843,7 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char in[SMK_LABELLEN]; - char *sp = current->security; + char *sp = current->cred->security; if (!capable(CAP_MAC_ADMIN)) return -EPERM; -- cgit v1.2.3-70-g09d2 From 86a264abe542cfececb4df129bc45a0338d8cdb9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:18 +1100 Subject: CRED: Wrap current->cred and a few other accessors Wrap current->cred and a few other accessors to hide their actual implementation. Signed-off-by: David Howells Acked-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- arch/ia64/ia32/sys_ia32.c | 7 +- drivers/net/tun.c | 8 +- drivers/usb/core/devio.c | 10 ++- fs/binfmt_elf.c | 10 +-- fs/binfmt_elf_fdpic.c | 9 +- fs/exec.c | 5 +- fs/fcntl.c | 3 +- fs/file_table.c | 7 +- fs/hugetlbfs/inode.c | 5 +- fs/ioprio.c | 4 +- fs/smbfs/dir.c | 3 +- include/linux/cred.h | 187 ++++++++++++++++++++++++++++++++---------- include/linux/securebits.h | 2 +- ipc/mqueue.c | 2 +- ipc/shm.c | 4 +- kernel/sys.c | 59 +++++++------ kernel/uid16.c | 31 +++---- net/core/scm.c | 2 +- net/sunrpc/auth.c | 14 ++-- security/commoncap.c | 2 +- security/keys/process_keys.c | 2 +- security/keys/request_key.c | 11 +-- security/selinux/exports.c | 8 +- security/selinux/xfrm.c | 6 +- security/smack/smack_access.c | 2 +- security/smack/smack_lsm.c | 26 +++--- security/smack/smackfs.c | 4 +- 27 files changed, 271 insertions(+), 162 deletions(-) (limited to 'fs') diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 2445a9d3488..16ef61a91d9 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1767,25 +1767,24 @@ groups16_from_user(struct group_info *group_info, short __user *grouplist) asmlinkage long sys32_getgroups16 (int gidsetsize, short __user *grouplist) { + const struct cred *cred = current_cred(); int i; if (gidsetsize < 0) return -EINVAL; - get_group_info(current->cred->group_info); - i = current->cred->group_info->ngroups; + i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->cred->group_info)) { + if (groups16_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->cred->group_info); 
return i; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b14e2025e22..55dc70c6b4d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -702,6 +702,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) struct tun_net *tn; struct tun_struct *tun; struct net_device *dev; + const struct cred *cred = current_cred(); int err; tn = net_generic(net, tun_net_id); @@ -712,11 +713,12 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) /* Check permissions */ if (((tun->owner != -1 && - current_euid() != tun->owner) || + cred->euid != tun->owner) || (tun->group != -1 && - current_egid() != tun->group)) && - !capable(CAP_NET_ADMIN)) + cred->egid != tun->group)) && + !capable(CAP_NET_ADMIN)) { return -EPERM; + } } else if (__dev_get_by_name(net, ifr->ifr_name)) return -EINVAL; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 1aadb938702..aa79280df15 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -574,6 +574,7 @@ static int usbdev_open(struct inode *inode, struct file *file) { struct usb_device *dev = NULL; struct dev_state *ps; + const struct cred *cred = current_cred(); int ret; lock_kernel(); @@ -617,8 +618,8 @@ static int usbdev_open(struct inode *inode, struct file *file) init_waitqueue_head(&ps->wait); ps->discsignr = 0; ps->disc_pid = get_pid(task_pid(current)); - ps->disc_uid = current_uid(); - ps->disc_euid = current_euid(); + ps->disc_uid = cred->uid; + ps->disc_euid = cred->euid; ps->disccontext = NULL; ps->ifclaimed = 0; security_task_getsecid(current, &ps->secid); @@ -967,6 +968,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, struct usb_host_endpoint *ep; struct async *as; struct usb_ctrlrequest *dr = NULL; + const struct cred *cred = current_cred(); unsigned int u, totlen, isofrmlen; int ret, ifnum = -1; int is_in; @@ -1174,8 +1176,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, as->signr = uurb->signr; as->ifnum = ifnum; as->pid = get_pid(task_pid(current)); - as->uid = current_uid(); - as->euid = current_euid(); + as->uid = cred->uid; + as->euid = cred->euid; security_task_getsecid(current, &as->secid); if (!is_in) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7a52477ce49..0e665561316 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -157,7 +157,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, int items; elf_addr_t *elf_info; int ei_index = 0; - struct task_struct *tsk = current; + const struct cred *cred = current_cred(); struct vm_area_struct *vma; /* @@ -223,10 +223,10 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, NEW_AUX_ENT(AT_BASE, interp_load_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec->e_entry); - NEW_AUX_ENT(AT_UID, tsk->cred->uid); - NEW_AUX_ENT(AT_EUID, tsk->cred->euid); - NEW_AUX_ENT(AT_GID, tsk->cred->gid); - NEW_AUX_ENT(AT_EGID, tsk->cred->egid); + NEW_AUX_ENT(AT_UID, cred->uid); + NEW_AUX_ENT(AT_EUID, cred->euid); + NEW_AUX_ENT(AT_GID, cred->gid); + NEW_AUX_ENT(AT_EGID, cred->egid); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); NEW_AUX_ENT(AT_EXECFN, bprm->exec); if (k_platform) { diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 9f67054c2c4..1f6e8c023b4 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -475,6 +475,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, struct elf_fdpic_params 
*exec_params, struct elf_fdpic_params *interp_params) { + const struct cred *cred = current_cred(); unsigned long sp, csp, nitems; elf_caddr_t __user *argv, *envp; size_t platform_len = 0, len; @@ -623,10 +624,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm, NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr); NEW_AUX_ENT(AT_FLAGS, 0); NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr); - NEW_AUX_ENT(AT_UID, (elf_addr_t) current->cred->uid); - NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->cred->euid); - NEW_AUX_ENT(AT_GID, (elf_addr_t) current->cred->gid); - NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->cred->egid); + NEW_AUX_ENT(AT_UID, (elf_addr_t) cred->uid); + NEW_AUX_ENT(AT_EUID, (elf_addr_t) cred->euid); + NEW_AUX_ENT(AT_GID, (elf_addr_t) cred->gid); + NEW_AUX_ENT(AT_EGID, (elf_addr_t) cred->egid); NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm)); NEW_AUX_ENT(AT_EXECFN, bprm->exec); diff --git a/fs/exec.c b/fs/exec.c index 31149e430a8..a5330e1a221 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1388,6 +1388,7 @@ EXPORT_SYMBOL(set_binfmt); */ static int format_corename(char *corename, long signr) { + const struct cred *cred = current_cred(); const char *pat_ptr = core_pattern; int ispipe = (*pat_ptr == '|'); char *out_ptr = corename; @@ -1424,7 +1425,7 @@ static int format_corename(char *corename, long signr) /* uid */ case 'u': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current_uid()); + "%d", cred->uid); if (rc > out_end - out_ptr) goto out; out_ptr += rc; @@ -1432,7 +1433,7 @@ static int format_corename(char *corename, long signr) /* gid */ case 'g': rc = snprintf(out_ptr, out_end - out_ptr, - "%d", current_gid()); + "%d", cred->gid); if (rc > out_end - out_ptr) goto out; out_ptr += rc; diff --git a/fs/fcntl.c b/fs/fcntl.c index 63964d863ad..c594cc0e40f 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -205,13 +205,14 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, int force) { + const struct cred *cred = current_cred(); int err; err = security_file_set_fowner(filp); if (err) return err; - f_modown(filp, pid, type, current_uid(), current_euid(), force); + f_modown(filp, pid, type, cred->uid, cred->euid, force); return 0; } EXPORT_SYMBOL(__f_setown); diff --git a/fs/file_table.c b/fs/file_table.c index 3152b53cfab..bc4563fe791 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -94,7 +94,7 @@ int proc_nr_files(ctl_table *table, int write, struct file *filp, */ struct file *get_empty_filp(void) { - struct task_struct *tsk; + const struct cred *cred = current_cred(); static int old_max; struct file * f; @@ -118,12 +118,11 @@ struct file *get_empty_filp(void) if (security_file_alloc(f)) goto fail_sec; - tsk = current; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_uid = tsk->cred->fsuid; - f->f_gid = tsk->cred->fsgid; + f->f_uid = cred->fsuid; + f->f_gid = cred->fsgid; eventpoll_init_file(f); /* f->f_version: 0 */ return f; diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 870a721b8bd..7d479ce3ace 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -951,6 +951,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size) struct inode *inode; struct dentry *dentry, *root; struct qstr quick_string; + struct user_struct *user = current_user(); if (!hugetlbfs_vfsmount) return ERR_PTR(-ENOENT); @@ -958,7 +959,7 @@ struct file *hugetlb_file_setup(const char *name, size_t size) if 
(!can_do_hugetlb_shm()) return ERR_PTR(-EPERM); - if (!user_shm_lock(size, current->cred->user)) + if (!user_shm_lock(size, user)) return ERR_PTR(-ENOMEM); root = hugetlbfs_vfsmount->mnt_root; @@ -998,7 +999,7 @@ out_inode: out_dentry: dput(dentry); out_shm_unlock: - user_shm_unlock(size, current->cred->user); + user_shm_unlock(size, user); return ERR_PTR(error); } diff --git a/fs/ioprio.c b/fs/ioprio.c index bb5210af77c..5112554fd21 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -123,7 +123,7 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) break; case IOPRIO_WHO_USER: if (!who) - user = current->cred->user; + user = current_user(); else user = find_user(who); @@ -216,7 +216,7 @@ asmlinkage long sys_ioprio_get(int which, int who) break; case IOPRIO_WHO_USER: if (!who) - user = current->cred->user; + user = current_user(); else user = find_user(who); diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 9e9bb0db4f6..e7ddd0328dd 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c @@ -667,8 +667,7 @@ smb_make_node(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) attr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID; attr.ia_mode = mode; - attr.ia_uid = current_euid(); - attr.ia_gid = current_egid(); + current_euid_egid(&attr.ia_uid, &attr.ia_gid); if (!new_valid_dev(dev)) return -EINVAL; diff --git a/include/linux/cred.h b/include/linux/cred.h index a7a686074cb..4221ec6000c 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -37,15 +37,16 @@ struct group_info { * get_group_info - Get a reference to a group info structure * @group_info: The group info to reference * - * This must be called with the owning task locked (via task_lock()) when task - * != current. The reason being that the vast majority of callers are looking - * at current->group_info, which can not be changed except by the current task. - * Changing current->group_info requires the task lock, too. + * This gets a reference to a set of supplementary groups. + * + * If the caller is accessing a task's credentials, they must hold the RCU read + * lock when reading. 
*/ -#define get_group_info(group_info) \ -do { \ - atomic_inc(&(group_info)->usage); \ -} while (0) +static inline struct group_info *get_group_info(struct group_info *gi) +{ + atomic_inc(&gi->usage); + return gi; +} /** * put_group_info - Release a reference to a group info structure @@ -61,7 +62,7 @@ extern struct group_info *groups_alloc(int); extern void groups_free(struct group_info *); extern int set_current_groups(struct group_info *); extern int set_groups(struct cred *, struct group_info *); -extern int groups_search(struct group_info *, gid_t); +extern int groups_search(const struct group_info *, gid_t); /* access the groups "array" with this macro */ #define GROUP_AT(gi, i) \ @@ -123,41 +124,6 @@ struct cred { spinlock_t lock; /* lock for pointer changes */ }; -#define get_current_user() (get_uid(current->cred->user)) - -#define task_uid(task) ((task)->cred->uid) -#define task_gid(task) ((task)->cred->gid) -#define task_euid(task) ((task)->cred->euid) -#define task_egid(task) ((task)->cred->egid) - -#define current_uid() (current->cred->uid) -#define current_gid() (current->cred->gid) -#define current_euid() (current->cred->euid) -#define current_egid() (current->cred->egid) -#define current_suid() (current->cred->suid) -#define current_sgid() (current->cred->sgid) -#define current_fsuid() (current->cred->fsuid) -#define current_fsgid() (current->cred->fsgid) -#define current_cap() (current->cred->cap_effective) - -#define current_uid_gid(_uid, _gid) \ -do { \ - *(_uid) = current->cred->uid; \ - *(_gid) = current->cred->gid; \ -} while(0) - -#define current_euid_egid(_uid, _gid) \ -do { \ - *(_uid) = current->cred->euid; \ - *(_gid) = current->cred->egid; \ -} while(0) - -#define current_fsuid_fsgid(_uid, _gid) \ -do { \ - *(_uid) = current->cred->fsuid; \ - *(_gid) = current->cred->fsgid; \ -} while(0) - extern void __put_cred(struct cred *); extern int copy_creds(struct task_struct *, unsigned long); @@ -187,4 +153,137 @@ static inline void put_cred(struct cred *cred) __put_cred(cred); } +/** + * current_cred - Access the current task's credentials + * + * Access the credentials of the current task. + */ +#define current_cred() \ + (current->cred) + +/** + * __task_cred - Access another task's credentials + * @task: The task to query + * + * Access the credentials of another task. The caller must hold the + * RCU readlock. + * + * The caller must make sure task doesn't go away, either by holding a ref on + * task or by holding tasklist_lock to prevent it from being unlinked. + */ +#define __task_cred(task) \ + ((const struct cred *)(rcu_dereference((task)->cred))) + +/** + * get_task_cred - Get another task's credentials + * @task: The task to query + * + * Get the credentials of a task, pinning them so that they can't go away. + * Accessing a task's credentials directly is not permitted. + * + * The caller must make sure task doesn't go away, either by holding a ref on + * task or by holding tasklist_lock to prevent it from being unlinked. + */ +#define get_task_cred(task) \ +({ \ + struct cred *__cred; \ + rcu_read_lock(); \ + __cred = (struct cred *) __task_cred((task)); \ + get_cred(__cred); \ + rcu_read_unlock(); \ + __cred; \ +}) + +/** + * get_current_cred - Get the current task's credentials + * + * Get the credentials of the current task, pinning them so that they can't go + * away. Accessing the current task's credentials directly is not permitted. 
+ */ +#define get_current_cred() \ + (get_cred(current_cred())) + +/** + * get_current_user - Get the current task's user_struct + * + * Get the user record of the current task, pinning it so that it can't go + * away. + */ +#define get_current_user() \ +({ \ + struct user_struct *__u; \ + struct cred *__cred; \ + __cred = (struct cred *) current_cred(); \ + __u = get_uid(__cred->user); \ + __u; \ +}) + +/** + * get_current_groups - Get the current task's supplementary group list + * + * Get the supplementary group list of the current task, pinning it so that it + * can't go away. + */ +#define get_current_groups() \ +({ \ + struct group_info *__groups; \ + struct cred *__cred; \ + __cred = (struct cred *) current_cred(); \ + __groups = get_group_info(__cred->group_info); \ + __groups; \ +}) + +#define task_cred_xxx(task, xxx) \ +({ \ + __typeof__(task->cred->xxx) ___val; \ + rcu_read_lock(); \ + ___val = __task_cred((task))->xxx; \ + rcu_read_unlock(); \ + ___val; \ +}) + +#define task_uid(task) (task_cred_xxx((task), uid)) +#define task_euid(task) (task_cred_xxx((task), euid)) + +#define current_cred_xxx(xxx) \ +({ \ + current->cred->xxx; \ +}) + +#define current_uid() (current_cred_xxx(uid)) +#define current_gid() (current_cred_xxx(gid)) +#define current_euid() (current_cred_xxx(euid)) +#define current_egid() (current_cred_xxx(egid)) +#define current_suid() (current_cred_xxx(suid)) +#define current_sgid() (current_cred_xxx(sgid)) +#define current_fsuid() (current_cred_xxx(fsuid)) +#define current_fsgid() (current_cred_xxx(fsgid)) +#define current_cap() (current_cred_xxx(cap_effective)) +#define current_user() (current_cred_xxx(user)) +#define current_security() (current_cred_xxx(security)) + +#define current_uid_gid(_uid, _gid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_uid) = __cred->uid; \ + *(_gid) = __cred->gid; \ +} while(0) + +#define current_euid_egid(_euid, _egid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_euid) = __cred->euid; \ + *(_egid) = __cred->egid; \ +} while(0) + +#define current_fsuid_fsgid(_fsuid, _fsgid) \ +do { \ + const struct cred *__cred; \ + __cred = current_cred(); \ + *(_fsuid) = __cred->fsuid; \ + *(_fsgid) = __cred->fsgid; \ +} while(0) + #endif /* _LINUX_CRED_H */ diff --git a/include/linux/securebits.h b/include/linux/securebits.h index 6d389491bfa..d2c5ed845bc 100644 --- a/include/linux/securebits.h +++ b/include/linux/securebits.h @@ -32,7 +32,7 @@ setting is locked or not. A setting which is locked cannot be changed from user-level. 
*/ #define issecure_mask(X) (1 << (X)) -#define issecure(X) (issecure_mask(X) & current->cred->securebits) +#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits)) #define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ issecure_mask(SECURE_NO_SETUID_FIXUP) | \ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e1885b494ba..1151881ccb9 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -112,6 +112,7 @@ static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) static struct inode *mqueue_get_inode(struct super_block *sb, int mode, struct mq_attr *attr) { + struct user_struct *u = current_user(); struct inode *inode; inode = new_inode(sb); @@ -126,7 +127,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb, int mode, if (S_ISREG(mode)) { struct mqueue_inode_info *info; struct task_struct *p = current; - struct user_struct *u = p->cred->user; unsigned long mq_bytes, mq_msg_tblsz; inode->i_fop = &mqueue_file_operations; diff --git a/ipc/shm.c b/ipc/shm.c index 264a9d33c5d..38a055758a9 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -366,7 +366,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (shmflg & SHM_HUGETLB) { /* hugetlb_file_setup takes care of mlock user accounting */ file = hugetlb_file_setup(name, size); - shp->mlock_user = current->cred->user; + shp->mlock_user = current_user(); } else { int acctflag = VM_ACCOUNT; /* @@ -767,7 +767,7 @@ asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) goto out_unlock; if(cmd==SHM_LOCK) { - struct user_struct *user = current->cred->user; + struct user_struct *user = current_user(); if (!is_file_hugepages(shp->shm_file)) { err = shmem_lock(shp->shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){ diff --git a/kernel/sys.c b/kernel/sys.c index 5d81f07c015..c4d6b59553e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -143,6 +143,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) { struct task_struct *g, *p; struct user_struct *user; + const struct cred *cred = current_cred(); int error = -EINVAL; struct pid *pgrp; @@ -176,18 +177,18 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case PRIO_USER: - user = current->cred->user; + user = cred->user; if (!who) - who = current_uid(); - else - if (who != current_uid() && !(user = find_user(who))) - goto out_unlock; /* No processes for this user */ + who = cred->uid; + else if ((who != cred->uid) && + !(user = find_user(who))) + goto out_unlock; /* No processes for this user */ do_each_thread(g, p) - if (p->cred->uid == who) + if (__task_cred(p)->uid == who) error = set_one_prio(p, niceval, error); while_each_thread(g, p); - if (who != current_uid()) + if (who != cred->uid) free_uid(user); /* For find_user() */ break; } @@ -207,6 +208,7 @@ asmlinkage long sys_getpriority(int which, int who) { struct task_struct *g, *p; struct user_struct *user; + const struct cred *cred = current_cred(); long niceval, retval = -ESRCH; struct pid *pgrp; @@ -238,21 +240,21 @@ asmlinkage long sys_getpriority(int which, int who) } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case PRIO_USER: - user = current->cred->user; + user = (struct user_struct *) cred->user; if (!who) - who = current_uid(); - else - if (who != current_uid() && !(user = find_user(who))) - goto out_unlock; /* No processes for this user */ + who = cred->uid; + else if ((who != cred->uid) && + !(user = find_user(who))) + goto out_unlock; /* No processes for this 
user */ do_each_thread(g, p) - if (p->cred->uid == who) { + if (__task_cred(p)->uid == who) { niceval = 20 - task_nice(p); if (niceval > retval) retval = niceval; } while_each_thread(g, p); - if (who != current_uid()) + if (who != cred->uid) free_uid(user); /* for find_user() */ break; } @@ -743,11 +745,11 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) { - struct cred *cred = current->cred; + const struct cred *cred = current_cred(); int retval; - if (!(retval = put_user(cred->uid, ruid)) && - !(retval = put_user(cred->euid, euid))) + if (!(retval = put_user(cred->uid, ruid)) && + !(retval = put_user(cred->euid, euid))) retval = put_user(cred->suid, suid); return retval; @@ -796,11 +798,11 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) { - struct cred *cred = current->cred; + const struct cred *cred = current_cred(); int retval; - if (!(retval = put_user(cred->gid, rgid)) && - !(retval = put_user(cred->egid, egid))) + if (!(retval = put_user(cred->gid, rgid)) && + !(retval = put_user(cred->egid, egid))) retval = put_user(cred->sgid, sgid); return retval; @@ -1199,7 +1201,7 @@ static void groups_sort(struct group_info *group_info) } /* a simple bsearch */ -int groups_search(struct group_info *group_info, gid_t grp) +int groups_search(const struct group_info *group_info, gid_t grp) { unsigned int left, right; @@ -1268,13 +1270,8 @@ EXPORT_SYMBOL(set_current_groups); asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) { - struct cred *cred = current->cred; - int i = 0; - - /* - * SMP: Nobody else can change our grouplist. Thus we are - * safe. 
- */ + const struct cred *cred = current_cred(); + int i; if (gidsetsize < 0) return -EINVAL; @@ -1330,8 +1327,9 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) */ int in_group_p(gid_t grp) { - struct cred *cred = current->cred; + const struct cred *cred = current_cred(); int retval = 1; + if (grp != cred->fsgid) retval = groups_search(cred->group_info, grp); return retval; @@ -1341,8 +1339,9 @@ EXPORT_SYMBOL(in_group_p); int in_egroup_p(gid_t grp) { - struct cred *cred = current->cred; + const struct cred *cred = current_cred(); int retval = 1; + if (grp != cred->egid) retval = groups_search(cred->group_info, grp); return retval; diff --git a/kernel/uid16.c b/kernel/uid16.c index 71f07fc39fe..2460c3199b5 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c @@ -84,11 +84,12 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) { + const struct cred *cred = current_cred(); int retval; - if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && - !(retval = put_user(high2lowuid(current->cred->euid), euid))) - retval = put_user(high2lowuid(current->cred->suid), suid); + if (!(retval = put_user(high2lowuid(cred->uid), ruid)) && + !(retval = put_user(high2lowuid(cred->euid), euid))) + retval = put_user(high2lowuid(cred->suid), suid); return retval; } @@ -104,11 +105,12 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) { + const struct cred *cred = current_cred(); int retval; - if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && - !(retval = put_user(high2lowgid(current->cred->egid), egid))) - retval = put_user(high2lowgid(current->cred->sgid), sgid); + if (!(retval = put_user(high2lowgid(cred->gid), rgid)) && + !(retval = put_user(high2lowgid(cred->egid), egid))) + retval = put_user(high2lowgid(cred->sgid), sgid); return retval; } @@ -161,25 +163,24 @@ static int groups16_from_user(struct group_info *group_info, asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) { - int i = 0; + const struct cred *cred = current_cred(); + int i; if (gidsetsize < 0) return -EINVAL; - get_group_info(current->cred->group_info); - i = current->cred->group_info->ngroups; + i = cred->group_info->ngroups; if (gidsetsize) { if (i > gidsetsize) { i = -EINVAL; goto out; } - if (groups16_to_user(grouplist, current->cred->group_info)) { + if (groups16_to_user(grouplist, cred->group_info)) { i = -EFAULT; goto out; } } out: - put_group_info(current->cred->group_info); return i; } @@ -210,20 +211,20 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) asmlinkage long sys_getuid16(void) { - return high2lowuid(current->cred->uid); + return high2lowuid(current_uid()); } asmlinkage long sys_geteuid16(void) { - return high2lowuid(current->cred->euid); + return high2lowuid(current_euid()); } asmlinkage long sys_getgid16(void) { - return high2lowgid(current->cred->gid); + return high2lowgid(current_gid()); } asmlinkage long sys_getegid16(void) { - return high2lowgid(current->cred->egid); + return high2lowgid(current_egid()); } diff --git a/net/core/scm.c b/net/core/scm.c index c28ca32a7d9..f73c44b17dd 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -44,7 +44,7 @@ static __inline__ int scm_check_creds(struct ucred *creds) { - struct cred *cred = current->cred; + const 
struct cred *cred = current_cred(); if ((creds->pid == task_tgid_vnr(current) || capable(CAP_SYS_ADMIN)) && ((creds->uid == cred->uid || creds->uid == cred->euid || diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index c7954321260..0443f834945 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -350,16 +350,18 @@ EXPORT_SYMBOL_GPL(rpcauth_lookup_credcache); struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *auth, int flags) { - struct auth_cred acred = { - .uid = current_fsuid(), - .gid = current_fsgid(), - .group_info = current->cred->group_info, - }; + struct auth_cred acred; struct rpc_cred *ret; + const struct cred *cred = current_cred(); dprintk("RPC: looking up %s cred\n", auth->au_ops->au_name); - get_group_info(acred.group_info); + + memset(&acred, 0, sizeof(acred)); + acred.uid = cred->fsuid; + acred.gid = cred->fsgid; + acred.group_info = get_group_info(((struct cred *)cred)->group_info); + ret = auth->au_ops->lookup_cred(auth, &acred, flags); put_group_info(acred.group_info); return ret; diff --git a/security/commoncap.c b/security/commoncap.c index fa61679f8c7..61307f59000 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -641,7 +641,7 @@ int cap_task_setnice (struct task_struct *p, int nice) int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5, long *rc_p) { - struct cred *cred = current->cred; + struct cred *cred = current_cred(); long error = 0; switch (option) { diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index b0904cdda2e..ce8ac6073d5 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -582,7 +582,7 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, { struct request_key_auth *rka; struct task_struct *t = current; - struct cred *cred = t->cred; + struct cred *cred = current_cred(); struct key *key; key_ref_t key_ref, skey_ref; int ret; diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 3e9b9eb1dd2..0488b0af5bd 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -67,6 +67,7 @@ static int call_sbin_request_key(struct key_construction *cons, void *aux) { struct task_struct *tsk = current; + const struct cred *cred = current_cred(); key_serial_t prkey, sskey; struct key *key = cons->key, *authkey = cons->authkey, *keyring; char *argv[9], *envp[3], uid_str[12], gid_str[12]; @@ -96,16 +97,16 @@ static int call_sbin_request_key(struct key_construction *cons, goto error_link; /* record the UID and GID */ - sprintf(uid_str, "%d", current_fsuid()); - sprintf(gid_str, "%d", current_fsgid()); + sprintf(uid_str, "%d", cred->fsuid); + sprintf(gid_str, "%d", cred->fsgid); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", - tsk->cred->thread_keyring ? - tsk->cred->thread_keyring->serial : 0); + cred->thread_keyring ? 
+ cred->thread_keyring->serial : 0); prkey = 0; if (tsk->signal->process_keyring) @@ -118,7 +119,7 @@ static int call_sbin_request_key(struct key_construction *cons, sskey = rcu_dereference(tsk->signal->session_keyring)->serial; rcu_read_unlock(); } else { - sskey = tsk->cred->user->session_keyring->serial; + sskey = cred->user->session_keyring->serial; } sprintf(keyring_str[2], "%d", sskey); diff --git a/security/selinux/exports.c b/security/selinux/exports.c index cf02490cd1e..c73aeaa008e 100644 --- a/security/selinux/exports.c +++ b/security/selinux/exports.c @@ -39,9 +39,13 @@ EXPORT_SYMBOL_GPL(selinux_string_to_sid); int selinux_secmark_relabel_packet_permission(u32 sid) { if (selinux_enabled) { - struct task_security_struct *tsec = current->cred->security; + const struct task_security_struct *__tsec; + u32 tsid; - return avc_has_perm(tsec->sid, sid, SECCLASS_PACKET, + __tsec = current_security(); + tsid = __tsec->sid; + + return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL); } return 0; diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index d7db76617b0..c0eb72013d6 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -197,7 +197,7 @@ static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *uctx, u32 sid) { int rc = 0; - struct task_security_struct *tsec = current->cred->security; + const struct task_security_struct *tsec = current_security(); struct xfrm_sec_ctx *ctx = NULL; char *ctx_str = NULL; u32 str_len; @@ -333,7 +333,7 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) */ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { - struct task_security_struct *tsec = current->cred->security; + const struct task_security_struct *tsec = current_security(); int rc = 0; if (ctx) { @@ -378,7 +378,7 @@ void selinux_xfrm_state_free(struct xfrm_state *x) */ int selinux_xfrm_state_delete(struct xfrm_state *x) { - struct task_security_struct *tsec = current->cred->security; + const struct task_security_struct *tsec = current_security(); struct xfrm_sec_ctx *ctx = x->security; int rc = 0; diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index b6dd4fc0fb0..247cec3b5a4 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -164,7 +164,7 @@ int smk_curacc(char *obj_label, u32 mode) { int rc; - rc = smk_access(current->cred->security, obj_label, mode); + rc = smk_access(current_security(), obj_label, mode); if (rc == 0) return 0; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index cc837314fb0..e8a4fcb1ad0 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -143,7 +143,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp) static int smack_syslog(int type) { int rc; - char *sp = current->cred->security; + char *sp = current_security(); rc = cap_syslog(type); if (rc != 0) @@ -375,7 +375,7 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags) */ static int smack_inode_alloc_security(struct inode *inode) { - inode->i_security = new_inode_smack(current->cred->security); + inode->i_security = new_inode_smack(current_security()); if (inode->i_security == NULL) return -ENOMEM; return 0; @@ -820,7 +820,7 @@ static int smack_file_permission(struct file *file, int mask) */ static int smack_file_alloc_security(struct file *file) { - file->f_security = current->cred->security; + file->f_security = current_security(); return 0; } @@ -918,7 +918,7 @@ static int smack_file_fcntl(struct file *file, 
unsigned int cmd, */ static int smack_file_set_fowner(struct file *file) { - file->f_security = current->cred->security; + file->f_security = current_security(); return 0; } @@ -986,8 +986,7 @@ static int smack_file_receive(struct file *file) */ static int smack_cred_alloc_security(struct cred *cred) { - cred->security = current->cred->security; - + cred->security = current_security(); return 0; } @@ -1225,7 +1224,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) */ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { - char *csp = current->cred->security; + char *csp = current_security(); struct socket_smack *ssp; ssp = kzalloc(sizeof(struct socket_smack), gfp_flags); @@ -1450,7 +1449,7 @@ static int smack_flags_to_may(int flags) */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { - msg->security = current->cred->security; + msg->security = current_security(); return 0; } @@ -1486,7 +1485,7 @@ static int smack_shm_alloc_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; - isp->security = current->cred->security; + isp->security = current_security(); return 0; } @@ -1595,7 +1594,7 @@ static int smack_sem_alloc_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; - isp->security = current->cred->security; + isp->security = current_security(); return 0; } @@ -1699,7 +1698,7 @@ static int smack_msg_queue_alloc_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; - kisp->security = current->cred->security; + kisp->security = current_security(); return 0; } @@ -1854,7 +1853,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) struct super_block *sbp; struct superblock_smack *sbsp; struct inode_smack *isp; - char *csp = current->cred->security; + char *csp = current_security(); char *fetched; char *final; struct dentry *dp; @@ -2290,8 +2289,7 @@ static void smack_sock_graft(struct sock *sk, struct socket *parent) return; ssp = sk->sk_security; - ssp->smk_in = current->cred->security; - ssp->smk_out = current->cred->security; + ssp->smk_in = ssp->smk_out = current_security(); ssp->smk_packet[0] = '\0'; rc = smack_netlabel(sk); diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index c5ca279e050..ca257dfdc75 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -336,7 +336,7 @@ static void smk_cipso_doi(void) audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); - audit_info.secid = smack_to_secid(current->cred->security); + audit_info.secid = smack_to_secid(current_security()); rc = netlbl_cfg_map_del(NULL, &audit_info); if (rc != 0) @@ -371,7 +371,7 @@ static void smk_unlbl_ambient(char *oldambient) audit_info.loginuid = audit_get_loginuid(current); audit_info.sessionid = audit_get_sessionid(current); - audit_info.secid = smack_to_secid(current->cred->security); + audit_info.secid = smack_to_secid(current_security()); if (oldambient != NULL) { rc = netlbl_cfg_map_del(oldambient, &audit_info); -- cgit v1.2.3-70-g09d2 From c69e8d9c01db2adc503464993c358901c9af9de4 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:19 +1100 Subject: CRED: Use RCU to access another task's creds and to release a task's own creds Use RCU to access another task's creds and to release a task's own creds. 
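(For readers following the series, a minimal sketch of the pattern this patch applies throughout the tree; __task_cred(), rcu_read_lock() and rcu_read_unlock() are the helpers used in the hunks below, while the function shown here is illustrative and not part of the patch:)

#include <linux/cred.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/*
 * Illustrative sketch, not part of the patch: read one field of another
 * task's credentials under the RCU read lock.
 */
static uid_t example_task_owner_uid(struct task_struct *task)
{
	const struct cred *tcred;
	uid_t uid;

	rcu_read_lock();
	tcred = __task_cred(task);	/* pointer valid only inside the RCU read section */
	uid = tcred->uid;		/* copy the value out before dropping the lock */
	rcu_read_unlock();

	return uid;
}

The task_uid()/task_cred_xxx() macros added by the previous patch wrap this same lock, dereference, copy and unlock sequence; callers that need the credentials to outlive the RCU read section take a reference with get_task_cred() instead.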
This means that it will be possible for the credentials of a task to be replaced without another task (a) requiring a full lock to read them, and (b) seeing deallocated memory. Signed-off-by: David Howells Acked-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- arch/ia64/kernel/perfmon.c | 32 +++++++++++++--------- drivers/connector/cn_proc.c | 16 +++++++---- fs/binfmt_elf.c | 8 ++++-- fs/binfmt_elf_fdpic.c | 8 ++++-- fs/fcntl.c | 15 ++++++++--- fs/fuse/dir.c | 23 ++++++++++------ fs/ioprio.c | 14 +++++++--- fs/proc/array.c | 32 ++++++++++++++-------- fs/proc/base.c | 32 ++++++++++++++++------ include/linux/cred.h | 3 ++- kernel/auditsc.c | 33 +++++++++++++---------- kernel/cgroup.c | 16 +++++------ kernel/exit.c | 14 ++++++---- kernel/futex.c | 22 +++++++++------ kernel/futex_compat.c | 7 ++--- kernel/ptrace.c | 22 ++++++++------- kernel/sched.c | 31 ++++++++++++++------- kernel/signal.c | 49 ++++++++++++++++++++------------- kernel/sys.c | 11 +++++--- kernel/tsacct.c | 6 +++-- mm/mempolicy.c | 8 +++--- mm/migrate.c | 8 +++--- mm/oom_kill.c | 6 ++--- security/commoncap.c | 64 +++++++++++++++++++++++++++----------------- security/keys/permission.c | 10 ++++--- security/keys/process_keys.c | 24 ++++++++++------- security/selinux/selinuxfs.c | 13 ++++++--- security/smack/smack_lsm.c | 32 +++++++++++----------- 28 files changed, 355 insertions(+), 204 deletions(-) (limited to 'fs') diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index dd38db46a77..0e499757309 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -2399,25 +2399,33 @@ error_kmem: static int pfm_bad_permissions(struct task_struct *task) { + const struct cred *tcred; uid_t uid = current_uid(); gid_t gid = current_gid(); + int ret; + + rcu_read_lock(); + tcred = __task_cred(task); /* inspired by ptrace_attach() */ DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n", uid, gid, - task->euid, - task->suid, - task->uid, - task->egid, - task->sgid)); - - return (uid != task->euid) - || (uid != task->suid) - || (uid != task->uid) - || (gid != task->egid) - || (gid != task->sgid) - || (gid != task->gid)) && !capable(CAP_SYS_PTRACE); + tcred->euid, + tcred->suid, + tcred->uid, + tcred->egid, + tcred->sgid)); + + ret = ((uid != tcred->euid) + || (uid != tcred->suid) + || (uid != tcred->uid) + || (gid != tcred->egid) + || (gid != tcred->sgid) + || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE); + + rcu_read_unlock(); + return ret; } static int diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 354c1ff1715..c5afc98e267 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -106,6 +106,7 @@ void proc_id_connector(struct task_struct *task, int which_id) struct proc_event *ev; __u8 buffer[CN_PROC_MSG_SIZE]; struct timespec ts; + const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; @@ -115,14 +116,19 @@ void proc_id_connector(struct task_struct *task, int which_id) ev->what = which_id; ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_tgid = task->tgid; + rcu_read_lock(); + cred = __task_cred(task); if (which_id == PROC_EVENT_UID) { - ev->event_data.id.r.ruid = task->cred->uid; - ev->event_data.id.e.euid = task->cred->euid; + ev->event_data.id.r.ruid = cred->uid; + ev->event_data.id.e.euid = cred->euid; } else if (which_id == PROC_EVENT_GID) { - ev->event_data.id.r.rgid = task->cred->gid; - ev->event_data.id.e.egid = task->cred->egid; - } else + 
ev->event_data.id.r.rgid = cred->gid; + ev->event_data.id.e.egid = cred->egid; + } else { + rcu_read_unlock(); return; + } + rcu_read_unlock(); get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 0e665561316..9142ff5dc8e 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1361,6 +1361,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus, static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, struct mm_struct *mm) { + const struct cred *cred; unsigned int i, len; /* first copy the parameters from user space */ @@ -1388,8 +1389,11 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_zomb = psinfo->pr_sname == 'Z'; psinfo->pr_nice = task_nice(p); psinfo->pr_flag = p->flags; - SET_UID(psinfo->pr_uid, p->cred->uid); - SET_GID(psinfo->pr_gid, p->cred->gid); + rcu_read_lock(); + cred = __task_cred(p); + SET_UID(psinfo->pr_uid, cred->uid); + SET_GID(psinfo->pr_gid, cred->gid); + rcu_read_unlock(); strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); return 0; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 1f6e8c023b4..45dabd59936 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1414,6 +1414,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus, static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, struct mm_struct *mm) { + const struct cred *cred; unsigned int i, len; /* first copy the parameters from user space */ @@ -1441,8 +1442,11 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, psinfo->pr_zomb = psinfo->pr_sname == 'Z'; psinfo->pr_nice = task_nice(p); psinfo->pr_flag = p->flags; - SET_UID(psinfo->pr_uid, p->cred->uid); - SET_GID(psinfo->pr_gid, p->cred->gid); + rcu_read_lock(); + cred = __task_cred(p); + SET_UID(psinfo->pr_uid, cred->uid); + SET_GID(psinfo->pr_gid, cred->gid); + rcu_read_unlock(); strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname)); return 0; diff --git a/fs/fcntl.c b/fs/fcntl.c index c594cc0e40f..87c39f1f081 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -401,10 +401,17 @@ static const long band_table[NSIGPOLL] = { static inline int sigio_perm(struct task_struct *p, struct fown_struct *fown, int sig) { - return (((fown->euid == 0) || - (fown->euid == p->cred->suid) || (fown->euid == p->cred->uid) || - (fown->uid == p->cred->suid) || (fown->uid == p->cred->uid)) && - !security_file_send_sigiotask(p, fown, sig)); + const struct cred *cred; + int ret; + + rcu_read_lock(); + cred = __task_cred(p); + ret = ((fown->euid == 0 || + fown->euid == cred->suid || fown->euid == cred->uid || + fown->uid == cred->suid || fown->uid == cred->uid) && + !security_file_send_sigiotask(p, fown, sig)); + rcu_read_unlock(); + return ret; } static void send_sigio_to_task(struct task_struct *p, diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e97a9898186..95bc22bdd06 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -869,18 +869,25 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat, */ int fuse_allow_task(struct fuse_conn *fc, struct task_struct *task) { + const struct cred *cred; + int ret; + if (fc->flags & FUSE_ALLOW_OTHER) return 1; - if (task->cred->euid == fc->user_id && - task->cred->suid == fc->user_id && - task->cred->uid == fc->user_id && - task->cred->egid == fc->group_id && - task->cred->sgid == fc->group_id && - task->cred->gid == fc->group_id) - 
return 1; + rcu_read_lock(); + ret = 0; + cred = __task_cred(task); + if (cred->euid == fc->user_id && + cred->suid == fc->user_id && + cred->uid == fc->user_id && + cred->egid == fc->group_id && + cred->sgid == fc->group_id && + cred->gid == fc->group_id) + ret = 1; + rcu_read_unlock(); - return 0; + return ret; } static int fuse_access(struct inode *inode, int mask) diff --git a/fs/ioprio.c b/fs/ioprio.c index 5112554fd21..3569e0ad86a 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -31,10 +31,16 @@ static int set_task_ioprio(struct task_struct *task, int ioprio) { int err; struct io_context *ioc; + const struct cred *cred = current_cred(), *tcred; - if (task->cred->uid != current_euid() && - task->cred->uid != current_uid() && !capable(CAP_SYS_NICE)) + rcu_read_lock(); + tcred = __task_cred(task); + if (tcred->uid != cred->euid && + tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); return -EPERM; + } + rcu_read_unlock(); err = security_task_setioprio(task, ioprio); if (err) @@ -131,7 +137,7 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio) break; do_each_thread(g, p) { - if (p->cred->uid != who) + if (__task_cred(p)->uid != who) continue; ret = set_task_ioprio(p, ioprio); if (ret) @@ -224,7 +230,7 @@ asmlinkage long sys_ioprio_get(int which, int who) break; do_each_thread(g, p) { - if (p->cred->uid != user->uid) + if (__task_cred(p)->uid != user->uid) continue; tmpio = get_task_ioprio(p); if (tmpio < 0) diff --git a/fs/proc/array.c b/fs/proc/array.c index 62fe9b2009b..7e4877d9dcb 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -159,6 +159,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, struct group_info *group_info; int g; struct fdtable *fdt = NULL; + const struct cred *cred; pid_t ppid, tpid; rcu_read_lock(); @@ -170,6 +171,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, if (tracer) tpid = task_pid_nr_ns(tracer, ns); } + cred = get_cred((struct cred *) __task_cred(p)); seq_printf(m, "State:\t%s\n" "Tgid:\t%d\n" @@ -182,8 +184,8 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, task_tgid_nr_ns(p, ns), pid_nr_ns(pid, ns), ppid, tpid, - p->cred->uid, p->cred->euid, p->cred->suid, p->cred->fsuid, - p->cred->gid, p->cred->egid, p->cred->sgid, p->cred->fsgid); + cred->uid, cred->euid, cred->suid, cred->fsuid, + cred->gid, cred->egid, cred->sgid, cred->fsgid); task_lock(p); if (p->files) @@ -194,13 +196,12 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, fdt ? 
fdt->max_fds : 0); rcu_read_unlock(); - group_info = p->cred->group_info; - get_group_info(group_info); + group_info = cred->group_info; task_unlock(p); for (g = 0; g < min(group_info->ngroups, NGROUPS_SMALL); g++) seq_printf(m, "%d ", GROUP_AT(group_info, g)); - put_group_info(group_info); + put_cred(cred); seq_printf(m, "\n"); } @@ -262,7 +263,7 @@ static inline void task_sig(struct seq_file *m, struct task_struct *p) blocked = p->blocked; collect_sigign_sigcatch(p, &ignored, &caught); num_threads = atomic_read(&p->signal->count); - qsize = atomic_read(&p->cred->user->sigpending); + qsize = atomic_read(&__task_cred(p)->user->sigpending); qlim = p->signal->rlim[RLIMIT_SIGPENDING].rlim_cur; unlock_task_sighand(p, &flags); } @@ -293,12 +294,21 @@ static void render_cap_t(struct seq_file *m, const char *header, static inline void task_cap(struct seq_file *m, struct task_struct *p) { - struct cred *cred = p->cred; + const struct cred *cred; + kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset; - render_cap_t(m, "CapInh:\t", &cred->cap_inheritable); - render_cap_t(m, "CapPrm:\t", &cred->cap_permitted); - render_cap_t(m, "CapEff:\t", &cred->cap_effective); - render_cap_t(m, "CapBnd:\t", &cred->cap_bset); + rcu_read_lock(); + cred = __task_cred(p); + cap_inheritable = cred->cap_inheritable; + cap_permitted = cred->cap_permitted; + cap_effective = cred->cap_effective; + cap_bset = cred->cap_bset; + rcu_read_unlock(); + + render_cap_t(m, "CapInh:\t", &cap_inheritable); + render_cap_t(m, "CapPrm:\t", &cap_permitted); + render_cap_t(m, "CapEff:\t", &cap_effective); + render_cap_t(m, "CapBnd:\t", &cap_bset); } static inline void task_context_switch_counts(struct seq_file *m, diff --git a/fs/proc/base.c b/fs/proc/base.c index 6862b360c36..cf42c42cbfb 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1406,6 +1406,7 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st { struct inode * inode; struct proc_inode *ei; + const struct cred *cred; /* We need a new inode */ @@ -1428,8 +1429,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st inode->i_uid = 0; inode->i_gid = 0; if (task_dumpable(task)) { - inode->i_uid = task->cred->euid; - inode->i_gid = task->cred->egid; + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; + inode->i_gid = cred->egid; + rcu_read_unlock(); } security_task_to_inode(task, inode); @@ -1445,6 +1449,8 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat { struct inode *inode = dentry->d_inode; struct task_struct *task; + const struct cred *cred; + generic_fillattr(inode, stat); rcu_read_lock(); @@ -1454,8 +1460,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat if (task) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { - stat->uid = task->cred->euid; - stat->gid = task->cred->egid; + cred = __task_cred(task); + stat->uid = cred->euid; + stat->gid = cred->egid; } } rcu_read_unlock(); @@ -1483,11 +1490,16 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd) { struct inode *inode = dentry->d_inode; struct task_struct *task = get_proc_task(inode); + const struct cred *cred; + if (task) { if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || task_dumpable(task)) { - inode->i_uid = task->cred->euid; - inode->i_gid = task->cred->egid; + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; + inode->i_gid = cred->egid; + rcu_read_unlock(); } else { 
inode->i_uid = 0; inode->i_gid = 0; @@ -1649,6 +1661,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) struct task_struct *task = get_proc_task(inode); int fd = proc_fd(inode); struct files_struct *files; + const struct cred *cred; if (task) { files = get_files_struct(task); @@ -1658,8 +1671,11 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd) rcu_read_unlock(); put_files_struct(files); if (task_dumpable(task)) { - inode->i_uid = task->cred->euid; - inode->i_gid = task->cred->egid; + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; + inode->i_gid = cred->egid; + rcu_read_unlock(); } else { inode->i_uid = 0; inode->i_gid = 0; diff --git a/include/linux/cred.h b/include/linux/cred.h index 4221ec6000c..166ce4ddba6 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -147,8 +147,9 @@ static inline struct cred *get_cred(struct cred *cred) * Release a reference to a set of credentials, deleting them when the last ref * is released. */ -static inline void put_cred(struct cred *cred) +static inline void put_cred(const struct cred *_cred) { + struct cred *cred = (struct cred *) _cred; if (atomic_dec_and_test(&(cred)->usage)) __put_cred(cred); } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 2febf5165fa..ae8ef88ade3 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -447,7 +447,7 @@ static int audit_filter_rules(struct task_struct *tsk, struct audit_names *name, enum audit_state *state) { - struct cred *cred = tsk->cred; + const struct cred *cred = get_task_cred(tsk); int i, j, need_sid = 1; u32 sid; @@ -642,8 +642,10 @@ static int audit_filter_rules(struct task_struct *tsk, break; } - if (!result) + if (!result) { + put_cred(cred); return 0; + } } if (rule->filterkey && ctx) ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC); @@ -651,6 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk, case AUDIT_NEVER: *state = AUDIT_DISABLED; break; case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; } + put_cred(cred); return 1; } @@ -1229,7 +1232,7 @@ static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name) static void audit_log_exit(struct audit_context *context, struct task_struct *tsk) { - struct cred *cred = tsk->cred; + const struct cred *cred; int i, call_panic = 0; struct audit_buffer *ab; struct audit_aux_data *aux; @@ -1239,13 +1242,14 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts context->pid = tsk->pid; if (!context->ppid) context->ppid = sys_getppid(); - context->uid = cred->uid; - context->gid = cred->gid; - context->euid = cred->euid; - context->suid = cred->suid; + cred = current_cred(); + context->uid = cred->uid; + context->gid = cred->gid; + context->euid = cred->euid; + context->suid = cred->suid; context->fsuid = cred->fsuid; - context->egid = cred->egid; - context->sgid = cred->sgid; + context->egid = cred->egid; + context->sgid = cred->sgid; context->fsgid = cred->fsgid; context->personality = tsk->personality; @@ -2088,7 +2092,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid) audit_log_format(ab, "login pid=%d uid=%u " "old auid=%u new auid=%u" " old ses=%u new ses=%u", - task->pid, task->cred->uid, + task->pid, task_uid(task), task->loginuid, loginuid, task->sessionid, sessionid); audit_log_end(ab); @@ -2471,7 +2475,7 @@ void __audit_ptrace(struct task_struct *t) context->target_pid = t->pid; context->target_auid = audit_get_loginuid(t); - context->target_uid = t->cred->uid; + 
context->target_uid = task_uid(t); context->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &context->target_sid); memcpy(context->target_comm, t->comm, TASK_COMM_LEN); @@ -2490,6 +2494,7 @@ int __audit_signal_info(int sig, struct task_struct *t) struct audit_aux_data_pids *axp; struct task_struct *tsk = current; struct audit_context *ctx = tsk->audit_context; + uid_t uid = current_uid(), t_uid = task_uid(t); if (audit_pid && t->tgid == audit_pid) { if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { @@ -2497,7 +2502,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (tsk->loginuid != -1) audit_sig_uid = tsk->loginuid; else - audit_sig_uid = tsk->cred->uid; + audit_sig_uid = uid; security_task_getsecid(tsk, &audit_sig_sid); } if (!audit_signals || audit_dummy_context()) @@ -2509,7 +2514,7 @@ int __audit_signal_info(int sig, struct task_struct *t) if (!ctx->target_pid) { ctx->target_pid = t->tgid; ctx->target_auid = audit_get_loginuid(t); - ctx->target_uid = t->cred->uid; + ctx->target_uid = t_uid; ctx->target_sessionid = audit_get_sessionid(t); security_task_getsecid(t, &ctx->target_sid); memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN); @@ -2530,7 +2535,7 @@ int __audit_signal_info(int sig, struct task_struct *t) axp->target_pid[axp->pid_count] = t->tgid; axp->target_auid[axp->pid_count] = audit_get_loginuid(t); - axp->target_uid[axp->pid_count] = t->cred->uid; + axp->target_uid[axp->pid_count] = t_uid; axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t); security_task_getsecid(t, &axp->target_sid[axp->pid_count]); memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e210526e640..a512a75a556 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1279,7 +1279,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) { struct task_struct *tsk; - uid_t euid; + const struct cred *cred = current_cred(), *tcred; int ret; if (pid) { @@ -1289,16 +1289,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) rcu_read_unlock(); return -ESRCH; } - get_task_struct(tsk); - rcu_read_unlock(); - euid = current_euid(); - if (euid && - euid != tsk->cred->uid && - euid != tsk->cred->suid) { - put_task_struct(tsk); + tcred = __task_cred(tsk); + if (cred->euid && + cred->euid != tcred->uid && + cred->euid != tcred->suid) { + rcu_read_unlock(); return -EACCES; } + get_task_struct(tsk); + rcu_read_unlock(); } else { tsk = current; get_task_struct(tsk); diff --git a/kernel/exit.c b/kernel/exit.c index e0f6e1892fb..bbc22530f2c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -160,7 +160,10 @@ void release_task(struct task_struct * p) int zap_leader; repeat: tracehook_prepare_release_task(p); - atomic_dec(&p->cred->user->processes); + /* don't need to get the RCU readlock here - the process is dead and + * can't be modifying its own credentials */ + atomic_dec(&__task_cred(p)->user->processes); + proc_flush_task(p); write_lock_irq(&tasklist_lock); tracehook_finish_release_task(p); @@ -1267,12 +1270,12 @@ static int wait_task_zombie(struct task_struct *p, int options, unsigned long state; int retval, status, traced; pid_t pid = task_pid_vnr(p); + uid_t uid = __task_cred(p)->uid; if (!likely(options & WEXITED)) return 0; if (unlikely(options & WNOWAIT)) { - uid_t uid = p->cred->uid; int exit_code = p->exit_code; int why, status; @@ -1393,7 +1396,7 @@ static int wait_task_zombie(struct task_struct 
*p, int options, if (!retval && infop) retval = put_user(pid, &infop->si_pid); if (!retval && infop) - retval = put_user(p->cred->uid, &infop->si_uid); + retval = put_user(uid, &infop->si_uid); if (!retval) retval = pid; @@ -1458,7 +1461,8 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, if (!unlikely(options & WNOWAIT)) p->exit_code = 0; - uid = p->cred->uid; + /* don't need the RCU readlock here as we're holding a spinlock */ + uid = __task_cred(p)->uid; unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) @@ -1532,10 +1536,10 @@ static int wait_task_continued(struct task_struct *p, int options, } if (!unlikely(options & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; + uid = __task_cred(p)->uid; spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); - uid = p->cred->uid; get_task_struct(p); read_unlock(&tasklist_lock); diff --git a/kernel/futex.c b/kernel/futex.c index 28421d8210b..4fe790e89d0 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -439,15 +439,20 @@ static void free_pi_state(struct futex_pi_state *pi_state) static struct task_struct * futex_find_get_task(pid_t pid) { struct task_struct *p; - uid_t euid = current_euid(); + const struct cred *cred = current_cred(), *pcred; rcu_read_lock(); p = find_task_by_vpid(pid); - if (!p || (euid != p->cred->euid && - euid != p->cred->uid)) + if (!p) { p = ERR_PTR(-ESRCH); - else - get_task_struct(p); + } else { + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid) + p = ERR_PTR(-ESRCH); + else + get_task_struct(p); + } rcu_read_unlock(); @@ -1831,7 +1836,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, { struct robust_list_head __user *head; unsigned long ret; - uid_t euid = current_euid(); + const struct cred *cred = current_cred(), *pcred; if (!futex_cmpxchg_enabled) return -ENOSYS; @@ -1847,8 +1852,9 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, if (!p) goto err_unlock; ret = -EPERM; - if (euid != p->cred->euid && - euid != p->cred->uid && + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->robust_list; diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 2c3fd5ed34f..d607a5b9ee2 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -135,7 +135,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, { struct compat_robust_list_head __user *head; unsigned long ret; - uid_t euid = current_euid(); + const struct cred *cred = current_cred(), *pcred; if (!futex_cmpxchg_enabled) return -ENOSYS; @@ -151,8 +151,9 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, if (!p) goto err_unlock; ret = -EPERM; - if (euid != p->cred->euid && - euid != p->cred->uid && + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && !capable(CAP_SYS_PTRACE)) goto err_unlock; head = p->compat_robust_list; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 49849d12dd1..b9d5f4e4f6a 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -115,7 +115,7 @@ int ptrace_check_attach(struct task_struct *child, int kill) int __ptrace_may_access(struct task_struct *task, unsigned int mode) { - struct cred *cred = current->cred, *tcred = task->cred; + const struct cred *cred = current_cred(), *tcred; /* May we inspect the given task? 
* This check is used both for attaching with ptrace @@ -125,19 +125,23 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) * because setting up the necessary parent/child relationship * or halting the specified task is impossible. */ - uid_t uid = cred->uid; - gid_t gid = cred->gid; int dumpable = 0; /* Don't let security modules deny introspection */ if (task == current) return 0; - if ((uid != tcred->euid || - uid != tcred->suid || - uid != tcred->uid || - gid != tcred->egid || - gid != tcred->sgid || - gid != tcred->gid) && !capable(CAP_SYS_PTRACE)) + rcu_read_lock(); + tcred = __task_cred(task); + if ((cred->uid != tcred->euid || + cred->uid != tcred->suid || + cred->uid != tcred->uid || + cred->gid != tcred->egid || + cred->gid != tcred->sgid || + cred->gid != tcred->gid) && + !capable(CAP_SYS_PTRACE)) { + rcu_read_unlock(); return -EPERM; + } + rcu_read_unlock(); smp_rmb(); if (task->mm) dumpable = get_dumpable(task->mm); diff --git a/kernel/sched.c b/kernel/sched.c index 733c59e645a..92992e287b1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -345,7 +345,9 @@ static inline struct task_group *task_group(struct task_struct *p) struct task_group *tg; #ifdef CONFIG_USER_SCHED - tg = p->cred->user->tg; + rcu_read_lock(); + tg = __task_cred(p)->user->tg; + rcu_read_unlock(); #elif defined(CONFIG_CGROUP_SCHED) tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), struct task_group, css); @@ -5121,6 +5123,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) set_load_weight(p); } +/* + * check the target process has a UID that matches the current process's + */ +static bool check_same_owner(struct task_struct *p) +{ + const struct cred *cred = current_cred(), *pcred; + bool match; + + rcu_read_lock(); + pcred = __task_cred(p); + match = (cred->euid == pcred->euid || + cred->euid == pcred->uid); + rcu_read_unlock(); + return match; +} + static int __sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param, bool user) { @@ -5128,7 +5146,6 @@ static int __sched_setscheduler(struct task_struct *p, int policy, unsigned long flags; const struct sched_class *prev_class = p->sched_class; struct rq *rq; - uid_t euid; /* may grab non-irq protected spin_locks */ BUG_ON(in_interrupt()); @@ -5181,9 +5198,7 @@ recheck: return -EPERM; /* can't change other user's priorities */ - euid = current_euid(); - if (euid != p->cred->euid && - euid != p->cred->uid) + if (!check_same_owner(p)) return -EPERM; } @@ -5394,7 +5409,6 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) cpumask_t cpus_allowed; cpumask_t new_mask = *in_mask; struct task_struct *p; - uid_t euid; int retval; get_online_cpus(); @@ -5415,11 +5429,8 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) get_task_struct(p); read_unlock(&tasklist_lock); - euid = current_euid(); retval = -EPERM; - if (euid != p->cred->euid && - euid != p->cred->uid && - !capable(CAP_SYS_NICE)) + if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) goto out_unlock; retval = security_task_setscheduler(p, 0, NULL); diff --git a/kernel/signal.c b/kernel/signal.c index 80e8a6489f9..84989124baf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -177,6 +177,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask) return sig; } +/* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appopriate lock must be held to protect t's user_struct + */ static struct sigqueue *__sigqueue_alloc(struct 
task_struct *t, gfp_t flags, int override_rlimit) { @@ -184,11 +189,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, struct user_struct *user; /* - * In order to avoid problems with "switch_user()", we want to make - * sure that the compiler doesn't re-load "t->user" + * We won't get problems with the target's UID changing under us + * because changing it requires RCU be used, and if t != current, the + * caller must be holding the RCU readlock (by way of a spinlock) and + * we use RCU protection here */ - user = t->cred->user; - barrier(); + user = __task_cred(t)->user; atomic_inc(&user->sigpending); if (override_rlimit || atomic_read(&user->sigpending) <= @@ -562,12 +568,13 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s) /* * Bad permissions for sending the signal + * - the caller must hold at least the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) { + const struct cred *cred = current_cred(), *tcred; struct pid *sid; - uid_t uid, euid; int error; if (!valid_signal(sig)) @@ -580,10 +587,11 @@ static int check_kill_permission(int sig, struct siginfo *info, if (error) return error; - uid = current_uid(); - euid = current_euid(); - if ((euid ^ t->cred->suid) && (euid ^ t->cred->uid) && - (uid ^ t->cred->suid) && (uid ^ t->cred->uid) && + tcred = __task_cred(t); + if ((cred->euid ^ tcred->suid) && + (cred->euid ^ tcred->uid) && + (cred->uid ^ tcred->suid) && + (cred->uid ^ tcred->uid) && !capable(CAP_KILL)) { switch (sig) { case SIGCONT: @@ -1011,6 +1019,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long return sighand; } +/* + * send signal info to all the members of a group + * - the caller must hold the RCU read lock at least + */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { unsigned long flags; @@ -1032,8 +1044,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) /* * __kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) + * - the caller must hold at least a readlock on tasklist_lock */ - int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; @@ -1089,6 +1101,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, { int ret = -EINVAL; struct task_struct *p; + const struct cred *pcred; if (!valid_signal(sig)) return ret; @@ -1099,9 +1112,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, ret = -ESRCH; goto out_unlock; } - if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) - && (euid != p->cred->suid) && (euid != p->cred->uid) - && (uid != p->cred->suid) && (uid != p->cred->uid)) { + pcred = __task_cred(p); + if ((info == SEND_SIG_NOINFO || + (!is_si_special(info) && SI_FROMUSER(info))) && + euid != pcred->suid && euid != pcred->uid && + uid != pcred->suid && uid != pcred->uid) { ret = -EPERM; goto out_unlock; } @@ -1372,10 +1387,9 @@ int do_notify_parent(struct task_struct *tsk, int sig) */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_uid = tsk->cred->uid; - thread_group_cputime(tsk, &cputime); info.si_utime = cputime_to_jiffies(cputime.utime); info.si_stime = cputime_to_jiffies(cputime.stime); @@ -1443,10 +1457,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) */ 
rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + info.si_uid = __task_cred(tsk)->uid; rcu_read_unlock(); - info.si_uid = tsk->cred->uid; - info.si_utime = cputime_to_clock_t(tsk->utime); info.si_stime = cputime_to_clock_t(tsk->stime); @@ -1713,7 +1726,7 @@ static int ptrace_signal(int signr, siginfo_t *info, info->si_errno = 0; info->si_code = SI_USER; info->si_pid = task_pid_vnr(current->parent); - info->si_uid = current->parent->cred->uid; + info->si_uid = task_uid(current->parent); } /* If the (new) signal is now blocked, requeue it. */ diff --git a/kernel/sys.c b/kernel/sys.c index c4d6b59553e..ccc9eb736d3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -112,14 +112,17 @@ EXPORT_SYMBOL(cad_pid); void (*pm_power_off_prepare)(void); +/* + * set the priority of a task + * - the caller must hold the RCU read lock + */ static int set_one_prio(struct task_struct *p, int niceval, int error) { - uid_t euid = current_euid(); + const struct cred *cred = current_cred(), *pcred = __task_cred(p); int no_nice; - if (p->cred->uid != euid && - p->cred->euid != euid && - !capable(CAP_SYS_NICE)) { + if (pcred->uid != cred->euid && + pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) { error = -EPERM; goto out; } diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 6d1ed07bf31..2dc06ab3571 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -27,6 +27,7 @@ */ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) { + const struct cred *tcred; struct timespec uptime, ts; u64 ac_etime; @@ -53,10 +54,11 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) stats->ac_flag |= AXSIG; stats->ac_nice = task_nice(tsk); stats->ac_sched = tsk->policy; - stats->ac_uid = tsk->cred->uid; - stats->ac_gid = tsk->cred->gid; stats->ac_pid = tsk->pid; rcu_read_lock(); + tcred = __task_cred(tsk); + stats->ac_uid = tcred->uid; + stats->ac_gid = tcred->gid; stats->ac_ppid = pid_alive(tsk) ? rcu_dereference(tsk->real_parent)->tgid : 0; rcu_read_unlock(); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b23492ee3e5..7555219c535 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1110,7 +1110,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, const unsigned long __user *old_nodes, const unsigned long __user *new_nodes) { - struct cred *cred, *tcred; + const struct cred *cred = current_cred(), *tcred; struct mm_struct *mm; struct task_struct *task; nodemask_t old; @@ -1145,14 +1145,16 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode, * capabilities, superuser privileges or the same * userid as the target process. */ - cred = current->cred; - tcred = task->cred; + rcu_read_lock(); + tcred = __task_cred(task); if (cred->euid != tcred->suid && cred->euid != tcred->uid && cred->uid != tcred->suid && cred->uid != tcred->uid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); err = -EPERM; goto out; } + rcu_read_unlock(); task_nodes = cpuset_mems_allowed(task); /* Is the user allowed to access the target nodes? 
*/ diff --git a/mm/migrate.c b/mm/migrate.c index 794443da1b4..142284229ce 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1045,7 +1045,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, const int __user *nodes, int __user *status, int flags) { - struct cred *cred, *tcred; + const struct cred *cred = current_cred(), *tcred; struct task_struct *task; struct mm_struct *mm; int err; @@ -1076,14 +1076,16 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages, * capabilities, superuser privileges or the same * userid as the target process. */ - cred = current->cred; - tcred = task->cred; + rcu_read_lock(); + tcred = __task_cred(task); if (cred->euid != tcred->suid && cred->euid != tcred->uid && cred->uid != tcred->suid && cred->uid != tcred->uid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); err = -EPERM; goto out; } + rcu_read_unlock(); err = security_task_movememory(task); if (err) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3af787ba207..0e0b282a207 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -298,9 +298,9 @@ static void dump_tasks(const struct mem_cgroup *mem) task_lock(p); printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n", - p->pid, p->cred->uid, p->tgid, p->mm->total_vm, - get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj, - p->comm); + p->pid, __task_cred(p)->uid, p->tgid, + p->mm->total_vm, get_mm_rss(p->mm), (int)task_cpu(p), + p->oomkilladj, p->comm); task_unlock(p); } while_each_thread(g, p); } diff --git a/security/commoncap.c b/security/commoncap.c index 61307f59000..0384bf95db6 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -51,10 +51,13 @@ EXPORT_SYMBOL(cap_netlink_recv); */ int cap_capable(struct task_struct *tsk, int cap, int audit) { + __u32 cap_raised; + /* Derived from include/linux/sched.h:capable. */ - if (cap_raised(tsk->cred->cap_effective, cap)) - return 0; - return -EPERM; + rcu_read_lock(); + cap_raised = cap_raised(__task_cred(tsk)->cap_effective, cap); + rcu_read_unlock(); + return cap_raised ? 0 : -EPERM; } int cap_settime(struct timespec *ts, struct timezone *tz) @@ -66,34 +69,42 @@ int cap_settime(struct timespec *ts, struct timezone *tz) int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) { - /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */ - if (cap_issubset(child->cred->cap_permitted, - current->cred->cap_permitted)) - return 0; - if (capable(CAP_SYS_PTRACE)) - return 0; - return -EPERM; + int ret = 0; + + rcu_read_lock(); + if (!cap_issubset(child->cred->cap_permitted, + current->cred->cap_permitted) && + !capable(CAP_SYS_PTRACE)) + ret = -EPERM; + rcu_read_unlock(); + return ret; } int cap_ptrace_traceme(struct task_struct *parent) { - if (cap_issubset(current->cred->cap_permitted, - parent->cred->cap_permitted)) - return 0; - if (has_capability(parent, CAP_SYS_PTRACE)) - return 0; - return -EPERM; + int ret = 0; + + rcu_read_lock(); + if (!cap_issubset(current->cred->cap_permitted, + parent->cred->cap_permitted) && + !has_capability(parent, CAP_SYS_PTRACE)) + ret = -EPERM; + rcu_read_unlock(); + return ret; } int cap_capget (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) { - struct cred *cred = target->cred; + const struct cred *cred; /* Derived from kernel/capability.c:sys_capget. 
*/ + rcu_read_lock(); + cred = __task_cred(target); *effective = cred->cap_effective; *inheritable = cred->cap_inheritable; *permitted = cred->cap_permitted; + rcu_read_unlock(); return 0; } @@ -433,7 +444,7 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) int cap_bprm_secureexec (struct linux_binprm *bprm) { - const struct cred *cred = current->cred; + const struct cred *cred = current_cred(); if (cred->uid != 0) { if (bprm->cap_effective) @@ -511,11 +522,11 @@ static inline void cap_emulate_setxuid (int old_ruid, int old_euid, if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) && (cred->uid != 0 && cred->euid != 0 && cred->suid != 0) && !issecure(SECURE_KEEP_CAPS)) { - cap_clear (cred->cap_permitted); - cap_clear (cred->cap_effective); + cap_clear(cred->cap_permitted); + cap_clear(cred->cap_effective); } if (old_euid == 0 && cred->euid != 0) { - cap_clear (cred->cap_effective); + cap_clear(cred->cap_effective); } if (old_euid != 0 && cred->euid == 0) { cred->cap_effective = cred->cap_permitted; @@ -582,9 +593,14 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, */ static int cap_safe_nice(struct task_struct *p) { - if (!cap_issubset(p->cred->cap_permitted, - current->cred->cap_permitted) && - !capable(CAP_SYS_NICE)) + int is_subset; + + rcu_read_lock(); + is_subset = cap_issubset(__task_cred(p)->cap_permitted, + current_cred()->cap_permitted); + rcu_read_unlock(); + + if (!is_subset && !capable(CAP_SYS_NICE)) return -EPERM; return 0; } diff --git a/security/keys/permission.c b/security/keys/permission.c index baf3d5f31e7..13c36164f28 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -22,13 +22,16 @@ int key_task_permission(const key_ref_t key_ref, struct task_struct *context, key_perm_t perm) { - struct cred *cred = context->cred; + const struct cred *cred; struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); + rcu_read_lock(); + cred = __task_cred(context); + /* use the second 8-bits of permissions for keys the caller owns */ if (key->uid == cred->fsuid) { kperm = key->perm >> 16; @@ -43,10 +46,7 @@ int key_task_permission(const key_ref_t key_ref, goto use_these_perms; } - spin_lock(&cred->lock); ret = groups_search(cred->group_info, key->gid); - spin_unlock(&cred->lock); - if (ret) { kperm = key->perm >> 8; goto use_these_perms; @@ -57,6 +57,8 @@ int key_task_permission(const key_ref_t key_ref, kperm = key->perm; use_these_perms: + rcu_read_lock(); + /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions */ diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index ce8ac6073d5..212601ebaa4 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -412,10 +412,13 @@ key_ref_t search_process_keyrings(struct key_type *type, struct task_struct *context) { struct request_key_auth *rka; + struct cred *cred; key_ref_t key_ref, ret, err; might_sleep(); + cred = get_task_cred(context); + /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; * otherwise we want to return a sample error (probably -EACCES) if @@ -428,9 +431,9 @@ key_ref_t search_process_keyrings(struct key_type *type, err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ - if (context->cred->thread_keyring) { + if (cred->thread_keyring) { key_ref = keyring_search_aux( - make_key_ref(context->cred->thread_keyring, 1), + 
make_key_ref(cred->thread_keyring, 1), context, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -495,9 +498,9 @@ key_ref_t search_process_keyrings(struct key_type *type, } } /* or search the user-session keyring */ - else if (context->cred->user->session_keyring) { + else if (cred->user->session_keyring) { key_ref = keyring_search_aux( - make_key_ref(context->cred->user->session_keyring, 1), + make_key_ref(cred->user->session_keyring, 1), context, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -519,20 +522,20 @@ key_ref_t search_process_keyrings(struct key_type *type, * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ - if (context->cred->request_key_auth && + if (cred->request_key_auth && context == current && type != &key_type_request_key_auth ) { /* defend against the auth key being revoked */ - down_read(&context->cred->request_key_auth->sem); + down_read(&cred->request_key_auth->sem); - if (key_validate(context->cred->request_key_auth) == 0) { - rka = context->cred->request_key_auth->payload.data; + if (key_validate(cred->request_key_auth) == 0) { + rka = cred->request_key_auth->payload.data; key_ref = search_process_keyrings(type, description, match, rka->context); - up_read(&context->cred->request_key_auth->sem); + up_read(&cred->request_key_auth->sem); if (!IS_ERR(key_ref)) goto found; @@ -549,7 +552,7 @@ key_ref_t search_process_keyrings(struct key_type *type, break; } } else { - up_read(&context->cred->request_key_auth->sem); + up_read(&cred->request_key_auth->sem); } } @@ -557,6 +560,7 @@ key_ref_t search_process_keyrings(struct key_type *type, key_ref = ret ? ret : err; found: + put_cred(cred); return key_ref; } /* end search_process_keyrings() */ diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 10715d1330b..c8630363823 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -95,13 +95,18 @@ extern void selnl_notify_setenforce(int val); static int task_has_security(struct task_struct *tsk, u32 perms) { - struct task_security_struct *tsec; - - tsec = tsk->cred->security; + const struct task_security_struct *tsec; + u32 sid = 0; + + rcu_read_lock(); + tsec = __task_cred(tsk)->security; + if (tsec) + sid = tsec->sid; + rcu_read_unlock(); if (!tsec) return -EACCES; - return avc_has_perm(tsec->sid, SECINITSID_SECURITY, + return avc_has_perm(sid, SECINITSID_SECURITY, SECCLASS_SECURITY, perms, NULL); } diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index e8a4fcb1ad0..11167fd567b 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -30,6 +30,8 @@ #include "smack.h" +#define task_security(task) (task_cred_xxx((task), security)) + /* * I hope these are the hokeyist lines of code in the module. Casey. 
*/ @@ -1012,7 +1014,7 @@ static void smack_cred_free(struct cred *cred) */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { - return smk_curacc(p->cred->security, MAY_WRITE); + return smk_curacc(task_security(p), MAY_WRITE); } /** @@ -1023,7 +1025,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) */ static int smack_task_getpgid(struct task_struct *p) { - return smk_curacc(p->cred->security, MAY_READ); + return smk_curacc(task_security(p), MAY_READ); } /** @@ -1034,7 +1036,7 @@ static int smack_task_getpgid(struct task_struct *p) */ static int smack_task_getsid(struct task_struct *p) { - return smk_curacc(p->cred->security, MAY_READ); + return smk_curacc(task_security(p), MAY_READ); } /** @@ -1046,7 +1048,7 @@ static int smack_task_getsid(struct task_struct *p) */ static void smack_task_getsecid(struct task_struct *p, u32 *secid) { - *secid = smack_to_secid(p->cred->security); + *secid = smack_to_secid(task_security(p)); } /** @@ -1062,7 +1064,7 @@ static int smack_task_setnice(struct task_struct *p, int nice) rc = cap_task_setnice(p, nice); if (rc == 0) - rc = smk_curacc(p->cred->security, MAY_WRITE); + rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } @@ -1079,7 +1081,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) rc = cap_task_setioprio(p, ioprio); if (rc == 0) - rc = smk_curacc(p->cred->security, MAY_WRITE); + rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } @@ -1091,7 +1093,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) */ static int smack_task_getioprio(struct task_struct *p) { - return smk_curacc(p->cred->security, MAY_READ); + return smk_curacc(task_security(p), MAY_READ); } /** @@ -1109,7 +1111,7 @@ static int smack_task_setscheduler(struct task_struct *p, int policy, rc = cap_task_setscheduler(p, policy, lp); if (rc == 0) - rc = smk_curacc(p->cred->security, MAY_WRITE); + rc = smk_curacc(task_security(p), MAY_WRITE); return rc; } @@ -1121,7 +1123,7 @@ static int smack_task_setscheduler(struct task_struct *p, int policy, */ static int smack_task_getscheduler(struct task_struct *p) { - return smk_curacc(p->cred->security, MAY_READ); + return smk_curacc(task_security(p), MAY_READ); } /** @@ -1132,7 +1134,7 @@ static int smack_task_getscheduler(struct task_struct *p) */ static int smack_task_movememory(struct task_struct *p) { - return smk_curacc(p->cred->security, MAY_WRITE); + return smk_curacc(task_security(p), MAY_WRITE); } /** @@ -1155,13 +1157,13 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info, * can write the receiver. */ if (secid == 0) - return smk_curacc(p->cred->security, MAY_WRITE); + return smk_curacc(task_security(p), MAY_WRITE); /* * If the secid isn't 0 we're dealing with some USB IO * specific behavior. This is not clean. For one thing * we can't take privilege into account. 
*/ - return smk_access(smack_from_secid(secid), p->cred->security, MAY_WRITE); + return smk_access(smack_from_secid(secid), task_security(p), MAY_WRITE); } /** @@ -1174,7 +1176,7 @@ static int smack_task_wait(struct task_struct *p) { int rc; - rc = smk_access(current->cred->security, p->cred->security, MAY_WRITE); + rc = smk_access(current_security(), task_security(p), MAY_WRITE); if (rc == 0) return 0; @@ -1205,7 +1207,7 @@ static int smack_task_wait(struct task_struct *p) static void smack_task_to_inode(struct task_struct *p, struct inode *inode) { struct inode_smack *isp = inode->i_security; - isp->smk_inode = p->cred->security; + isp->smk_inode = task_security(p); } /* @@ -2010,7 +2012,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value) if (strcmp(name, "current") != 0) return -EINVAL; - cp = kstrdup(p->cred->security, GFP_KERNEL); + cp = kstrdup(task_security(p), GFP_KERNEL); if (cp == NULL) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 745ca2475a6ac596e3d8d37c2759c0fbe2586227 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:22 +1100 Subject: CRED: Pass credentials through dentry_open() Pass credentials through dentry_open() so that the COW creds patch can have SELinux's flush_unauthorized_files() pass the appropriate creds back to itself when it opens its null chardev. The security_dentry_open() call also now takes a creds pointer, as does the dentry_open hook in struct security_operations. Signed-off-by: David Howells Acked-by: James Morris Signed-off-by: James Morris --- arch/powerpc/platforms/cell/spufs/inode.c | 4 ++-- arch/um/drivers/mconsole_kern.c | 3 ++- fs/autofs4/dev-ioctl.c | 3 ++- fs/ecryptfs/ecryptfs_kernel.h | 3 ++- fs/ecryptfs/kthread.c | 9 +++++---- fs/ecryptfs/main.c | 3 ++- fs/exportfs/expfs.c | 4 +++- fs/hppfs/hppfs.c | 6 ++++-- fs/nfsctl.c | 3 ++- fs/nfsd/nfs4recover.c | 3 ++- fs/nfsd/vfs.c | 3 ++- fs/open.c | 17 +++++++++++------ fs/xfs/linux-2.6/xfs_ioctl.c | 3 ++- include/linux/fs.h | 4 +++- include/linux/security.h | 7 ++++--- ipc/mqueue.c | 11 +++++++---- security/capability.c | 2 +- security/security.c | 4 ++-- security/selinux/hooks.c | 15 +++++++++------ 19 files changed, 67 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index e128ce7f099..6296bfd9cb0 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -323,7 +323,7 @@ static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt) goto out; } - filp = dentry_open(dentry, mnt, O_RDONLY); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); ret = PTR_ERR(filp); @@ -562,7 +562,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt) goto out; } - filp = dentry_open(dentry, mnt, O_RDONLY); + filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); ret = PTR_ERR(filp); diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 19d579d74d2..16d3b3789a5 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c @@ -159,7 +159,8 @@ void mconsole_proc(struct mc_request *req) goto out_kill; } - file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY); + file = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY, + current_cred()); if (IS_ERR(file)) { mconsole_reply(req, "Failed to open file", 1, 0); goto out_kill; diff --git 
a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 625abf5422e..ec16255d27d 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -307,7 +307,8 @@ static int autofs_dev_ioctl_open_mountpoint(const char *path, dev_t devid) goto out; } - filp = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY); + filp = dentry_open(nd.path.dentry, nd.path.mnt, O_RDONLY, + current_cred()); if (IS_ERR(filp)) { err = PTR_ERR(filp); goto out; diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 3504cf9df35..a75026d35d1 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -691,7 +691,8 @@ int ecryptfs_init_kthread(void); void ecryptfs_destroy_kthread(void); int ecryptfs_privileged_open(struct file **lower_file, struct dentry *lower_dentry, - struct vfsmount *lower_mnt); + struct vfsmount *lower_mnt, + const struct cred *cred); int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry); #endif /* #ifndef ECRYPTFS_KERNEL_H */ diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index c440c6b58b2..c6d7a4d748a 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c @@ -73,7 +73,7 @@ static int ecryptfs_threadfn(void *ignored) mntget(req->lower_mnt); (*req->lower_file) = dentry_open( req->lower_dentry, req->lower_mnt, - (O_RDWR | O_LARGEFILE)); + (O_RDWR | O_LARGEFILE), current_cred()); req->flags |= ECRYPTFS_REQ_PROCESSED; } wake_up(&req->wait); @@ -132,7 +132,8 @@ void ecryptfs_destroy_kthread(void) */ int ecryptfs_privileged_open(struct file **lower_file, struct dentry *lower_dentry, - struct vfsmount *lower_mnt) + struct vfsmount *lower_mnt, + const struct cred *cred) { struct ecryptfs_open_req *req; int rc = 0; @@ -143,7 +144,7 @@ int ecryptfs_privileged_open(struct file **lower_file, dget(lower_dentry); mntget(lower_mnt); (*lower_file) = dentry_open(lower_dentry, lower_mnt, - (O_RDWR | O_LARGEFILE)); + (O_RDWR | O_LARGEFILE), cred); if (!IS_ERR(*lower_file)) goto out; req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL); @@ -184,7 +185,7 @@ int ecryptfs_privileged_open(struct file **lower_file, dget(lower_dentry); mntget(lower_mnt); (*lower_file) = dentry_open(lower_dentry, lower_mnt, - (O_RDONLY | O_LARGEFILE)); + (O_RDONLY | O_LARGEFILE), cred); if (IS_ERR(*lower_file)) { rc = PTR_ERR(*req->lower_file); (*lower_file) = NULL; diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 64d2ba980df..fd630713c5c 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -115,6 +115,7 @@ void __ecryptfs_printk(const char *fmt, ...) */ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) { + const struct cred *cred = current_cred(); struct ecryptfs_inode_info *inode_info = ecryptfs_inode_to_private(ecryptfs_dentry->d_inode); int rc = 0; @@ -127,7 +128,7 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); rc = ecryptfs_privileged_open(&inode_info->lower_file, - lower_dentry, lower_mnt); + lower_dentry, lower_mnt, cred); if (rc || IS_ERR(inode_info->lower_file)) { printk(KERN_ERR "Error opening lower persistent file " "for lower_dentry [0x%p] and lower_mnt [0x%p]; " diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 80246bad1b7..ec1fb918200 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -14,6 +14,7 @@ #include #include #include +#include #define dprintk(fmt, args...) 
do{}while(0) @@ -249,6 +250,7 @@ static int filldir_one(void * __buf, const char * name, int len, static int get_name(struct vfsmount *mnt, struct dentry *dentry, char *name, struct dentry *child) { + const struct cred *cred = current_cred(); struct inode *dir = dentry->d_inode; int error; struct file *file; @@ -263,7 +265,7 @@ static int get_name(struct vfsmount *mnt, struct dentry *dentry, /* * Open the directory ... */ - file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY); + file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY, cred); error = PTR_ERR(file); if (IS_ERR(file)) goto out; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 2b3d1828db9..795e2c130ad 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -426,6 +426,7 @@ static int file_mode(int fmode) static int hppfs_open(struct inode *inode, struct file *file) { + const struct cred *cred = current_cred(); struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; @@ -446,7 +447,7 @@ static int hppfs_open(struct inode *inode, struct file *file) /* XXX This isn't closed anywhere */ data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), - file_mode(file->f_mode)); + file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free1; @@ -489,6 +490,7 @@ static int hppfs_open(struct inode *inode, struct file *file) static int hppfs_dir_open(struct inode *inode, struct file *file) { + const struct cred *cred = current_cred(); struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; @@ -502,7 +504,7 @@ static int hppfs_dir_open(struct inode *inode, struct file *file) proc_dentry = HPPFS_I(inode)->proc_dentry; proc_mnt = inode->i_sb->s_fs_info; data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), - file_mode(file->f_mode)); + file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free; diff --git a/fs/nfsctl.c b/fs/nfsctl.c index aed8145d908..cc4ef2642a5 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c @@ -41,7 +41,8 @@ static struct file *do_open(char *name, int flags) error = may_open(&nd, MAY_WRITE, FMODE_WRITE); if (!error) - return dentry_open(nd.path.dentry, nd.path.mnt, flags); + return dentry_open(nd.path.dentry, nd.path.mnt, flags, + current_cred()); path_put(&nd.path); return ERR_PTR(error); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index a5e14e8695e..632a50b4b37 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -226,7 +226,8 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) nfs4_save_user(&uid, &gid); - filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY); + filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY, + current_cred()); status = PTR_ERR(filp); if (IS_ERR(filp)) goto out; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 890d9a68c85..b59ec5a6ed2 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -671,6 +671,7 @@ __be32 nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, int access, struct file **filp) { + const struct cred *cred = current_cred(); struct dentry *dentry; struct inode *inode; int flags = O_RDONLY|O_LARGEFILE; @@ -725,7 +726,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, DQUOT_INIT(inode); } *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), - flags); + flags, cred); if (IS_ERR(*filp)) host_err = PTR_ERR(*filp); out_nfserr: diff --git a/fs/open.c b/fs/open.c index b1238e195e7..f96eaab280a 100644 --- a/fs/open.c +++ b/fs/open.c 
@@ -783,7 +783,8 @@ static inline int __get_file_write_access(struct inode *inode, static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, struct file *f, - int (*open)(struct inode *, struct file *)) + int (*open)(struct inode *, struct file *), + const struct cred *cred) { struct inode *inode; int error; @@ -807,7 +808,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, f->f_op = fops_get(inode->i_fop); file_move(f, &inode->i_sb->s_files); - error = security_dentry_open(f); + error = security_dentry_open(f, cred); if (error) goto cleanup_all; @@ -882,6 +883,8 @@ cleanup_file: struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)) { + const struct cred *cred = current_cred(); + if (IS_ERR(nd->intent.open.file)) goto out; if (IS_ERR(dentry)) @@ -889,7 +892,7 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), nd->intent.open.flags - 1, nd->intent.open.file, - open); + open, cred); out: return nd->intent.open.file; out_err: @@ -908,6 +911,7 @@ EXPORT_SYMBOL_GPL(lookup_instantiate_filp); */ struct file *nameidata_to_filp(struct nameidata *nd, int flags) { + const struct cred *cred = current_cred(); struct file *filp; /* Pick up the filp from the open intent */ @@ -915,7 +919,7 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) /* Has the filesystem initialised the file for us? */ if (filp->f_path.dentry == NULL) filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp, - NULL); + NULL, cred); else path_put(&nd->path); return filp; @@ -925,7 +929,8 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an * error. */ -struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) +struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, + const struct cred *cred) { int error; struct file *f; @@ -950,7 +955,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) return ERR_PTR(error); } - return __dentry_open(dentry, mnt, flags, f, NULL); + return __dentry_open(dentry, mnt, flags, f, NULL, cred); } EXPORT_SYMBOL(dentry_open); diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 67c72aec97e..281cbd5a25c 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -256,6 +256,7 @@ xfs_open_by_handle( struct file *parfilp, struct inode *parinode) { + const struct cred *cred = current_cred(); int error; int new_fd; int permflag; @@ -321,7 +322,7 @@ xfs_open_by_handle( mntget(parfilp->f_path.mnt); /* Create file pointer. 
*/ - filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags); + filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags, cred); if (IS_ERR(filp)) { put_unused_fd(new_fd); return -XFS_ERROR(-PTR_ERR(filp)); diff --git a/include/linux/fs.h b/include/linux/fs.h index b3d404aaabe..3bfec1327b8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -315,6 +315,7 @@ struct poll_table_struct; struct kstatfs; struct vm_area_struct; struct vfsmount; +struct cred; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -1673,7 +1674,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, extern long do_sys_open(int dfd, const char __user *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); -extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); +extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, + const struct cred *); extern int filp_close(struct file *, fl_owner_t id); extern char * getname(const char __user *); diff --git a/include/linux/security.h b/include/linux/security.h index 9239cc11eb9..7e9fe046a0d 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1402,7 +1402,7 @@ struct security_operations { int (*file_send_sigiotask) (struct task_struct *tsk, struct fown_struct *fown, int sig); int (*file_receive) (struct file *file); - int (*dentry_open) (struct file *file); + int (*dentry_open) (struct file *file, const struct cred *cred); int (*task_create) (unsigned long clone_flags); int (*cred_alloc_security) (struct cred *cred); @@ -1658,7 +1658,7 @@ int security_file_set_fowner(struct file *file); int security_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int sig); int security_file_receive(struct file *file); -int security_dentry_open(struct file *file); +int security_dentry_open(struct file *file, const struct cred *cred); int security_task_create(unsigned long clone_flags); int security_cred_alloc(struct cred *cred); void security_cred_free(struct cred *cred); @@ -2171,7 +2171,8 @@ static inline int security_file_receive(struct file *file) return 0; } -static inline int security_dentry_open(struct file *file) +static inline int security_dentry_open(struct file *file, + const struct cred *cred) { return 0; } diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 1151881ccb9..d9393f8e4c3 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -594,6 +594,7 @@ static int mq_attr_ok(struct mq_attr *attr) static struct file *do_create(struct dentry *dir, struct dentry *dentry, int oflag, mode_t mode, struct mq_attr __user *u_attr) { + const struct cred *cred = current_cred(); struct mq_attr attr; struct file *result; int ret; @@ -618,7 +619,7 @@ static struct file *do_create(struct dentry *dir, struct dentry *dentry, if (ret) goto out_drop_write; - result = dentry_open(dentry, mqueue_mnt, oflag); + result = dentry_open(dentry, mqueue_mnt, oflag, cred); /* * dentry_open() took a persistent mnt_want_write(), * so we can now drop this one. 
@@ -637,8 +638,10 @@ out: /* Opens existing queue */ static struct file *do_open(struct dentry *dentry, int oflag) { -static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; + const struct cred *cred = current_cred(); + + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { dput(dentry); @@ -652,7 +655,7 @@ static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, return ERR_PTR(-EACCES); } - return dentry_open(dentry, mqueue_mnt, oflag); + return dentry_open(dentry, mqueue_mnt, oflag, cred); } asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, diff --git a/security/capability.c b/security/capability.c index 6c4b5137ca7..fac2f61b69a 100644 --- a/security/capability.c +++ b/security/capability.c @@ -330,7 +330,7 @@ static int cap_file_receive(struct file *file) return 0; } -static int cap_dentry_open(struct file *file) +static int cap_dentry_open(struct file *file, const struct cred *cred) { return 0; } diff --git a/security/security.c b/security/security.c index d058f7d5b10..f40a0a04c3c 100644 --- a/security/security.c +++ b/security/security.c @@ -606,9 +606,9 @@ int security_file_receive(struct file *file) return security_ops->file_receive(file); } -int security_dentry_open(struct file *file) +int security_dentry_open(struct file *file, const struct cred *cred) { - return security_ops->dentry_open(file); + return security_ops->dentry_open(file, cred); } int security_task_create(unsigned long clone_flags) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index cc6e5a3f10c..f20cbd681ba 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2150,9 +2150,9 @@ extern struct vfsmount *selinuxfs_mount; extern struct dentry *selinux_null; /* Derived from fs/exec.c:flush_old_files. */ -static inline void flush_unauthorized_files(struct files_struct *files) +static inline void flush_unauthorized_files(const struct cred *cred, + struct files_struct *files) { - const struct cred *cred = current_cred(); struct avc_audit_data ad; struct file *file, *devnull = NULL; struct tty_struct *tty; @@ -2222,7 +2222,10 @@ static inline void flush_unauthorized_files(struct files_struct *files) if (devnull) { get_file(devnull); } else { - devnull = dentry_open(dget(selinux_null), mntget(selinuxfs_mount), O_RDWR); + devnull = dentry_open( + dget(selinux_null), + mntget(selinuxfs_mount), + O_RDWR, cred); if (IS_ERR(devnull)) { devnull = NULL; put_unused_fd(fd); @@ -2302,6 +2305,7 @@ static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) */ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) { + const struct cred *cred = current_cred(); struct task_security_struct *tsec; struct rlimit *rlim, *initrlim; struct itimerval itimer; @@ -2321,7 +2325,7 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) return; /* Close files for which the new task SID is not authorized. */ - flush_unauthorized_files(current->files); + flush_unauthorized_files(cred, current->files); /* Check whether the new SID can inherit signal state from the old SID. 
If not, clear itimers to avoid @@ -3202,9 +3206,8 @@ static int selinux_file_receive(struct file *file) return file_has_perm(cred, file, file_to_av(file)); } -static int selinux_dentry_open(struct file *file) +static int selinux_dentry_open(struct file *file, const struct cred *cred) { - const struct cred *cred = current_cred(); struct file_security_struct *fsec; struct inode *inode; struct inode_security_struct *isec; -- cgit v1.2.3-70-g09d2 From d84f4f992cbd76e8f39c488cf0c5d123843923b1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:23 +1100 Subject: CRED: Inaugurate COW credentials Inaugurate copy-on-write credentials management. This uses RCU to manage the credentials pointer in the task_struct with respect to accesses by other tasks. A process may only modify its own credentials, and so does not need locking to access or modify its own credentials. A mutex (cred_replace_mutex) is added to the task_struct to control the effect of PTRACE_ATTACHED on credential calculations, particularly with respect to execve(). With this patch, the contents of an active credentials struct may not be changed directly; rather a new set of credentials must be prepared, modified and committed using something like the following sequence of events: struct cred *new = prepare_creds(); int ret = blah(new); if (ret < 0) { abort_creds(new); return ret; } return commit_creds(new); There are some exceptions to this rule: the keyrings pointed to by the active credentials may be instantiated - keyrings violate the COW rule as managing COW keyrings is tricky, given that it is possible for a task to directly alter the keys in a keyring in use by another task. To help enforce this, various pointers to sets of credentials, such as those in the task_struct, are declared const. The purpose of this is compile-time discouragement of altering credentials through those pointers. Once a set of credentials has been made public through one of these pointers, it may not be modified, except under special circumstances: (1) Its reference count may incremented and decremented. (2) The keyrings to which it points may be modified, but not replaced. The only safe way to modify anything else is to create a replacement and commit using the functions described in Documentation/credentials.txt (which will be added by a later patch). This patch and the preceding patches have been tested with the LTP SELinux testsuite. This patch makes several logical sets of alteration: (1) execve(). This now prepares and commits credentials in various places in the security code rather than altering the current creds directly. (2) Temporary credential overrides. do_coredump() and sys_faccessat() now prepare their own credentials and temporarily override the ones currently on the acting thread, whilst preventing interference from other threads by holding cred_replace_mutex on the thread being dumped. This will be replaced in a future patch by something that hands down the credentials directly to the functions being called, rather than altering the task's objective credentials. (3) LSM interface. A number of functions have been changed, added or removed: (*) security_capset_check(), ->capset_check() (*) security_capset_set(), ->capset_set() Removed in favour of security_capset(). (*) security_capset(), ->capset() New. This is passed a pointer to the new creds, a pointer to the old creds and the proposed capability sets. It should fill in the new creds or return an error. 
All pointers, barring the pointer to the new creds, are now const. (*) security_bprm_apply_creds(), ->bprm_apply_creds() Changed; now returns a value, which will cause the process to be killed if it's an error. (*) security_task_alloc(), ->task_alloc_security() Removed in favour of security_prepare_creds(). (*) security_cred_free(), ->cred_free() New. Free security data attached to cred->security. (*) security_prepare_creds(), ->cred_prepare() New. Duplicate any security data attached to cred->security. (*) security_commit_creds(), ->cred_commit() New. Apply any security effects for the upcoming installation of new security by commit_creds(). (*) security_task_post_setuid(), ->task_post_setuid() Removed in favour of security_task_fix_setuid(). (*) security_task_fix_setuid(), ->task_fix_setuid() Fix up the proposed new credentials for setuid(). This is used by cap_set_fix_setuid() to implicitly adjust capabilities in line with setuid() changes. Changes are made to the new credentials, rather than the task itself as in security_task_post_setuid(). (*) security_task_reparent_to_init(), ->task_reparent_to_init() Removed. Instead the task being reparented to init is referred directly to init's credentials. NOTE! This results in the loss of some state: SELinux's osid no longer records the sid of the thread that forked it. (*) security_key_alloc(), ->key_alloc() (*) security_key_permission(), ->key_permission() Changed. These now take cred pointers rather than task pointers to refer to the security context. (4) sys_capset(). This has been simplified and uses less locking. The LSM functions it calls have been merged. (5) reparent_to_kthreadd(). This gives the current thread the same credentials as init by simply using commit_thread() to point that way. (6) __sigqueue_alloc() and switch_uid() __sigqueue_alloc() can't stop the target task from changing its creds beneath it, so this function gets a reference to the currently applicable user_struct which it then passes into the sigqueue struct it returns if successful. switch_uid() is now called from commit_creds(), and possibly should be folded into that. commit_creds() should take care of protecting __sigqueue_alloc(). (7) [sg]et[ug]id() and co and [sg]et_current_groups. The set functions now all use prepare_creds(), commit_creds() and abort_creds() to build and check a new set of credentials before applying it. security_task_set[ug]id() is called inside the prepared section. This guarantees that nothing else will affect the creds until we've finished. The calling of set_dumpable() has been moved into commit_creds(). Much of the functionality of set_user() has been moved into commit_creds(). The get functions all simply access the data directly. (8) security_task_prctl() and cap_task_prctl(). security_task_prctl() has been modified to return -ENOSYS if it doesn't want to handle a function, or otherwise return the return value directly rather than through an argument. Additionally, cap_task_prctl() now prepares a new set of credentials, even if it doesn't end up using it. (9) Keyrings. A number of changes have been made to the keyrings code: (a) switch_uid_keyring(), copy_keys(), exit_keys() and suid_keys() have all been dropped and built in to the credentials functions directly. They may want separating out again later. (b) key_alloc() and search_process_keyrings() now take a cred pointer rather than a task pointer to specify the security context. 
(c) copy_creds() gives a new thread within the same thread group a new thread keyring if its parent had one, otherwise it discards the thread keyring. (d) The authorisation key now points directly to the credentials to extend the search into rather pointing to the task that carries them. (e) Installing thread, process or session keyrings causes a new set of credentials to be created, even though it's not strictly necessary for process or session keyrings (they're shared). (10) Usermode helper. The usermode helper code now carries a cred struct pointer in its subprocess_info struct instead of a new session keyring pointer. This set of credentials is derived from init_cred and installed on the new process after it has been cloned. call_usermodehelper_setup() allocates the new credentials and call_usermodehelper_freeinfo() discards them if they haven't been used. A special cred function (prepare_usermodeinfo_creds()) is provided specifically for call_usermodehelper_setup() to call. call_usermodehelper_setkeys() adjusts the credentials to sport the supplied keyring as the new session keyring. (11) SELinux. SELinux has a number of changes, in addition to those to support the LSM interface changes mentioned above: (a) selinux_setprocattr() no longer does its check for whether the current ptracer can access processes with the new SID inside the lock that covers getting the ptracer's SID. Whilst this lock ensures that the check is done with the ptracer pinned, the result is only valid until the lock is released, so there's no point doing it inside the lock. (12) is_single_threaded(). This function has been extracted from selinux_setprocattr() and put into a file of its own in the lib/ directory as join_session_keyring() now wants to use it too. The code in SELinux just checked to see whether a task shared mm_structs with other tasks (CLONE_VM), but that isn't good enough. We really want to know if they're part of the same thread group (CLONE_THREAD). (13) nfsd. The NFS server daemon now has to use the COW credentials to set the credentials it is going to use. It really needs to pass the credentials down to the functions it calls, but it can't do that until other patches in this series have been applied. 
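The prepare/commit sequence described above is easiest to see as compilable code. The sketch below is illustrative only and is not part of this patch: cred_set_fsuid() and run_with_root_fsuid() are hypothetical helpers invented for the example, while prepare_creds(), abort_creds(), commit_creds(), override_creds(), revert_creds() and put_cred() are the interfaces this commit message documents; the second helper mirrors the temporary-override pattern that do_coredump() adopts in the diff that follows.

/*
 * Illustrative sketch only -- cred_set_fsuid() and run_with_root_fsuid()
 * are hypothetical helpers, not functions added by this patch.
 */
#include <linux/cred.h>
#include <linux/errno.h>

/* Permanently change the calling task's fsuid via the COW cred API. */
static int cred_set_fsuid(uid_t fsuid)
{
        struct cred *new;

        new = prepare_creds();          /* private, modifiable copy */
        if (!new)
                return -ENOMEM;

        if (fsuid == (uid_t) -1) {      /* hypothetical validation step */
                abort_creds(new);       /* discard the unused copy */
                return -EINVAL;
        }

        new->fsuid = fsuid;             /* mutate only the private copy */
        return commit_creds(new);       /* install; consumes the reference */
}

/* Temporarily act with fsuid 0, as do_coredump() does in this series. */
static int run_with_root_fsuid(int (*fn)(void))
{
        const struct cred *old;
        struct cred *new;
        int ret;

        new = prepare_creds();
        if (!new)
                return -ENOMEM;
        new->fsuid = 0;

        old = override_creds(new);      /* swap in the temporary creds */
        ret = fn();
        revert_creds(old);              /* restore the original creds */
        put_cred(new);                  /* drop our own reference */
        return ret;
}

Note that commit_creds() consumes the reference returned by prepare_creds(), so a committed set must not also be passed to abort_creds() or put_cred(); by contrast, a set handed to override_creds() still needs a final put_cred() from the caller after revert_creds(), as in the do_coredump() hunk below.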
Signed-off-by: David Howells Acked-by: James Morris Signed-off-by: James Morris --- fs/exec.c | 31 ++- fs/nfsd/auth.c | 92 ++++---- fs/nfsd/nfs4recover.c | 68 +++--- fs/nfsd/nfsfh.c | 11 +- fs/open.c | 31 ++- include/linux/audit.h | 22 +- include/linux/capability.h | 2 - include/linux/cred.h | 44 +++- include/linux/init_task.h | 2 + include/linux/key.h | 22 +- include/linux/sched.h | 6 +- include/linux/security.h | 178 +++++++--------- init/main.c | 1 + kernel/auditsc.c | 42 ++-- kernel/capability.c | 78 +++---- kernel/cred-internals.h | 21 ++ kernel/cred.c | 321 ++++++++++++++++++++++++---- kernel/exit.c | 9 +- kernel/fork.c | 7 +- kernel/kmod.c | 30 ++- kernel/ptrace.c | 9 + kernel/signal.c | 10 +- kernel/sys.c | 450 +++++++++++++++++++++------------------ kernel/user.c | 37 +--- kernel/user_namespace.c | 12 +- lib/Makefile | 2 +- net/rxrpc/ar-key.c | 6 +- security/capability.c | 21 +- security/commoncap.c | 265 +++++++++++------------ security/keys/internal.h | 17 +- security/keys/key.c | 25 +-- security/keys/keyctl.c | 95 ++++++--- security/keys/keyring.c | 14 +- security/keys/permission.c | 24 ++- security/keys/proc.c | 8 +- security/keys/process_keys.c | 333 ++++++++++++++--------------- security/keys/request_key.c | 29 ++- security/keys/request_key_auth.c | 41 ++-- security/security.c | 58 +++-- security/selinux/hooks.c | 286 ++++++++++++------------- security/smack/smack_lsm.c | 82 ++++--- 41 files changed, 1603 insertions(+), 1239 deletions(-) create mode 100644 kernel/cred-internals.h (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index a5330e1a221..9bd3559ddec 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1007,13 +1007,12 @@ int flush_old_exec(struct linux_binprm * bprm) */ current->mm->task_size = TASK_SIZE; - if (bprm->e_uid != current_euid() || bprm->e_gid != current_egid()) { - suid_keys(current); + if (bprm->e_uid != current_euid() || + bprm->e_gid != current_egid()) { set_dumpable(current->mm, suid_dumpable); current->pdeath_signal = 0; } else if (file_permission(bprm->file, MAY_READ) || (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) { - suid_keys(current); set_dumpable(current->mm, suid_dumpable); } @@ -1096,10 +1095,8 @@ void compute_creds(struct linux_binprm *bprm) { int unsafe; - if (bprm->e_uid != current_uid()) { - suid_keys(current); + if (bprm->e_uid != current_uid()) current->pdeath_signal = 0; - } exec_keys(current); task_lock(current); @@ -1709,8 +1706,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) struct linux_binfmt * binfmt; struct inode * inode; struct file * file; + const struct cred *old_cred; + struct cred *cred; int retval = 0; - int fsuid = current_fsuid(); int flag = 0; int ispipe = 0; unsigned long core_limit = current->signal->rlim[RLIMIT_CORE].rlim_cur; @@ -1723,12 +1721,20 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) binfmt = current->binfmt; if (!binfmt || !binfmt->core_dump) goto fail; + + cred = prepare_creds(); + if (!cred) { + retval = -ENOMEM; + goto fail; + } + down_write(&mm->mmap_sem); /* * If another thread got here first, or we are not dumpable, bail out. 
*/ if (mm->core_state || !get_dumpable(mm)) { up_write(&mm->mmap_sem); + put_cred(cred); goto fail; } @@ -1739,12 +1745,16 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) */ if (get_dumpable(mm) == 2) { /* Setuid core dump mode */ flag = O_EXCL; /* Stop rewrite attacks */ - current->cred->fsuid = 0; /* Dump root private */ + cred->fsuid = 0; /* Dump root private */ } retval = coredump_wait(exit_code, &core_state); - if (retval < 0) + if (retval < 0) { + put_cred(cred); goto fail; + } + + old_cred = override_creds(cred); /* * Clear any false indication of pending signals that might @@ -1835,7 +1845,8 @@ fail_unlock: if (helper_argv) argv_free(helper_argv); - current->cred->fsuid = fsuid; + revert_creds(old_cred); + put_cred(cred); coredump_finish(mm); fail: return retval; diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 808fc03a6fb..836ffa1047d 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -27,55 +27,67 @@ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp) int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) { - struct cred *act_as = current->cred ; - struct svc_cred cred = rqstp->rq_cred; + struct group_info *rqgi; + struct group_info *gi; + struct cred *new; int i; int flags = nfsexp_flags(rqstp, exp); int ret; + new = prepare_creds(); + if (!new) + return -ENOMEM; + + new->fsuid = rqstp->rq_cred.cr_uid; + new->fsgid = rqstp->rq_cred.cr_gid; + + rqgi = rqstp->rq_cred.cr_group_info; + if (flags & NFSEXP_ALLSQUASH) { - cred.cr_uid = exp->ex_anon_uid; - cred.cr_gid = exp->ex_anon_gid; - cred.cr_group_info = groups_alloc(0); + new->fsuid = exp->ex_anon_uid; + new->fsgid = exp->ex_anon_gid; + gi = groups_alloc(0); } else if (flags & NFSEXP_ROOTSQUASH) { - struct group_info *gi; - if (!cred.cr_uid) - cred.cr_uid = exp->ex_anon_uid; - if (!cred.cr_gid) - cred.cr_gid = exp->ex_anon_gid; - gi = groups_alloc(cred.cr_group_info->ngroups); - if (gi) - for (i = 0; i < cred.cr_group_info->ngroups; i++) { - if (!GROUP_AT(cred.cr_group_info, i)) - GROUP_AT(gi, i) = exp->ex_anon_gid; - else - GROUP_AT(gi, i) = GROUP_AT(cred.cr_group_info, i); - } - cred.cr_group_info = gi; - } else - get_group_info(cred.cr_group_info); - - if (cred.cr_uid != (uid_t) -1) - act_as->fsuid = cred.cr_uid; - else - act_as->fsuid = exp->ex_anon_uid; - if (cred.cr_gid != (gid_t) -1) - act_as->fsgid = cred.cr_gid; - else - act_as->fsgid = exp->ex_anon_gid; + if (!new->fsuid) + new->fsuid = exp->ex_anon_uid; + if (!new->fsgid) + new->fsgid = exp->ex_anon_gid; - if (!cred.cr_group_info) - return -ENOMEM; - ret = set_groups(act_as, cred.cr_group_info); - put_group_info(cred.cr_group_info); - if ((cred.cr_uid)) { - act_as->cap_effective = - cap_drop_nfsd_set(act_as->cap_effective); + gi = groups_alloc(rqgi->ngroups); + if (!gi) + goto oom; + + for (i = 0; i < rqgi->ngroups; i++) { + if (!GROUP_AT(rqgi, i)) + GROUP_AT(gi, i) = exp->ex_anon_gid; + else + GROUP_AT(gi, i) = GROUP_AT(rqgi, i); + } } else { - act_as->cap_effective = - cap_raise_nfsd_set(act_as->cap_effective, - act_as->cap_permitted); + gi = get_group_info(rqgi); } + + if (new->fsuid == (uid_t) -1) + new->fsuid = exp->ex_anon_uid; + if (new->fsgid == (gid_t) -1) + new->fsgid = exp->ex_anon_gid; + + ret = set_groups(new, gi); + put_group_info(gi); + if (!ret) + goto error; + + if (new->uid) + new->cap_effective = cap_drop_nfsd_set(new->cap_effective); + else + new->cap_effective = cap_raise_nfsd_set(new->cap_effective, + new->cap_permitted); + return commit_creds(new); + +oom: + ret = -ENOMEM; +error: + abort_creds(new); 
return ret; } diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 632a50b4b37..9371ea12d7f 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -54,20 +54,26 @@ static struct path rec_dir; static int rec_dir_init = 0; -static void -nfs4_save_user(uid_t *saveuid, gid_t *savegid) +static int +nfs4_save_creds(const struct cred **original_creds) { - *saveuid = current->cred->fsuid; - *savegid = current->cred->fsgid; - current->cred->fsuid = 0; - current->cred->fsgid = 0; + struct cred *new; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + new->fsuid = 0; + new->fsgid = 0; + *original_creds = override_creds(new); + put_cred(new); + return 0; } static void -nfs4_reset_user(uid_t saveuid, gid_t savegid) +nfs4_reset_creds(const struct cred *original) { - current->cred->fsuid = saveuid; - current->cred->fsgid = savegid; + revert_creds(original); } static void @@ -129,10 +135,9 @@ nfsd4_sync_rec_dir(void) int nfsd4_create_clid_dir(struct nfs4_client *clp) { + const struct cred *original_cred; char *dname = clp->cl_recdir; struct dentry *dentry; - uid_t uid; - gid_t gid; int status; dprintk("NFSD: nfsd4_create_clid_dir for \"%s\"\n", dname); @@ -140,7 +145,9 @@ nfsd4_create_clid_dir(struct nfs4_client *clp) if (!rec_dir_init || clp->cl_firststate) return 0; - nfs4_save_user(&uid, &gid); + status = nfs4_save_creds(&original_cred); + if (status < 0) + return status; /* lock the parent */ mutex_lock(&rec_dir.dentry->d_inode->i_mutex); @@ -168,7 +175,7 @@ out_unlock: clp->cl_firststate = 1; nfsd4_sync_rec_dir(); } - nfs4_reset_user(uid, gid); + nfs4_reset_creds(original_cred); dprintk("NFSD: nfsd4_create_clid_dir returns %d\n", status); return status; } @@ -211,20 +218,21 @@ nfsd4_build_dentrylist(void *arg, const char *name, int namlen, static int nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) { + const struct cred *original_cred; struct file *filp; struct dentry_list_arg dla = { .parent = dir, }; struct list_head *dentries = &dla.dentries; struct dentry_list *child; - uid_t uid; - gid_t gid; int status; if (!rec_dir_init) return 0; - nfs4_save_user(&uid, &gid); + status = nfs4_save_creds(&original_cred); + if (status < 0) + return status; filp = dentry_open(dget(dir), mntget(rec_dir.mnt), O_RDONLY, current_cred()); @@ -250,7 +258,7 @@ out: dput(child->dentry); kfree(child); } - nfs4_reset_user(uid, gid); + nfs4_reset_creds(original_cred); return status; } @@ -312,8 +320,7 @@ out: void nfsd4_remove_clid_dir(struct nfs4_client *clp) { - uid_t uid; - gid_t gid; + const struct cred *original_cred; int status; if (!rec_dir_init || !clp->cl_firststate) @@ -323,9 +330,13 @@ nfsd4_remove_clid_dir(struct nfs4_client *clp) if (status) goto out; clp->cl_firststate = 0; - nfs4_save_user(&uid, &gid); + + status = nfs4_save_creds(&original_cred); + if (status < 0) + goto out; + status = nfsd4_unlink_clid_dir(clp->cl_recdir, HEXDIR_LEN-1); - nfs4_reset_user(uid, gid); + nfs4_reset_creds(original_cred); if (status == 0) nfsd4_sync_rec_dir(); mnt_drop_write(rec_dir.mnt); @@ -402,16 +413,21 @@ nfsd4_recdir_load(void) { void nfsd4_init_recdir(char *rec_dirname) { - uid_t uid = 0; - gid_t gid = 0; - int status; + const struct cred *original_cred; + int status; printk("NFSD: Using %s as the NFSv4 state recovery directory\n", rec_dirname); BUG_ON(rec_dir_init); - nfs4_save_user(&uid, &gid); + status = nfs4_save_creds(&original_cred); + if (status < 0) { + printk("NFSD: Unable to change credentials to find recovery" + " directory: error %d\n", + status); + return; + } status = 
kern_path(rec_dirname, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &rec_dir); @@ -421,7 +437,7 @@ nfsd4_init_recdir(char *rec_dirname) if (!status) rec_dir_init = 1; - nfs4_reset_user(uid, gid); + nfs4_reset_creds(original_cred); } void diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index e67cfaea086..f0da7d9c3a9 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -186,9 +186,14 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) * access control settings being in effect, we cannot * fix that case easily. */ - current->cred->cap_effective = - cap_raise_nfsd_set(current->cred->cap_effective, - current->cred->cap_permitted); + struct cred *new = prepare_creds(); + if (!new) + return nfserrno(-ENOMEM); + new->cap_effective = + cap_raise_nfsd_set(new->cap_effective, + new->cap_permitted); + put_cred(override_creds(new)); + put_cred(new); } else { error = nfsd_setuser_and_check_port(rqstp, exp); if (error) diff --git a/fs/open.c b/fs/open.c index f96eaab280a..c0a426d5766 100644 --- a/fs/open.c +++ b/fs/open.c @@ -425,30 +425,33 @@ out: */ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) { - struct cred *cred = current->cred; + const struct cred *old_cred; + struct cred *override_cred; struct path path; struct inode *inode; - int old_fsuid, old_fsgid; - kernel_cap_t uninitialized_var(old_cap); /* !SECURE_NO_SETUID_FIXUP */ int res; if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; - old_fsuid = cred->fsuid; - old_fsgid = cred->fsgid; + override_cred = prepare_creds(); + if (!override_cred) + return -ENOMEM; - cred->fsuid = cred->uid; - cred->fsgid = cred->gid; + override_cred->fsuid = override_cred->uid; + override_cred->fsgid = override_cred->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { /* Clear the capabilities if we switch to a non-root user */ - if (current->cred->uid) - old_cap = cap_set_effective(__cap_empty_set); + if (override_cred->uid) + cap_clear(override_cred->cap_effective); else - old_cap = cap_set_effective(cred->cap_permitted); + override_cred->cap_effective = + override_cred->cap_permitted; } + old_cred = override_creds(override_cred); + res = user_path_at(dfd, filename, LOOKUP_FOLLOW, &path); if (res) goto out; @@ -485,12 +488,8 @@ asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode) out_path_release: path_put(&path); out: - cred->fsuid = old_fsuid; - cred->fsgid = old_fsgid; - - if (!issecure(SECURE_NO_SETUID_FIXUP)) - cap_set_effective(old_cap); - + revert_creds(old_cred); + put_cred(override_cred); return res; } diff --git a/include/linux/audit.h b/include/linux/audit.h index 6fbebac7b1b..0b2fcb698a6 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -454,8 +454,10 @@ extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_pr extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout); extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification); extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat); -extern void __audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_cap_t *pE); -extern int __audit_log_capset(pid_t pid, kernel_cap_t *eff, kernel_cap_t *inh, kernel_cap_t *perm); +extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old); +extern int __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old); static inline int 
audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -522,16 +524,20 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) * * -Eric */ -static inline void audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_cap_t *pE) +static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, + const struct cred *old) { if (unlikely(!audit_dummy_context())) - __audit_log_bprm_fcaps(bprm, pP, pE); + return __audit_log_bprm_fcaps(bprm, new, old); + return 0; } -static inline int audit_log_capset(pid_t pid, kernel_cap_t *eff, kernel_cap_t *inh, kernel_cap_t *perm) +static inline int audit_log_capset(pid_t pid, const struct cred *new, + const struct cred *old) { if (unlikely(!audit_dummy_context())) - return __audit_log_capset(pid, eff, inh, perm); + return __audit_log_capset(pid, new, old); return 0; } @@ -566,8 +572,8 @@ extern int audit_signals; #define audit_mq_timedreceive(d,l,p,t) ({ 0; }) #define audit_mq_notify(d,n) ({ 0; }) #define audit_mq_getsetattr(d,s) ({ 0; }) -#define audit_log_bprm_fcaps(b, p, e) do { ; } while (0) -#define audit_log_capset(pid, e, i, p) ({ 0; }) +#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; }) +#define audit_log_capset(pid, ncr, ocr) ({ 0; }) #define audit_ptrace(t) ((void)0) #define audit_n_rules 0 #define audit_signals 0 diff --git a/include/linux/capability.h b/include/linux/capability.h index 7f26580a5a4..e22f48c2a46 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -519,8 +519,6 @@ extern const kernel_cap_t __cap_empty_set; extern const kernel_cap_t __cap_full_set; extern const kernel_cap_t __cap_init_eff_set; -kernel_cap_t cap_set_effective(const kernel_cap_t pE_new); - /** * has_capability - Determine if a task has a superior capability available * @t: The task in question diff --git a/include/linux/cred.h b/include/linux/cred.h index 62b9e532422..eaf6fa695a0 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -84,6 +84,8 @@ struct thread_group_cred { struct key *process_keyring; /* keyring private to this process */ struct rcu_head rcu; /* RCU deletion hook */ }; + +extern void release_tgcred(struct cred *cred); #endif /* @@ -137,11 +139,30 @@ struct cred { struct user_struct *user; /* real user ID subscription */ struct group_info *group_info; /* supplementary groups for euid/fsgid */ struct rcu_head rcu; /* RCU deletion hook */ - spinlock_t lock; /* lock for pointer changes */ }; extern void __put_cred(struct cred *); extern int copy_creds(struct task_struct *, unsigned long); +extern struct cred *prepare_creds(void); +extern struct cred *prepare_usermodehelper_creds(void); +extern int commit_creds(struct cred *); +extern void abort_creds(struct cred *); +extern const struct cred *override_creds(const struct cred *) __deprecated; +extern void revert_creds(const struct cred *) __deprecated; +extern void __init cred_init(void); + +/** + * get_new_cred - Get a reference on a new set of credentials + * @cred: The new credentials to reference + * + * Get a reference on the specified set of new credentials. The caller must + * release the reference. + */ +static inline struct cred *get_new_cred(struct cred *cred) +{ + atomic_inc(&cred->usage); + return cred; +} /** * get_cred - Get a reference on a set of credentials @@ -150,10 +171,9 @@ extern int copy_creds(struct task_struct *, unsigned long); * Get a reference on the specified set of credentials. The caller must * release the reference. 
*/ -static inline struct cred *get_cred(struct cred *cred) +static inline const struct cred *get_cred(const struct cred *cred) { - atomic_inc(&cred->usage); - return cred; + return get_new_cred((struct cred *) cred); } /** @@ -166,6 +186,8 @@ static inline struct cred *get_cred(struct cred *cred) static inline void put_cred(const struct cred *_cred) { struct cred *cred = (struct cred *) _cred; + + BUG_ON(atomic_read(&(cred)->usage) <= 0); if (atomic_dec_and_test(&(cred)->usage)) __put_cred(cred); } @@ -250,13 +272,13 @@ static inline void put_cred(const struct cred *_cred) __groups; \ }) -#define task_cred_xxx(task, xxx) \ -({ \ - __typeof__(task->cred->xxx) ___val; \ - rcu_read_lock(); \ - ___val = __task_cred((task))->xxx; \ - rcu_read_unlock(); \ - ___val; \ +#define task_cred_xxx(task, xxx) \ +({ \ + __typeof__(((struct cred *)NULL)->xxx) ___val; \ + rcu_read_lock(); \ + ___val = __task_cred((task))->xxx; \ + rcu_read_unlock(); \ + ___val; \ }) #define task_uid(task) (task_cred_xxx((task), uid)) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 5e24c54b6df..08c3b24ad9a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -150,6 +150,8 @@ extern struct cred init_cred; .sibling = LIST_HEAD_INIT(tsk.sibling), \ .group_leader = &tsk, \ .cred = &init_cred, \ + .cred_exec_mutex = \ + __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ .comm = "swapper", \ .thread = INIT_THREAD, \ .fs = &init_fs, \ diff --git a/include/linux/key.h b/include/linux/key.h index 0836cc838b0..69ecf0934b0 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -73,6 +73,7 @@ struct key; struct seq_file; struct user_struct; struct signal_struct; +struct cred; struct key_type; struct key_owner; @@ -181,7 +182,7 @@ struct key { extern struct key *key_alloc(struct key_type *type, const char *desc, uid_t uid, gid_t gid, - struct task_struct *ctx, + const struct cred *cred, key_perm_t perm, unsigned long flags); @@ -249,7 +250,7 @@ extern int key_unlink(struct key *keyring, struct key *key); extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, - struct task_struct *ctx, + const struct cred *cred, unsigned long flags, struct key *dest); @@ -276,22 +277,12 @@ extern ctl_table key_sysctls[]; /* * the userspace interface */ -extern void switch_uid_keyring(struct user_struct *new_user); -extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk); -extern void exit_keys(struct task_struct *tsk); -extern int suid_keys(struct task_struct *tsk); +extern int install_thread_keyring_to_cred(struct cred *cred); extern int exec_keys(struct task_struct *tsk); extern void key_fsuid_changed(struct task_struct *tsk); extern void key_fsgid_changed(struct task_struct *tsk); extern void key_init(void); -#define __install_session_keyring(keyring) \ -({ \ - struct key *old_session = current->cred->tgcred->session_keyring; \ - current->cred->tgcred->session_keyring = keyring; \ - old_session; \ -}) - #else /* CONFIG_KEYS */ #define key_validate(k) 0 @@ -303,11 +294,6 @@ extern void key_init(void); #define make_key_ref(k, p) NULL #define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define switch_uid_keyring(u) do { } while(0) -#define __install_session_keyring(k) ({ NULL; }) -#define copy_keys(f,t) 0 -#define exit_keys(t) do { } while(0) -#define suid_keys(t) do { } while(0) #define exec_keys(t) do { } while(0) #define key_fsuid_changed(t) do { } while(0) #define key_fsgid_changed(t) do { } while(0) diff --git a/include/linux/sched.h 
b/include/linux/sched.h index 2913252989b..121d655e460 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1145,7 +1145,8 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ - struct cred *cred; /* actual/objective task credentials */ + const struct cred *cred; /* actual/objective task credentials (COW) */ + struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock @@ -1720,7 +1721,6 @@ static inline struct user_struct *get_uid(struct user_struct *u) return u; } extern void free_uid(struct user_struct *); -extern void switch_uid(struct user_struct *); extern void release_uids(struct user_namespace *ns); #include @@ -1870,6 +1870,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, #define for_each_process(p) \ for (p = &init_task ; (p = next_task(p)) != &init_task ; ) +extern bool is_single_threaded(struct task_struct *); + /* * Careful: do_each_thread/while_each_thread is a double loop so * 'break' will not work as expected - use goto instead. diff --git a/include/linux/security.h b/include/linux/security.h index 7e9fe046a0d..68be1125144 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -53,24 +53,21 @@ extern int cap_settime(struct timespec *ts, struct timezone *tz); extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); extern int cap_ptrace_traceme(struct task_struct *parent); extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -extern int cap_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); -extern void cap_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); +extern int cap_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); extern int cap_bprm_set_security(struct linux_binprm *bprm); -extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); +extern int cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); extern int cap_bprm_secureexec(struct linux_binprm *bprm); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern int cap_inode_removexattr(struct dentry *dentry, const char *name); extern int cap_inode_need_killpriv(struct dentry *dentry); extern int cap_inode_killpriv(struct dentry *dentry); -extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); -extern void cap_task_reparent_to_init(struct task_struct *p); +extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p); + unsigned long arg4, unsigned long arg5); extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); extern int cap_task_setioprio(struct task_struct *p, int ioprio); extern int cap_task_setnice(struct task_struct *p, int nice); @@ -170,8 +167,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Compute and set the security attributes of a process being transformed * by an execve operation based on the old attributes (current->security) * and the 
information saved in @bprm->security by the set_security hook. - * Since this hook function (and its caller) are void, this hook can not - * return an error. However, it can leave the security attributes of the + * Since this function may return an error, in which case the process will + * be killed. However, it can leave the security attributes of the * process unchanged if an access failure occurs at this point. * bprm_apply_creds is called under task_lock. @unsafe indicates various * reasons why it may be unsafe to change security state. @@ -593,15 +590,18 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * manual page for definitions of the @clone_flags. * @clone_flags contains the flags indicating what should be shared. * Return 0 if permission is granted. - * @cred_alloc_security: - * @cred contains the cred struct for child process. - * Allocate and attach a security structure to the cred->security field. - * The security field is initialized to NULL when the task structure is - * allocated. - * Return 0 if operation was successful. * @cred_free: * @cred points to the credentials. * Deallocate and clear the cred->security field in a set of credentials. + * @cred_prepare: + * @new points to the new credentials. + * @old points to the original credentials. + * @gfp indicates the atomicity of any memory allocations. + * Prepare a new set of credentials by copying the data from the old set. + * @cred_commit: + * @new points to the new credentials. + * @old points to the original credentials. + * Install a new set of credentials. * @task_setuid: * Check permission before setting one or more of the user identity * attributes of the current process. The @flags parameter indicates @@ -614,15 +614,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @id2 contains a uid. * @flags contains one of the LSM_SETID_* values. * Return 0 if permission is granted. - * @task_post_setuid: + * @task_fix_setuid: * Update the module's state after setting one or more of the user * identity attributes of the current process. The @flags parameter * indicates which of the set*uid system calls invoked this hook. If - * @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other - * parameters are not used. - * @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS). - * @old_euid contains the old effective uid (or -1 if LSM_SETID_FS). - * @old_suid contains the old saved uid (or -1 if LSM_SETID_FS). + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaces * @flags contains one of the LSM_SETID_* values. * Return 0 on success. * @task_setgid: @@ -725,13 +723,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @arg3 contains a argument. * @arg4 contains a argument. * @arg5 contains a argument. - * @rc_p contains a pointer to communicate back the forced return code - * Return 0 if permission is granted, and non-zero if the security module - * has taken responsibility (setting *rc_p) for the prctl call. - * @task_reparent_to_init: - * Set the security attributes in @p->security for a kernel thread that - * is being reparented to the init task. - * @p contains the task_struct for the kernel thread. + * Return -ENOSYS if no-one wanted to handle this op, any other value to + * cause prctl() to return immediately with that value. 
* @task_to_inode: * Set the security attributes for an inode based on an associated task's * security attributes, e.g. for /proc/pid inodes. @@ -1008,7 +1001,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * See whether a specific operational right is granted to a process on a * key. * @key_ref refers to the key (key pointer + possession attribute bit). - * @context points to the process to provide the context against which to + * @cred points to the credentials to provide the context against which to * evaluate the security data on the key. * @perm describes the combination of permissions required of this key. * Return 1 if permission granted, 0 if permission denied and -ve it the @@ -1170,6 +1163,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @child process. * Security modules may also want to perform a process tracing check * during an execve in the set_security or apply_creds hooks of + * tracing check during an execve in the bprm_set_creds hook of * binprm_security_ops if the process is being traced and its security * attributes would be changed by the execve. * @child contains the task_struct structure for the target process. @@ -1193,19 +1187,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. * Return 0 if the capability sets were successfully obtained. - * @capset_check: - * Check permission before setting the @effective, @inheritable, and - * @permitted capability sets for the current process. - * @effective contains the effective capability set. - * @inheritable contains the inheritable capability set. - * @permitted contains the permitted capability set. - * Return 0 if permission is granted. - * @capset_set: + * @capset: * Set the @effective, @inheritable, and @permitted capability sets for * the current process. + * @new contains the new credentials structure for target process. + * @old contains the current credentials structure for target process. * @effective contains the effective capability set. * @inheritable contains the inheritable capability set. * @permitted contains the permitted capability set. + * Return 0 and update @new if permission is granted. * @capable: * Check whether the @tsk process has the @cap capability. * @tsk contains the task_struct for the process. 
@@ -1297,12 +1287,11 @@ struct security_operations { int (*capget) (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); - int (*capset_check) (const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); - void (*capset_set) (const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); + int (*capset) (struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); int (*capable) (struct task_struct *tsk, int cap, int audit); int (*acct) (struct file *file); int (*sysctl) (struct ctl_table *table, int op); @@ -1314,7 +1303,7 @@ struct security_operations { int (*bprm_alloc_security) (struct linux_binprm *bprm); void (*bprm_free_security) (struct linux_binprm *bprm); - void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe); + int (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe); void (*bprm_post_apply_creds) (struct linux_binprm *bprm); int (*bprm_set_security) (struct linux_binprm *bprm); int (*bprm_check_security) (struct linux_binprm *bprm); @@ -1405,11 +1394,13 @@ struct security_operations { int (*dentry_open) (struct file *file, const struct cred *cred); int (*task_create) (unsigned long clone_flags); - int (*cred_alloc_security) (struct cred *cred); void (*cred_free) (struct cred *cred); + int (*cred_prepare)(struct cred *new, const struct cred *old, + gfp_t gfp); + void (*cred_commit)(struct cred *new, const struct cred *old); int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); - int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ , - uid_t old_euid, uid_t old_suid, int flags); + int (*task_fix_setuid) (struct cred *new, const struct cred *old, + int flags); int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags); int (*task_setpgid) (struct task_struct *p, pid_t pgid); int (*task_getpgid) (struct task_struct *p); @@ -1429,8 +1420,7 @@ struct security_operations { int (*task_wait) (struct task_struct *p); int (*task_prctl) (int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5, long *rc_p); - void (*task_reparent_to_init) (struct task_struct *p); + unsigned long arg5); void (*task_to_inode) (struct task_struct *p, struct inode *inode); int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); @@ -1535,10 +1525,10 @@ struct security_operations { /* key management security hooks */ #ifdef CONFIG_KEYS - int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags); + int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags); void (*key_free) (struct key *key); int (*key_permission) (key_ref_t key_ref, - struct task_struct *context, + const struct cred *cred, key_perm_t perm); int (*key_getsecurity)(struct key *key, char **_buffer); #endif /* CONFIG_KEYS */ @@ -1564,12 +1554,10 @@ int security_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); -int security_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); -void security_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted); +int security_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted); int security_capable(struct 
task_struct *tsk, int cap); int security_capable_noaudit(struct task_struct *tsk, int cap); int security_acct(struct file *file); @@ -1583,7 +1571,7 @@ int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_vm_enough_memory_kern(long pages); int security_bprm_alloc(struct linux_binprm *bprm); void security_bprm_free(struct linux_binprm *bprm); -void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); +int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); void security_bprm_post_apply_creds(struct linux_binprm *bprm); int security_bprm_set(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); @@ -1660,11 +1648,12 @@ int security_file_send_sigiotask(struct task_struct *tsk, int security_file_receive(struct file *file); int security_dentry_open(struct file *file, const struct cred *cred); int security_task_create(unsigned long clone_flags); -int security_cred_alloc(struct cred *cred); void security_cred_free(struct cred *cred); +int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); +void security_commit_creds(struct cred *new, const struct cred *old); int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); -int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags); +int security_task_fix_setuid(struct cred *new, const struct cred *old, + int flags); int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); @@ -1683,8 +1672,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid); int security_task_wait(struct task_struct *p); int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p); -void security_task_reparent_to_init(struct task_struct *p); + unsigned long arg4, unsigned long arg5); void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); @@ -1759,18 +1747,13 @@ static inline int security_capget(struct task_struct *target, return cap_capget(target, effective, inheritable, permitted); } -static inline int security_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) +static inline int security_capset(struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) { - return cap_capset_check(effective, inheritable, permitted); -} - -static inline void security_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) -{ - cap_capset_set(effective, inheritable, permitted); + return cap_capset(new, old, effective, inheritable, permitted); } static inline int security_capable(struct task_struct *tsk, int cap) @@ -1837,9 +1820,9 @@ static inline int security_bprm_alloc(struct linux_binprm *bprm) static inline void security_bprm_free(struct linux_binprm *bprm) { } -static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +static inline int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) { - cap_bprm_apply_creds(bprm, unsafe); + return cap_bprm_apply_creds(bprm, unsafe); } static inline void security_bprm_post_apply_creds(struct 
linux_binprm *bprm) @@ -2182,13 +2165,20 @@ static inline int security_task_create(unsigned long clone_flags) return 0; } -static inline int security_cred_alloc(struct cred *cred) +static inline void security_cred_free(struct cred *cred) +{ } + +static inline int security_prepare_creds(struct cred *new, + const struct cred *old, + gfp_t gfp) { return 0; } -static inline void security_cred_free(struct cred *cred) -{ } +static inline void security_commit_creds(struct cred *new, + const struct cred *old) +{ +} static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) @@ -2196,10 +2186,11 @@ static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, return 0; } -static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags) +static inline int security_task_fix_setuid(struct cred *new, + const struct cred *old, + int flags) { - return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags); + return cap_task_fix_setuid(new, old, flags); } static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, @@ -2286,14 +2277,9 @@ static inline int security_task_wait(struct task_struct *p) static inline int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5, long *rc_p) -{ - return cap_task_prctl(option, arg2, arg3, arg3, arg5, rc_p); -} - -static inline void security_task_reparent_to_init(struct task_struct *p) + unsigned long arg5) { - cap_task_reparent_to_init(p); + return cap_task_prctl(option, arg2, arg3, arg3, arg5); } static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) @@ -2719,16 +2705,16 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #ifdef CONFIG_KEYS #ifdef CONFIG_SECURITY -int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags); +int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); int security_key_permission(key_ref_t key_ref, - struct task_struct *context, key_perm_t perm); + const struct cred *cred, key_perm_t perm); int security_key_getsecurity(struct key *key, char **_buffer); #else static inline int security_key_alloc(struct key *key, - struct task_struct *tsk, + const struct cred *cred, unsigned long flags) { return 0; @@ -2739,7 +2725,7 @@ static inline void security_key_free(struct key *key) } static inline int security_key_permission(key_ref_t key_ref, - struct task_struct *context, + const struct cred *cred, key_perm_t perm) { return 0; diff --git a/init/main.c b/init/main.c index 7e117a231af..db843bff573 100644 --- a/init/main.c +++ b/init/main.c @@ -669,6 +669,7 @@ asmlinkage void __init start_kernel(void) efi_enter_virtual_mode(); #endif thread_info_cache_init(); + cred_init(); fork_init(num_physpages); proc_caches_init(); buffer_init(); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index ae8ef88ade3..bc1e2d854bf 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -2546,18 +2546,17 @@ int __audit_signal_info(int sig, struct task_struct *t) /** * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps - * @bprm pointer to the bprm being processed - * @caps the caps read from the disk + * @bprm: pointer to the bprm being processed + * @new: the proposed new credentials + * @old: the old credentials * * Simply check if the proc already has the caps given by the file and if not * store the priv escalation info for later 
auditing at the end of the syscall * - * this can fail and we don't care. See the note in audit.h for - * audit_log_bprm_fcaps() for my explaination.... - * * -Eric */ -void __audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_cap_t *pE) +int __audit_log_bprm_fcaps(struct linux_binprm *bprm, + const struct cred *new, const struct cred *old) { struct audit_aux_data_bprm_fcaps *ax; struct audit_context *context = current->audit_context; @@ -2566,7 +2565,7 @@ void __audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_ ax = kmalloc(sizeof(*ax), GFP_KERNEL); if (!ax) - return; + return -ENOMEM; ax->d.type = AUDIT_BPRM_FCAPS; ax->d.next = context->aux; @@ -2581,26 +2580,27 @@ void __audit_log_bprm_fcaps(struct linux_binprm *bprm, kernel_cap_t *pP, kernel_ ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE); ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT; - ax->old_pcap.permitted = *pP; - ax->old_pcap.inheritable = current->cred->cap_inheritable; - ax->old_pcap.effective = *pE; + ax->old_pcap.permitted = old->cap_permitted; + ax->old_pcap.inheritable = old->cap_inheritable; + ax->old_pcap.effective = old->cap_effective; - ax->new_pcap.permitted = current->cred->cap_permitted; - ax->new_pcap.inheritable = current->cred->cap_inheritable; - ax->new_pcap.effective = current->cred->cap_effective; + ax->new_pcap.permitted = new->cap_permitted; + ax->new_pcap.inheritable = new->cap_inheritable; + ax->new_pcap.effective = new->cap_effective; + return 0; } /** * __audit_log_capset - store information about the arguments to the capset syscall - * @pid target pid of the capset call - * @eff effective cap set - * @inh inheritible cap set - * @perm permited cap set + * @pid: target pid of the capset call + * @new: the new credentials + * @old: the old (current) credentials * * Record the aguments userspace sent to sys_capset for later printing by the * audit system if applicable */ -int __audit_log_capset(pid_t pid, kernel_cap_t *eff, kernel_cap_t *inh, kernel_cap_t *perm) +int __audit_log_capset(pid_t pid, + const struct cred *new, const struct cred *old) { struct audit_aux_data_capset *ax; struct audit_context *context = current->audit_context; @@ -2617,9 +2617,9 @@ int __audit_log_capset(pid_t pid, kernel_cap_t *eff, kernel_cap_t *inh, kernel_c context->aux = (void *)ax; ax->pid = pid; - ax->cap.effective = *eff; - ax->cap.inheritable = *eff; - ax->cap.permitted = *perm; + ax->cap.effective = new->cap_effective; + ax->cap.inheritable = new->cap_effective; + ax->cap.permitted = new->cap_permitted; return 0; } diff --git a/kernel/capability.c b/kernel/capability.c index a404b980b1b..36b4b4daebe 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -15,12 +15,7 @@ #include #include #include - -/* - * This lock protects task->cap_* for all tasks including current. - * Locking rule: acquire this prior to tasklist_lock. - */ -static DEFINE_SPINLOCK(task_capability_lock); +#include "cred-internals.h" /* * Leveraged for setting/resetting capabilities @@ -128,12 +123,11 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy) } /* - * If we have configured with filesystem capability support, then the - * only thing that can change the capabilities of the current process - * is the current process. As such, we can't be in this code at the - * same time as we are in the process of setting capabilities in this - * process. 
The net result is that we can limit our use of locks to - * when we are reading the caps of another process. + * The only thing that can change the capabilities of the current + * process is the current process. As such, we can't be in this code + * at the same time as we are in the process of setting capabilities + * in this process. The net result is that we can limit our use of + * locks to when we are reading the caps of another process. */ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, kernel_cap_t *pIp, kernel_cap_t *pPp) @@ -143,7 +137,6 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, if (pid && (pid != task_pid_vnr(current))) { struct task_struct *target; - spin_lock(&task_capability_lock); read_lock(&tasklist_lock); target = find_task_by_vpid(pid); @@ -153,34 +146,12 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp, ret = security_capget(target, pEp, pIp, pPp); read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); } else ret = security_capget(current, pEp, pIp, pPp); return ret; } -/* - * Atomically modify the effective capabilities returning the original - * value. No permission check is performed here - it is assumed that the - * caller is permitted to set the desired effective capabilities. - */ -kernel_cap_t cap_set_effective(const kernel_cap_t pE_new) -{ - kernel_cap_t pE_old; - - spin_lock(&task_capability_lock); - - pE_old = current->cred->cap_effective; - current->cred->cap_effective = pE_new; - - spin_unlock(&task_capability_lock); - - return pE_old; -} - -EXPORT_SYMBOL(cap_set_effective); - /** * sys_capget - get the capabilities of a given process. * @header: pointer to struct that contains capability version and @@ -208,7 +179,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) return -EINVAL; ret = cap_get_target_pid(pid, &pE, &pI, &pP); - if (!ret) { struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; unsigned i; @@ -270,6 +240,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; unsigned i, tocopy; kernel_cap_t inheritable, permitted, effective; + struct cred *new; int ret; pid_t pid; @@ -284,8 +255,8 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) if (pid != 0 && pid != task_pid_vnr(current)) return -EPERM; - if (copy_from_user(&kdata, data, tocopy - * sizeof(struct __user_cap_data_struct))) + if (copy_from_user(&kdata, data, + tocopy * sizeof(struct __user_cap_data_struct))) return -EFAULT; for (i = 0; i < tocopy; i++) { @@ -300,24 +271,23 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) i++; } - ret = audit_log_capset(pid, &effective, &inheritable, &permitted); - if (ret) + new = prepare_creds(); + if (!new) + return -ENOMEM; + + ret = security_capset(new, current_cred(), + &effective, &inheritable, &permitted); + if (ret < 0) + goto error; + + ret = audit_log_capset(pid, new, current_cred()); + if (ret < 0) return ret; - /* This lock is required even when filesystem capability support is - * configured - it protects the sys_capget() call from returning - * incorrect data in the case that the targeted process is not the - * current one. - */ - spin_lock(&task_capability_lock); - - ret = security_capset_check(&effective, &inheritable, &permitted); - /* Having verified that the proposed changes are legal, we now put them - * into effect. 
- */ - if (!ret) - security_capset_set(&effective, &inheritable, &permitted); - spin_unlock(&task_capability_lock); + return commit_creds(new); + +error: + abort_creds(new); return ret; } diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h new file mode 100644 index 00000000000..2dc4fc2d0bf --- /dev/null +++ b/kernel/cred-internals.h @@ -0,0 +1,21 @@ +/* Internal credentials stuff + * + * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +/* + * user.c + */ +static inline void sched_switch_user(struct task_struct *p) +{ +#ifdef CONFIG_USER_SCHED + sched_move_task(p); +#endif /* CONFIG_USER_SCHED */ +} + diff --git a/kernel/cred.c b/kernel/cred.c index ac73e361768..cb6b5eda978 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -15,6 +15,10 @@ #include #include #include +#include +#include "cred-internals.h" + +static struct kmem_cache *cred_jar; /* * The common credentials for the initial task's thread group @@ -64,7 +68,7 @@ static void release_tgcred_rcu(struct rcu_head *rcu) /* * Release a set of thread group credentials. */ -static void release_tgcred(struct cred *cred) +void release_tgcred(struct cred *cred) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred = cred->tgcred; @@ -81,79 +85,322 @@ static void put_cred_rcu(struct rcu_head *rcu) { struct cred *cred = container_of(rcu, struct cred, rcu); - BUG_ON(atomic_read(&cred->usage) != 0); + if (atomic_read(&cred->usage) != 0) + panic("CRED: put_cred_rcu() sees %p with usage %d\n", + cred, atomic_read(&cred->usage)); + security_cred_free(cred); key_put(cred->thread_keyring); key_put(cred->request_key_auth); release_tgcred(cred); put_group_info(cred->group_info); free_uid(cred->user); - security_cred_free(cred); - kfree(cred); + kmem_cache_free(cred_jar, cred); } /** * __put_cred - Destroy a set of credentials - * @sec: The record to release + * @cred: The record to release * * Destroy a set of credentials on which no references remain. */ void __put_cred(struct cred *cred) { + BUG_ON(atomic_read(&cred->usage) != 0); + call_rcu(&cred->rcu, put_cred_rcu); } EXPORT_SYMBOL(__put_cred); +/** + * prepare_creds - Prepare a new set of credentials for modification + * + * Prepare a new set of task credentials for modification. A task's creds + * shouldn't generally be modified directly, therefore this function is used to + * prepare a new copy, which the caller then modifies and then commits by + * calling commit_creds(). + * + * Returns a pointer to the new creds-to-be if successful, NULL otherwise. + * + * Call commit_creds() or abort_creds() to clean up. 
+ */ +struct cred *prepare_creds(void) +{ + struct task_struct *task = current; + const struct cred *old; + struct cred *new; + + BUG_ON(atomic_read(&task->cred->usage) < 1); + + new = kmem_cache_alloc(cred_jar, GFP_KERNEL); + if (!new) + return NULL; + + old = task->cred; + memcpy(new, old, sizeof(struct cred)); + + atomic_set(&new->usage, 1); + get_group_info(new->group_info); + get_uid(new->user); + +#ifdef CONFIG_KEYS + key_get(new->thread_keyring); + key_get(new->request_key_auth); + atomic_inc(&new->tgcred->usage); +#endif + +#ifdef CONFIG_SECURITY + new->security = NULL; +#endif + + if (security_prepare_creds(new, old, GFP_KERNEL) < 0) + goto error; + return new; + +error: + abort_creds(new); + return NULL; +} +EXPORT_SYMBOL(prepare_creds); + +/* + * prepare new credentials for the usermode helper dispatcher + */ +struct cred *prepare_usermodehelper_creds(void) +{ +#ifdef CONFIG_KEYS + struct thread_group_cred *tgcred = NULL; +#endif + struct cred *new; + +#ifdef CONFIG_KEYS + tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC); + if (!tgcred) + return NULL; +#endif + + new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); + if (!new) + return NULL; + + memcpy(new, &init_cred, sizeof(struct cred)); + + atomic_set(&new->usage, 1); + get_group_info(new->group_info); + get_uid(new->user); + +#ifdef CONFIG_KEYS + new->thread_keyring = NULL; + new->request_key_auth = NULL; + new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT; + + atomic_set(&tgcred->usage, 1); + spin_lock_init(&tgcred->lock); + new->tgcred = tgcred; +#endif + +#ifdef CONFIG_SECURITY + new->security = NULL; +#endif + if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0) + goto error; + + BUG_ON(atomic_read(&new->usage) != 1); + return new; + +error: + put_cred(new); + return NULL; +} + /* * Copy credentials for the new process created by fork() + * + * We share if we can, but under some circumstances we have to generate a new + * set. 
*/ int copy_creds(struct task_struct *p, unsigned long clone_flags) { - struct cred *pcred; - int ret; +#ifdef CONFIG_KEYS + struct thread_group_cred *tgcred; +#endif + struct cred *new; + + mutex_init(&p->cred_exec_mutex); - pcred = kmemdup(p->cred, sizeof(*p->cred), GFP_KERNEL); - if (!pcred) + if ( +#ifdef CONFIG_KEYS + !p->cred->thread_keyring && +#endif + clone_flags & CLONE_THREAD + ) { + get_cred(p->cred); + atomic_inc(&p->cred->user->processes); + return 0; + } + + new = prepare_creds(); + if (!new) return -ENOMEM; #ifdef CONFIG_KEYS - if (clone_flags & CLONE_THREAD) { - atomic_inc(&pcred->tgcred->usage); - } else { - pcred->tgcred = kmalloc(sizeof(struct cred), GFP_KERNEL); - if (!pcred->tgcred) { - kfree(pcred); + /* new threads get their own thread keyrings if their parent already + * had one */ + if (new->thread_keyring) { + key_put(new->thread_keyring); + new->thread_keyring = NULL; + if (clone_flags & CLONE_THREAD) + install_thread_keyring_to_cred(new); + } + + /* we share the process and session keyrings between all the threads in + * a process - this is slightly icky as we violate COW credentials a + * bit */ + if (!(clone_flags & CLONE_THREAD)) { + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); + if (!tgcred) { + put_cred(new); return -ENOMEM; } - atomic_set(&pcred->tgcred->usage, 1); - spin_lock_init(&pcred->tgcred->lock); - pcred->tgcred->process_keyring = NULL; - pcred->tgcred->session_keyring = - key_get(p->cred->tgcred->session_keyring); + atomic_set(&tgcred->usage, 1); + spin_lock_init(&tgcred->lock); + tgcred->process_keyring = NULL; + tgcred->session_keyring = key_get(new->tgcred->session_keyring); + + release_tgcred(new); + new->tgcred = tgcred; } #endif -#ifdef CONFIG_SECURITY - pcred->security = NULL; -#endif + atomic_inc(&new->user->processes); + p->cred = new; + return 0; +} - ret = security_cred_alloc(pcred); - if (ret < 0) { - release_tgcred(pcred); - kfree(pcred); - return ret; +/** + * commit_creds - Install new credentials upon the current task + * @new: The credentials to be assigned + * + * Install a new set of credentials to the current task, using RCU to replace + * the old set. + * + * This function eats the caller's reference to the new credentials. + * + * Always returns 0 thus allowing this function to be tail-called at the end + * of, say, sys_setgid(). + */ +int commit_creds(struct cred *new) +{ + struct task_struct *task = current; + const struct cred *old; + + BUG_ON(atomic_read(&new->usage) < 1); + BUG_ON(atomic_read(&task->cred->usage) < 1); + + old = task->cred; + security_commit_creds(new, old); + + /* dumpability changes */ + if (old->euid != new->euid || + old->egid != new->egid || + old->fsuid != new->fsuid || + old->fsgid != new->fsgid || + !cap_issubset(new->cap_permitted, old->cap_permitted)) { + set_dumpable(task->mm, suid_dumpable); + task->pdeath_signal = 0; + smp_wmb(); } - atomic_set(&pcred->usage, 1); - get_group_info(pcred->group_info); - get_uid(pcred->user); - key_get(pcred->thread_keyring); - key_get(pcred->request_key_auth); + /* alter the thread keyring */ + if (new->fsuid != old->fsuid) + key_fsuid_changed(task); + if (new->fsgid != old->fsgid) + key_fsgid_changed(task); + + /* do it + * - What if a process setreuid()'s and this brings the + * new uid over his NPROC rlimit? We can check this now + * cheaply with the new uid cache, so if it matters + * we should be checking for it. 
-DaveM + */ + if (new->user != old->user) + atomic_inc(&new->user->processes); + rcu_assign_pointer(task->cred, new); + if (new->user != old->user) + atomic_dec(&old->user->processes); + + sched_switch_user(task); + + /* send notifications */ + if (new->uid != old->uid || + new->euid != old->euid || + new->suid != old->suid || + new->fsuid != old->fsuid) + proc_id_connector(task, PROC_EVENT_UID); - atomic_inc(&pcred->user->processes); + if (new->gid != old->gid || + new->egid != old->egid || + new->sgid != old->sgid || + new->fsgid != old->fsgid) + proc_id_connector(task, PROC_EVENT_GID); - /* RCU assignment is unneeded here as no-one can have accessed this - * pointer yet, barring us */ - p->cred = pcred; + put_cred(old); return 0; } +EXPORT_SYMBOL(commit_creds); + +/** + * abort_creds - Discard a set of credentials and unlock the current task + * @new: The credentials that were going to be applied + * + * Discard a set of credentials that were under construction and unlock the + * current task. + */ +void abort_creds(struct cred *new) +{ + BUG_ON(atomic_read(&new->usage) < 1); + put_cred(new); +} +EXPORT_SYMBOL(abort_creds); + +/** + * override_creds - Temporarily override the current process's credentials + * @new: The credentials to be assigned + * + * Install a set of temporary override credentials on the current process, + * returning the old set for later reversion. + */ +const struct cred *override_creds(const struct cred *new) +{ + const struct cred *old = current->cred; + + rcu_assign_pointer(current->cred, get_cred(new)); + return old; +} +EXPORT_SYMBOL(override_creds); + +/** + * revert_creds - Revert a temporary credentials override + * @old: The credentials to be restored + * + * Revert a temporary set of override credentials to an old set, discarding the + * override set. + */ +void revert_creds(const struct cred *old) +{ + const struct cred *override = current->cred; + + rcu_assign_pointer(current->cred, old); + put_cred(override); +} +EXPORT_SYMBOL(revert_creds); + +/* + * initialise the credentials stuff + */ +void __init cred_init(void) +{ + /* allocate a slab in which we can store credentials */ + cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), + 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); +} diff --git a/kernel/exit.c b/kernel/exit.c index bbc22530f2c..c0711da1548 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -47,12 +47,14 @@ #include #include #include +#include #include #include #include #include #include +#include "cred-internals.h" static void exit_mm(struct task_struct * tsk); @@ -338,12 +340,12 @@ static void reparent_to_kthreadd(void) /* cpus_allowed? */ /* rt_priority? */ /* signals? 
*/ - security_task_reparent_to_init(current); memcpy(current->signal->rlim, init_task.signal->rlim, sizeof(current->signal->rlim)); - atomic_inc(&(INIT_USER->__count)); + + atomic_inc(&init_cred.usage); + commit_creds(&init_cred); write_unlock_irq(&tasklist_lock); - switch_uid(INIT_USER); } void __set_special_pids(struct pid *pid) @@ -1085,7 +1087,6 @@ NORET_TYPE void do_exit(long code) check_stack_usage(); exit_thread(); cgroup_exit(tsk, 1); - exit_keys(tsk); if (group_dead && tsk->signal->leader) disassociate_ctty(1);
diff --git a/kernel/fork.c b/kernel/fork.c index ded1972672a..82a7948a664 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1084,10 +1084,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, goto bad_fork_cleanup_sighand; if ((retval = copy_mm(clone_flags, p))) goto bad_fork_cleanup_signal; - if ((retval = copy_keys(clone_flags, p))) - goto bad_fork_cleanup_mm; if ((retval = copy_namespaces(clone_flags, p))) - goto bad_fork_cleanup_keys; + goto bad_fork_cleanup_mm; if ((retval = copy_io(clone_flags, p))) goto bad_fork_cleanup_namespaces; retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); @@ -1252,8 +1250,6 @@ bad_fork_cleanup_io: put_io_context(p->io_context); bad_fork_cleanup_namespaces: exit_task_namespaces(p); -bad_fork_cleanup_keys: - exit_keys(p); bad_fork_cleanup_mm: if (p->mm) mmput(p->mm); @@ -1281,6 +1277,7 @@ bad_fork_cleanup_cgroup: bad_fork_cleanup_put_domain: module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: + atomic_dec(&p->cred->user->processes); put_cred(p->cred); bad_fork_free: free_task(p);
diff --git a/kernel/kmod.c b/kernel/kmod.c index f044f8f5770..b46dbb90866 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -118,10 +118,10 @@ EXPORT_SYMBOL(request_module); struct subprocess_info { struct work_struct work; struct completion *complete; + struct cred *cred; char *path; char **argv; char **envp; - struct key *ring; enum umh_wait wait; int retval; struct file *stdin; @@ -134,19 +134,20 @@ struct subprocess_info { static int ____call_usermodehelper(void *data) { struct subprocess_info *sub_info = data; - struct key *new_session, *old_session; int retval; - /* Unblock all signals and set the session keyring. */ - new_session = key_get(sub_info->ring); + BUG_ON(atomic_read(&sub_info->cred->usage) != 1); + + /* Unblock all signals */ spin_lock_irq(&current->sighand->siglock); - old_session = __install_session_keyring(new_session); flush_signal_handlers(current, 1); sigemptyset(&current->blocked); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - key_put(old_session); + /* Install the credentials */ + commit_creds(sub_info->cred); + sub_info->cred = NULL; /* Install input pipe when needed */ if (sub_info->stdin) { @@ -185,6 +186,8 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info) { if (info->cleanup) (*info->cleanup)(info->argv, info->envp); + if (info->cred) + put_cred(info->cred); kfree(info); } EXPORT_SYMBOL(call_usermodehelper_freeinfo); @@ -240,6 +243,8 @@ static void __call_usermodehelper(struct work_struct *work) pid_t pid; enum umh_wait wait = sub_info->wait; + BUG_ON(atomic_read(&sub_info->cred->usage) != 1); + /* CLONE_VFORK: wait until the usermode helper has execve'd * successfully We need the data structures to stay around * until that is done.
*/ @@ -362,6 +367,9 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, sub_info->path = path; sub_info->argv = argv; sub_info->envp = envp; + sub_info->cred = prepare_usermodehelper_creds(); + if (!sub_info->cred) + return NULL; out: return sub_info; @@ -376,7 +384,13 @@ EXPORT_SYMBOL(call_usermodehelper_setup); void call_usermodehelper_setkeys(struct subprocess_info *info, struct key *session_keyring) { - info->ring = session_keyring; +#ifdef CONFIG_KEYS + struct thread_group_cred *tgcred = info->cred->tgcred; + key_put(tgcred->session_keyring); + tgcred->session_keyring = key_get(session_keyring); +#else + BUG(); +#endif } EXPORT_SYMBOL(call_usermodehelper_setkeys); @@ -444,6 +458,8 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, DECLARE_COMPLETION_ONSTACK(done); int retval = 0; + BUG_ON(atomic_read(&sub_info->cred->usage) != 1); + helper_lock(); if (sub_info->path[0] == '\0') goto out;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index b9d5f4e4f6a..f764b880695 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -171,6 +171,14 @@ int ptrace_attach(struct task_struct *task) if (same_thread_group(task, current)) goto out; + /* Protect exec's credential calculations against our interference; + * SUID, SGID and LSM creds get determined differently under ptrace. + */ + retval = mutex_lock_interruptible(&current->cred_exec_mutex); + if (retval < 0) + goto out; + + retval = -EPERM; repeat: /* * Nasty, nasty. @@ -210,6 +218,7 @@ repeat: bad: write_unlock_irqrestore(&tasklist_lock, flags); task_unlock(task); + mutex_unlock(&current->cred_exec_mutex); out: return retval; }
diff --git a/kernel/signal.c b/kernel/signal.c index 84989124baf..2a64304ed54 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -180,7 +180,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask) /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an - * appopriate lock must be held to protect t's user_struct + * appopriate lock must be held to stop the target task from exiting */ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, int override_rlimit) @@ -194,7 +194,7 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, * caller must be holding the RCU readlock (by way of a spinlock) and * we use RCU protection here */ - user = __task_cred(t)->user; + user = get_uid(__task_cred(t)->user); atomic_inc(&user->sigpending); if (override_rlimit || atomic_read(&user->sigpending) <= @@ -202,12 +202,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, q = kmem_cache_alloc(sigqueue_cachep, flags); if (unlikely(q == NULL)) { atomic_dec(&user->sigpending); + free_uid(user); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; - q->user = get_uid(user); + q->user = user; } - return(q); + + return q; } static void __sigqueue_free(struct sigqueue *q)
diff --git a/kernel/sys.c b/kernel/sys.c index ccc9eb736d3..ab735040468 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -180,7 +180,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); break; case PRIO_USER: - user = cred->user; + user = (struct user_struct *) cred->user; if (!who) who = cred->uid; else if ((who != cred->uid) && @@ -479,47 +479,48 @@ void ctrl_alt_del(void) */ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) { - struct cred *cred = current->cred; - int old_rgid = cred->gid; - int old_egid = cred->egid; - int new_rgid =
old_rgid; - int new_egid = old_egid; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); + retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); if (retval) - return retval; + goto error; + retval = -EPERM; if (rgid != (gid_t) -1) { - if ((old_rgid == rgid) || - (cred->egid == rgid) || + if (old->gid == rgid || + old->egid == rgid || capable(CAP_SETGID)) - new_rgid = rgid; + new->gid = rgid; else - return -EPERM; + goto error; } if (egid != (gid_t) -1) { - if ((old_rgid == egid) || - (cred->egid == egid) || - (cred->sgid == egid) || + if (old->gid == egid || + old->egid == egid || + old->sgid == egid || capable(CAP_SETGID)) - new_egid = egid; + new->egid = egid; else - return -EPERM; - } - if (new_egid != old_egid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); + goto error; } + if (rgid != (gid_t) -1 || - (egid != (gid_t) -1 && egid != old_rgid)) - cred->sgid = new_egid; - cred->fsgid = new_egid; - cred->egid = new_egid; - cred->gid = new_rgid; - key_fsgid_changed(current); - proc_id_connector(current, PROC_EVENT_GID); - return 0; + (egid != (gid_t) -1 && egid != old->gid)) + new->sgid = new->egid; + new->fsgid = new->egid; + + return commit_creds(new); + +error: + abort_creds(new); + return retval; } /* @@ -529,40 +530,42 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) */ asmlinkage long sys_setgid(gid_t gid) { - struct cred *cred = current->cred; - int old_egid = cred->egid; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); + retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); if (retval) - return retval; + goto error; - if (capable(CAP_SETGID)) { - if (old_egid != gid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - cred->gid = cred->egid = cred->sgid = cred->fsgid = gid; - } else if ((gid == cred->gid) || (gid == cred->sgid)) { - if (old_egid != gid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - cred->egid = cred->fsgid = gid; - } + retval = -EPERM; + if (capable(CAP_SETGID)) + new->gid = new->egid = new->sgid = new->fsgid = gid; + else if (gid == old->gid || gid == old->sgid) + new->egid = new->fsgid = gid; else - return -EPERM; + goto error; - key_fsgid_changed(current); - proc_id_connector(current, PROC_EVENT_GID); - return 0; + return commit_creds(new); + +error: + abort_creds(new); + return retval; } -static int set_user(uid_t new_ruid, int dumpclear) +/* + * change the user struct in a credentials set to match the new UID + */ +static int set_user(struct cred *new) { struct user_struct *new_user; - new_user = alloc_uid(current->nsproxy->user_ns, new_ruid); + new_user = alloc_uid(current->nsproxy->user_ns, new->uid); if (!new_user) return -EAGAIN; @@ -573,13 +576,8 @@ static int set_user(uid_t new_ruid, int dumpclear) return -EAGAIN; } - switch_uid(new_user); - - if (dumpclear) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - current->cred->uid = new_ruid; + free_uid(new->user); + new->user = new_user; return 0; } @@ -600,55 +598,56 @@ static int set_user(uid_t new_ruid, int dumpclear) */ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) { - struct cred *cred = current->cred; - int old_ruid, old_euid, old_suid, new_ruid, new_euid; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); + retval = security_task_setuid(ruid, 
euid, (uid_t)-1, LSM_SETID_RE); if (retval) - return retval; - - new_ruid = old_ruid = cred->uid; - new_euid = old_euid = cred->euid; - old_suid = cred->suid; + goto error; + retval = -EPERM; if (ruid != (uid_t) -1) { - new_ruid = ruid; - if ((old_ruid != ruid) && - (cred->euid != ruid) && + new->uid = ruid; + if (old->uid != ruid && + old->euid != ruid && !capable(CAP_SETUID)) - return -EPERM; + goto error; } if (euid != (uid_t) -1) { - new_euid = euid; - if ((old_ruid != euid) && - (cred->euid != euid) && - (cred->suid != euid) && + new->euid = euid; + if (old->uid != euid && + old->euid != euid && + old->suid != euid && !capable(CAP_SETUID)) - return -EPERM; + goto error; } - if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) - return -EAGAIN; + retval = -EAGAIN; + if (new->uid != old->uid && set_user(new) < 0) + goto error; - if (new_euid != old_euid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - cred->fsuid = cred->euid = new_euid; if (ruid != (uid_t) -1 || - (euid != (uid_t) -1 && euid != old_ruid)) - cred->suid = cred->euid; - cred->fsuid = cred->euid; - - key_fsuid_changed(current); - proc_id_connector(current, PROC_EVENT_UID); + (euid != (uid_t) -1 && euid != old->uid)) + new->suid = new->euid; + new->fsuid = new->euid; - return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); -} + retval = security_task_fix_setuid(new, old, LSM_SETID_RE); + if (retval < 0) + goto error; + return commit_creds(new); +error: + abort_creds(new); + return retval; +} /* * setuid() is implemented like SysV with SAVED_IDS @@ -663,37 +662,41 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) */ asmlinkage long sys_setuid(uid_t uid) { - struct cred *cred = current->cred; - int old_euid = cred->euid; - int old_ruid, old_suid, new_suid; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); + retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); if (retval) - return retval; + goto error; - old_ruid = cred->uid; - old_suid = cred->suid; - new_suid = old_suid; - + retval = -EPERM; if (capable(CAP_SETUID)) { - if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) - return -EAGAIN; - new_suid = uid; - } else if ((uid != cred->uid) && (uid != new_suid)) - return -EPERM; - - if (old_euid != uid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); + new->suid = new->uid = uid; + if (uid != old->uid && set_user(new) < 0) { + retval = -EAGAIN; + goto error; + } + } else if (uid != old->uid && uid != new->suid) { + goto error; } - cred->fsuid = cred->euid = uid; - cred->suid = new_suid; - key_fsuid_changed(current); - proc_id_connector(current, PROC_EVENT_UID); + new->fsuid = new->euid = uid; + + retval = security_task_fix_setuid(new, old, LSM_SETID_ID); + if (retval < 0) + goto error; + + return commit_creds(new); - return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); +error: + abort_creds(new); + return retval; } @@ -703,47 +706,53 @@ asmlinkage long sys_setuid(uid_t uid) */ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) { - struct cred *cred = current->cred; - int old_ruid = cred->uid; - int old_euid = cred->euid; - int old_suid = cred->suid; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); if (retval) - return retval; + goto error; + old = current_cred(); + retval = 
-EPERM; if (!capable(CAP_SETUID)) { - if ((ruid != (uid_t) -1) && (ruid != cred->uid) && - (ruid != cred->euid) && (ruid != cred->suid)) - return -EPERM; - if ((euid != (uid_t) -1) && (euid != cred->uid) && - (euid != cred->euid) && (euid != cred->suid)) - return -EPERM; - if ((suid != (uid_t) -1) && (suid != cred->uid) && - (suid != cred->euid) && (suid != cred->suid)) - return -EPERM; + if (ruid != (uid_t) -1 && ruid != old->uid && + ruid != old->euid && ruid != old->suid) + goto error; + if (euid != (uid_t) -1 && euid != old->uid && + euid != old->euid && euid != old->suid) + goto error; + if (suid != (uid_t) -1 && suid != old->uid && + suid != old->euid && suid != old->suid) + goto error; } + + retval = -EAGAIN; if (ruid != (uid_t) -1) { - if (ruid != cred->uid && - set_user(ruid, euid != cred->euid) < 0) - return -EAGAIN; + new->uid = ruid; + if (ruid != old->uid && set_user(new) < 0) + goto error; } - if (euid != (uid_t) -1) { - if (euid != cred->euid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - cred->euid = euid; - } - cred->fsuid = cred->euid; + if (euid != (uid_t) -1) + new->euid = euid; if (suid != (uid_t) -1) - cred->suid = suid; + new->suid = suid; + new->fsuid = new->euid; - key_fsuid_changed(current); - proc_id_connector(current, PROC_EVENT_UID); + retval = security_task_fix_setuid(new, old, LSM_SETID_RES); + if (retval < 0) + goto error; - return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); + return commit_creds(new); + +error: + abort_creds(new); + return retval; } asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) @@ -763,40 +772,45 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us */ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) { - struct cred *cred = current->cred; + const struct cred *old; + struct cred *new; int retval; + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); + retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); if (retval) - return retval; + goto error; + retval = -EPERM; if (!capable(CAP_SETGID)) { - if ((rgid != (gid_t) -1) && (rgid != cred->gid) && - (rgid != cred->egid) && (rgid != cred->sgid)) - return -EPERM; - if ((egid != (gid_t) -1) && (egid != cred->gid) && - (egid != cred->egid) && (egid != cred->sgid)) - return -EPERM; - if ((sgid != (gid_t) -1) && (sgid != cred->gid) && - (sgid != cred->egid) && (sgid != cred->sgid)) - return -EPERM; + if (rgid != (gid_t) -1 && rgid != old->gid && + rgid != old->egid && rgid != old->sgid) + goto error; + if (egid != (gid_t) -1 && egid != old->gid && + egid != old->egid && egid != old->sgid) + goto error; + if (sgid != (gid_t) -1 && sgid != old->gid && + sgid != old->egid && sgid != old->sgid) + goto error; } - if (egid != (gid_t) -1) { - if (egid != cred->egid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); - } - cred->egid = egid; - } - cred->fsgid = cred->egid; + if (rgid != (gid_t) -1) - cred->gid = rgid; + new->gid = rgid; + if (egid != (gid_t) -1) + new->egid = egid; if (sgid != (gid_t) -1) - cred->sgid = sgid; + new->sgid = sgid; + new->fsgid = new->egid; - key_fsgid_changed(current); - proc_id_connector(current, PROC_EVENT_GID); - return 0; + return commit_creds(new); + +error: + abort_creds(new); + return retval; } asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) @@ -820,28 +834,35 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us */ 
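As an editorial sketch for orientation only, and not part of the patch itself: the setregid/setgid/setreuid/setuid/setresuid/setresgid conversions above all follow one calling convention built on the prepare_creds(), commit_creds() and abort_creds() interface introduced by this patch. The helper name below and its simplified permission check are invented for illustration:

	/* Illustrative sketch, not kernel source: the COW-credentials
	 * calling convention used by the converted ID setters. */
	static long example_setegid(gid_t egid)
	{
		const struct cred *old;
		struct cred *new;

		new = prepare_creds();		/* private copy of current->cred */
		if (!new)
			return -ENOMEM;
		old = current_cred();

		if (egid != old->gid && egid != old->egid &&
		    egid != old->sgid && !capable(CAP_SETGID)) {
			abort_creds(new);	/* discard the unused copy */
			return -EPERM;
		}

		new->egid = new->fsgid = egid;	/* alter only the copy */
		return commit_creds(new);	/* publish it; consumes the reference */
	}

Nothing pokes at current->cred directly any more: each syscall prepares a copy, alters it and either commits or aborts it, which lets commit_creds() centralise the dumpable, keyring and proc-connector side effects that every setter previously open-coded.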
asmlinkage long sys_setfsuid(uid_t uid) { - struct cred *cred = current->cred; - int old_fsuid; + const struct cred *old; + struct cred *new; + uid_t old_fsuid; + + new = prepare_creds(); + if (!new) + return current_fsuid(); + old = current_cred(); + old_fsuid = old->fsuid; - old_fsuid = cred->fsuid; - if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) - return old_fsuid; + if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0) + goto error; - if (uid == cred->uid || uid == cred->euid || - uid == cred->suid || uid == cred->fsuid || + if (uid == old->uid || uid == old->euid || + uid == old->suid || uid == old->fsuid || capable(CAP_SETUID)) { if (uid != old_fsuid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); + new->fsuid = uid; + if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) + goto change_okay; } - cred->fsuid = uid; } - key_fsuid_changed(current); - proc_id_connector(current, PROC_EVENT_UID); - - security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); +error: + abort_creds(new); + return old_fsuid; +change_okay: + commit_creds(new); return old_fsuid; } @@ -850,24 +871,34 @@ asmlinkage long sys_setfsuid(uid_t uid) */ asmlinkage long sys_setfsgid(gid_t gid) { - struct cred *cred = current->cred; - int old_fsgid; + const struct cred *old; + struct cred *new; + gid_t old_fsgid; + + new = prepare_creds(); + if (!new) + return current_fsgid(); + old = current_cred(); + old_fsgid = old->fsgid; - old_fsgid = cred->fsgid; if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) - return old_fsgid; + goto error; - if (gid == cred->gid || gid == cred->egid || - gid == cred->sgid || gid == cred->fsgid || + if (gid == old->gid || gid == old->egid || + gid == old->sgid || gid == old->fsgid || capable(CAP_SETGID)) { if (gid != old_fsgid) { - set_dumpable(current->mm, suid_dumpable); - smp_wmb(); + new->fsgid = gid; + goto change_okay; } - cred->fsgid = gid; - key_fsgid_changed(current); - proc_id_connector(current, PROC_EVENT_GID); } + +error: + abort_creds(new); + return old_fsgid; + +change_okay: + commit_creds(new); return old_fsgid; } @@ -1136,7 +1167,7 @@ EXPORT_SYMBOL(groups_free); /* export the group_info to a user-space array */ static int groups_to_user(gid_t __user *grouplist, - struct group_info *group_info) + const struct group_info *group_info) { int i; unsigned int count = group_info->ngroups; @@ -1227,31 +1258,25 @@ int groups_search(const struct group_info *group_info, gid_t grp) } /** - * set_groups - Change a group subscription in a security record - * @sec: The security record to alter - * @group_info: The group list to impose + * set_groups - Change a group subscription in a set of credentials + * @new: The newly prepared set of credentials to alter + * @group_info: The group list to install * - * Validate a group subscription and, if valid, impose it upon a task security - * record. + * Validate a group subscription and, if valid, insert it into a set + * of credentials. 
*/ -int set_groups(struct cred *cred, struct group_info *group_info) +int set_groups(struct cred *new, struct group_info *group_info) { int retval; - struct group_info *old_info; retval = security_task_setgroups(group_info); if (retval) return retval; + put_group_info(new->group_info); groups_sort(group_info); get_group_info(group_info); - - spin_lock(&cred->lock); - old_info = cred->group_info; - cred->group_info = group_info; - spin_unlock(&cred->lock); - - put_group_info(old_info); + new->group_info = group_info; return 0; } @@ -1266,7 +1291,20 @@ EXPORT_SYMBOL(set_groups); */ int set_current_groups(struct group_info *group_info) { - return set_groups(current->cred, group_info); + struct cred *new; + int ret; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + ret = set_groups(new, group_info); + if (ret < 0) { + abort_creds(new); + return ret; + } + + return commit_creds(new); } EXPORT_SYMBOL(set_current_groups); @@ -1666,9 +1704,11 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned char comm[sizeof(me->comm)]; long error; - if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) + error = security_task_prctl(option, arg2, arg3, arg4, arg5); + if (error != -ENOSYS) return error; + error = 0; switch (option) { case PR_SET_PDEATHSIG: if (!valid_signal(arg2)) { diff --git a/kernel/user.c b/kernel/user.c index 104d22ac84d..d476307dd4b 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -16,6 +16,7 @@ #include #include #include +#include "cred-internals.h" struct user_namespace init_user_ns = { .kref = { @@ -104,16 +105,10 @@ static int sched_create_user(struct user_struct *up) return rc; } -static void sched_switch_user(struct task_struct *p) -{ - sched_move_task(p); -} - #else /* CONFIG_USER_SCHED */ static void sched_destroy_user(struct user_struct *up) { } static int sched_create_user(struct user_struct *up) { return 0; } -static void sched_switch_user(struct task_struct *p) { } #endif /* CONFIG_USER_SCHED */ @@ -448,36 +443,6 @@ out_unlock: return NULL; } -void switch_uid(struct user_struct *new_user) -{ - struct user_struct *old_user; - - /* What if a process setreuid()'s and this brings the - * new uid over his NPROC rlimit? We can check this now - * cheaply with the new uid cache, so if it matters - * we should be checking for it. -DaveM - */ - old_user = current->cred->user; - atomic_inc(&new_user->processes); - atomic_dec(&old_user->processes); - switch_uid_keyring(new_user); - current->cred->user = new_user; - sched_switch_user(current); - - /* - * We need to synchronize with __sigqueue_alloc() - * doing a get_uid(p->user).. If that saw the old - * user value, we need to wait until it has exited - * its critical region before we can free the old - * structure. 
- */ - smp_mb(); - spin_unlock_wait(&current->sighand->siglock); - - free_uid(old_user); - suid_keys(current); -} - #ifdef CONFIG_USER_NS void release_uids(struct user_namespace *ns) {
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index f82730adea0..0d9c51d6733 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -19,6 +19,7 @@ static struct user_namespace *clone_user_ns(struct user_namespace *old_ns) { struct user_namespace *ns; struct user_struct *new_user; + struct cred *new; int n; ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); @@ -45,7 +46,16 @@ static struct user_namespace *clone_user_ns(struct user_namespace *old_ns) return ERR_PTR(-ENOMEM); } - switch_uid(new_user); + /* Install the new user */ + new = prepare_creds(); + if (!new) { + free_uid(new_user); + free_uid(ns->root_user); + kfree(ns); + } + free_uid(new->user); + new->user = new_user; + commit_creds(new); return ns; }
diff --git a/lib/Makefile b/lib/Makefile index 7cb65d85aeb..80fe8a3ec12 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o show_mem.o + proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 9a8ff684da7..ad8c7a782da 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c @@ -287,6 +287,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn, time_t expiry, u32 kvno) { + const struct cred *cred = current_cred(); struct key *key; int ret; @@ -297,7 +298,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn, _enter(""); - key = key_alloc(&key_type_rxrpc, "x", 0, 0, current, 0, + key = key_alloc(&key_type_rxrpc, "x", 0, 0, cred, 0, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(key)) { _leave(" = -ENOMEM [alloc %ld]", PTR_ERR(key)); @@ -340,10 +341,11 @@ EXPORT_SYMBOL(rxrpc_get_server_data_key); */ struct key *rxrpc_get_null_key(const char *keyname) { + const struct cred *cred = current_cred(); struct key *key; int ret; - key = key_alloc(&key_type_rxrpc, keyname, 0, 0, current, + key = key_alloc(&key_type_rxrpc, keyname, 0, 0, cred, KEY_POS_SEARCH, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(key)) return key;
diff --git a/security/capability.c b/security/capability.c index fac2f61b69a..efeb6d9e0e6 100644 --- a/security/capability.c +++ b/security/capability.c @@ -340,12 +340,16 @@ static int cap_task_create(unsigned long clone_flags) return 0; } -static int cap_cred_alloc_security(struct cred *cred) +static void cap_cred_free(struct cred *cred) +{ +} + +static int cap_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { return 0; } -static void cap_cred_free(struct cred *cred) +static void cap_cred_commit(struct cred *new, const struct cred *old) { } @@ -750,7 +754,7 @@ static void cap_release_secctx(char *secdata, u32 seclen) } #ifdef CONFIG_KEYS -static int cap_key_alloc(struct key *key, struct task_struct *ctx, +static int cap_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { return 0; @@ -760,7 +764,7 @@ static void cap_key_free(struct key *key) { } -static int cap_key_permission(key_ref_t key_ref, struct task_struct *context, +static int cap_key_permission(key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { return 0; @@ -814,8 +818,7 @@ void
security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, ptrace_may_access); set_to_cap_if_null(ops, ptrace_traceme); set_to_cap_if_null(ops, capget); - set_to_cap_if_null(ops, capset_check); - set_to_cap_if_null(ops, capset_set); + set_to_cap_if_null(ops, capset); set_to_cap_if_null(ops, acct); set_to_cap_if_null(ops, capable); set_to_cap_if_null(ops, quotactl); @@ -890,10 +893,11 @@ void security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, file_receive); set_to_cap_if_null(ops, dentry_open); set_to_cap_if_null(ops, task_create); - set_to_cap_if_null(ops, cred_alloc_security); set_to_cap_if_null(ops, cred_free); + set_to_cap_if_null(ops, cred_prepare); + set_to_cap_if_null(ops, cred_commit); set_to_cap_if_null(ops, task_setuid); - set_to_cap_if_null(ops, task_post_setuid); + set_to_cap_if_null(ops, task_fix_setuid); set_to_cap_if_null(ops, task_setgid); set_to_cap_if_null(ops, task_setpgid); set_to_cap_if_null(ops, task_getpgid); @@ -910,7 +914,6 @@ void security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, task_wait); set_to_cap_if_null(ops, task_kill); set_to_cap_if_null(ops, task_prctl); - set_to_cap_if_null(ops, task_reparent_to_init); set_to_cap_if_null(ops, task_to_inode); set_to_cap_if_null(ops, ipc_permission); set_to_cap_if_null(ops, ipc_getsecid); diff --git a/security/commoncap.c b/security/commoncap.c index 0384bf95db6..b5419273f92 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -72,8 +72,8 @@ int cap_ptrace_may_access(struct task_struct *child, unsigned int mode) int ret = 0; rcu_read_lock(); - if (!cap_issubset(child->cred->cap_permitted, - current->cred->cap_permitted) && + if (!cap_issubset(__task_cred(child)->cap_permitted, + current_cred()->cap_permitted) && !capable(CAP_SYS_PTRACE)) ret = -EPERM; rcu_read_unlock(); @@ -85,8 +85,8 @@ int cap_ptrace_traceme(struct task_struct *parent) int ret = 0; rcu_read_lock(); - if (!cap_issubset(current->cred->cap_permitted, - parent->cred->cap_permitted) && + if (!cap_issubset(current_cred()->cap_permitted, + __task_cred(parent)->cap_permitted) && !has_capability(parent, CAP_SYS_PTRACE)) ret = -EPERM; rcu_read_unlock(); @@ -117,7 +117,7 @@ static inline int cap_inh_is_capped(void) * to the old permitted set. That is, if the current task * does *not* possess the CAP_SETPCAP capability. 
*/ - return (cap_capable(current, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0); + return cap_capable(current, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0; } static inline int cap_limit_ptraced_target(void) { return 1; } @@ -132,52 +132,39 @@ static inline int cap_limit_ptraced_target(void) #endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */ -int cap_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) +int cap_capset(struct cred *new, + const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) { - const struct cred *cred = current->cred; - - if (cap_inh_is_capped() - && !cap_issubset(*inheritable, - cap_combine(cred->cap_inheritable, - cred->cap_permitted))) { + if (cap_inh_is_capped() && + !cap_issubset(*inheritable, + cap_combine(old->cap_inheritable, + old->cap_permitted))) /* incapable of using this inheritable set */ return -EPERM; - } + if (!cap_issubset(*inheritable, - cap_combine(cred->cap_inheritable, - cred->cap_bset))) { + cap_combine(old->cap_inheritable, + old->cap_bset))) /* no new pI capabilities outside bounding set */ return -EPERM; - } /* verify restrictions on target's new Permitted set */ - if (!cap_issubset (*permitted, - cap_combine (cred->cap_permitted, - cred->cap_permitted))) { + if (!cap_issubset(*permitted, old->cap_permitted)) return -EPERM; - } /* verify the _new_Effective_ is a subset of the _new_Permitted_ */ - if (!cap_issubset (*effective, *permitted)) { + if (!cap_issubset(*effective, *permitted)) return -EPERM; - } + new->cap_effective = *effective; + new->cap_inheritable = *inheritable; + new->cap_permitted = *permitted; return 0; } -void cap_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) -{ - struct cred *cred = current->cred; - - cred->cap_effective = *effective; - cred->cap_inheritable = *inheritable; - cred->cap_permitted = *permitted; -} - static inline void bprm_clear_caps(struct linux_binprm *bprm) { cap_clear(bprm->cap_post_exec_permitted); @@ -382,41 +369,46 @@ int cap_bprm_set_security (struct linux_binprm *bprm) return ret; } -void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) +int cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) { - struct cred *cred = current->cred; + const struct cred *old = current_cred(); + struct cred *new; + + new = prepare_creds(); + if (!new) + return -ENOMEM; - if (bprm->e_uid != cred->uid || bprm->e_gid != cred->gid || + if (bprm->e_uid != old->uid || bprm->e_gid != old->gid || !cap_issubset(bprm->cap_post_exec_permitted, - cred->cap_permitted)) { + old->cap_permitted)) { set_dumpable(current->mm, suid_dumpable); current->pdeath_signal = 0; if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) { if (!capable(CAP_SETUID)) { - bprm->e_uid = cred->uid; - bprm->e_gid = cred->gid; + bprm->e_uid = old->uid; + bprm->e_gid = old->gid; } if (cap_limit_ptraced_target()) { bprm->cap_post_exec_permitted = cap_intersect( bprm->cap_post_exec_permitted, - cred->cap_permitted); + new->cap_permitted); } } } - cred->suid = cred->euid = cred->fsuid = bprm->e_uid; - cred->sgid = cred->egid = cred->fsgid = bprm->e_gid; + new->suid = new->euid = new->fsuid = bprm->e_uid; + new->sgid = new->egid = new->fsgid = bprm->e_gid; /* For init, we want to retain the capabilities set * in the init_task struct. 
Thus we skip the usual * capability rules */ if (!is_global_init(current)) { - cred->cap_permitted = bprm->cap_post_exec_permitted; + new->cap_permitted = bprm->cap_post_exec_permitted; if (bprm->cap_effective) - cred->cap_effective = bprm->cap_post_exec_permitted; + new->cap_effective = bprm->cap_post_exec_permitted; else - cap_clear(cred->cap_effective); + cap_clear(new->cap_effective); } /* @@ -431,15 +423,15 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) * Number 1 above might fail if you don't have a full bset, but I think * that is interesting information to audit. */ - if (!cap_isclear(cred->cap_effective)) { - if (!cap_issubset(CAP_FULL_SET, cred->cap_effective) || - (bprm->e_uid != 0) || (cred->uid != 0) || + if (!cap_isclear(new->cap_effective)) { + if (!cap_issubset(CAP_FULL_SET, new->cap_effective) || + bprm->e_uid != 0 || new->uid != 0 || issecure(SECURE_NOROOT)) - audit_log_bprm_fcaps(bprm, &cred->cap_permitted, - &cred->cap_effective); + audit_log_bprm_fcaps(bprm, new, old); } - cred->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); + new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); + return commit_creds(new); } int cap_bprm_secureexec (struct linux_binprm *bprm) @@ -514,65 +506,49 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name) * files.. * Thanks to Olaf Kirch and Peter Benie for spotting this. */ -static inline void cap_emulate_setxuid (int old_ruid, int old_euid, - int old_suid) +static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old) { - struct cred *cred = current->cred; - - if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) && - (cred->uid != 0 && cred->euid != 0 && cred->suid != 0) && + if ((old->uid == 0 || old->euid == 0 || old->suid == 0) && + (new->uid != 0 && new->euid != 0 && new->suid != 0) && !issecure(SECURE_KEEP_CAPS)) { - cap_clear(cred->cap_permitted); - cap_clear(cred->cap_effective); - } - if (old_euid == 0 && cred->euid != 0) { - cap_clear(cred->cap_effective); - } - if (old_euid != 0 && cred->euid == 0) { - cred->cap_effective = cred->cap_permitted; + cap_clear(new->cap_permitted); + cap_clear(new->cap_effective); } + if (old->euid == 0 && new->euid != 0) + cap_clear(new->cap_effective); + if (old->euid != 0 && new->euid == 0) + new->cap_effective = new->cap_permitted; } -int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, - int flags) +int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { - struct cred *cred = current->cred; - switch (flags) { case LSM_SETID_RE: case LSM_SETID_ID: case LSM_SETID_RES: /* Copied from kernel/sys.c:setreuid/setuid/setresuid. */ - if (!issecure (SECURE_NO_SETUID_FIXUP)) { - cap_emulate_setxuid (old_ruid, old_euid, old_suid); - } + if (!issecure(SECURE_NO_SETUID_FIXUP)) + cap_emulate_setxuid(new, old); break; case LSM_SETID_FS: - { - uid_t old_fsuid = old_ruid; - - /* Copied from kernel/sys.c:setfsuid. */ + /* Copied from kernel/sys.c:setfsuid. */ - /* - * FIXME - is fsuser used for all CAP_FS_MASK capabilities? - * if not, we might be a bit too harsh here. - */ - - if (!issecure (SECURE_NO_SETUID_FIXUP)) { - if (old_fsuid == 0 && cred->fsuid != 0) { - cred->cap_effective = - cap_drop_fs_set( - cred->cap_effective); - } - if (old_fsuid != 0 && cred->fsuid == 0) { - cred->cap_effective = - cap_raise_fs_set( - cred->cap_effective, - cred->cap_permitted); - } + /* + * FIXME - is fsuser used for all CAP_FS_MASK capabilities? + * if not, we might be a bit too harsh here. 
+ */ + if (!issecure(SECURE_NO_SETUID_FIXUP)) { + if (old->fsuid == 0 && new->fsuid != 0) { + new->cap_effective = + cap_drop_fs_set(new->cap_effective); + } + if (old->fsuid != 0 && new->fsuid == 0) { + new->cap_effective = + cap_raise_fs_set(new->cap_effective, + new->cap_permitted); } - break; } + break; default: return -EINVAL; } @@ -628,13 +604,14 @@ int cap_task_setnice (struct task_struct *p, int nice) * this task could get inconsistent info. There can be no * racing writer bc a task can only change its own caps. */ -static long cap_prctl_drop(unsigned long cap) +static long cap_prctl_drop(struct cred *new, unsigned long cap) { if (!capable(CAP_SETPCAP)) return -EPERM; if (!cap_valid(cap)) return -EINVAL; - cap_lower(current->cred->cap_bset, cap); + + cap_lower(new->cap_bset, cap); return 0; } @@ -655,22 +632,29 @@ int cap_task_setnice (struct task_struct *p, int nice) #endif int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p) + unsigned long arg4, unsigned long arg5) { - struct cred *cred = current_cred(); + struct cred *new; long error = 0; + new = prepare_creds(); + if (!new) + return -ENOMEM; + switch (option) { case PR_CAPBSET_READ: + error = -EINVAL; if (!cap_valid(arg2)) - error = -EINVAL; - else - error = !!cap_raised(cred->cap_bset, arg2); - break; + goto error; + error = !!cap_raised(new->cap_bset, arg2); + goto no_change; + #ifdef CONFIG_SECURITY_FILE_CAPABILITIES case PR_CAPBSET_DROP: - error = cap_prctl_drop(arg2); - break; + error = cap_prctl_drop(new, arg2); + if (error < 0) + goto error; + goto changed; /* * The next four prctl's remain to assist with transitioning a @@ -692,12 +676,12 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, * capability-based-privilege environment. 
*/ case PR_SET_SECUREBITS: - if ((((cred->securebits & SECURE_ALL_LOCKS) >> 1) - & (cred->securebits ^ arg2)) /*[1]*/ - || ((cred->securebits & SECURE_ALL_LOCKS - & ~arg2)) /*[2]*/ - || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/ - || (cap_capable(current, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0)) { /*[4]*/ + error = -EPERM; + if ((((new->securebits & SECURE_ALL_LOCKS) >> 1) + & (new->securebits ^ arg2)) /*[1]*/ + || ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/ + || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/ + || (cap_capable(current, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0) /*[4]*/ /* * [1] no changing of bits that are locked * [2] no unlocking of locks @@ -705,50 +689,51 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, * [4] doing anything requires privilege (go read about * the "sendmail capabilities bug") */ - error = -EPERM; /* cannot change a locked bit */ - } else { - cred->securebits = arg2; - } - break; + ) + /* cannot change a locked bit */ + goto error; + new->securebits = arg2; + goto changed; + case PR_GET_SECUREBITS: - error = cred->securebits; - break; + error = new->securebits; + goto no_change; #endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */ case PR_GET_KEEPCAPS: if (issecure(SECURE_KEEP_CAPS)) error = 1; - break; + goto no_change; + case PR_SET_KEEPCAPS: + error = -EINVAL; if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */ - error = -EINVAL; - else if (issecure(SECURE_KEEP_CAPS_LOCKED)) - error = -EPERM; - else if (arg2) - cred->securebits |= issecure_mask(SECURE_KEEP_CAPS); + goto error; + error = -EPERM; + if (issecure(SECURE_KEEP_CAPS_LOCKED)) + goto error; + if (arg2) + new->securebits |= issecure_mask(SECURE_KEEP_CAPS); else - cred->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); - break; + new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); + goto changed; default: /* No functionality available - continue with default */ - return 0; + error = -ENOSYS; + goto error; } /* Functionality provided */ - *rc_p = error; - return 1; -} - -void cap_task_reparent_to_init (struct task_struct *p) -{ - struct cred *cred = p->cred; - - cap_set_init_eff(cred->cap_effective); - cap_clear(cred->cap_inheritable); - cap_set_full(cred->cap_permitted); - p->cred->securebits = SECUREBITS_DEFAULT; +changed: + return commit_creds(new); + +no_change: + error = 0; +error: + abort_creds(new); + return error; } int cap_syslog (int type) diff --git a/security/keys/internal.h b/security/keys/internal.h index d1586c62978..81932abefe7 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -12,6 +12,7 @@ #ifndef _INTERNAL_H #define _INTERNAL_H +#include #include static inline __attribute__((format(printf, 1, 2))) @@ -25,7 +26,7 @@ void no_printk(const char *fmt, ...) #define kleave(FMT, ...) \ printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__) #define kdebug(FMT, ...) \ - printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__) + printk(KERN_DEBUG " "FMT"\n", ##__VA_ARGS__) #else #define kenter(FMT, ...) 
\ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__) @@ -97,7 +98,7 @@ extern struct key *keyring_search_instkey(struct key *keyring, typedef int (*key_match_func_t)(const struct key *, const void *); extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, - struct task_struct *tsk, + const struct cred *cred, struct key_type *type, const void *description, key_match_func_t match); @@ -105,13 +106,13 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, extern key_ref_t search_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, - struct task_struct *tsk); + const struct cred *cred); extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); extern int install_user_keyrings(void); -extern int install_thread_keyring(void); -extern int install_process_keyring(void); +extern int install_thread_keyring_to_cred(struct cred *); +extern int install_process_keyring_to_cred(struct cred *); extern struct key *request_key_and_link(struct key_type *type, const char *description, @@ -130,12 +131,12 @@ extern long join_session_keyring(const char *name); * check to see whether permission is granted to use a key in the desired way */ extern int key_task_permission(const key_ref_t key_ref, - struct task_struct *context, + const struct cred *cred, key_perm_t perm); static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) { - return key_task_permission(key_ref, current, perm); + return key_task_permission(key_ref, current_cred(), perm); } /* required permissions */ @@ -153,7 +154,7 @@ static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) struct request_key_auth { struct key *target_key; struct key *dest_keyring; - struct task_struct *context; + const struct cred *cred; void *callout_info; size_t callout_len; pid_t pid; diff --git a/security/keys/key.c b/security/keys/key.c index a6ca39ed3b0..f76c8a546fd 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -218,7 +218,7 @@ serial_exists: * instantiate the key or discard it before returning */ struct key *key_alloc(struct key_type *type, const char *desc, - uid_t uid, gid_t gid, struct task_struct *ctx, + uid_t uid, gid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags) { struct key_user *user = NULL; @@ -294,7 +294,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, #endif /* let the security module know about the key */ - ret = security_key_alloc(key, ctx, flags); + ret = security_key_alloc(key, cred, flags); if (ret < 0) goto security_error; @@ -391,7 +391,7 @@ static int __key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, - struct key *instkey) + struct key *authkey) { int ret, awaken; @@ -421,8 +421,8 @@ static int __key_instantiate_and_link(struct key *key, ret = __key_link(keyring, key); /* disable the authorisation key */ - if (instkey) - key_revoke(instkey); + if (authkey) + key_revoke(authkey); } } @@ -444,14 +444,14 @@ int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, - struct key *instkey) + struct key *authkey) { int ret; if (keyring) down_write(&keyring->sem); - ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey); + ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey); if (keyring) up_write(&keyring->sem); @@ -469,7 +469,7 @@ EXPORT_SYMBOL(key_instantiate_and_link); int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, - 
struct key *instkey) + struct key *authkey) { struct timespec now; int ret, awaken; @@ -504,8 +504,8 @@ int key_negate_and_link(struct key *key, ret = __key_link(keyring, key); /* disable the authorisation key */ - if (instkey) - key_revoke(instkey); + if (authkey) + key_revoke(authkey); } mutex_unlock(&key_construction_mutex); @@ -743,6 +743,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, key_perm_t perm, unsigned long flags) { + const struct cred *cred = current_cred(); struct key_type *ktype; struct key *keyring, *key = NULL; key_ref_t key_ref; @@ -802,8 +803,8 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, } /* allocate a new key */ - key = key_alloc(ktype, description, current_fsuid(), current_fsgid(), - current, perm, flags); + key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, + perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); goto error_3; diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 8833b447ade..7c72baa02f2 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -866,6 +866,23 @@ static long get_instantiation_keyring(key_serial_t ringid, return -ENOKEY; } +/* + * change the request_key authorisation key on the current process + */ +static int keyctl_change_reqkey_auth(struct key *key) +{ + struct cred *new; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + + key_put(new->request_key_auth); + new->request_key_auth = key_get(key); + + return commit_creds(new); +} + /*****************************************************************************/ /* * instantiate the key with the specified payload, and, if one is given, link @@ -876,12 +893,15 @@ long keyctl_instantiate_key(key_serial_t id, size_t plen, key_serial_t ringid) { + const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; void *payload; long ret; bool vm = false; + kenter("%d,,%zu,%d", id, plen, ringid); + ret = -EINVAL; if (plen > 1024 * 1024 - 1) goto error; @@ -889,7 +909,7 @@ long keyctl_instantiate_key(key_serial_t id, /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; - instkey = current->cred->request_key_auth; + instkey = cred->request_key_auth; if (!instkey) goto error; @@ -931,10 +951,8 @@ long keyctl_instantiate_key(key_serial_t id, /* discard the assumed authority if it's just been disabled by * instantiation of the key */ - if (ret == 0) { - key_put(current->cred->request_key_auth); - current->cred->request_key_auth = NULL; - } + if (ret == 0) + keyctl_change_reqkey_auth(NULL); error2: if (!vm) @@ -953,14 +971,17 @@ error: */ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { + const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; + kenter("%d,%u,%d", id, timeout, ringid); + /* the appropriate instantiation authorisation key must have been * assumed before calling this */ ret = -EPERM; - instkey = current->cred->request_key_auth; + instkey = cred->request_key_auth; if (!instkey) goto error; @@ -982,10 +1003,8 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) /* discard the assumed authority if it's just been disabled by * instantiation of the key */ - if (ret == 0) { - key_put(current->cred->request_key_auth); - current->cred->request_key_auth = NULL; - } + if (ret == 0) + keyctl_change_reqkey_auth(NULL); error: return ret; @@ -999,36 +1018,56 @@ error: */ long 
keyctl_set_reqkey_keyring(int reqkey_defl) { - struct cred *cred = current->cred; - int ret; + struct cred *new; + int ret, old_setting; + + old_setting = current_cred_xxx(jit_keyring); + + if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE) + return old_setting; + + new = prepare_creds(); + if (!new) + return -ENOMEM; switch (reqkey_defl) { case KEY_REQKEY_DEFL_THREAD_KEYRING: - ret = install_thread_keyring(); + ret = install_thread_keyring_to_cred(new); if (ret < 0) - return ret; + goto error; goto set; case KEY_REQKEY_DEFL_PROCESS_KEYRING: - ret = install_process_keyring(); - if (ret < 0) - return ret; + ret = install_process_keyring_to_cred(new); + if (ret < 0) { + if (ret != -EEXIST) + goto error; + ret = 0; + } + goto set; case KEY_REQKEY_DEFL_DEFAULT: case KEY_REQKEY_DEFL_SESSION_KEYRING: case KEY_REQKEY_DEFL_USER_KEYRING: case KEY_REQKEY_DEFL_USER_SESSION_KEYRING: - set: - cred->jit_keyring = reqkey_defl; + case KEY_REQKEY_DEFL_REQUESTOR_KEYRING: + goto set; case KEY_REQKEY_DEFL_NO_CHANGE: - return cred->jit_keyring; - case KEY_REQKEY_DEFL_GROUP_KEYRING: default: - return -EINVAL; + ret = -EINVAL; + goto error; } +set: + new->jit_keyring = reqkey_defl; + commit_creds(new); + return old_setting; +error: + abort_creds(new); + return -EINVAL; + } /* end keyctl_set_reqkey_keyring() */ /*****************************************************************************/ @@ -1087,9 +1126,7 @@ long keyctl_assume_authority(key_serial_t id) /* we divest ourselves of authority if given an ID of 0 */ if (id == 0) { - key_put(current->cred->request_key_auth); - current->cred->request_key_auth = NULL; - ret = 0; + ret = keyctl_change_reqkey_auth(NULL); goto error; } @@ -1104,10 +1141,12 @@ long keyctl_assume_authority(key_serial_t id) goto error; } - key_put(current->cred->request_key_auth); - current->cred->request_key_auth = authkey; - ret = authkey->serial; + ret = keyctl_change_reqkey_auth(authkey); + if (ret < 0) + goto error; + key_put(authkey); + ret = authkey->serial; error: return ret; diff --git a/security/keys/keyring.c b/security/keys/keyring.c index fdf75f90199..ed851574d07 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -245,14 +245,14 @@ static long keyring_read(const struct key *keyring, * allocate a keyring and link into the destination keyring */ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, - struct task_struct *ctx, unsigned long flags, + const struct cred *cred, unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, - uid, gid, ctx, + uid, gid, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL, flags); @@ -281,7 +281,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, * - we propagate the possession attribute from the keyring ref to the key ref */ key_ref_t keyring_search_aux(key_ref_t keyring_ref, - struct task_struct *context, + const struct cred *cred, struct key_type *type, const void *description, key_match_func_t match) @@ -304,7 +304,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref, key_check(keyring); /* top keyring must have search permission to begin the search */ - err = key_task_permission(keyring_ref, context, KEY_SEARCH); + err = key_task_permission(keyring_ref, cred, KEY_SEARCH); if (err < 0) { key_ref = ERR_PTR(err); goto error; @@ -377,7 +377,7 @@ descend: /* key must have search permissions */ if (key_task_permission(make_key_ref(key, possessed), - context, KEY_SEARCH) < 0) + cred, KEY_SEARCH) < 0) continue; /* we set 
a different error code if we pass a negative key */ @@ -404,7 +404,7 @@ ascend: continue; if (key_task_permission(make_key_ref(key, possessed), - context, KEY_SEARCH) < 0) + cred, KEY_SEARCH) < 0) continue; /* stack the current position */ @@ -459,7 +459,7 @@ key_ref_t keyring_search(key_ref_t keyring, if (!type->match) return ERR_PTR(-ENOKEY); - return keyring_search_aux(keyring, current, + return keyring_search_aux(keyring, current->cred, type, description, type->match); } /* end keyring_search() */ diff --git a/security/keys/permission.c b/security/keys/permission.c index 13c36164f28..5d9fc7b93f2 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -14,24 +14,27 @@ #include "internal.h" /*****************************************************************************/ -/* - * check to see whether permission is granted to use a key in the desired way, - * but permit the security modules to override +/** + * key_task_permission - Check a key can be used + * @key_ref: The key to check + * @cred: The credentials to use + * @perm: The permissions to check for + * + * Check to see whether permission is granted to use a key in the desired way, + * but permit the security modules to override. + * + * The caller must hold either a ref on cred or must hold the RCU readlock or a + * spinlock. */ -int key_task_permission(const key_ref_t key_ref, - struct task_struct *context, +int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm) { - const struct cred *cred; struct key *key; key_perm_t kperm; int ret; key = key_ref_to_ptr(key_ref); - rcu_read_lock(); - cred = __task_cred(context); - /* use the second 8-bits of permissions for keys the caller owns */ if (key->uid == cred->fsuid) { kperm = key->perm >> 16; @@ -57,7 +60,6 @@ int key_task_permission(const key_ref_t key_ref, kperm = key->perm; use_these_perms: - rcu_read_lock(); /* use the top 8-bits of permissions for keys the caller possesses * - possessor permissions are additive with other permissions @@ -71,7 +73,7 @@ use_these_perms: return -EACCES; /* let LSM be the final arbiter */ - return security_key_permission(key_ref, context, perm); + return security_key_permission(key_ref, cred, perm); } /* end key_task_permission() */ diff --git a/security/keys/proc.c b/security/keys/proc.c index f619170da76..7f508def50e 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -136,8 +136,12 @@ static int proc_keys_show(struct seq_file *m, void *v) int rc; /* check whether the current task is allowed to view the key (assuming - * non-possession) */ - rc = key_task_permission(make_key_ref(key, 0), current, KEY_VIEW); + * non-possession) + * - the caller holds a spinlock, and thus the RCU read lock, making our + * access to __current_cred() safe + */ + rc = key_task_permission(make_key_ref(key, 0), current_cred(), + KEY_VIEW); if (rc < 0) return 0; diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 70ee93406f3..df329f684a6 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -42,11 +42,15 @@ struct key_user root_key_user = { */ int install_user_keyrings(void) { - struct user_struct *user = current->cred->user; + struct user_struct *user; + const struct cred *cred; struct key *uid_keyring, *session_keyring; char buf[20]; int ret; + cred = current_cred(); + user = cred->user; + kenter("%p{%u}", user, user->uid); if (user->uid_keyring) { @@ -67,7 +71,7 @@ int install_user_keyrings(void) uid_keyring = find_keyring_by_name(buf, true); if 
(IS_ERR(uid_keyring)) { uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, - current, KEY_ALLOC_IN_QUOTA, + cred, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(uid_keyring)) { ret = PTR_ERR(uid_keyring); @@ -83,8 +87,7 @@ int install_user_keyrings(void) if (IS_ERR(session_keyring)) { session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, - current, KEY_ALLOC_IN_QUOTA, - NULL); + cred, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(session_keyring)) { ret = PTR_ERR(session_keyring); goto error_release; @@ -116,142 +119,128 @@ error: return ret; } -/*****************************************************************************/ /* - * deal with the UID changing + * install a fresh thread keyring directly to new credentials */ -void switch_uid_keyring(struct user_struct *new_user) +int install_thread_keyring_to_cred(struct cred *new) { -#if 0 /* do nothing for now */ - struct key *old; - - /* switch to the new user's session keyring if we were running under - * root's default session keyring */ - if (new_user->uid != 0 && - current->session_keyring == &root_session_keyring - ) { - atomic_inc(&new_user->session_keyring->usage); - - task_lock(current); - old = current->session_keyring; - current->session_keyring = new_user->session_keyring; - task_unlock(current); + struct key *keyring; - key_put(old); - } -#endif + keyring = keyring_alloc("_tid", new->uid, new->gid, new, + KEY_ALLOC_QUOTA_OVERRUN, NULL); + if (IS_ERR(keyring)) + return PTR_ERR(keyring); -} /* end switch_uid_keyring() */ + new->thread_keyring = keyring; + return 0; +} -/*****************************************************************************/ /* * install a fresh thread keyring, discarding the old one */ -int install_thread_keyring(void) +static int install_thread_keyring(void) { - struct task_struct *tsk = current; - struct key *keyring, *old; - char buf[20]; + struct cred *new; int ret; - sprintf(buf, "_tid.%u", tsk->pid); + new = prepare_creds(); + if (!new) + return -ENOMEM; - keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, tsk, - KEY_ALLOC_QUOTA_OVERRUN, NULL); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); - goto error; + BUG_ON(new->thread_keyring); + + ret = install_thread_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); + return ret; } - task_lock(tsk); - old = tsk->cred->thread_keyring; - tsk->cred->thread_keyring = keyring; - task_unlock(tsk); + return commit_creds(new); +} - ret = 0; +/* + * install a process keyring directly to a credentials struct + * - returns -EEXIST if there was already a process keyring, 0 if one installed, + * and other -ve on any other error + */ +int install_process_keyring_to_cred(struct cred *new) +{ + struct key *keyring; + int ret; - key_put(old); -error: + if (new->tgcred->process_keyring) + return -EEXIST; + + keyring = keyring_alloc("_pid", new->uid, new->gid, + new, KEY_ALLOC_QUOTA_OVERRUN, NULL); + if (IS_ERR(keyring)) + return PTR_ERR(keyring); + + spin_lock_irq(&new->tgcred->lock); + if (!new->tgcred->process_keyring) { + new->tgcred->process_keyring = keyring; + keyring = NULL; + ret = 0; + } else { + ret = -EEXIST; + } + spin_unlock_irq(&new->tgcred->lock); + key_put(keyring); return ret; +} -} /* end install_thread_keyring() */ - -/*****************************************************************************/ /* * make sure a process keyring is installed + * - we */ -int install_process_keyring(void) +static int install_process_keyring(void) { - struct task_struct *tsk = current; - struct key *keyring; - char buf[20]; + struct cred *new; int ret; - 
might_sleep(); - - if (!tsk->cred->tgcred->process_keyring) { - sprintf(buf, "_pid.%u", tsk->tgid); - - keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, tsk, - KEY_ALLOC_QUOTA_OVERRUN, NULL); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); - goto error; - } - - /* attach keyring */ - spin_lock_irq(&tsk->cred->tgcred->lock); - if (!tsk->cred->tgcred->process_keyring) { - tsk->cred->tgcred->process_keyring = keyring; - keyring = NULL; - } - spin_unlock_irq(&tsk->cred->tgcred->lock); + new = prepare_creds(); + if (!new) + return -ENOMEM; - key_put(keyring); + ret = install_process_keyring_to_cred(new); + if (ret < 0) { + abort_creds(new); + return ret != -EEXIST ?: 0; } - ret = 0; -error: - return ret; - -} /* end install_process_keyring() */ + return commit_creds(new); +} -/*****************************************************************************/ /* - * install a session keyring, discarding the old one - * - if a keyring is not supplied, an empty one is invented + * install a session keyring directly to a credentials struct */ -static int install_session_keyring(struct key *keyring) +static int install_session_keyring_to_cred(struct cred *cred, + struct key *keyring) { - struct task_struct *tsk = current; unsigned long flags; struct key *old; - char buf[20]; might_sleep(); /* create an empty session keyring */ if (!keyring) { - sprintf(buf, "_ses.%u", tsk->tgid); - flags = KEY_ALLOC_QUOTA_OVERRUN; - if (tsk->cred->tgcred->session_keyring) + if (cred->tgcred->session_keyring) flags = KEY_ALLOC_IN_QUOTA; - keyring = keyring_alloc(buf, tsk->cred->uid, tsk->cred->gid, - tsk, flags, NULL); + keyring = keyring_alloc("_ses", cred->uid, cred->gid, + cred, flags, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); - } - else { + } else { atomic_inc(&keyring->usage); } /* install the keyring */ - spin_lock_irq(&tsk->cred->tgcred->lock); - old = tsk->cred->tgcred->session_keyring; - rcu_assign_pointer(tsk->cred->tgcred->session_keyring, keyring); - spin_unlock_irq(&tsk->cred->tgcred->lock); + spin_lock_irq(&cred->tgcred->lock); + old = cred->tgcred->session_keyring; + rcu_assign_pointer(cred->tgcred->session_keyring, keyring); + spin_unlock_irq(&cred->tgcred->lock); /* we're using RCU on the pointer, but there's no point synchronising * on it if it didn't previously point to anything */ @@ -261,38 +250,29 @@ static int install_session_keyring(struct key *keyring) } return 0; +} -} /* end install_session_keyring() */ - -/*****************************************************************************/ /* - * copy the keys for fork + * install a session keyring, discarding the old one + * - if a keyring is not supplied, an empty one is invented */ -int copy_keys(unsigned long clone_flags, struct task_struct *tsk) +static int install_session_keyring(struct key *keyring) { - key_check(tsk->cred->thread_keyring); - key_check(tsk->cred->request_key_auth); - - /* no thread keyring yet */ - tsk->cred->thread_keyring = NULL; - - /* copy the request_key() authorisation for this thread */ - key_get(tsk->cred->request_key_auth); - - return 0; + struct cred *new; + int ret; -} /* end copy_keys() */ + new = prepare_creds(); + if (!new) + return -ENOMEM; -/*****************************************************************************/ -/* - * dispose of per-thread keys upon thread exit - */ -void exit_keys(struct task_struct *tsk) -{ - key_put(tsk->cred->thread_keyring); - key_put(tsk->cred->request_key_auth); + ret = install_session_keyring_to_cred(new, NULL); + if (ret < 0) { + abort_creds(new); + 
return ret; + } -} /* end exit_keys() */ + return commit_creds(new); +} /*****************************************************************************/ /* @@ -300,38 +280,41 @@ void exit_keys(struct task_struct *tsk) */ int exec_keys(struct task_struct *tsk) { - struct key *old; + struct thread_group_cred *tgcred = NULL; + struct cred *new; - /* newly exec'd tasks don't get a thread keyring */ - task_lock(tsk); - old = tsk->cred->thread_keyring; - tsk->cred->thread_keyring = NULL; - task_unlock(tsk); +#ifdef CONFIG_KEYS + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); + if (!tgcred) + return -ENOMEM; +#endif - key_put(old); + new = prepare_creds(); + if (new < 0) + return -ENOMEM; - /* discard the process keyring from a newly exec'd task */ - spin_lock_irq(&tsk->cred->tgcred->lock); - old = tsk->cred->tgcred->process_keyring; - tsk->cred->tgcred->process_keyring = NULL; - spin_unlock_irq(&tsk->cred->tgcred->lock); + /* newly exec'd tasks don't get a thread keyring */ + key_put(new->thread_keyring); + new->thread_keyring = NULL; - key_put(old); + /* create a new per-thread-group creds for all this set of threads to + * share */ + memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); - return 0; + atomic_set(&tgcred->usage, 1); + spin_lock_init(&tgcred->lock); -} /* end exec_keys() */ + /* inherit the session keyring; new process keyring */ + key_get(tgcred->session_keyring); + tgcred->process_keyring = NULL; -/*****************************************************************************/ -/* - * deal with SUID programs - * - we might want to make this invent a new session keyring - */ -int suid_keys(struct task_struct *tsk) -{ + release_tgcred(new); + new->tgcred = tgcred; + + commit_creds(new); return 0; -} /* end suid_keys() */ +} /* end exec_keys() */ /*****************************************************************************/ /* @@ -376,16 +359,13 @@ void key_fsgid_changed(struct task_struct *tsk) key_ref_t search_process_keyrings(struct key_type *type, const void *description, key_match_func_t match, - struct task_struct *context) + const struct cred *cred) { struct request_key_auth *rka; - struct cred *cred; key_ref_t key_ref, ret, err; might_sleep(); - cred = get_task_cred(context); - /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; * otherwise we want to return a sample error (probably -EACCES) if @@ -401,7 +381,7 @@ key_ref_t search_process_keyrings(struct key_type *type, if (cred->thread_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->thread_keyring, 1), - context, type, description, match); + cred, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -422,7 +402,7 @@ key_ref_t search_process_keyrings(struct key_type *type, if (cred->tgcred->process_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->tgcred->process_keyring, 1), - context, type, description, match); + cred, type, description, match); if (!IS_ERR(key_ref)) goto found; @@ -446,7 +426,7 @@ key_ref_t search_process_keyrings(struct key_type *type, make_key_ref(rcu_dereference( cred->tgcred->session_keyring), 1), - context, type, description, match); + cred, type, description, match); rcu_read_unlock(); if (!IS_ERR(key_ref)) @@ -468,7 +448,7 @@ key_ref_t search_process_keyrings(struct key_type *type, else if (cred->user->session_keyring) { key_ref = keyring_search_aux( make_key_ref(cred->user->session_keyring, 1), - context, type, description, match); + cred, type, description, match); if 
(!IS_ERR(key_ref)) goto found; @@ -490,7 +470,7 @@ key_ref_t search_process_keyrings(struct key_type *type, * - we don't permit access to request_key auth keys via this method */ if (cred->request_key_auth && - context == current && + cred == current_cred() && type != &key_type_request_key_auth ) { /* defend against the auth key being revoked */ @@ -500,7 +480,7 @@ key_ref_t search_process_keyrings(struct key_type *type, rka = cred->request_key_auth->payload.data; key_ref = search_process_keyrings(type, description, - match, rka->context); + match, rka->cred); up_read(&cred->request_key_auth->sem); @@ -527,7 +507,6 @@ key_ref_t search_process_keyrings(struct key_type *type, key_ref = ret ? ret : err; found: - put_cred(cred); return key_ref; } /* end search_process_keyrings() */ @@ -552,8 +531,7 @@ key_ref_t lookup_user_key(key_serial_t id, int create, int partial, key_perm_t perm) { struct request_key_auth *rka; - struct task_struct *t = current; - struct cred *cred; + const struct cred *cred; struct key *key; key_ref_t key_ref, skey_ref; int ret; @@ -608,6 +586,7 @@ try_again: goto error; ret = install_session_keyring( cred->user->session_keyring); + if (ret < 0) goto error; goto reget_creds; @@ -693,7 +672,7 @@ try_again: /* check to see if we possess the key */ skey_ref = search_process_keyrings(key->type, key, lookup_user_key_possessed, - current); + cred); if (!IS_ERR(skey_ref)) { key_put(key); @@ -725,7 +704,7 @@ try_again: goto invalid_key; /* check the permissions */ - ret = key_task_permission(key_ref, t, perm); + ret = key_task_permission(key_ref, cred, perm); if (ret < 0) goto invalid_key; @@ -755,21 +734,33 @@ reget_creds: */ long join_session_keyring(const char *name) { - struct task_struct *tsk = current; - struct cred *cred = current->cred; + const struct cred *old; + struct cred *new; struct key *keyring; - long ret; + long ret, serial; + + /* only permit this if there's a single thread in the thread group - + * this avoids us having to adjust the creds on all threads and risking + * ENOMEM */ + if (!is_single_threaded(current)) + return -EMLINK; + + new = prepare_creds(); + if (!new) + return -ENOMEM; + old = current_cred(); /* if no name is provided, install an anonymous keyring */ if (!name) { - ret = install_session_keyring(NULL); + ret = install_session_keyring_to_cred(new, NULL); if (ret < 0) goto error; - rcu_read_lock(); - ret = rcu_dereference(cred->tgcred->session_keyring)->serial; - rcu_read_unlock(); - goto error; + serial = new->tgcred->session_keyring->serial; + ret = commit_creds(new); + if (ret == 0) + ret = serial; + goto okay; } /* allow the user to join or create a named keyring */ @@ -779,29 +770,33 @@ long join_session_keyring(const char *name) keyring = find_keyring_by_name(name, false); if (PTR_ERR(keyring) == -ENOKEY) { /* not found - try and create a new one */ - keyring = keyring_alloc(name, cred->uid, cred->gid, tsk, + keyring = keyring_alloc(name, old->uid, old->gid, old, KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } - } - else if (IS_ERR(keyring)) { + } else if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; } /* we've got a keyring - now to install it */ - ret = install_session_keyring(keyring); + ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) goto error2; + commit_creds(new); + mutex_unlock(&key_session_mutex); + ret = keyring->serial; key_put(keyring); +okay: + return ret; error2: mutex_unlock(&key_session_mutex); error: + abort_creds(new); return ret; - -} /* end 
join_session_keyring() */ +} diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 3d12558362d..0e04f72ef2d 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -83,8 +83,10 @@ static int call_sbin_request_key(struct key_construction *cons, /* allocate a new session keyring */ sprintf(desc, "_req.%u", key->serial); - keyring = keyring_alloc(desc, current_fsuid(), current_fsgid(), current, + cred = get_current_cred(); + keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, KEY_ALLOC_QUOTA_OVERRUN, NULL); + put_cred(cred); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error_alloc; @@ -104,8 +106,7 @@ static int call_sbin_request_key(struct key_construction *cons, /* we specify the process's default keyrings */ sprintf(keyring_str[0], "%d", - cred->thread_keyring ? - cred->thread_keyring->serial : 0); + cred->thread_keyring ? cred->thread_keyring->serial : 0); prkey = 0; if (cred->tgcred->process_keyring) @@ -155,8 +156,8 @@ error_link: key_put(keyring); error_alloc: - kleave(" = %d", ret); complete_request_key(cons, ret); + kleave(" = %d", ret); return ret; } @@ -295,6 +296,7 @@ static int construct_alloc_key(struct key_type *type, struct key_user *user, struct key **_key) { + const struct cred *cred = current_cred(); struct key *key; key_ref_t key_ref; @@ -302,9 +304,8 @@ static int construct_alloc_key(struct key_type *type, mutex_lock(&user->cons_lock); - key = key_alloc(type, description, - current_fsuid(), current_fsgid(), current, KEY_POS_ALL, - flags); + key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, + KEY_POS_ALL, flags); if (IS_ERR(key)) goto alloc_failed; @@ -317,8 +318,7 @@ static int construct_alloc_key(struct key_type *type, * waited for locks */ mutex_lock(&key_construction_mutex); - key_ref = search_process_keyrings(type, description, type->match, - current); + key_ref = search_process_keyrings(type, description, type->match, cred); if (!IS_ERR(key_ref)) goto key_already_present; @@ -363,6 +363,8 @@ static struct key *construct_key_and_link(struct key_type *type, struct key *key; int ret; + kenter(""); + user = key_user_lookup(current_fsuid()); if (!user) return ERR_PTR(-ENOMEM); @@ -376,17 +378,21 @@ static struct key *construct_key_and_link(struct key_type *type, if (ret == 0) { ret = construct_key(key, callout_info, callout_len, aux, dest_keyring); - if (ret < 0) + if (ret < 0) { + kdebug("cons failed"); goto construction_failed; + } } key_put(dest_keyring); + kleave(" = key %d", key_serial(key)); return key; construction_failed: key_negate_and_link(key, key_negative_timeout, NULL, NULL); key_put(key); key_put(dest_keyring); + kleave(" = %d", ret); return ERR_PTR(ret); } @@ -405,6 +411,7 @@ struct key *request_key_and_link(struct key_type *type, struct key *dest_keyring, unsigned long flags) { + const struct cred *cred = current_cred(); struct key *key; key_ref_t key_ref; @@ -414,7 +421,7 @@ struct key *request_key_and_link(struct key_type *type, /* search all the process keyrings for a key */ key_ref = search_process_keyrings(type, description, type->match, - current); + cred); if (!IS_ERR(key_ref)) { key = key_ref_to_ptr(key_ref); diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 2125579d5d7..86747151ee5 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -105,9 +105,9 @@ static void request_key_auth_revoke(struct key *key) kenter("{%d}", key->serial); - if (rka->context) { - put_task_struct(rka->context); - rka->context 
= NULL; + if (rka->cred) { + put_cred(rka->cred); + rka->cred = NULL; } } /* end request_key_auth_revoke() */ @@ -122,9 +122,9 @@ static void request_key_auth_destroy(struct key *key) kenter("{%d}", key->serial); - if (rka->context) { - put_task_struct(rka->context); - rka->context = NULL; + if (rka->cred) { + put_cred(rka->cred); + rka->cred = NULL; } key_put(rka->target_key); @@ -143,6 +143,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, size_t callout_len, struct key *dest_keyring) { struct request_key_auth *rka, *irka; + const struct cred *cred = current->cred; struct key *authkey = NULL; char desc[20]; int ret; @@ -164,28 +165,25 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, /* see if the calling process is already servicing the key request of * another process */ - if (current->cred->request_key_auth) { + if (cred->request_key_auth) { /* it is - use that instantiation context here too */ - down_read(¤t->cred->request_key_auth->sem); + down_read(&cred->request_key_auth->sem); /* if the auth key has been revoked, then the key we're * servicing is already instantiated */ - if (test_bit(KEY_FLAG_REVOKED, - ¤t->cred->request_key_auth->flags)) + if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags)) goto auth_key_revoked; - irka = current->cred->request_key_auth->payload.data; - rka->context = irka->context; + irka = cred->request_key_auth->payload.data; + rka->cred = get_cred(irka->cred); rka->pid = irka->pid; - get_task_struct(rka->context); - up_read(¤t->cred->request_key_auth->sem); + up_read(&cred->request_key_auth->sem); } else { /* it isn't - use this process as the context */ - rka->context = current; + rka->cred = get_cred(cred); rka->pid = current->pid; - get_task_struct(rka->context); } rka->target_key = key_get(target); @@ -197,7 +195,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, sprintf(desc, "%x", target->serial); authkey = key_alloc(&key_type_request_key_auth, desc, - current_fsuid(), current_fsgid(), current, + cred->fsuid, cred->fsgid, cred, KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH | KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(authkey)) { @@ -205,16 +203,16 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, goto error_alloc; } - /* construct and attach to the keyring */ + /* construct the auth key */ ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL); if (ret < 0) goto error_inst; - kleave(" = {%d}", authkey->serial); + kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage)); return authkey; auth_key_revoked: - up_read(¤t->cred->request_key_auth->sem); + up_read(&cred->request_key_auth->sem); kfree(rka->callout_info); kfree(rka); kleave("= -EKEYREVOKED"); @@ -257,6 +255,7 @@ static int key_get_instantiation_authkey_match(const struct key *key, */ struct key *key_get_instantiation_authkey(key_serial_t target_id) { + const struct cred *cred = current_cred(); struct key *authkey; key_ref_t authkey_ref; @@ -264,7 +263,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) &key_type_request_key_auth, (void *) (unsigned long) target_id, key_get_instantiation_authkey_match, - current); + cred); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); diff --git a/security/security.c b/security/security.c index f40a0a04c3c..a55d739c686 100644 --- a/security/security.c +++ b/security/security.c @@ -145,18 +145,13 @@ int security_capget(struct task_struct *target, return 
security_ops->capget(target, effective, inheritable, permitted); } -int security_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) +int security_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) { - return security_ops->capset_check(effective, inheritable, permitted); -} - -void security_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) -{ - security_ops->capset_set(effective, inheritable, permitted); + return security_ops->capset(new, old, + effective, inheritable, permitted); } int security_capable(struct task_struct *tsk, int cap) @@ -228,9 +223,9 @@ void security_bprm_free(struct linux_binprm *bprm) security_ops->bprm_free_security(bprm); } -void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) { - security_ops->bprm_apply_creds(bprm, unsafe); + return security_ops->bprm_apply_creds(bprm, unsafe); } void security_bprm_post_apply_creds(struct linux_binprm *bprm) @@ -616,14 +611,19 @@ int security_task_create(unsigned long clone_flags) return security_ops->task_create(clone_flags); } -int security_cred_alloc(struct cred *cred) +void security_cred_free(struct cred *cred) { - return security_ops->cred_alloc_security(cred); + security_ops->cred_free(cred); } -void security_cred_free(struct cred *cred) +int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp) { - security_ops->cred_free(cred); + return security_ops->cred_prepare(new, old, gfp); +} + +void security_commit_creds(struct cred *new, const struct cred *old) +{ + return security_ops->cred_commit(new, old); } int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) @@ -631,10 +631,10 @@ int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) return security_ops->task_setuid(id0, id1, id2, flags); } -int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, - uid_t old_suid, int flags) +int security_task_fix_setuid(struct cred *new, const struct cred *old, + int flags) { - return security_ops->task_post_setuid(old_ruid, old_euid, old_suid, flags); + return security_ops->task_fix_setuid(new, old, flags); } int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags) @@ -716,14 +716,9 @@ int security_task_wait(struct task_struct *p) } int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, - unsigned long arg4, unsigned long arg5, long *rc_p) -{ - return security_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p); -} - -void security_task_reparent_to_init(struct task_struct *p) + unsigned long arg4, unsigned long arg5) { - security_ops->task_reparent_to_init(p); + return security_ops->task_prctl(option, arg2, arg3, arg4, arg5); } void security_task_to_inode(struct task_struct *p, struct inode *inode) @@ -1123,9 +1118,10 @@ EXPORT_SYMBOL(security_skb_classify_flow); #ifdef CONFIG_KEYS -int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags) +int security_key_alloc(struct key *key, const struct cred *cred, + unsigned long flags) { - return security_ops->key_alloc(key, tsk, flags); + return security_ops->key_alloc(key, cred, flags); } void security_key_free(struct key *key) @@ -1134,9 +1130,9 @@ void security_key_free(struct key *key) } int security_key_permission(key_ref_t key_ref, - struct task_struct *context, key_perm_t 
perm) + const struct cred *cred, key_perm_t perm) { - return security_ops->key_permission(key_ref, context, perm); + return security_ops->key_permission(key_ref, cred, perm); } int security_key_getsecurity(struct key *key, char **_buffer) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f20cbd681ba..c71bba78872 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -156,20 +156,20 @@ static int selinux_secmark_enabled(void) return (atomic_read(&selinux_secmark_refcount) > 0); } -/* Allocate and free functions for each kind of security blob. */ - -static int cred_alloc_security(struct cred *cred) +/* + * initialise the security for the init task + */ +static void cred_init_security(void) { + struct cred *cred = (struct cred *) current->cred; struct task_security_struct *tsec; tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL); if (!tsec) - return -ENOMEM; + panic("SELinux: Failed to initialize initial task.\n"); - tsec->osid = tsec->sid = SECINITSID_UNLABELED; + tsec->osid = tsec->sid = SECINITSID_KERNEL; cred->security = tsec; - - return 0; } /* @@ -1378,6 +1378,19 @@ static inline u32 signal_to_av(int sig) return perm; } +/* + * Check permission between a pair of credentials + * fork check, ptrace check, etc. + */ +static int cred_has_perm(const struct cred *actor, + const struct cred *target, + u32 perms) +{ + u32 asid = cred_sid(actor), tsid = cred_sid(target); + + return avc_has_perm(asid, tsid, SECCLASS_PROCESS, perms, NULL); +} + /* * Check permission between a pair of tasks, e.g. signal checks, * fork check, ptrace check, etc. @@ -1820,24 +1833,19 @@ static int selinux_capget(struct task_struct *target, kernel_cap_t *effective, return secondary_ops->capget(target, effective, inheritable, permitted); } -static int selinux_capset_check(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) +static int selinux_capset(struct cred *new, const struct cred *old, + const kernel_cap_t *effective, + const kernel_cap_t *inheritable, + const kernel_cap_t *permitted) { int error; - error = secondary_ops->capset_check(effective, inheritable, permitted); + error = secondary_ops->capset(new, old, + effective, inheritable, permitted); if (error) return error; - return task_has_perm(current, current, PROCESS__SETCAP); -} - -static void selinux_capset_set(const kernel_cap_t *effective, - const kernel_cap_t *inheritable, - const kernel_cap_t *permitted) -{ - secondary_ops->capset_set(effective, inheritable, permitted); + return cred_has_perm(old, new, PROCESS__SETCAP); } static int selinux_capable(struct task_struct *tsk, int cap, int audit) @@ -2244,16 +2252,23 @@ static inline void flush_unauthorized_files(const struct cred *cred, spin_unlock(&files->file_lock); } -static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +static int selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) { struct task_security_struct *tsec; struct bprm_security_struct *bsec; + struct cred *new; u32 sid; int rc; - secondary_ops->bprm_apply_creds(bprm, unsafe); + rc = secondary_ops->bprm_apply_creds(bprm, unsafe); + if (rc < 0) + return rc; - tsec = current_security(); + new = prepare_creds(); + if (!new) + return -ENOMEM; + + tsec = new->security; bsec = bprm->security; sid = bsec->sid; @@ -2268,7 +2283,7 @@ static void selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) PROCESS__SHARE, NULL); if (rc) { bsec->unsafe = 1; - return; + goto out; } } @@ -2292,12 +2307,16 @@ static void 
selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) PROCESS__PTRACE, NULL); if (rc) { bsec->unsafe = 1; - return; + goto out; } } } tsec->sid = sid; } + +out: + commit_creds(new); + return 0; } /* @@ -3021,6 +3040,7 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, static int file_map_prot_check(struct file *file, unsigned long prot, int shared) { const struct cred *cred = current_cred(); + int rc = 0; #ifndef CONFIG_PPC32 if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) { @@ -3029,9 +3049,9 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared * private file mapping that will also be writable. * This has an additional check. */ - int rc = task_has_perm(current, current, PROCESS__EXECMEM); + rc = cred_has_perm(cred, cred, PROCESS__EXECMEM); if (rc) - return rc; + goto error; } #endif @@ -3048,7 +3068,9 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared return file_has_perm(cred, file, av); } - return 0; + +error: + return rc; } static int selinux_file_mmap(struct file *file, unsigned long reqprot, @@ -3090,8 +3112,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, rc = 0; if (vma->vm_start >= vma->vm_mm->start_brk && vma->vm_end <= vma->vm_mm->brk) { - rc = task_has_perm(current, current, - PROCESS__EXECHEAP); + rc = cred_has_perm(cred, cred, PROCESS__EXECHEAP); } else if (!vma->vm_file && vma->vm_start <= vma->vm_mm->start_stack && vma->vm_end >= vma->vm_mm->start_stack) { @@ -3104,8 +3125,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, * modified content. This typically should only * occur for text relocations. */ - rc = file_has_perm(cred, vma->vm_file, - FILE__EXECMOD); + rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD); } if (rc) return rc; @@ -3211,6 +3231,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred) struct file_security_struct *fsec; struct inode *inode; struct inode_security_struct *isec; + inode = file->f_path.dentry->d_inode; fsec = file->f_security; isec = inode->i_security; @@ -3247,38 +3268,41 @@ static int selinux_task_create(unsigned long clone_flags) return task_has_perm(current, current, PROCESS__FORK); } -static int selinux_cred_alloc_security(struct cred *cred) +/* + * detach and free the LSM part of a set of credentials + */ +static void selinux_cred_free(struct cred *cred) { - struct task_security_struct *tsec1, *tsec2; - int rc; - - tsec1 = current_security(); + struct task_security_struct *tsec = cred->security; + cred->security = NULL; + kfree(tsec); +} - rc = cred_alloc_security(cred); - if (rc) - return rc; - tsec2 = cred->security; +/* + * prepare a new set of credentials for modification + */ +static int selinux_cred_prepare(struct cred *new, const struct cred *old, + gfp_t gfp) +{ + const struct task_security_struct *old_tsec; + struct task_security_struct *tsec; - tsec2->osid = tsec1->osid; - tsec2->sid = tsec1->sid; + old_tsec = old->security; - /* Retain the exec, fs, key, and sock SIDs across fork */ - tsec2->exec_sid = tsec1->exec_sid; - tsec2->create_sid = tsec1->create_sid; - tsec2->keycreate_sid = tsec1->keycreate_sid; - tsec2->sockcreate_sid = tsec1->sockcreate_sid; + tsec = kmemdup(old_tsec, sizeof(struct task_security_struct), gfp); + if (!tsec) + return -ENOMEM; + new->security = tsec; return 0; } /* - * detach and free the LSM part of a set of credentials + * commit new credentials */ -static void selinux_cred_free(struct cred *cred) +static void 
selinux_cred_commit(struct cred *new, const struct cred *old) { - struct task_security_struct *tsec = cred->security; - cred->security = NULL; - kfree(tsec); + secondary_ops->cred_commit(new, old); } static int selinux_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) @@ -3292,9 +3316,10 @@ static int selinux_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) return 0; } -static int selinux_task_post_setuid(uid_t id0, uid_t id1, uid_t id2, int flags) +static int selinux_task_fix_setuid(struct cred *new, const struct cred *old, + int flags) { - return secondary_ops->task_post_setuid(id0, id1, id2, flags); + return secondary_ops->task_fix_setuid(new, old, flags); } static int selinux_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags) @@ -3368,7 +3393,7 @@ static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim /* Control the ability to change the hard limit (whether lowering or raising it), so that the hard limit can later be used as a safe reset point for the soft limit - upon context transitions. See selinux_bprm_apply_creds. */ + upon context transitions. See selinux_bprm_committing_creds. */ if (old_rlim->rlim_max != new_rlim->rlim_max) return task_has_perm(current, current, PROCESS__SETRLIMIT); @@ -3422,13 +3447,12 @@ static int selinux_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, - unsigned long arg5, - long *rc_p) + unsigned long arg5) { /* The current prctl operations do not appear to require any SELinux controls since they merely observe or modify the state of the current process. */ - return secondary_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p); + return secondary_ops->task_prctl(option, arg2, arg3, arg4, arg5); } static int selinux_task_wait(struct task_struct *p) @@ -3436,18 +3460,6 @@ static int selinux_task_wait(struct task_struct *p) return task_has_perm(p, current, PROCESS__SIGCHLD); } -static void selinux_task_reparent_to_init(struct task_struct *p) -{ - struct task_security_struct *tsec; - - secondary_ops->task_reparent_to_init(p); - - tsec = p->cred->security; - tsec->osid = tsec->sid; - tsec->sid = SECINITSID_KERNEL; - return; -} - static void selinux_task_to_inode(struct task_struct *p, struct inode *inode) { @@ -5325,7 +5337,8 @@ static int selinux_setprocattr(struct task_struct *p, { struct task_security_struct *tsec; struct task_struct *tracer; - u32 sid = 0; + struct cred *new; + u32 sid = 0, ptsid; int error; char *str = value; @@ -5372,86 +5385,75 @@ static int selinux_setprocattr(struct task_struct *p, return error; } + new = prepare_creds(); + if (!new) + return -ENOMEM; + /* Permission checking based on the specified context is performed during the actual operation (execve, open/mkdir/...), when we know the full context of the - operation. See selinux_bprm_set_security for the execve + operation. See selinux_bprm_set_creds for the execve checks and may_create for the file creation checks. The operation will then fail if the context is not permitted. 
*/ - tsec = p->cred->security; - if (!strcmp(name, "exec")) + tsec = new->security; + if (!strcmp(name, "exec")) { tsec->exec_sid = sid; - else if (!strcmp(name, "fscreate")) + } else if (!strcmp(name, "fscreate")) { tsec->create_sid = sid; - else if (!strcmp(name, "keycreate")) { + } else if (!strcmp(name, "keycreate")) { error = may_create_key(sid, p); if (error) - return error; + goto abort_change; tsec->keycreate_sid = sid; - } else if (!strcmp(name, "sockcreate")) + } else if (!strcmp(name, "sockcreate")) { tsec->sockcreate_sid = sid; - else if (!strcmp(name, "current")) { - struct av_decision avd; - + } else if (!strcmp(name, "current")) { + error = -EINVAL; if (sid == 0) - return -EINVAL; - /* - * SELinux allows to change context in the following case only. - * - Single threaded processes. - * - Multi threaded processes intend to change its context into - * more restricted domain (defined by TYPEBOUNDS statement). - */ - if (atomic_read(&p->mm->mm_users) != 1) { - struct task_struct *g, *t; - struct mm_struct *mm = p->mm; - read_lock(&tasklist_lock); - do_each_thread(g, t) { - if (t->mm == mm && t != p) { - read_unlock(&tasklist_lock); - error = security_bounded_transition(tsec->sid, sid); - if (!error) - goto boundary_ok; - - return error; - } - } while_each_thread(g, t); - read_unlock(&tasklist_lock); + goto abort_change; + + /* Only allow single threaded processes to change context */ + error = -EPERM; + if (!is_single_threaded(p)) { + error = security_bounded_transition(tsec->sid, sid); + if (error) + goto abort_change; } -boundary_ok: /* Check permissions for the transition. */ error = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS, PROCESS__DYNTRANSITION, NULL); if (error) - return error; + goto abort_change; /* Check for ptracing, and update the task SID if ok. Otherwise, leave SID unchanged and fail. 
*/ + ptsid = 0; task_lock(p); - rcu_read_lock(); tracer = tracehook_tracer_task(p); - if (tracer != NULL) { - u32 ptsid = task_sid(tracer); - rcu_read_unlock(); - error = avc_has_perm_noaudit(ptsid, sid, - SECCLASS_PROCESS, - PROCESS__PTRACE, 0, &avd); - if (!error) - tsec->sid = sid; - task_unlock(p); - avc_audit(ptsid, sid, SECCLASS_PROCESS, - PROCESS__PTRACE, &avd, error, NULL); + if (tracer) + ptsid = task_sid(tracer); + task_unlock(p); + + if (tracer) { + error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, + PROCESS__PTRACE, NULL); if (error) - return error; - } else { - rcu_read_unlock(); - tsec->sid = sid; - task_unlock(p); + goto abort_change; } - } else - return -EINVAL; + tsec->sid = sid; + } else { + error = -EINVAL; + goto abort_change; + } + + commit_creds(new); return size; + +abort_change: + abort_creds(new); + return error; } static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) @@ -5471,23 +5473,21 @@ static void selinux_release_secctx(char *secdata, u32 seclen) #ifdef CONFIG_KEYS -static int selinux_key_alloc(struct key *k, struct task_struct *tsk, +static int selinux_key_alloc(struct key *k, const struct cred *cred, unsigned long flags) { - const struct task_security_struct *__tsec; + const struct task_security_struct *tsec; struct key_security_struct *ksec; ksec = kzalloc(sizeof(struct key_security_struct), GFP_KERNEL); if (!ksec) return -ENOMEM; - rcu_read_lock(); - __tsec = __task_cred(tsk)->security; - if (__tsec->keycreate_sid) - ksec->sid = __tsec->keycreate_sid; + tsec = cred->security; + if (tsec->keycreate_sid) + ksec->sid = tsec->keycreate_sid; else - ksec->sid = __tsec->sid; - rcu_read_unlock(); + ksec->sid = tsec->sid; k->security = ksec; return 0; @@ -5502,8 +5502,8 @@ static void selinux_key_free(struct key *k) } static int selinux_key_permission(key_ref_t key_ref, - struct task_struct *ctx, - key_perm_t perm) + const struct cred *cred, + key_perm_t perm) { struct key *key; struct key_security_struct *ksec; @@ -5515,7 +5515,7 @@ static int selinux_key_permission(key_ref_t key_ref, if (perm == 0) return 0; - sid = task_sid(ctx); + sid = cred_sid(cred); key = key_ref_to_ptr(key_ref); ksec = key->security; @@ -5545,8 +5545,7 @@ static struct security_operations selinux_ops = { .ptrace_may_access = selinux_ptrace_may_access, .ptrace_traceme = selinux_ptrace_traceme, .capget = selinux_capget, - .capset_check = selinux_capset_check, - .capset_set = selinux_capset_set, + .capset = selinux_capset, .sysctl = selinux_sysctl, .capable = selinux_capable, .quotactl = selinux_quotactl, @@ -5621,10 +5620,11 @@ static struct security_operations selinux_ops = { .dentry_open = selinux_dentry_open, .task_create = selinux_task_create, - .cred_alloc_security = selinux_cred_alloc_security, .cred_free = selinux_cred_free, + .cred_prepare = selinux_cred_prepare, + .cred_commit = selinux_cred_commit, .task_setuid = selinux_task_setuid, - .task_post_setuid = selinux_task_post_setuid, + .task_fix_setuid = selinux_task_fix_setuid, .task_setgid = selinux_task_setgid, .task_setpgid = selinux_task_setpgid, .task_getpgid = selinux_task_getpgid, @@ -5641,7 +5641,6 @@ static struct security_operations selinux_ops = { .task_kill = selinux_task_kill, .task_wait = selinux_task_wait, .task_prctl = selinux_task_prctl, - .task_reparent_to_init = selinux_task_reparent_to_init, .task_to_inode = selinux_task_to_inode, .ipc_permission = selinux_ipc_permission, @@ -5737,8 +5736,6 @@ static struct security_operations selinux_ops = { static __init int selinux_init(void) { - struct 
task_security_struct *tsec; - if (!security_module_enable(&selinux_ops)) { selinux_enabled = 0; return 0; @@ -5752,10 +5749,7 @@ static __init int selinux_init(void) printk(KERN_INFO "SELinux: Initializing.\n"); /* Set the security state for the initial task. */ - if (cred_alloc_security(current->cred)) - panic("SELinux: Failed to initialize initial task.\n"); - tsec = current->cred->security; - tsec->osid = tsec->sid = SECINITSID_KERNEL; + cred_init_security(); sel_inode_cache = kmem_cache_create("selinux_inode_security", sizeof(struct inode_security_struct), diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 11167fd567b..e952b397153 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -104,8 +104,7 @@ static int smack_ptrace_may_access(struct task_struct *ctp, unsigned int mode) if (rc != 0) return rc; - rc = smk_access(current->cred->security, ctp->cred->security, - MAY_READWRITE); + rc = smk_access(current_security(), task_security(ctp), MAY_READWRITE); if (rc != 0 && capable(CAP_MAC_OVERRIDE)) return 0; return rc; @@ -127,8 +126,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp) if (rc != 0) return rc; - rc = smk_access(ptp->cred->security, current->cred->security, - MAY_READWRITE); + rc = smk_access(task_security(ptp), current_security(), MAY_READWRITE); if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) return 0; return rc; @@ -976,22 +974,6 @@ static int smack_file_receive(struct file *file) * Task hooks */ -/** - * smack_cred_alloc_security - "allocate" a task cred blob - * @cred: the task creds in need of a blob - * - * Smack isn't using copies of blobs. Everyone - * points to an immutable list. No alloc required. - * No data copy required. - * - * Always returns 0 - */ -static int smack_cred_alloc_security(struct cred *cred) -{ - cred->security = current_security(); - return 0; -} - /** * smack_cred_free - "free" task-level security credentials * @cred: the credentials in question @@ -1005,6 +987,30 @@ static void smack_cred_free(struct cred *cred) cred->security = NULL; } +/** + * smack_cred_prepare - prepare new set of credentials for modification + * @new: the new credentials + * @old: the original credentials + * @gfp: the atomicity of any memory allocations + * + * Prepare a new set of credentials for modification. 
+ */ +static int smack_cred_prepare(struct cred *new, const struct cred *old, + gfp_t gfp) +{ + new->security = old->security; + return 0; +} + +/* + * commit new credentials + * @new: the new credentials + * @old: the original credentials + */ +static void smack_cred_commit(struct cred *new, const struct cred *old) +{ +} + /** * smack_task_setpgid - Smack check on setting pgid * @p: the task object @@ -2036,6 +2042,7 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value) static int smack_setprocattr(struct task_struct *p, char *name, void *value, size_t size) { + struct cred *new; char *newsmack; /* @@ -2058,7 +2065,11 @@ static int smack_setprocattr(struct task_struct *p, char *name, if (newsmack == NULL) return -EINVAL; - p->cred->security = newsmack; + new = prepare_creds(); + if (!new) + return -ENOMEM; + new->security = newsmack; + commit_creds(new); return size; } @@ -2354,17 +2365,17 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb, /** * smack_key_alloc - Set the key security blob * @key: object - * @tsk: the task associated with the key + * @cred: the credentials to use * @flags: unused * * No allocation required * * Returns 0 */ -static int smack_key_alloc(struct key *key, struct task_struct *tsk, +static int smack_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { - key->security = tsk->cred->security; + key->security = cred->security; return 0; } @@ -2382,14 +2393,14 @@ static void smack_key_free(struct key *key) /* * smack_key_permission - Smack access on a key * @key_ref: gets to the object - * @context: task involved + * @cred: the credentials to use * @perm: unused * * Return 0 if the task has read and write to the object, * an error code otherwise */ static int smack_key_permission(key_ref_t key_ref, - struct task_struct *context, key_perm_t perm) + const struct cred *cred, key_perm_t perm) { struct key *keyp; @@ -2405,11 +2416,10 @@ static int smack_key_permission(key_ref_t key_ref, /* * This should not occur */ - if (context->cred->security == NULL) + if (cred->security == NULL) return -EACCES; - return smk_access(context->cred->security, keyp->security, - MAY_READWRITE); + return smk_access(cred->security, keyp->security, MAY_READWRITE); } #endif /* CONFIG_KEYS */ @@ -2580,8 +2590,7 @@ struct security_operations smack_ops = { .ptrace_may_access = smack_ptrace_may_access, .ptrace_traceme = smack_ptrace_traceme, .capget = cap_capget, - .capset_check = cap_capset_check, - .capset_set = cap_capset_set, + .capset = cap_capset, .capable = cap_capable, .syslog = smack_syslog, .settime = cap_settime, @@ -2630,9 +2639,10 @@ struct security_operations smack_ops = { .file_send_sigiotask = smack_file_send_sigiotask, .file_receive = smack_file_receive, - .cred_alloc_security = smack_cred_alloc_security, .cred_free = smack_cred_free, - .task_post_setuid = cap_task_post_setuid, + .cred_prepare = smack_cred_prepare, + .cred_commit = smack_cred_commit, + .task_fix_setuid = cap_task_fix_setuid, .task_setpgid = smack_task_setpgid, .task_getpgid = smack_task_getpgid, .task_getsid = smack_task_getsid, @@ -2645,7 +2655,6 @@ struct security_operations smack_ops = { .task_movememory = smack_task_movememory, .task_kill = smack_task_kill, .task_wait = smack_task_wait, - .task_reparent_to_init = cap_task_reparent_to_init, .task_to_inode = smack_task_to_inode, .task_prctl = cap_task_prctl, @@ -2721,6 +2730,8 @@ struct security_operations smack_ops = { */ static __init int smack_init(void) { + struct cred *cred; + if 
(!security_module_enable(&smack_ops)) return 0; @@ -2729,7 +2740,8 @@ static __init int smack_init(void) /* * Set the security state for the initial task. */ - current->cred->security = &smack_known_floor.smk_known; + cred = (struct cred *) current->cred; + cred->security = &smack_known_floor.smk_known; /* * Initialize locks -- cgit v1.2.3-70-g09d2 From a6f76f23d297f70e2a6b3ec607f7aeeea9e37e8d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:24 +1100 Subject: CRED: Make execve() take advantage of copy-on-write credentials Make execve() take advantage of copy-on-write credentials, allowing it to set up the credentials in advance, and then commit the whole lot after the point of no return. This patch and the preceding patches have been tested with the LTP SELinux testsuite. This patch makes several logical sets of alteration: (1) execve(). The credential bits from struct linux_binprm are, for the most part, replaced with a single credentials pointer (bprm->cred). This means that all the creds can be calculated in advance and then applied at the point of no return with no possibility of failure. I would like to replace bprm->cap_effective with: cap_isclear(bprm->cap_effective) but this seems impossible due to special behaviour for processes of pid 1 (they always retain their parent's capability masks where normally they'd be changed - see cap_bprm_set_creds()). The following sequence of events now happens: (a) At the start of do_execve, the current task's cred_exec_mutex is locked to prevent PTRACE_ATTACH from obsoleting the calculation of creds that we make. (a) prepare_exec_creds() is then called to make a copy of the current task's credentials and prepare it. This copy is then assigned to bprm->cred. This renders security_bprm_alloc() and security_bprm_free() unnecessary, and so they've been removed. (b) The determination of unsafe execution is now performed immediately after (a) rather than later on in the code. The result is stored in bprm->unsafe for future reference. (c) prepare_binprm() is called, possibly multiple times. (i) This applies the result of set[ug]id binaries to the new creds attached to bprm->cred. Personality bit clearance is recorded, but now deferred on the basis that the exec procedure may yet fail. (ii) This then calls the new security_bprm_set_creds(). This should calculate the new LSM and capability credentials into *bprm->cred. This folds together security_bprm_set() and parts of security_bprm_apply_creds() (these two have been removed). Anything that might fail must be done at this point. (iii) bprm->cred_prepared is set to 1. bprm->cred_prepared is 0 on the first pass of the security calculations, and 1 on all subsequent passes. This allows SELinux in (ii) to base its calculations only on the initial script and not on the interpreter. (d) flush_old_exec() is called to commit the task to execution. This performs the following steps with regard to credentials: (i) Clear pdeath_signal and set dumpable on certain circumstances that may not be covered by commit_creds(). (ii) Clear any bits in current->personality that were deferred from (c.i). (e) install_exec_creds() [compute_creds() as was] is called to install the new credentials. This performs the following steps with regard to credentials: (i) Calls security_bprm_committing_creds() to apply any security requirements, such as flushing unauthorised files in SELinux, that must be done before the credentials are changed. 
This is made up of bits of security_bprm_apply_creds() and security_bprm_post_apply_creds(), both of which have been removed. This function is not allowed to fail; anything that might fail must have been done in (c.ii). (ii) Calls commit_creds() to apply the new credentials in a single assignment (more or less). Possibly pdeath_signal and dumpable should be part of struct creds. (iii) Unlocks the task's cred_replace_mutex, thus allowing PTRACE_ATTACH to take place. (iv) Clears The bprm->cred pointer as the credentials it was holding are now immutable. (v) Calls security_bprm_committed_creds() to apply any security alterations that must be done after the creds have been changed. SELinux uses this to flush signals and signal handlers. (f) If an error occurs before (d.i), bprm_free() will call abort_creds() to destroy the proposed new credentials and will then unlock cred_replace_mutex. No changes to the credentials will have been made. (2) LSM interface. A number of functions have been changed, added or removed: (*) security_bprm_alloc(), ->bprm_alloc_security() (*) security_bprm_free(), ->bprm_free_security() Removed in favour of preparing new credentials and modifying those. (*) security_bprm_apply_creds(), ->bprm_apply_creds() (*) security_bprm_post_apply_creds(), ->bprm_post_apply_creds() Removed; split between security_bprm_set_creds(), security_bprm_committing_creds() and security_bprm_committed_creds(). (*) security_bprm_set(), ->bprm_set_security() Removed; folded into security_bprm_set_creds(). (*) security_bprm_set_creds(), ->bprm_set_creds() New. The new credentials in bprm->creds should be checked and set up as appropriate. bprm->cred_prepared is 0 on the first call, 1 on the second and subsequent calls. (*) security_bprm_committing_creds(), ->bprm_committing_creds() (*) security_bprm_committed_creds(), ->bprm_committed_creds() New. Apply the security effects of the new credentials. This includes closing unauthorised files in SELinux. This function may not fail. When the former is called, the creds haven't yet been applied to the process; when the latter is called, they have. The former may access bprm->cred, the latter may not. (3) SELinux. SELinux has a number of changes, in addition to those to support the LSM interface changes mentioned above: (a) The bprm_security_struct struct has been removed in favour of using the credentials-under-construction approach. (c) flush_unauthorized_files() now takes a cred pointer and passes it on to inode_has_perm(), file_has_perm() and dentry_open(). 
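To summarise the execve-side changes, the sequence set out in (1) above reduces to roughly the following outline. This is an illustrative sketch only, not the literal fs/exec.c code included below: file handling, argument copying and most error paths are omitted, and the outline function name is invented for illustration. The helpers it calls (prepare_exec_creds(), check_unsafe_exec(), prepare_binprm(), search_binary_handler(), free_bprm(), and, via the binfmt loaders, install_exec_creds() with the bprm security hooks) are the ones introduced or modified by this patch.

/* Illustrative outline of the do_execve() credential handling after
 * this patch; simplified for exposition, not the literal code. */
static int do_execve_cred_outline(struct linux_binprm *bprm,
                                  struct pt_regs *regs)
{
        int retval;

        /* (a) hold off PTRACE_ATTACH while the new creds are computed */
        retval = mutex_lock_interruptible(&current->cred_exec_mutex);
        if (retval < 0)
                return retval;

        /* (a) take a private copy of the caller's credentials */
        retval = -ENOMEM;
        bprm->cred = prepare_exec_creds();
        if (!bprm->cred)
                goto out;

        /* (b) record tracing/sharing state for the security modules */
        check_unsafe_exec(bprm);

        /* (c) apply set[ug]id bits and call security_bprm_set_creds();
         * this may run again for each interpreter in a script chain,
         * with bprm->cred_prepared set after the first pass */
        retval = prepare_binprm(bprm);
        if (retval < 0)
                goto out;

        /* (d) + (e) the binfmt handler calls flush_old_exec() and then
         * install_exec_creds(), which runs bprm_committing_creds(),
         * commit_creds(bprm->cred) and bprm_committed_creds() */
        retval = search_binary_handler(bprm, regs);

out:
        /* (f) free_bprm() calls abort_creds() on bprm->cred if it was
         * never committed; nothing becomes visible to other tasks
         * until commit_creds() has actually run */
        mutex_unlock(&current->cred_exec_mutex);
        free_bprm(bprm);
        return retval;
}

On success, install_exec_creds() has already committed the new credentials and cleared bprm->cred, so the final free_bprm() has nothing left to abort; on any earlier failure the proposed credentials are discarded without ever having been applied.
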
Signed-off-by: David Howells Acked-by: James Morris Acked-by: Serge Hallyn Signed-off-by: James Morris --- arch/x86/ia32/ia32_aout.c | 2 +- fs/binfmt_aout.c | 2 +- fs/binfmt_elf.c | 2 +- fs/binfmt_elf_fdpic.c | 2 +- fs/binfmt_flat.c | 2 +- fs/binfmt_som.c | 2 +- fs/compat.c | 42 +++--- fs/exec.c | 149 +++++++++++--------- fs/internal.h | 6 + include/linux/audit.h | 16 --- include/linux/binfmts.h | 16 ++- include/linux/cred.h | 3 +- include/linux/key.h | 2 - include/linux/security.h | 103 +++++--------- kernel/cred.c | 46 ++++++- security/capability.c | 19 +-- security/commoncap.c | 152 ++++++++++---------- security/keys/process_keys.c | 42 ------ security/root_plug.c | 13 +- security/security.c | 26 ++-- security/selinux/hooks.c | 283 ++++++++++++++++---------------------- security/selinux/include/objsec.h | 11 -- security/smack/smack_lsm.c | 3 +- 23 files changed, 429 insertions(+), 515 deletions(-) (limited to 'fs') diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 127ec3f0721..2a4d073d2cf 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -327,7 +327,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) current->mm->cached_hole_size = 0; current->mm->mmap = NULL; - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; if (N_MAGIC(ex) == OMAGIC) { diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 204cfd1d767..f1f3f4192a6 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -320,7 +320,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) current->mm->free_area_cache = current->mm->mmap_base; current->mm->cached_hole_size = 0; - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; #ifdef __sparc__ if (N_MAGIC(ex) == NMAGIC) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9142ff5dc8e..f458c1217c5 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -956,7 +956,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) } #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; retval = create_elf_tables(bprm, &loc->elf_ex, load_addr, interp_load_addr); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 45dabd59936..aa5b43205e3 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -404,7 +404,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, current->mm->start_stack = current->mm->start_brk + stack_size; #endif - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; if (create_elf_fdpic_tables(bprm, current->mm, &exec_params, &interp_params) < 0) diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index ccb781a6a80..7bbd5c6b372 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -880,7 +880,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs) (libinfo.lib_list[j].loaded)? 
libinfo.lib_list[j].start_data:UNLOADED_LIB; - compute_creds(bprm); + install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; set_binfmt(&flat_format); diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index 74e587a5279..08644a61616 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -255,7 +255,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) kfree(hpuxhdr); set_binfmt(&som_format); - compute_creds(bprm); + install_exec_creds(bprm); setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); create_som_tables(bprm); diff --git a/fs/compat.c b/fs/compat.c index e5f49f53850..d1ece79b641 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1393,10 +1393,20 @@ int compat_do_execve(char * filename, if (!bprm) goto out_ret; + retval = mutex_lock_interruptible(¤t->cred_exec_mutex); + if (retval < 0) + goto out_free; + + retval = -ENOMEM; + bprm->cred = prepare_exec_creds(); + if (!bprm->cred) + goto out_unlock; + check_unsafe_exec(bprm); + file = open_exec(filename); retval = PTR_ERR(file); if (IS_ERR(file)) - goto out_kfree; + goto out_unlock; sched_exec(); @@ -1410,14 +1420,10 @@ int compat_do_execve(char * filename, bprm->argc = compat_count(argv, MAX_ARG_STRINGS); if ((retval = bprm->argc) < 0) - goto out_mm; + goto out; bprm->envc = compat_count(envp, MAX_ARG_STRINGS); if ((retval = bprm->envc) < 0) - goto out_mm; - - retval = security_bprm_alloc(bprm); - if (retval) goto out; retval = prepare_binprm(bprm); @@ -1438,19 +1444,16 @@ int compat_do_execve(char * filename, goto out; retval = search_binary_handler(bprm, regs); - if (retval >= 0) { - /* execve success */ - security_bprm_free(bprm); - acct_update_integrals(current); - free_bprm(bprm); - return retval; - } + if (retval < 0) + goto out; -out: - if (bprm->security) - security_bprm_free(bprm); + /* execve succeeded */ + mutex_unlock(¤t->cred_exec_mutex); + acct_update_integrals(current); + free_bprm(bprm); + return retval; -out_mm: +out: if (bprm->mm) mmput(bprm->mm); @@ -1460,7 +1463,10 @@ out_file: fput(bprm->file); } -out_kfree: +out_unlock: + mutex_unlock(¤t->cred_exec_mutex); + +out_free: free_bprm(bprm); out_ret: diff --git a/fs/exec.c b/fs/exec.c index 9bd3559ddec..32f13e29941 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,6 +55,7 @@ #include #include #include +#include "internal.h" #ifdef __alpha__ /* for /sbin/loader handling in search_binary_handler() */ @@ -1007,15 +1008,17 @@ int flush_old_exec(struct linux_binprm * bprm) */ current->mm->task_size = TASK_SIZE; - if (bprm->e_uid != current_euid() || - bprm->e_gid != current_egid()) { - set_dumpable(current->mm, suid_dumpable); + /* install the new credentials */ + if (bprm->cred->uid != current_euid() || + bprm->cred->gid != current_egid()) { current->pdeath_signal = 0; } else if (file_permission(bprm->file, MAY_READ) || - (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) { + bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) { set_dumpable(current->mm, suid_dumpable); } + current->personality &= ~bprm->per_clear; + /* An exec changes our domain. 
We are no longer part of the thread group */ @@ -1032,13 +1035,50 @@ out: EXPORT_SYMBOL(flush_old_exec); +/* + * install the new credentials for this executable + */ +void install_exec_creds(struct linux_binprm *bprm) +{ + security_bprm_committing_creds(bprm); + + commit_creds(bprm->cred); + bprm->cred = NULL; + + /* cred_exec_mutex must be held at least to this point to prevent + * ptrace_attach() from altering our determination of the task's + * credentials; any time after this it may be unlocked */ + + security_bprm_committed_creds(bprm); +} +EXPORT_SYMBOL(install_exec_creds); + +/* + * determine how safe it is to execute the proposed program + * - the caller must hold current->cred_exec_mutex to protect against + * PTRACE_ATTACH + */ +void check_unsafe_exec(struct linux_binprm *bprm) +{ + struct task_struct *p = current; + + bprm->unsafe = tracehook_unsafe_exec(p); + + if (atomic_read(&p->fs->count) > 1 || + atomic_read(&p->files->count) > 1 || + atomic_read(&p->sighand->count) > 1) + bprm->unsafe |= LSM_UNSAFE_SHARE; +} + /* * Fill the binprm structure from the inode. * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes + * + * This may be called multiple times for binary chains (scripts for example). */ int prepare_binprm(struct linux_binprm *bprm) { - int mode; + umode_t mode; struct inode * inode = bprm->file->f_path.dentry->d_inode; int retval; @@ -1046,14 +1086,15 @@ int prepare_binprm(struct linux_binprm *bprm) if (bprm->file->f_op == NULL) return -EACCES; - bprm->e_uid = current_euid(); - bprm->e_gid = current_egid(); + /* clear any previous set[ug]id data from a previous binary */ + bprm->cred->euid = current_euid(); + bprm->cred->egid = current_egid(); - if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { + if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) { /* Set-uid? */ if (mode & S_ISUID) { - current->personality &= ~PER_CLEAR_ON_SETID; - bprm->e_uid = inode->i_uid; + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->euid = inode->i_uid; } /* Set-gid? */ @@ -1063,50 +1104,23 @@ int prepare_binprm(struct linux_binprm *bprm) * executable. 
*/ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) { - current->personality &= ~PER_CLEAR_ON_SETID; - bprm->e_gid = inode->i_gid; + bprm->per_clear |= PER_CLEAR_ON_SETID; + bprm->cred->egid = inode->i_gid; } } /* fill in binprm security blob */ - retval = security_bprm_set(bprm); + retval = security_bprm_set_creds(bprm); if (retval) return retval; + bprm->cred_prepared = 1; - memset(bprm->buf,0,BINPRM_BUF_SIZE); - return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE); + memset(bprm->buf, 0, BINPRM_BUF_SIZE); + return kernel_read(bprm->file, 0, bprm->buf, BINPRM_BUF_SIZE); } EXPORT_SYMBOL(prepare_binprm); -static int unsafe_exec(struct task_struct *p) -{ - int unsafe = tracehook_unsafe_exec(p); - - if (atomic_read(&p->fs->count) > 1 || - atomic_read(&p->files->count) > 1 || - atomic_read(&p->sighand->count) > 1) - unsafe |= LSM_UNSAFE_SHARE; - - return unsafe; -} - -void compute_creds(struct linux_binprm *bprm) -{ - int unsafe; - - if (bprm->e_uid != current_uid()) - current->pdeath_signal = 0; - exec_keys(current); - - task_lock(current); - unsafe = unsafe_exec(current); - security_bprm_apply_creds(bprm, unsafe); - task_unlock(current); - security_bprm_post_apply_creds(bprm); -} -EXPORT_SYMBOL(compute_creds); - /* * Arguments are '\0' separated strings found at the location bprm->p * points to; chop off the first by relocating brpm->p to right after @@ -1259,6 +1273,8 @@ EXPORT_SYMBOL(search_binary_handler); void free_bprm(struct linux_binprm *bprm) { free_arg_pages(bprm); + if (bprm->cred) + abort_creds(bprm->cred); kfree(bprm); } @@ -1284,10 +1300,20 @@ int do_execve(char * filename, if (!bprm) goto out_files; + retval = mutex_lock_interruptible(¤t->cred_exec_mutex); + if (retval < 0) + goto out_free; + + retval = -ENOMEM; + bprm->cred = prepare_exec_creds(); + if (!bprm->cred) + goto out_unlock; + check_unsafe_exec(bprm); + file = open_exec(filename); retval = PTR_ERR(file); if (IS_ERR(file)) - goto out_kfree; + goto out_unlock; sched_exec(); @@ -1301,14 +1327,10 @@ int do_execve(char * filename, bprm->argc = count(argv, MAX_ARG_STRINGS); if ((retval = bprm->argc) < 0) - goto out_mm; + goto out; bprm->envc = count(envp, MAX_ARG_STRINGS); if ((retval = bprm->envc) < 0) - goto out_mm; - - retval = security_bprm_alloc(bprm); - if (retval) goto out; retval = prepare_binprm(bprm); @@ -1330,21 +1352,18 @@ int do_execve(char * filename, current->flags &= ~PF_KTHREAD; retval = search_binary_handler(bprm,regs); - if (retval >= 0) { - /* execve success */ - security_bprm_free(bprm); - acct_update_integrals(current); - free_bprm(bprm); - if (displaced) - put_files_struct(displaced); - return retval; - } + if (retval < 0) + goto out; -out: - if (bprm->security) - security_bprm_free(bprm); + /* execve succeeded */ + mutex_unlock(¤t->cred_exec_mutex); + acct_update_integrals(current); + free_bprm(bprm); + if (displaced) + put_files_struct(displaced); + return retval; -out_mm: +out: if (bprm->mm) mmput (bprm->mm); @@ -1353,7 +1372,11 @@ out_file: allow_write_access(bprm->file); fput(bprm->file); } -out_kfree: + +out_unlock: + mutex_unlock(¤t->cred_exec_mutex); + +out_free: free_bprm(bprm); out_files: diff --git a/fs/internal.h b/fs/internal.h index 80aa9a02337..53af885f173 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -10,6 +10,7 @@ */ struct super_block; +struct linux_binprm; /* * block_dev.c @@ -39,6 +40,11 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) */ extern void __init chrdev_init(void); +/* + * exec.c + */ +extern void check_unsafe_exec(struct linux_binprm *); 
+ /* * namespace.c */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 0b2fcb698a6..e8ce2c4c7ac 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -508,22 +508,6 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat) return 0; } -/* - * ieieeeeee, an audit function without a return code! - * - * This function might fail! I decided that it didn't matter. We are too late - * to fail the syscall and the information isn't REQUIRED for any purpose. It's - * just nice to have. We should be able to look at past audit logs to figure - * out this process's current cap set along with the fcaps from the PATH record - * and use that to come up with the final set. Yeah, its ugly, but all the info - * is still in the audit log. So I'm not going to bother mentioning we failed - * if we couldn't allocate memory. - * - * If someone changes their mind they could create the aux record earlier and - * then search here and use that earlier allocation. But I don't wanna. - * - * -Eric - */ static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm, const struct cred *new, const struct cred *old) diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 7394b5b349f..6cbfbe29718 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -35,16 +35,20 @@ struct linux_binprm{ struct mm_struct *mm; unsigned long p; /* current top of mem */ unsigned int sh_bang:1, - misc_bang:1; + misc_bang:1, + cred_prepared:1,/* true if creds already prepared (multiple + * preps happen for interpreters) */ + cap_effective:1;/* true if has elevated effective capabilities, + * false if not; except for init which inherits + * its parent's caps anyway */ #ifdef __alpha__ unsigned int taso:1; #endif unsigned int recursion_depth; struct file * file; - int e_uid, e_gid; - kernel_cap_t cap_post_exec_permitted; - bool cap_effective; - void *security; + struct cred *cred; /* new credentials */ + int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ + unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; char * filename; /* Name of binary as seen by procps */ char * interp; /* Name of the binary really executed. 
Most @@ -101,7 +105,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, int executable_stack); extern int bprm_mm_init(struct linux_binprm *bprm); extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); -extern void compute_creds(struct linux_binprm *binprm); +extern void install_exec_creds(struct linux_binprm *bprm); extern int do_coredump(long signr, int exit_code, struct pt_regs * regs); extern int set_binfmt(struct linux_binfmt *new); extern void free_bprm(struct linux_binprm *); diff --git a/include/linux/cred.h b/include/linux/cred.h index eaf6fa695a0..8edb4d1d542 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -84,8 +84,6 @@ struct thread_group_cred { struct key *process_keyring; /* keyring private to this process */ struct rcu_head rcu; /* RCU deletion hook */ }; - -extern void release_tgcred(struct cred *cred); #endif /* @@ -144,6 +142,7 @@ struct cred { extern void __put_cred(struct cred *); extern int copy_creds(struct task_struct *, unsigned long); extern struct cred *prepare_creds(void); +extern struct cred *prepare_exec_creds(void); extern struct cred *prepare_usermodehelper_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); diff --git a/include/linux/key.h b/include/linux/key.h index 69ecf0934b0..21d32a142c0 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -278,7 +278,6 @@ extern ctl_table key_sysctls[]; * the userspace interface */ extern int install_thread_keyring_to_cred(struct cred *cred); -extern int exec_keys(struct task_struct *tsk); extern void key_fsuid_changed(struct task_struct *tsk); extern void key_fsgid_changed(struct task_struct *tsk); extern void key_init(void); @@ -294,7 +293,6 @@ extern void key_init(void); #define make_key_ref(k, p) NULL #define key_ref_to_ptr(k) NULL #define is_key_possessed(k) 0 -#define exec_keys(t) do { } while(0) #define key_fsuid_changed(t) do { } while(0) #define key_fsgid_changed(t) do { } while(0) #define key_init() do { } while(0) diff --git a/include/linux/security.h b/include/linux/security.h index 68be1125144..56a0eed6567 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -57,8 +57,7 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_set_security(struct linux_binprm *bprm); -extern int cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); +extern int cap_bprm_set_creds(struct linux_binprm *bprm); extern int cap_bprm_secureexec(struct linux_binprm *bprm); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); @@ -110,7 +109,7 @@ extern unsigned long mmap_min_addr; struct sched_param; struct request_sock; -/* bprm_apply_creds unsafe reasons */ +/* bprm->unsafe reasons */ #define LSM_UNSAFE_SHARE 1 #define LSM_UNSAFE_PTRACE 2 #define LSM_UNSAFE_PTRACE_CAP 4 @@ -154,36 +153,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * * Security hooks for program execution operations. * - * @bprm_alloc_security: - * Allocate and attach a security structure to the @bprm->security field. - * The security field is initialized to NULL when the bprm structure is - * allocated. - * @bprm contains the linux_binprm structure to be modified. - * Return 0 if operation was successful. - * @bprm_free_security: - * @bprm contains the linux_binprm structure to be modified. 
- * Deallocate and clear the @bprm->security field. - * @bprm_apply_creds: - * Compute and set the security attributes of a process being transformed - * by an execve operation based on the old attributes (current->security) - * and the information saved in @bprm->security by the set_security hook. - * Since this function may return an error, in which case the process will - * be killed. However, it can leave the security attributes of the - * process unchanged if an access failure occurs at this point. - * bprm_apply_creds is called under task_lock. @unsafe indicates various - * reasons why it may be unsafe to change security state. - * @bprm contains the linux_binprm structure. - * @bprm_post_apply_creds: - * Runs after bprm_apply_creds with the task_lock dropped, so that - * functions which cannot be called safely under the task_lock can - * be used. This hook is a good place to perform state changes on - * the process such as closing open file descriptors to which access - * is no longer granted if the attributes were changed. - * Note that a security module might need to save state between - * bprm_apply_creds and bprm_post_apply_creds to store the decision - * on whether the process may proceed. - * @bprm contains the linux_binprm structure. - * @bprm_set_security: + * @bprm_set_creds: * Save security information in the bprm->security field, typically based * on information about the bprm->file, for later use by the apply_creds * hook. This hook may also optionally check permissions (e.g. for @@ -196,15 +166,30 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: - * This hook mediates the point when a search for a binary handler will - * begin. It allows a check the @bprm->security value which is set in - * the preceding set_security call. The primary difference from - * set_security is that the argv list and envp list are reliably - * available in @bprm. This hook may be called multiple times - * during a single execve; and in each pass set_security is called - * first. + * This hook mediates the point when a search for a binary handler will + * begin. It allows a check the @bprm->security value which is set in the + * preceding set_creds call. The primary difference from set_creds is + * that the argv list and envp list are reliably available in @bprm. This + * hook may be called multiple times during a single execve; and in each + * pass set_creds is called first. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. + * @bprm_committing_creds: + * Prepare to install the new security attributes of a process being + * transformed by an execve operation, based on the old credentials + * pointed to by @current->cred and the information set in @bprm->cred by + * the bprm_set_creds hook. @bprm points to the linux_binprm structure. + * This hook is a good place to perform state changes on the process such + * as closing open file descriptors to which access will no longer be + * granted when the attributes are changed. This is called immediately + * before commit_creds(). + * @bprm_committed_creds: + * Tidy up after the installation of the new security attributes of a + * process being transformed by an execve operation. The new credentials + * have, by this point, been set to @current->cred. @bprm points to the + * linux_binprm structure. 
This hook is a good place to perform state + * changes on the process such as clearing out non-inheritable signal + * state. This is called immediately after commit_creds(). * @bprm_secureexec: * Return a boolean value (0 or 1) indicating whether a "secure exec" * is required. The flag is passed in the auxiliary table @@ -1301,13 +1286,11 @@ struct security_operations { int (*settime) (struct timespec *ts, struct timezone *tz); int (*vm_enough_memory) (struct mm_struct *mm, long pages); - int (*bprm_alloc_security) (struct linux_binprm *bprm); - void (*bprm_free_security) (struct linux_binprm *bprm); - int (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe); - void (*bprm_post_apply_creds) (struct linux_binprm *bprm); - int (*bprm_set_security) (struct linux_binprm *bprm); + int (*bprm_set_creds) (struct linux_binprm *bprm); int (*bprm_check_security) (struct linux_binprm *bprm); int (*bprm_secureexec) (struct linux_binprm *bprm); + void (*bprm_committing_creds) (struct linux_binprm *bprm); + void (*bprm_committed_creds) (struct linux_binprm *bprm); int (*sb_alloc_security) (struct super_block *sb); void (*sb_free_security) (struct super_block *sb); @@ -1569,12 +1552,10 @@ int security_settime(struct timespec *ts, struct timezone *tz); int security_vm_enough_memory(long pages); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); int security_vm_enough_memory_kern(long pages); -int security_bprm_alloc(struct linux_binprm *bprm); -void security_bprm_free(struct linux_binprm *bprm); -int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); -void security_bprm_post_apply_creds(struct linux_binprm *bprm); -int security_bprm_set(struct linux_binprm *bprm); +int security_bprm_set_creds(struct linux_binprm *bprm); int security_bprm_check(struct linux_binprm *bprm); +void security_bprm_committing_creds(struct linux_binprm *bprm); +void security_bprm_committed_creds(struct linux_binprm *bprm); int security_bprm_secureexec(struct linux_binprm *bprm); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); @@ -1812,32 +1793,22 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) return cap_vm_enough_memory(mm, pages); } -static inline int security_bprm_alloc(struct linux_binprm *bprm) -{ - return 0; -} - -static inline void security_bprm_free(struct linux_binprm *bprm) -{ } - -static inline int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +static inline int security_bprm_set_creds(struct linux_binprm *bprm) { - return cap_bprm_apply_creds(bprm, unsafe); + return cap_bprm_set_creds(bprm); } -static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm) +static inline int security_bprm_check(struct linux_binprm *bprm) { - return; + return 0; } -static inline int security_bprm_set(struct linux_binprm *bprm) +static inline void security_bprm_committing_creds(struct linux_binprm *bprm) { - return cap_bprm_set_security(bprm); } -static inline int security_bprm_check(struct linux_binprm *bprm) +static inline void security_bprm_committed_creds(struct linux_binprm *bprm) { - return 0; } static inline int security_bprm_secureexec(struct linux_binprm *bprm) diff --git a/kernel/cred.c b/kernel/cred.c index cb6b5eda978..e6fcdd67b2e 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -68,7 +68,7 @@ static void release_tgcred_rcu(struct rcu_head *rcu) /* * Release a set of thread group credentials. 
*/ -void release_tgcred(struct cred *cred) +static void release_tgcred(struct cred *cred) { #ifdef CONFIG_KEYS struct thread_group_cred *tgcred = cred->tgcred; @@ -163,6 +163,50 @@ error: } EXPORT_SYMBOL(prepare_creds); +/* + * Prepare credentials for current to perform an execve() + * - The caller must hold current->cred_exec_mutex + */ +struct cred *prepare_exec_creds(void) +{ + struct thread_group_cred *tgcred = NULL; + struct cred *new; + +#ifdef CONFIG_KEYS + tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); + if (!tgcred) + return NULL; +#endif + + new = prepare_creds(); + if (!new) { + kfree(tgcred); + return new; + } + +#ifdef CONFIG_KEYS + /* newly exec'd tasks don't get a thread keyring */ + key_put(new->thread_keyring); + new->thread_keyring = NULL; + + /* create a new per-thread-group creds for all this set of threads to + * share */ + memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); + + atomic_set(&tgcred->usage, 1); + spin_lock_init(&tgcred->lock); + + /* inherit the session keyring; new process keyring */ + key_get(tgcred->session_keyring); + tgcred->process_keyring = NULL; + + release_tgcred(new); + new->tgcred = tgcred; +#endif + + return new; +} + /* * prepare new credentials for the usermode helper dispatcher */ diff --git a/security/capability.c b/security/capability.c index efeb6d9e0e6..185804f99ad 100644 --- a/security/capability.c +++ b/security/capability.c @@ -32,24 +32,19 @@ static int cap_quota_on(struct dentry *dentry) return 0; } -static int cap_bprm_alloc_security(struct linux_binprm *bprm) +static int cap_bprm_check_security (struct linux_binprm *bprm) { return 0; } -static void cap_bprm_free_security(struct linux_binprm *bprm) +static void cap_bprm_committing_creds(struct linux_binprm *bprm) { } -static void cap_bprm_post_apply_creds(struct linux_binprm *bprm) +static void cap_bprm_committed_creds(struct linux_binprm *bprm) { } -static int cap_bprm_check_security(struct linux_binprm *bprm) -{ - return 0; -} - static int cap_sb_alloc_security(struct super_block *sb) { return 0; @@ -827,11 +822,9 @@ void security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, syslog); set_to_cap_if_null(ops, settime); set_to_cap_if_null(ops, vm_enough_memory); - set_to_cap_if_null(ops, bprm_alloc_security); - set_to_cap_if_null(ops, bprm_free_security); - set_to_cap_if_null(ops, bprm_apply_creds); - set_to_cap_if_null(ops, bprm_post_apply_creds); - set_to_cap_if_null(ops, bprm_set_security); + set_to_cap_if_null(ops, bprm_set_creds); + set_to_cap_if_null(ops, bprm_committing_creds); + set_to_cap_if_null(ops, bprm_committed_creds); set_to_cap_if_null(ops, bprm_check_security); set_to_cap_if_null(ops, bprm_secureexec); set_to_cap_if_null(ops, sb_alloc_security); diff --git a/security/commoncap.c b/security/commoncap.c index b5419273f92..51dfa11e8e5 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -167,7 +167,7 @@ int cap_capset(struct cred *new, static inline void bprm_clear_caps(struct linux_binprm *bprm) { - cap_clear(bprm->cap_post_exec_permitted); + cap_clear(bprm->cred->cap_permitted); bprm->cap_effective = false; } @@ -198,15 +198,15 @@ int cap_inode_killpriv(struct dentry *dentry) } static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, - struct linux_binprm *bprm) + struct linux_binprm *bprm, + bool *effective) { + struct cred *new = bprm->cred; unsigned i; int ret = 0; if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE) - bprm->cap_effective = true; - else - bprm->cap_effective = false; + *effective = true; 
CAP_FOR_EACH_U32(i) { __u32 permitted = caps->permitted.cap[i]; @@ -215,16 +215,13 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, /* * pP' = (X & fP) | (pI & fI) */ - bprm->cap_post_exec_permitted.cap[i] = - (current->cred->cap_bset.cap[i] & permitted) | - (current->cred->cap_inheritable.cap[i] & inheritable); + new->cap_permitted.cap[i] = + (new->cap_bset.cap[i] & permitted) | + (new->cap_inheritable.cap[i] & inheritable); - if (permitted & ~bprm->cap_post_exec_permitted.cap[i]) { - /* - * insufficient to execute correctly - */ + if (permitted & ~new->cap_permitted.cap[i]) + /* insufficient to execute correctly */ ret = -EPERM; - } } /* @@ -232,7 +229,7 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, * do not have enough capabilities, we return an error if they are * missing some "forced" (aka file-permitted) capabilities. */ - return bprm->cap_effective ? ret : 0; + return *effective ? ret : 0; } int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps) @@ -250,10 +247,9 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data size = inode->i_op->getxattr((struct dentry *)dentry, XATTR_NAME_CAPS, &caps, XATTR_CAPS_SZ); - if (size == -ENODATA || size == -EOPNOTSUPP) { + if (size == -ENODATA || size == -EOPNOTSUPP) /* no data, that's ok */ return -ENODATA; - } if (size < 0) return size; @@ -262,7 +258,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data cpu_caps->magic_etc = magic_etc = le32_to_cpu(caps.magic_etc); - switch ((magic_etc & VFS_CAP_REVISION_MASK)) { + switch (magic_etc & VFS_CAP_REVISION_MASK) { case VFS_CAP_REVISION_1: if (size != XATTR_CAPS_SZ_1) return -EINVAL; @@ -283,11 +279,12 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data cpu_caps->permitted.cap[i] = le32_to_cpu(caps.data[i].permitted); cpu_caps->inheritable.cap[i] = le32_to_cpu(caps.data[i].inheritable); } + return 0; } /* Locate any VFS capabilities: */ -static int get_file_caps(struct linux_binprm *bprm) +static int get_file_caps(struct linux_binprm *bprm, bool *effective) { struct dentry *dentry; int rc = 0; @@ -313,7 +310,10 @@ static int get_file_caps(struct linux_binprm *bprm) goto out; } - rc = bprm_caps_from_vfs_caps(&vcaps, bprm); + rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective); + if (rc == -EINVAL) + printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", + __func__, rc, bprm->filename); out: dput(dentry); @@ -334,18 +334,27 @@ int cap_inode_killpriv(struct dentry *dentry) return 0; } -static inline int get_file_caps(struct linux_binprm *bprm) +static inline int get_file_caps(struct linux_binprm *bprm, bool *effective) { bprm_clear_caps(bprm); return 0; } #endif -int cap_bprm_set_security (struct linux_binprm *bprm) +/* + * set up the new credentials for an exec'd task + */ +int cap_bprm_set_creds(struct linux_binprm *bprm) { + const struct cred *old = current_cred(); + struct cred *new = bprm->cred; + bool effective; int ret; - ret = get_file_caps(bprm); + effective = false; + ret = get_file_caps(bprm, &effective); + if (ret < 0) + return ret; if (!issecure(SECURE_NOROOT)) { /* @@ -353,63 +362,47 @@ int cap_bprm_set_security (struct linux_binprm *bprm) * executables under compatibility mode, we override the * capability sets for the file. * - * If only the real uid is 0, we do not set the effective - * bit. + * If only the real uid is 0, we do not set the effective bit. 
*/ - if (bprm->e_uid == 0 || current_uid() == 0) { + if (new->euid == 0 || new->uid == 0) { /* pP' = (cap_bset & ~0) | (pI & ~0) */ - bprm->cap_post_exec_permitted = cap_combine( - current->cred->cap_bset, - current->cred->cap_inheritable); - bprm->cap_effective = (bprm->e_uid == 0); - ret = 0; + new->cap_permitted = cap_combine(old->cap_bset, + old->cap_inheritable); } + if (new->euid == 0) + effective = true; } - return ret; -} - -int cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) -{ - const struct cred *old = current_cred(); - struct cred *new; - - new = prepare_creds(); - if (!new) - return -ENOMEM; - - if (bprm->e_uid != old->uid || bprm->e_gid != old->gid || - !cap_issubset(bprm->cap_post_exec_permitted, - old->cap_permitted)) { - set_dumpable(current->mm, suid_dumpable); - current->pdeath_signal = 0; - - if (unsafe & ~LSM_UNSAFE_PTRACE_CAP) { - if (!capable(CAP_SETUID)) { - bprm->e_uid = old->uid; - bprm->e_gid = old->gid; - } - if (cap_limit_ptraced_target()) { - bprm->cap_post_exec_permitted = cap_intersect( - bprm->cap_post_exec_permitted, - new->cap_permitted); - } + /* Don't let someone trace a set[ug]id/setpcap binary with the revised + * credentials unless they have the appropriate permit + */ + if ((new->euid != old->uid || + new->egid != old->gid || + !cap_issubset(new->cap_permitted, old->cap_permitted)) && + bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) { + /* downgrade; they get no more than they had, and maybe less */ + if (!capable(CAP_SETUID)) { + new->euid = new->uid; + new->egid = new->gid; } + if (cap_limit_ptraced_target()) + new->cap_permitted = cap_intersect(new->cap_permitted, + old->cap_permitted); } - new->suid = new->euid = new->fsuid = bprm->e_uid; - new->sgid = new->egid = new->fsgid = bprm->e_gid; + new->suid = new->fsuid = new->euid; + new->sgid = new->fsgid = new->egid; - /* For init, we want to retain the capabilities set - * in the init_task struct. Thus we skip the usual - * capability rules */ + /* For init, we want to retain the capabilities set in the initial + * task. 
Thus we skip the usual capability rules + */ if (!is_global_init(current)) { - new->cap_permitted = bprm->cap_post_exec_permitted; - if (bprm->cap_effective) - new->cap_effective = bprm->cap_post_exec_permitted; + if (effective) + new->cap_effective = new->cap_permitted; else cap_clear(new->cap_effective); } + bprm->cap_effective = effective; /* * Audit candidate if current->cap_effective is set @@ -425,23 +418,31 @@ int cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe) */ if (!cap_isclear(new->cap_effective)) { if (!cap_issubset(CAP_FULL_SET, new->cap_effective) || - bprm->e_uid != 0 || new->uid != 0 || - issecure(SECURE_NOROOT)) - audit_log_bprm_fcaps(bprm, new, old); + new->euid != 0 || new->uid != 0 || + issecure(SECURE_NOROOT)) { + ret = audit_log_bprm_fcaps(bprm, new, old); + if (ret < 0) + return ret; + } } new->securebits &= ~issecure_mask(SECURE_KEEP_CAPS); - return commit_creds(new); + return 0; } -int cap_bprm_secureexec (struct linux_binprm *bprm) +/* + * determine whether a secure execution is required + * - the creds have been committed at this point, and are no longer available + * through bprm + */ +int cap_bprm_secureexec(struct linux_binprm *bprm) { const struct cred *cred = current_cred(); if (cred->uid != 0) { if (bprm->cap_effective) return 1; - if (!cap_isclear(bprm->cap_post_exec_permitted)) + if (!cap_isclear(cred->cap_permitted)) return 1; } @@ -477,7 +478,7 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name) } /* moved from kernel/sys.c. */ -/* +/* * cap_emulate_setxuid() fixes the effective / permitted capabilities of * a process after a call to setuid, setreuid, or setresuid. * @@ -491,10 +492,10 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name) * 3) When set*uiding _from_ euid != 0 _to_ euid == 0, the effective * capabilities are set to the permitted capabilities. * - * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should + * fsuid is handled elsewhere. fsuid == 0 and {r,e,s}uid!= 0 should * never happen. 
* - * -astor + * -astor * * cevans - New behaviour, Oct '99 * A process may, via prctl(), elect to keep its capabilities when it @@ -751,4 +752,3 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages) cap_sys_admin = 1; return __vm_enough_memory(mm, pages, cap_sys_admin); } - diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index df329f684a6..2f5d89e92b8 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -274,48 +274,6 @@ static int install_session_keyring(struct key *keyring) return commit_creds(new); } -/*****************************************************************************/ -/* - * deal with execve() - */ -int exec_keys(struct task_struct *tsk) -{ - struct thread_group_cred *tgcred = NULL; - struct cred *new; - -#ifdef CONFIG_KEYS - tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); - if (!tgcred) - return -ENOMEM; -#endif - - new = prepare_creds(); - if (new < 0) - return -ENOMEM; - - /* newly exec'd tasks don't get a thread keyring */ - key_put(new->thread_keyring); - new->thread_keyring = NULL; - - /* create a new per-thread-group creds for all this set of threads to - * share */ - memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); - - atomic_set(&tgcred->usage, 1); - spin_lock_init(&tgcred->lock); - - /* inherit the session keyring; new process keyring */ - key_get(tgcred->session_keyring); - tgcred->process_keyring = NULL; - - release_tgcred(new); - new->tgcred = tgcred; - - commit_creds(new); - return 0; - -} /* end exec_keys() */ - /*****************************************************************************/ /* * the filesystem user ID changed diff --git a/security/root_plug.c b/security/root_plug.c index c3f68b5b372..40fb4f15e27 100644 --- a/security/root_plug.c +++ b/security/root_plug.c @@ -55,9 +55,9 @@ static int rootplug_bprm_check_security (struct linux_binprm *bprm) struct usb_device *dev; root_dbg("file %s, e_uid = %d, e_gid = %d\n", - bprm->filename, bprm->e_uid, bprm->e_gid); + bprm->filename, bprm->cred->euid, bprm->cred->egid); - if (bprm->e_gid == 0) { + if (bprm->cred->egid == 0) { dev = usb_find_device(vendor_id, product_id); if (!dev) { root_dbg("e_gid = 0, and device not found, " @@ -75,15 +75,12 @@ static struct security_operations rootplug_security_ops = { .ptrace_may_access = cap_ptrace_may_access, .ptrace_traceme = cap_ptrace_traceme, .capget = cap_capget, - .capset_check = cap_capset_check, - .capset_set = cap_capset_set, + .capset = cap_capset, .capable = cap_capable, - .bprm_apply_creds = cap_bprm_apply_creds, - .bprm_set_security = cap_bprm_set_security, + .bprm_set_creds = cap_bprm_set_creds, - .task_post_setuid = cap_task_post_setuid, - .task_reparent_to_init = cap_task_reparent_to_init, + .task_fix_setuid = cap_task_fix_setuid, .task_prctl = cap_task_prctl, .bprm_check_security = rootplug_bprm_check_security, diff --git a/security/security.c b/security/security.c index a55d739c686..dc5babb2d6d 100644 --- a/security/security.c +++ b/security/security.c @@ -213,34 +213,24 @@ int security_vm_enough_memory_kern(long pages) return security_ops->vm_enough_memory(current->mm, pages); } -int security_bprm_alloc(struct linux_binprm *bprm) +int security_bprm_set_creds(struct linux_binprm *bprm) { - return security_ops->bprm_alloc_security(bprm); + return security_ops->bprm_set_creds(bprm); } -void security_bprm_free(struct linux_binprm *bprm) -{ - security_ops->bprm_free_security(bprm); -} - -int security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) -{ - return 
security_ops->bprm_apply_creds(bprm, unsafe); -} - -void security_bprm_post_apply_creds(struct linux_binprm *bprm) +int security_bprm_check(struct linux_binprm *bprm) { - security_ops->bprm_post_apply_creds(bprm); + return security_ops->bprm_check_security(bprm); } -int security_bprm_set(struct linux_binprm *bprm) +void security_bprm_committing_creds(struct linux_binprm *bprm) { - return security_ops->bprm_set_security(bprm); + return security_ops->bprm_committing_creds(bprm); } -int security_bprm_check(struct linux_binprm *bprm) +void security_bprm_committed_creds(struct linux_binprm *bprm) { - return security_ops->bprm_check_security(bprm); + return security_ops->bprm_committed_creds(bprm); } int security_bprm_secureexec(struct linux_binprm *bprm) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c71bba78872..21a59218463 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2029,59 +2029,45 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages) /* binprm security operations */ -static int selinux_bprm_alloc_security(struct linux_binprm *bprm) +static int selinux_bprm_set_creds(struct linux_binprm *bprm) { - struct bprm_security_struct *bsec; - - bsec = kzalloc(sizeof(struct bprm_security_struct), GFP_KERNEL); - if (!bsec) - return -ENOMEM; - - bsec->sid = SECINITSID_UNLABELED; - bsec->set = 0; - - bprm->security = bsec; - return 0; -} - -static int selinux_bprm_set_security(struct linux_binprm *bprm) -{ - struct task_security_struct *tsec; - struct inode *inode = bprm->file->f_path.dentry->d_inode; + const struct task_security_struct *old_tsec; + struct task_security_struct *new_tsec; struct inode_security_struct *isec; - struct bprm_security_struct *bsec; - u32 newsid; struct avc_audit_data ad; + struct inode *inode = bprm->file->f_path.dentry->d_inode; int rc; - rc = secondary_ops->bprm_set_security(bprm); + rc = secondary_ops->bprm_set_creds(bprm); if (rc) return rc; - bsec = bprm->security; - - if (bsec->set) + /* SELinux context only depends on initial program or script and not + * the script interpreter */ + if (bprm->cred_prepared) return 0; - tsec = current_security(); + old_tsec = current_security(); + new_tsec = bprm->cred->security; isec = inode->i_security; /* Default to the current task SID. */ - bsec->sid = tsec->sid; + new_tsec->sid = old_tsec->sid; + new_tsec->osid = old_tsec->sid; /* Reset fs, key, and sock SIDs on execve. */ - tsec->create_sid = 0; - tsec->keycreate_sid = 0; - tsec->sockcreate_sid = 0; + new_tsec->create_sid = 0; + new_tsec->keycreate_sid = 0; + new_tsec->sockcreate_sid = 0; - if (tsec->exec_sid) { - newsid = tsec->exec_sid; + if (old_tsec->exec_sid) { + new_tsec->sid = old_tsec->exec_sid; /* Reset exec SID on execve. */ - tsec->exec_sid = 0; + new_tsec->exec_sid = 0; } else { /* Check for a default transition on this program. 
*/ - rc = security_transition_sid(tsec->sid, isec->sid, - SECCLASS_PROCESS, &newsid); + rc = security_transition_sid(old_tsec->sid, isec->sid, + SECCLASS_PROCESS, &new_tsec->sid); if (rc) return rc; } @@ -2090,33 +2076,63 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm) ad.u.fs.path = bprm->file->f_path; if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) - newsid = tsec->sid; + new_tsec->sid = old_tsec->sid; - if (tsec->sid == newsid) { - rc = avc_has_perm(tsec->sid, isec->sid, + if (new_tsec->sid == old_tsec->sid) { + rc = avc_has_perm(old_tsec->sid, isec->sid, SECCLASS_FILE, FILE__EXECUTE_NO_TRANS, &ad); if (rc) return rc; } else { /* Check permissions for the transition. */ - rc = avc_has_perm(tsec->sid, newsid, + rc = avc_has_perm(old_tsec->sid, new_tsec->sid, SECCLASS_PROCESS, PROCESS__TRANSITION, &ad); if (rc) return rc; - rc = avc_has_perm(newsid, isec->sid, + rc = avc_has_perm(new_tsec->sid, isec->sid, SECCLASS_FILE, FILE__ENTRYPOINT, &ad); if (rc) return rc; - /* Clear any possibly unsafe personality bits on exec: */ - current->personality &= ~PER_CLEAR_ON_SETID; + /* Check for shared state */ + if (bprm->unsafe & LSM_UNSAFE_SHARE) { + rc = avc_has_perm(old_tsec->sid, new_tsec->sid, + SECCLASS_PROCESS, PROCESS__SHARE, + NULL); + if (rc) + return -EPERM; + } + + /* Make sure that anyone attempting to ptrace over a task that + * changes its SID has the appropriate permit */ + if (bprm->unsafe & + (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { + struct task_struct *tracer; + struct task_security_struct *sec; + u32 ptsid = 0; + + rcu_read_lock(); + tracer = tracehook_tracer_task(current); + if (likely(tracer != NULL)) { + sec = __task_cred(tracer)->security; + ptsid = sec->sid; + } + rcu_read_unlock(); + + if (ptsid != 0) { + rc = avc_has_perm(ptsid, new_tsec->sid, + SECCLASS_PROCESS, + PROCESS__PTRACE, NULL); + if (rc) + return -EPERM; + } + } - /* Set the security field to the new SID. */ - bsec->sid = newsid; + /* Clear any possibly unsafe personality bits on exec: */ + bprm->per_clear |= PER_CLEAR_ON_SETID; } - bsec->set = 1; return 0; } @@ -2125,7 +2141,6 @@ static int selinux_bprm_check_security(struct linux_binprm *bprm) return secondary_ops->bprm_check_security(bprm); } - static int selinux_bprm_secureexec(struct linux_binprm *bprm) { const struct cred *cred = current_cred(); @@ -2141,19 +2156,13 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm) the noatsecure permission is granted between the two SIDs, i.e. ahp returns 0. 
*/ atsecure = avc_has_perm(osid, sid, - SECCLASS_PROCESS, - PROCESS__NOATSECURE, NULL); + SECCLASS_PROCESS, + PROCESS__NOATSECURE, NULL); } return (atsecure || secondary_ops->bprm_secureexec(bprm)); } -static void selinux_bprm_free_security(struct linux_binprm *bprm) -{ - kfree(bprm->security); - bprm->security = NULL; -} - extern struct vfsmount *selinuxfs_mount; extern struct dentry *selinux_null; @@ -2252,108 +2261,78 @@ static inline void flush_unauthorized_files(const struct cred *cred, spin_unlock(&files->file_lock); } -static int selinux_bprm_apply_creds(struct linux_binprm *bprm, int unsafe) +/* + * Prepare a process for imminent new credential changes due to exec + */ +static void selinux_bprm_committing_creds(struct linux_binprm *bprm) { - struct task_security_struct *tsec; - struct bprm_security_struct *bsec; - struct cred *new; - u32 sid; - int rc; - - rc = secondary_ops->bprm_apply_creds(bprm, unsafe); - if (rc < 0) - return rc; - - new = prepare_creds(); - if (!new) - return -ENOMEM; + struct task_security_struct *new_tsec; + struct rlimit *rlim, *initrlim; + int rc, i; - tsec = new->security; + secondary_ops->bprm_committing_creds(bprm); - bsec = bprm->security; - sid = bsec->sid; - - tsec->osid = tsec->sid; - bsec->unsafe = 0; - if (tsec->sid != sid) { - /* Check for shared state. If not ok, leave SID - unchanged and kill. */ - if (unsafe & LSM_UNSAFE_SHARE) { - rc = avc_has_perm(tsec->sid, sid, SECCLASS_PROCESS, - PROCESS__SHARE, NULL); - if (rc) { - bsec->unsafe = 1; - goto out; - } - } + new_tsec = bprm->cred->security; + if (new_tsec->sid == new_tsec->osid) + return; - /* Check for ptracing, and update the task SID if ok. - Otherwise, leave SID unchanged and kill. */ - if (unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { - struct task_struct *tracer; - struct task_security_struct *sec; - u32 ptsid = 0; + /* Close files for which the new task SID is not authorized. */ + flush_unauthorized_files(bprm->cred, current->files); - rcu_read_lock(); - tracer = tracehook_tracer_task(current); - if (likely(tracer != NULL)) { - sec = __task_cred(tracer)->security; - ptsid = sec->sid; - } - rcu_read_unlock(); + /* Always clear parent death signal on SID transitions. */ + current->pdeath_signal = 0; - if (ptsid != 0) { - rc = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, - PROCESS__PTRACE, NULL); - if (rc) { - bsec->unsafe = 1; - goto out; - } - } + /* Check whether the new SID can inherit resource limits from the old + * SID. If not, reset all soft limits to the lower of the current + * task's hard limit and the init task's soft limit. + * + * Note that the setting of hard limits (even to lower them) can be + * controlled by the setrlimit check. The inclusion of the init task's + * soft limit into the computation is to avoid resetting soft limits + * higher than the default soft limit for cases where the default is + * lower than the hard limit, e.g. RLIMIT_CORE or RLIMIT_STACK. 
+ */ + rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS, + PROCESS__RLIMITINH, NULL); + if (rc) { + for (i = 0; i < RLIM_NLIMITS; i++) { + rlim = current->signal->rlim + i; + initrlim = init_task.signal->rlim + i; + rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur); } - tsec->sid = sid; + update_rlimit_cpu(rlim->rlim_cur); } - -out: - commit_creds(new); - return 0; } /* - * called after apply_creds without the task lock held + * Clean up the process immediately after the installation of new credentials + * due to exec */ -static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) +static void selinux_bprm_committed_creds(struct linux_binprm *bprm) { - const struct cred *cred = current_cred(); - struct task_security_struct *tsec; - struct rlimit *rlim, *initrlim; + const struct task_security_struct *tsec = current_security(); struct itimerval itimer; - struct bprm_security_struct *bsec; struct sighand_struct *psig; + u32 osid, sid; int rc, i; unsigned long flags; - tsec = current_security(); - bsec = bprm->security; + secondary_ops->bprm_committed_creds(bprm); - if (bsec->unsafe) { - force_sig_specific(SIGKILL, current); - return; - } - if (tsec->osid == tsec->sid) + osid = tsec->osid; + sid = tsec->sid; + + if (sid == osid) return; - /* Close files for which the new task SID is not authorized. */ - flush_unauthorized_files(cred, current->files); - - /* Check whether the new SID can inherit signal state - from the old SID. If not, clear itimers to avoid - subsequent signal generation and flush and unblock - signals. This must occur _after_ the task SID has - been updated so that any kill done after the flush - will be checked against the new SID. */ - rc = avc_has_perm(tsec->osid, tsec->sid, SECCLASS_PROCESS, - PROCESS__SIGINH, NULL); + /* Check whether the new SID can inherit signal state from the old SID. + * If not, clear itimers to avoid subsequent signal generation and + * flush and unblock signals. + * + * This must occur _after_ the task SID has been updated so that any + * kill done after the flush will be checked against the new SID. + */ + rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL); if (rc) { memset(&itimer, 0, sizeof itimer); for (i = 0; i < 3; i++) @@ -2366,32 +2345,8 @@ static void selinux_bprm_post_apply_creds(struct linux_binprm *bprm) spin_unlock_irq(¤t->sighand->siglock); } - /* Always clear parent death signal on SID transitions. */ - current->pdeath_signal = 0; - - /* Check whether the new SID can inherit resource limits - from the old SID. If not, reset all soft limits to - the lower of the current task's hard limit and the init - task's soft limit. Note that the setting of hard limits - (even to lower them) can be controlled by the setrlimit - check. The inclusion of the init task's soft limit into - the computation is to avoid resetting soft limits higher - than the default soft limit for cases where the default - is lower than the hard limit, e.g. RLIMIT_CORE or - RLIMIT_STACK.*/ - rc = avc_has_perm(tsec->osid, tsec->sid, SECCLASS_PROCESS, - PROCESS__RLIMITINH, NULL); - if (rc) { - for (i = 0; i < RLIM_NLIMITS; i++) { - rlim = current->signal->rlim + i; - initrlim = init_task.signal->rlim+i; - rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur); - } - update_rlimit_cpu(rlim->rlim_cur); - } - - /* Wake up the parent if it is waiting so that it can - recheck wait permission to the new task SID. */ + /* Wake up the parent if it is waiting so that it can recheck + * wait permission to the new task SID. 
*/ read_lock_irq(&tasklist_lock); psig = current->parent->sighand; spin_lock_irqsave(&psig->siglock, flags); @@ -5556,12 +5511,10 @@ static struct security_operations selinux_ops = { .netlink_send = selinux_netlink_send, .netlink_recv = selinux_netlink_recv, - .bprm_alloc_security = selinux_bprm_alloc_security, - .bprm_free_security = selinux_bprm_free_security, - .bprm_apply_creds = selinux_bprm_apply_creds, - .bprm_post_apply_creds = selinux_bprm_post_apply_creds, - .bprm_set_security = selinux_bprm_set_security, + .bprm_set_creds = selinux_bprm_set_creds, .bprm_check_security = selinux_bprm_check_security, + .bprm_committing_creds = selinux_bprm_committing_creds, + .bprm_committed_creds = selinux_bprm_committed_creds, .bprm_secureexec = selinux_bprm_secureexec, .sb_alloc_security = selinux_sb_alloc_security, diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index f8be8d7fa26..3cc45168f67 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -77,17 +77,6 @@ struct ipc_security_struct { u32 sid; /* SID of IPC resource */ }; -struct bprm_security_struct { - u32 sid; /* SID for transformed process */ - unsigned char set; - - /* - * unsafe is used to share failure information from bprm_apply_creds() - * to bprm_post_apply_creds(). - */ - char unsafe; -}; - struct netif_security_struct { int ifindex; /* device index */ u32 sid; /* SID for this interface */ diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index e952b397153..de396742abf 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2596,8 +2596,7 @@ struct security_operations smack_ops = { .settime = cap_settime, .vm_enough_memory = cap_vm_enough_memory, - .bprm_apply_creds = cap_bprm_apply_creds, - .bprm_set_security = cap_bprm_set_security, + .bprm_set_creds = cap_bprm_set_creds, .bprm_secureexec = cap_bprm_secureexec, .sb_alloc_security = smack_sb_alloc_security, -- cgit v1.2.3-70-g09d2 From d76b0d9b2d87cfc95686e148767cbf7d0e22bdc0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:25 +1100 Subject: CRED: Use creds in file structs Attach creds to file structs and discard f_uid/f_gid. file_operations::open() methods (such as hppfs_open()) should use file->f_cred rather than current_cred(). At the moment file->f_cred will be current_cred() at this point. 
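For illustration only (not part of this patch): a hypothetical ->open() handler that reads the opener's IDs from file->f_cred rather than current_cred(); the function name and debug message below are invented for the example.

static int example_open(struct inode *inode, struct file *file)
{
	/* creds captured when the file was opened, not the current caller's */
	const struct cred *cred = file->f_cred;

	printk(KERN_DEBUG "example: open by fsuid=%d fsgid=%d\n",
	       cred->fsuid, cred->fsgid);
	return 0;
}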
Signed-off-by: David Howells Reviewed-by: James Morris Signed-off-by: James Morris --- arch/mips/kernel/vpe.c | 4 ++-- drivers/isdn/hysdn/hysdn_procconf.c | 6 ++++-- fs/coda/file.c | 2 +- fs/file_table.c | 7 ++++--- fs/hppfs/hppfs.c | 4 ++-- include/linux/fs.h | 2 +- net/ipv4/netfilter/ipt_LOG.c | 4 ++-- net/ipv6/netfilter/ip6t_LOG.c | 4 ++-- net/netfilter/nfnetlink_log.c | 5 +++-- net/netfilter/xt_owner.c | 16 ++++++++-------- net/sched/cls_flow.c | 4 ++-- 11 files changed, 31 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 972b2d2b840..09786e49637 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -1085,8 +1085,8 @@ static int vpe_open(struct inode *inode, struct file *filp) v->load_addr = NULL; v->len = 0; - v->uid = filp->f_uid; - v->gid = filp->f_gid; + v->uid = filp->f_cred->fsuid; + v->gid = filp->f_cred->fsgid; #ifdef CONFIG_MIPS_APSP_KSPD /* get kspd to tell us when a syscall_exit happens */ diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 484299b031f..8f9f4912de3 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -246,7 +246,8 @@ hysdn_conf_open(struct inode *ino, struct file *filep) } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x", - filep->f_uid, filep->f_gid, filep->f_mode); + filep->f_cred->fsuid, filep->f_cred->fsgid, + filep->f_mode); if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { /* write only access -> write boot file or conf line */ @@ -331,7 +332,8 @@ hysdn_conf_close(struct inode *ino, struct file *filep) } if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL)) hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x", - filep->f_uid, filep->f_gid, filep->f_mode); + filep->f_cred->fsuid, filep->f_cred->fsgid, + filep->f_mode); if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) { /* write only access -> write boot file or conf line */ diff --git a/fs/coda/file.c b/fs/coda/file.c index 29137ff3ca6..5a876998549 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -174,7 +174,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); err = venus_close(coda_inode->i_sb, coda_i2f(coda_inode), - coda_flags, coda_file->f_uid); + coda_flags, coda_file->f_cred->fsuid); host_inode = cfi->cfi_container->f_path.dentry->d_inode; cii = ITOC(coda_inode); diff --git a/fs/file_table.c b/fs/file_table.c index bc4563fe791..0fbcacc3ea7 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -36,7 +36,9 @@ static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) { - struct file *f = container_of(head, struct file, f_u.fu_rcuhead); + struct file *f = container_of(head, struct file, f_u.fu_rcuhead); + + put_cred(f->f_cred); kmem_cache_free(filp_cachep, f); } @@ -121,8 +123,7 @@ struct file *get_empty_filp(void) INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_uid = cred->fsuid; - f->f_gid = cred->fsgid; + f->f_cred = get_cred(cred); eventpoll_init_file(f); /* f->f_version: 0 */ return f; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 795e2c130ad..b278f7f5202 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -426,7 +426,7 @@ static int file_mode(int fmode) static int hppfs_open(struct inode *inode, struct file *file) { - const 
struct cred *cred = current_cred(); + const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; @@ -490,7 +490,7 @@ static int hppfs_open(struct inode *inode, struct file *file) static int hppfs_dir_open(struct inode *inode, struct file *file) { - const struct cred *cred = current_cred(); + const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; diff --git a/include/linux/fs.h b/include/linux/fs.h index 3bfec1327b8..c0fb6d81d89 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -827,7 +827,7 @@ struct file { fmode_t f_mode; loff_t f_pos; struct fown_struct f_owner; - unsigned int f_uid, f_gid; + const struct cred *f_cred; struct file_ra_state f_ra; u64 f_version; diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index fc6ce04a3e3..7b5dbe118c0 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c @@ -340,8 +340,8 @@ static void dump_packet(const struct nf_loginfo *info, read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) printk("UID=%u GID=%u ", - skb->sk->sk_socket->file->f_uid, - skb->sk->sk_socket->file->f_gid); + skb->sk->sk_socket->file->f_cred->fsuid, + skb->sk->sk_socket->file->f_cred->fsgid); read_unlock_bh(&skb->sk->sk_callback_lock); } diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index caa441d0956..871d157cec4 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c @@ -364,8 +364,8 @@ static void dump_packet(const struct nf_loginfo *info, read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) printk("UID=%u GID=%u ", - skb->sk->sk_socket->file->f_uid, - skb->sk->sk_socket->file->f_gid); + skb->sk->sk_socket->file->f_cred->fsuid, + skb->sk->sk_socket->file->f_cred->fsgid); read_unlock_bh(&skb->sk->sk_callback_lock); } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 41e0105d382..38f9efd90e8 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -474,8 +474,9 @@ __build_packet_message(struct nfulnl_instance *inst, if (skb->sk) { read_lock_bh(&skb->sk->sk_callback_lock); if (skb->sk->sk_socket && skb->sk->sk_socket->file) { - __be32 uid = htonl(skb->sk->sk_socket->file->f_uid); - __be32 gid = htonl(skb->sk->sk_socket->file->f_gid); + struct file *file = skb->sk->sk_socket->file; + __be32 uid = htonl(file->f_cred->fsuid); + __be32 gid = htonl(file->f_cred->fsgid); /* need to unlock here since NLA_PUT may goto */ read_unlock_bh(&skb->sk->sk_callback_lock); NLA_PUT_BE32(inst->skb, NFULA_UID, uid); diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c index f19ebd9b78f..22b2a5e881e 100644 --- a/net/netfilter/xt_owner.c +++ b/net/netfilter/xt_owner.c @@ -34,12 +34,12 @@ owner_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par) return false; if (info->match & IPT_OWNER_UID) - if ((filp->f_uid != info->uid) ^ + if ((filp->f_cred->fsuid != info->uid) ^ !!(info->invert & IPT_OWNER_UID)) return false; if (info->match & IPT_OWNER_GID) - if ((filp->f_gid != info->gid) ^ + if ((filp->f_cred->fsgid != info->gid) ^ !!(info->invert & IPT_OWNER_GID)) return false; @@ -60,12 +60,12 @@ owner_mt6_v0(const struct sk_buff *skb, const struct xt_match_param *par) return false; if (info->match & IP6T_OWNER_UID) - if ((filp->f_uid != info->uid) ^ + if ((filp->f_cred->fsuid != info->uid) ^ !!(info->invert & 
IP6T_OWNER_UID)) return false; if (info->match & IP6T_OWNER_GID) - if ((filp->f_gid != info->gid) ^ + if ((filp->f_cred->fsgid != info->gid) ^ !!(info->invert & IP6T_OWNER_GID)) return false; @@ -93,14 +93,14 @@ owner_mt(const struct sk_buff *skb, const struct xt_match_param *par) (XT_OWNER_UID | XT_OWNER_GID)) == 0; if (info->match & XT_OWNER_UID) - if ((filp->f_uid >= info->uid_min && - filp->f_uid <= info->uid_max) ^ + if ((filp->f_cred->fsuid >= info->uid_min && + filp->f_cred->fsuid <= info->uid_max) ^ !(info->invert & XT_OWNER_UID)) return false; if (info->match & XT_OWNER_GID) - if ((filp->f_gid >= info->gid_min && - filp->f_gid <= info->gid_max) ^ + if ((filp->f_cred->fsgid >= info->gid_min && + filp->f_cred->fsgid <= info->gid_max) ^ !(info->invert & XT_OWNER_GID)) return false; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 0ebaff637e3..0ef4e3065bc 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -260,14 +260,14 @@ static u32 flow_get_rtclassid(const struct sk_buff *skb) static u32 flow_get_skuid(const struct sk_buff *skb) { if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) - return skb->sk->sk_socket->file->f_uid; + return skb->sk->sk_socket->file->f_cred->fsuid; return 0; } static u32 flow_get_skgid(const struct sk_buff *skb) { if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) - return skb->sk->sk_socket->file->f_gid; + return skb->sk->sk_socket->file->f_cred->fsgid; return 0; } -- cgit v1.2.3-70-g09d2 From 3b11a1decef07c19443d24ae926982bc8ec9f4c0 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Nov 2008 10:39:26 +1100 Subject: CRED: Differentiate objective and effective subjective credentials on a task Differentiate the objective and real subjective credentials from the effective subjective credentials on a task by introducing a second credentials pointer into the task_struct. task_struct::real_cred then refers to the objective and apparent real subjective credentials of a task, as perceived by the other tasks in the system. task_struct::cred then refers to the effective subjective credentials of a task, as used by that task when it's actually running. These are not visible to the other tasks in the system. __task_cred(task) then refers to the objective/real credentials of the task in question. current_cred() refers to the effective subjective credentials of the current task. prepare_creds() uses the objective creds as a base and commit_creds() changes both pointers in the task_struct (indeed commit_creds() requires them to be the same). override_creds() and revert_creds() change the subjective creds pointer only, and the former returns the old subjective creds. These are used by NFSD, faccessat() and do_coredump(), and will be used by CacheFiles. In SELinux, current_has_perm() is provided as an alternative to task_has_perm(). This uses the effective subjective context of current, whereas task_has_perm() uses the objective/real context of the subject.
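To illustrate the split described above (a sketch only, not code from this series): a caller that needs to act with different credentials for a while overrides and later reverts only the subjective pointer; do_some_work() below is a hypothetical helper.

static int act_with_creds(struct cred *new)
{
	const struct cred *old;
	int ret;

	old = override_creds(new);	/* swaps current->cred only, ->real_cred is untouched */
	ret = do_some_work();		/* hypothetical: runs with the new subjective creds */
	revert_creds(old);		/* restore the previous subjective creds */
	return ret;
}

commit_creds(), by contrast, replaces both pointers, which is why it may not be called while such an override is in force.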
Signed-off-by: David Howells Signed-off-by: James Morris --- fs/nfsd/auth.c | 5 +++- include/linux/cred.h | 29 +++++++++++---------- include/linux/init_task.h | 1 + include/linux/sched.h | 5 +++- kernel/cred.c | 38 ++++++++++++++++++--------- kernel/fork.c | 6 +++-- security/selinux/hooks.c | 65 ++++++++++++++++++++++++++++++----------------- 7 files changed, 95 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 836ffa1047d..0184fe9b514 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -34,6 +34,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) int flags = nfsexp_flags(rqstp, exp); int ret; + /* discard any old override before preparing the new set */ + revert_creds(get_cred(current->real_cred)); new = prepare_creds(); if (!new) return -ENOMEM; @@ -82,7 +84,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) else new->cap_effective = cap_raise_nfsd_set(new->cap_effective, new->cap_permitted); - return commit_creds(new); + put_cred(override_creds(new)); + return 0; oom: ret = -ENOMEM; diff --git a/include/linux/cred.h b/include/linux/cred.h index 794aab5c66e..55a9c995d69 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -146,8 +146,8 @@ extern struct cred *prepare_exec_creds(void); extern struct cred *prepare_usermodehelper_creds(void); extern int commit_creds(struct cred *); extern void abort_creds(struct cred *); -extern const struct cred *override_creds(const struct cred *) __deprecated; -extern void revert_creds(const struct cred *) __deprecated; +extern const struct cred *override_creds(const struct cred *); +extern void revert_creds(const struct cred *); extern void __init cred_init(void); /** @@ -202,32 +202,32 @@ static inline void put_cred(const struct cred *_cred) } /** - * current_cred - Access the current task's credentials + * current_cred - Access the current task's subjective credentials * - * Access the credentials of the current task. + * Access the subjective credentials of the current task. */ #define current_cred() \ (current->cred) /** - * __task_cred - Access another task's credentials + * __task_cred - Access a task's objective credentials * @task: The task to query * - * Access the credentials of another task. The caller must hold the - * RCU readlock. + * Access the objective credentials of a task. The caller must hold the RCU + * readlock. * * The caller must make sure task doesn't go away, either by holding a ref on * task or by holding tasklist_lock to prevent it from being unlinked. */ #define __task_cred(task) \ - ((const struct cred *)(rcu_dereference((task)->cred))) + ((const struct cred *)(rcu_dereference((task)->real_cred))) /** - * get_task_cred - Get another task's credentials + * get_task_cred - Get another task's objective credentials * @task: The task to query * - * Get the credentials of a task, pinning them so that they can't go away. - * Accessing a task's credentials directly is not permitted. + * Get the objective credentials of a task, pinning them so that they can't go + * away. Accessing a task's credentials directly is not permitted. * * The caller must make sure task doesn't go away, either by holding a ref on * task or by holding tasklist_lock to prevent it from being unlinked. 
@@ -243,10 +243,11 @@ static inline void put_cred(const struct cred *_cred) }) /** - * get_current_cred - Get the current task's credentials + * get_current_cred - Get the current task's subjective credentials * - * Get the credentials of the current task, pinning them so that they can't go - * away. Accessing the current task's credentials directly is not permitted. + * Get the subjective credentials of the current task, pinning them so that + * they can't go away. Accessing the current task's credentials directly is + * not permitted. */ #define get_current_cred() \ (get_cred(current_cred())) diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 08c3b24ad9a..2597858035c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -149,6 +149,7 @@ extern struct cred init_cred; .children = LIST_HEAD_INIT(tsk.children), \ .sibling = LIST_HEAD_INIT(tsk.sibling), \ .group_leader = &tsk, \ + .real_cred = &init_cred, \ .cred = &init_cred, \ .cred_exec_mutex = \ __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 121d655e460..3443123b070 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1145,7 +1145,10 @@ struct task_struct { struct list_head cpu_timers[3]; /* process credentials */ - const struct cred *cred; /* actual/objective task credentials (COW) */ + const struct cred *real_cred; /* objective and real subjective task + * credentials (COW) */ + const struct cred *cred; /* effective (overridable) subjective task + * credentials (COW) */ struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ char comm[TASK_COMM_LEN]; /* executable name excluding path diff --git a/kernel/cred.c b/kernel/cred.c index b8bd2f99d8c..f3ca1066061 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -35,7 +35,7 @@ static struct thread_group_cred init_tgcred = { * The initial credentials for the initial task */ struct cred init_cred = { - .usage = ATOMIC_INIT(3), + .usage = ATOMIC_INIT(4), .securebits = SECUREBITS_DEFAULT, .cap_inheritable = CAP_INIT_INH_SET, .cap_permitted = CAP_FULL_SET, @@ -120,6 +120,8 @@ EXPORT_SYMBOL(__put_cred); * prepare a new copy, which the caller then modifies and then commits by * calling commit_creds(). * + * Preparation involves making a copy of the objective creds for modification. + * * Returns a pointer to the new creds-to-be if successful, NULL otherwise. * * Call commit_creds() or abort_creds() to clean up. @@ -130,7 +132,7 @@ struct cred *prepare_creds(void) const struct cred *old; struct cred *new; - BUG_ON(atomic_read(&task->cred->usage) < 1); + BUG_ON(atomic_read(&task->real_cred->usage) < 1); new = kmem_cache_alloc(cred_jar, GFP_KERNEL); if (!new) @@ -262,6 +264,9 @@ error: * * We share if we can, but under some circumstances we have to generate a new * set. 
+ * + * The new process gets the current process's subjective credentials as its + * objective and subjective credentials */ int copy_creds(struct task_struct *p, unsigned long clone_flags) { @@ -278,6 +283,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) #endif clone_flags & CLONE_THREAD ) { + p->real_cred = get_cred(p->cred); get_cred(p->cred); atomic_inc(&p->cred->user->processes); return 0; @@ -317,7 +323,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) #endif atomic_inc(&new->user->processes); - p->cred = new; + p->cred = p->real_cred = get_cred(new); return 0; } @@ -326,7 +332,9 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) * @new: The credentials to be assigned * * Install a new set of credentials to the current task, using RCU to replace - * the old set. + * the old set. Both the objective and the subjective credentials pointers are + * updated. This function may not be called if the subjective credentials are + * in an overridden state. * * This function eats the caller's reference to the new credentials. * @@ -338,12 +346,15 @@ int commit_creds(struct cred *new) struct task_struct *task = current; const struct cred *old; + BUG_ON(task->cred != task->real_cred); + BUG_ON(atomic_read(&task->real_cred->usage) < 2); BUG_ON(atomic_read(&new->usage) < 1); - BUG_ON(atomic_read(&task->cred->usage) < 1); - old = task->cred; + old = task->real_cred; security_commit_creds(new, old); + get_cred(new); /* we will require a ref for the subj creds too */ + /* dumpability changes */ if (old->euid != new->euid || old->egid != new->egid || @@ -369,6 +380,7 @@ int commit_creds(struct cred *new) */ if (new->user != old->user) atomic_inc(&new->user->processes); + rcu_assign_pointer(task->real_cred, new); rcu_assign_pointer(task->cred, new); if (new->user != old->user) atomic_dec(&old->user->processes); @@ -388,6 +400,8 @@ int commit_creds(struct cred *new) new->fsgid != old->fsgid) proc_id_connector(task, PROC_EVENT_GID); + /* release the old obj and subj refs both */ + put_cred(old); put_cred(old); return 0; } @@ -408,11 +422,11 @@ void abort_creds(struct cred *new) EXPORT_SYMBOL(abort_creds); /** - * override_creds - Temporarily override the current process's credentials + * override_creds - Override the current process's subjective credentials * @new: The credentials to be assigned * - * Install a set of temporary override credentials on the current process, - * returning the old set for later reversion. + * Install a set of temporary override subjective credentials on the current + * process, returning the old set for later reversion. */ const struct cred *override_creds(const struct cred *new) { @@ -424,11 +438,11 @@ const struct cred *override_creds(const struct cred *new) EXPORT_SYMBOL(override_creds); /** - * revert_creds - Revert a temporary credentials override + * revert_creds - Revert a temporary subjective credentials override * @old: The credentials to be restored * - * Revert a temporary set of override credentials to an old set, discarding the - * override set. + * Revert a temporary set of override subjective credentials to an old set, + * discarding the override set. 
*/ void revert_creds(const struct cred *old) { diff --git a/kernel/fork.c b/kernel/fork.c index 82a7948a664..af0d0f04585 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -146,6 +146,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); + put_cred(tsk->real_cred); put_cred(tsk->cred); delayacct_tsk_free(tsk); @@ -961,10 +962,10 @@ static struct task_struct *copy_process(unsigned long clone_flags, DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = -EAGAIN; - if (atomic_read(&p->cred->user->processes) >= + if (atomic_read(&p->real_cred->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && - p->cred->user != current->nsproxy->user_ns->root_user) + p->real_cred->user != current->nsproxy->user_ns->root_user) goto bad_fork_free; } @@ -1278,6 +1279,7 @@ bad_fork_cleanup_put_domain: module_put(task_thread_info(p)->exec_domain->module); bad_fork_cleanup_count: atomic_dec(&p->cred->user->processes); + put_cred(p->real_cred); put_cred(p->cred); bad_fork_free: free_task(p); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 21a59218463..91b06f2aa96 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -161,7 +161,7 @@ static int selinux_secmark_enabled(void) */ static void cred_init_security(void) { - struct cred *cred = (struct cred *) current->cred; + struct cred *cred = (struct cred *) current->real_cred; struct task_security_struct *tsec; tsec = kzalloc(sizeof(struct task_security_struct), GFP_KERNEL); @@ -184,7 +184,7 @@ static inline u32 cred_sid(const struct cred *cred) } /* - * get the security ID of a task + * get the objective security ID of a task */ static inline u32 task_sid(const struct task_struct *task) { @@ -197,7 +197,7 @@ static inline u32 task_sid(const struct task_struct *task) } /* - * get the security ID of the current task + * get the subjective security ID of the current task */ static inline u32 current_sid(void) { @@ -1395,6 +1395,7 @@ static int cred_has_perm(const struct cred *actor, * Check permission between a pair of tasks, e.g. signal checks, * fork check, ptrace check, etc. * tsk1 is the actor and tsk2 is the target + * - this uses the default subjective creds of tsk1 */ static int task_has_perm(const struct task_struct *tsk1, const struct task_struct *tsk2, @@ -1410,6 +1411,22 @@ static int task_has_perm(const struct task_struct *tsk1, return avc_has_perm(sid1, sid2, SECCLASS_PROCESS, perms, NULL); } +/* + * Check permission between current and another task, e.g. signal checks, + * fork check, ptrace check, etc. + * current is the actor and tsk2 is the target + * - this uses current's subjective creds + */ +static int current_has_perm(const struct task_struct *tsk, + u32 perms) +{ + u32 sid, tsid; + + sid = current_sid(); + tsid = task_sid(tsk); + return avc_has_perm(sid, tsid, SECCLASS_PROCESS, perms, NULL); +} + #if CAP_LAST_CAP > 63 #error Fix SELinux to handle capabilities > 63. 
#endif @@ -1807,7 +1824,7 @@ static int selinux_ptrace_may_access(struct task_struct *child, return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL); } - return task_has_perm(current, child, PROCESS__PTRACE); + return current_has_perm(child, PROCESS__PTRACE); } static int selinux_ptrace_traceme(struct task_struct *parent) @@ -1826,7 +1843,7 @@ static int selinux_capget(struct task_struct *target, kernel_cap_t *effective, { int error; - error = task_has_perm(current, target, PROCESS__GETCAP); + error = current_has_perm(target, PROCESS__GETCAP); if (error) return error; @@ -3071,7 +3088,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma, } else if (!vma->vm_file && vma->vm_start <= vma->vm_mm->start_stack && vma->vm_end >= vma->vm_mm->start_stack) { - rc = task_has_perm(current, current, PROCESS__EXECSTACK); + rc = current_has_perm(current, PROCESS__EXECSTACK); } else if (vma->vm_file && vma->anon_vma) { /* * We are making executable a file mapping that has @@ -3220,7 +3237,7 @@ static int selinux_task_create(unsigned long clone_flags) if (rc) return rc; - return task_has_perm(current, current, PROCESS__FORK); + return current_has_perm(current, PROCESS__FORK); } /* @@ -3285,17 +3302,17 @@ static int selinux_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags) static int selinux_task_setpgid(struct task_struct *p, pid_t pgid) { - return task_has_perm(current, p, PROCESS__SETPGID); + return current_has_perm(p, PROCESS__SETPGID); } static int selinux_task_getpgid(struct task_struct *p) { - return task_has_perm(current, p, PROCESS__GETPGID); + return current_has_perm(p, PROCESS__GETPGID); } static int selinux_task_getsid(struct task_struct *p) { - return task_has_perm(current, p, PROCESS__GETSESSION); + return current_has_perm(p, PROCESS__GETSESSION); } static void selinux_task_getsecid(struct task_struct *p, u32 *secid) @@ -3317,7 +3334,7 @@ static int selinux_task_setnice(struct task_struct *p, int nice) if (rc) return rc; - return task_has_perm(current, p, PROCESS__SETSCHED); + return current_has_perm(p, PROCESS__SETSCHED); } static int selinux_task_setioprio(struct task_struct *p, int ioprio) @@ -3328,12 +3345,12 @@ static int selinux_task_setioprio(struct task_struct *p, int ioprio) if (rc) return rc; - return task_has_perm(current, p, PROCESS__SETSCHED); + return current_has_perm(p, PROCESS__SETSCHED); } static int selinux_task_getioprio(struct task_struct *p) { - return task_has_perm(current, p, PROCESS__GETSCHED); + return current_has_perm(p, PROCESS__GETSCHED); } static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim) @@ -3350,7 +3367,7 @@ static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim later be used as a safe reset point for the soft limit upon context transitions. See selinux_bprm_committing_creds. 
*/ if (old_rlim->rlim_max != new_rlim->rlim_max) - return task_has_perm(current, current, PROCESS__SETRLIMIT); + return current_has_perm(current, PROCESS__SETRLIMIT); return 0; } @@ -3363,17 +3380,17 @@ static int selinux_task_setscheduler(struct task_struct *p, int policy, struct s if (rc) return rc; - return task_has_perm(current, p, PROCESS__SETSCHED); + return current_has_perm(p, PROCESS__SETSCHED); } static int selinux_task_getscheduler(struct task_struct *p) { - return task_has_perm(current, p, PROCESS__GETSCHED); + return current_has_perm(p, PROCESS__GETSCHED); } static int selinux_task_movememory(struct task_struct *p) { - return task_has_perm(current, p, PROCESS__SETSCHED); + return current_has_perm(p, PROCESS__SETSCHED); } static int selinux_task_kill(struct task_struct *p, struct siginfo *info, @@ -3394,7 +3411,7 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info, rc = avc_has_perm(secid, task_sid(p), SECCLASS_PROCESS, perm, NULL); else - rc = task_has_perm(current, p, perm); + rc = current_has_perm(p, perm); return rc; } @@ -5250,7 +5267,7 @@ static int selinux_getprocattr(struct task_struct *p, unsigned len; if (current != p) { - error = task_has_perm(current, p, PROCESS__GETATTR); + error = current_has_perm(p, PROCESS__GETATTR); if (error) return error; } @@ -5309,15 +5326,15 @@ static int selinux_setprocattr(struct task_struct *p, * above restriction is ever removed. */ if (!strcmp(name, "exec")) - error = task_has_perm(current, p, PROCESS__SETEXEC); + error = current_has_perm(p, PROCESS__SETEXEC); else if (!strcmp(name, "fscreate")) - error = task_has_perm(current, p, PROCESS__SETFSCREATE); + error = current_has_perm(p, PROCESS__SETFSCREATE); else if (!strcmp(name, "keycreate")) - error = task_has_perm(current, p, PROCESS__SETKEYCREATE); + error = current_has_perm(p, PROCESS__SETKEYCREATE); else if (!strcmp(name, "sockcreate")) - error = task_has_perm(current, p, PROCESS__SETSOCKCREATE); + error = current_has_perm(p, PROCESS__SETSOCKCREATE); else if (!strcmp(name, "current")) - error = task_has_perm(current, p, PROCESS__SETCURRENT); + error = current_has_perm(p, PROCESS__SETCURRENT); else error = -EINVAL; if (error) -- cgit v1.2.3-70-g09d2 From cc09c0dc57de7f7d2ed89d480b5653e5f6a32f2c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 17 Nov 2008 17:37:10 +1100 Subject: [XFS] Fix double free of log tickets When an I/O error occurs during an intermediate commit on a rolling transaction, xfs_trans_commit() will free the transaction structure and the related ticket. However, the duplicate transaction that gets used as the transaction continues still contains a pointer to the ticket. Hence when the duplicate transaction is cancelled and freed, we free the ticket a second time. Add reference counting to the ticket so that we hold an extra reference to the ticket over the transaction commit. We drop the extra reference once we have checked that the transaction commit did not return an error, thus avoiding a double free on commit error. Credit to Nick Piggin for tripping over the problem. 
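To illustrate the reference counting pattern described above (a minimal sketch with hypothetical names, not the XFS implementation itself), the idea is that the caller takes an extra reference on the ticket before a commit that may free it, and drops that reference only once the commit is known to have succeeded:

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-in for xlog_ticket_t; only the refcount matters here. */
struct ticket {
	atomic_int	ref;
};

static struct ticket *ticket_alloc(void)
{
	struct ticket *t = malloc(sizeof(*t));

	if (t)
		atomic_init(&t->ref, 1);	/* creator holds one reference */
	return t;
}

static struct ticket *ticket_get(struct ticket *t)
{
	atomic_fetch_add(&t->ref, 1);		/* pin the ticket for the caller */
	return t;
}

static void ticket_put(struct ticket *t)
{
	/* free only when the last reference goes away */
	if (atomic_fetch_sub(&t->ref, 1) == 1)
		free(t);
}

/* Pretend commit: always consumes one reference, may report an error. */
static int commit(struct ticket *t, int simulate_error)
{
	ticket_put(t);
	return simulate_error ? -5 : 0;		/* -EIO-style failure */
}

int main(void)
{
	struct ticket *t = ticket_alloc();
	int error;

	ticket_get(t);				/* extra reference held over the commit */
	error = commit(t, 1);
	if (error) {
		ticket_put(t);			/* cancel path drops our ref; no double free */
		return 1;
	}
	ticket_put(t);				/* success: drop the extra reference */
	return 0;
}

In the patch below the same pattern appears as xfs_log_ticket_get()/xfs_log_ticket_put() operating on the new t_ref counter: xfs_trans_dup() takes the extra reference, and the callers drop it only after checking that the transaction commit did not fail.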
SGI-PV: 989741 Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_bmap.c | 10 ++++++++-- fs/xfs/xfs_inode.c | 10 ++++++++-- fs/xfs/xfs_log.c | 39 +++++++++++++++++++++++++-------------- fs/xfs/xfs_log.h | 4 ++++ fs/xfs/xfs_log_priv.h | 1 + fs/xfs/xfs_trans.c | 9 ++++++++- fs/xfs/xfs_utils.c | 6 ++++++ fs/xfs/xfs_vnodeops.c | 6 ++++++ 8 files changed, 66 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index db289050692..c3912213645 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4292,9 +4292,15 @@ xfs_bmap_finish( * We have a new transaction, so we should return committed=1, * even though we're returning an error. */ - if (error) { + if (error) return error; - } + + /* + * transaction commit worked ok so we can drop the extra ticket + * reference that we gained in xfs_trans_dup() + */ + xfs_log_ticket_put(ntp->t_ticket); + if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES, logcount))) return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index cd522827f99..b9771004706 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1782,8 +1782,14 @@ xfs_itruncate_finish( xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); xfs_trans_ihold(ntp, ip); - if (!error) - error = xfs_trans_reserve(ntp, 0, + if (error) + return error; + /* + * transaction commit worked ok so we can drop the extra ticket + * reference that we gained in xfs_trans_dup() + */ + xfs_log_ticket_put(ntp->t_ticket); + error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 92c20a8d9e6..4bf44aef644 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -100,12 +100,11 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, /* local ticket functions */ -STATIC xlog_ticket_t *xlog_ticket_get(xlog_t *log, +STATIC xlog_ticket_t *xlog_ticket_alloc(xlog_t *log, int unit_bytes, int count, char clientid, uint flags); -STATIC void xlog_ticket_put(xlog_t *log, xlog_ticket_t *ticket); #if defined(DEBUG) STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr); @@ -360,7 +359,7 @@ xfs_log_done(xfs_mount_t *mp, */ xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); xlog_ungrant_log_space(log, ticket); - xlog_ticket_put(log, ticket); + xfs_log_ticket_put(ticket); } else { xlog_trace_loggrant(log, ticket, "xfs_log_done: (permanent)"); xlog_regrant_reserve_log_space(log, ticket); @@ -514,7 +513,7 @@ xfs_log_reserve(xfs_mount_t *mp, retval = xlog_regrant_write_log_space(log, internal_ticket); } else { /* may sleep if need to allocate more tickets */ - internal_ticket = xlog_ticket_get(log, unit_bytes, cnt, + internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt, client, flags); if (!internal_ticket) return XFS_ERROR(ENOMEM); @@ -749,7 +748,7 @@ xfs_log_unmount_write(xfs_mount_t *mp) if (tic) { xlog_trace_loggrant(log, tic, "unmount rec"); xlog_ungrant_log_space(log, tic); - xlog_ticket_put(log, tic); + xfs_log_ticket_put(tic); } } else { /* @@ -3222,22 +3221,33 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) */ /* - * Free a used ticket. + * Free a used ticket when it's refcount falls to zero. 
*/ -STATIC void -xlog_ticket_put(xlog_t *log, - xlog_ticket_t *ticket) +void +xfs_log_ticket_put( + xlog_ticket_t *ticket) { - sv_destroy(&ticket->t_wait); - kmem_zone_free(xfs_log_ticket_zone, ticket); -} /* xlog_ticket_put */ + ASSERT(atomic_read(&ticket->t_ref) > 0); + if (atomic_dec_and_test(&ticket->t_ref)) { + sv_destroy(&ticket->t_wait); + kmem_zone_free(xfs_log_ticket_zone, ticket); + } +} +xlog_ticket_t * +xfs_log_ticket_get( + xlog_ticket_t *ticket) +{ + ASSERT(atomic_read(&ticket->t_ref) > 0); + atomic_inc(&ticket->t_ref); + return ticket; +} /* * Allocate and initialise a new log ticket. */ STATIC xlog_ticket_t * -xlog_ticket_get(xlog_t *log, +xlog_ticket_alloc(xlog_t *log, int unit_bytes, int cnt, char client, @@ -3308,6 +3318,7 @@ xlog_ticket_get(xlog_t *log, unit_bytes += 2*BBSIZE; } + atomic_set(&tic->t_ref, 1); tic->t_unit_res = unit_bytes; tic->t_curr_res = unit_bytes; tic->t_cnt = cnt; @@ -3323,7 +3334,7 @@ xlog_ticket_get(xlog_t *log, xlog_tic_reset_res(tic); return tic; -} /* xlog_ticket_get */ +} /****************************************************************************** diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index d47b91f1082..8a3e84e900a 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -134,6 +134,7 @@ typedef struct xfs_log_callback { #ifdef __KERNEL__ /* Log manager interfaces */ struct xfs_mount; +struct xlog_ticket; xfs_lsn_t xfs_log_done(struct xfs_mount *mp, xfs_log_ticket_t ticket, void **iclog, @@ -177,6 +178,9 @@ int xfs_log_need_covered(struct xfs_mount *mp); void xlog_iodone(struct xfs_buf *); +struct xlog_ticket * xfs_log_ticket_get(struct xlog_ticket *ticket); +void xfs_log_ticket_put(struct xlog_ticket *ticket); + #endif diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index de7ef6ca920..b39a1980e82 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -245,6 +245,7 @@ typedef struct xlog_ticket { struct xlog_ticket *t_next; /* :4|8 */ struct xlog_ticket *t_prev; /* :4|8 */ xlog_tid_t t_tid; /* transaction identifier : 4 */ + atomic_t t_ref; /* ticket reference count : 4 */ int t_curr_res; /* current reservation in bytes : 4 */ int t_unit_res; /* unit reservation in bytes : 4 */ char t_ocnt; /* original count : 1 */ diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index ad137efc870..8570b826fed 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -290,7 +290,7 @@ xfs_trans_dup( ASSERT(tp->t_ticket != NULL); ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE); - ntp->t_ticket = tp->t_ticket; + ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket); ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used; tp->t_blk_res = tp->t_blk_res_used; ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used; @@ -1259,6 +1259,13 @@ xfs_trans_roll( trans = *tpp; + /* + * transaction commit worked ok so we can drop the extra ticket + * reference that we gained in xfs_trans_dup() + */ + xfs_log_ticket_put(trans->t_ticket); + + /* * Reserve space in the log for th next transaction. 
* This also pushes items in the "AIL", the list of logged items, diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 35d4d414bcc..771144932ab 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c @@ -172,6 +172,12 @@ xfs_dir_ialloc( *ipp = NULL; return code; } + + /* + * transaction commit worked ok so we can drop the extra ticket + * reference that we gained in xfs_trans_dup() + */ + xfs_log_ticket_put(tp->t_ticket); code = xfs_trans_reserve(tp, 0, log_res, 0, XFS_TRANS_PERM_LOG_RES, log_count); /* diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index c45ea278ef4..0574aadc4d3 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -1018,6 +1018,12 @@ xfs_inactive_symlink_rmt( ASSERT(XFS_FORCED_SHUTDOWN(mp)); goto error0; } + /* + * transaction commit worked ok so we can drop the extra ticket + * reference that we gained in xfs_trans_dup() + */ + xfs_log_ticket_put(tp->t_ticket); + /* * Remove the memory for extent descriptions (just bookkeeping). */ -- cgit v1.2.3-70-g09d2 From 7596b27dbd8de7bcfa7a80b2756114b49bd5c018 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 19 Nov 2008 11:30:27 -0800 Subject: coda: fix creds reference Needs a header file for credentials struct: linux-next-20081023/fs/coda/file.c:177: error: dereferencing pointer to incomplete type Signed-off-by: Randy Dunlap Cc: Jan Harkes Cc: David Howells Cc: James Morris Signed-off-by: Andrew Morton Signed-off-by: James Morris --- fs/coda/file.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/coda/file.c b/fs/coda/file.c index 5a876998549..466303db2df 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From 9789cfe22e5d7bc10cad841a4ea96ecedb34b267 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 19 Nov 2008 11:46:46 -0800 Subject: nfsctl: add headers for credentials Needs headers help for current_cred: Adding only cred.h wasn't enough. linux-next-20081023/fs/nfsctl.c:45: error: implicit declaration of function 'current_cred' Signed-off-by: Randy Dunlap Cc: David Howells Cc: James Morris Signed-off-by: Andrew Morton Signed-off-by: James Morris --- fs/nfsctl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfsctl.c b/fs/nfsctl.c index cc4ef2642a5..b1acbd6ab6f 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include #include -- cgit v1.2.3-70-g09d2 From 74e2f334f4440cbcb63e9ebbcdcea430d41bdfa3 Mon Sep 17 00:00:00 2001 From: Török Edwin Date: Sat, 22 Nov 2008 13:28:48 +0200 Subject: vfs, seqfile: make mangle_path() global MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Impact: expose new VFS API make mangle_path() available, as per the suggestions of Christoph Hellwig and Al Viro: http://lkml.org/lkml/2008/11/4/338 Signed-off-by: Török Edwin Signed-off-by: Ingo Molnar --- fs/seq_file.c | 14 +++++++++++++- include/linux/seq_file.h | 1 + 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index eba2eabcd2b..f5b61cc1cc1 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -357,7 +357,18 @@ int seq_printf(struct seq_file *m, const char *f, ...) 
} EXPORT_SYMBOL(seq_printf); -static char *mangle_path(char *s, char *p, char *esc) +/** + * mangle_path - mangle and copy path to buffer beginning + * @s - buffer start + * @p - beginning of path in above buffer + * @esc - set of characters that need escaping + * + * Copy the path from @p to @s, replacing each occurrence of character from + * @esc with usual octal escape. + * Returns pointer past last written character in @s, or NULL in case of + * failure. + */ +char *mangle_path(char *s, char *p, char *esc) { while (s <= p) { char c = *p++; @@ -376,6 +387,7 @@ static char *mangle_path(char *s, char *p, char *esc) } return NULL; } +EXPORT_SYMBOL_GPL(mangle_path); /* * return the absolute path of 'dentry' residing in mount 'mnt'. diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index dc50bcc282a..b3dfa72f13b 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -34,6 +34,7 @@ struct seq_operations { #define SEQ_SKIP 1 +char *mangle_path(char *s, char *p, char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); loff_t seq_lseek(struct file *, loff_t, int); -- cgit v1.2.3-70-g09d2 From 958086d1784459be3fe85e4cad79d42b17d33381 Mon Sep 17 00:00:00 2001 From: Török Edwin Date: Sun, 23 Nov 2008 23:24:53 +0200 Subject: vfs, seqfile: fix comment style on mangle_path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Impact: use standard docbook tags Reported-by: Randy Dunlap Signed-off-by: Török Edwin Signed-off-by: Ingo Molnar --- fs/seq_file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index f5b61cc1cc1..f03220d7891 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -358,10 +358,10 @@ int seq_printf(struct seq_file *m, const char *f, ...) EXPORT_SYMBOL(seq_printf); /** - * mangle_path - mangle and copy path to buffer beginning - * @s - buffer start - * @p - beginning of path in above buffer - * @esc - set of characters that need escaping + * mangle_path - mangle and copy path to buffer beginning + * @s: buffer start + * @p: beginning of path in above buffer + * @esc: set of characters that need escaping * * Copy the path from @p to @s, replacing each occurrence of character from * @esc with usual octal escape. -- cgit v1.2.3-70-g09d2 From 18b6e0414e42d95183f07d8177e3ff0241abd825 Mon Sep 17 00:00:00 2001 From: Serge Hallyn Date: Wed, 15 Oct 2008 16:38:45 -0500 Subject: User namespaces: set of cleanups (v2) The user_ns is moved from nsproxy to user_struct, so that a struct cred by itself is sufficient to determine access (which it otherwise would not be). Corresponding ecryptfs fixes (by David Howells) are here as well. Fix refcounting. The following rules now apply: 1. The task pins the user struct. 2. The user struct pins its user namespace. 3. The user namespace pins the struct user which created it. User namespaces are cloned during copy_creds(). Unsharing a new user_ns is no longer possible. (We could re-add that, but it'll cause code duplication and doesn't seem useful if PAM doesn't need to clone user namespaces). When a user namespace is created, its first user (uid 0) gets empty keyrings and a clean group_info. This incorporates a previous patch by David Howells. Here is his original patch description: >I suggest adding the attached incremental patch. 
It makes the following >changes: > > (1) Provides a current_user_ns() macro to wrap accesses to current's user > namespace. > > (2) Fixes eCryptFS. > > (3) Renames create_new_userns() to create_user_ns() to be more consistent > with the other associated functions and because the 'new' in the name is > superfluous. > > (4) Moves the argument and permission checks made for CLONE_NEWUSER to the > beginning of do_fork() so that they're done prior to making any attempts > at allocation. > > (5) Calls create_user_ns() after prepare_creds(), and gives it the new creds > to fill in rather than have it return the new root user. I don't imagine > the new root user being used for anything other than filling in a cred > struct. > > This also permits me to get rid of a get_uid() and a free_uid(), as the > reference the creds were holding on the old user_struct can just be > transferred to the new namespace's creator pointer. > > (6) Makes create_user_ns() reset the UIDs and GIDs of the creds under > preparation rather than doing it in copy_creds(). > >David >Signed-off-by: David Howells Changelog: Oct 20: integrate dhowells comments 1. leave thread_keyring alone 2. use current_user_ns() in set_user() Signed-off-by: Serge Hallyn --- fs/ecryptfs/messaging.c | 13 ++++---- fs/ecryptfs/miscdev.c | 19 ++++------- include/linux/cred.h | 2 ++ include/linux/init_task.h | 1 - include/linux/nsproxy.h | 1 - include/linux/sched.h | 1 + include/linux/user_namespace.h | 13 +++----- kernel/cred.c | 15 +++++++-- kernel/fork.c | 19 +++++++++-- kernel/nsproxy.c | 15 ++------- kernel/sys.c | 4 +-- kernel/user.c | 47 ++++++++------------------ kernel/user_namespace.c | 75 +++++++++++++++++------------------------- 13 files changed, 96 insertions(+), 129 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index e0b0a4e28b9..6913f727624 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -360,7 +360,7 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, struct ecryptfs_msg_ctx *msg_ctx; size_t msg_size; struct nsproxy *nsproxy; - struct user_namespace *current_user_ns; + struct user_namespace *tsk_user_ns; uid_t ctx_euid; int rc; @@ -385,9 +385,9 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, mutex_unlock(&ecryptfs_daemon_hash_mux); goto wake_up; } - current_user_ns = nsproxy->user_ns; + tsk_user_ns = __task_cred(msg_ctx->task)->user->user_ns; ctx_euid = task_euid(msg_ctx->task); - rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, current_user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, ctx_euid, tsk_user_ns); rcu_read_unlock(); mutex_unlock(&ecryptfs_daemon_hash_mux); if (rc) { @@ -405,11 +405,11 @@ int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, euid, ctx_euid); goto unlock; } - if (current_user_ns != user_ns) { + if (tsk_user_ns != user_ns) { rc = -EBADMSG; printk(KERN_WARNING "%s: Received message from user_ns " "[0x%p]; expected message from user_ns [0x%p]\n", - __func__, user_ns, nsproxy->user_ns); + __func__, user_ns, tsk_user_ns); goto unlock; } if (daemon->pid != pid) { @@ -468,8 +468,7 @@ ecryptfs_send_message_locked(char *data, int data_len, u8 msg_type, uid_t euid = current_euid(); int rc; - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, - current->nsproxy->user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); if (rc || !daemon) { rc = -ENOTCONN; printk(KERN_ERR "%s: User [%d] does not have a daemon " diff --git a/fs/ecryptfs/miscdev.c 
b/fs/ecryptfs/miscdev.c index 047ac609695..efd95a0ed1e 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c @@ -47,8 +47,7 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt) mutex_lock(&ecryptfs_daemon_hash_mux); /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, - current->nsproxy->user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); mutex_unlock(&ecryptfs_daemon_hash_mux); @@ -95,11 +94,9 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) "count; rc = [%d]\n", __func__, rc); goto out_unlock_daemon_list; } - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, - current->nsproxy->user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); if (rc || !daemon) { - rc = ecryptfs_spawn_daemon(&daemon, euid, - current->nsproxy->user_ns, + rc = ecryptfs_spawn_daemon(&daemon, euid, current_user_ns(), task_pid(current)); if (rc) { printk(KERN_ERR "%s: Error attempting to spawn daemon; " @@ -153,8 +150,7 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) int rc; mutex_lock(&ecryptfs_daemon_hash_mux); - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, - current->nsproxy->user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); BUG_ON(daemon->pid != task_pid(current)); @@ -254,8 +250,7 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, mutex_lock(&ecryptfs_daemon_hash_mux); /* TODO: Just use file->private_data? */ - rc = ecryptfs_find_daemon_by_euid(&daemon, euid, - current->nsproxy->user_ns); + rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns()); BUG_ON(rc || !daemon); mutex_lock(&daemon->mux); if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { @@ -295,7 +290,7 @@ check_list: goto check_list; } BUG_ON(euid != daemon->euid); - BUG_ON(current->nsproxy->user_ns != daemon->user_ns); + BUG_ON(current_user_ns() != daemon->user_ns); BUG_ON(task_pid(current) != daemon->pid); msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue, struct ecryptfs_msg_ctx, daemon_out_list); @@ -468,7 +463,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf, goto out_free; } rc = ecryptfs_miscdev_response(&data[i], packet_size, - euid, current->nsproxy->user_ns, + euid, current_user_ns(), task_pid(current), seq); if (rc) printk(KERN_WARNING "%s: Failed to deliver miscdev " diff --git a/include/linux/cred.h b/include/linux/cred.h index 26c1ab17994..3282ee4318e 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -60,6 +60,7 @@ do { \ } while (0) extern struct group_info *groups_alloc(int); +extern struct group_info init_groups; extern void groups_free(struct group_info *); extern int set_current_groups(struct group_info *); extern int set_groups(struct cred *, struct group_info *); @@ -315,6 +316,7 @@ static inline void put_cred(const struct cred *_cred) #define current_fsgid() (current_cred_xxx(fsgid)) #define current_cap() (current_cred_xxx(cap_effective)) #define current_user() (current_cred_xxx(user)) +#define current_user_ns() (current_cred_xxx(user)->user_ns) #define current_security() (current_cred_xxx(security)) #define current_uid_gid(_uid, _gid) \ diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 2597858035c..959f5522d10 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -57,7 +57,6 @@ extern struct nsproxy init_nsproxy; .mnt_ns = NULL, \ 
INIT_NET_NS(net_ns) \ INIT_IPC_NS(ipc_ns) \ - .user_ns = &init_user_ns, \ } #define INIT_SIGHAND(sighand) { \ diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index c8a768e5964..afad7dec1b3 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -27,7 +27,6 @@ struct nsproxy { struct ipc_namespace *ipc_ns; struct mnt_namespace *mnt_ns; struct pid_namespace *pid_ns; - struct user_namespace *user_ns; struct net *net_ns; }; extern struct nsproxy init_nsproxy; diff --git a/include/linux/sched.h b/include/linux/sched.h index 2036e9f2602..7f8015a3082 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -638,6 +638,7 @@ struct user_struct { /* Hash table maintenance information */ struct hlist_node uidhash_node; uid_t uid; + struct user_namespace *user_ns; #ifdef CONFIG_USER_SCHED struct task_group *tg; diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index b5f41d4c2ee..315bcd37522 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -12,7 +12,7 @@ struct user_namespace { struct kref kref; struct hlist_head uidhash_table[UIDHASH_SZ]; - struct user_struct *root_user; + struct user_struct *creator; }; extern struct user_namespace init_user_ns; @@ -26,8 +26,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns) return ns; } -extern struct user_namespace *copy_user_ns(int flags, - struct user_namespace *old_ns); +extern int create_user_ns(struct cred *new); extern void free_user_ns(struct kref *kref); static inline void put_user_ns(struct user_namespace *ns) @@ -43,13 +42,9 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns) return &init_user_ns; } -static inline struct user_namespace *copy_user_ns(int flags, - struct user_namespace *old_ns) +static inline int create_user_ns(struct cred *new) { - if (flags & CLONE_NEWUSER) - return ERR_PTR(-EINVAL); - - return old_ns; + return -EINVAL; } static inline void put_user_ns(struct user_namespace *ns) diff --git a/kernel/cred.c b/kernel/cred.c index 13697ca2bb3..ff7bc071991 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -274,6 +274,7 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) struct thread_group_cred *tgcred; #endif struct cred *new; + int ret; mutex_init(&p->cred_exec_mutex); @@ -293,6 +294,12 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) if (!new) return -ENOMEM; + if (clone_flags & CLONE_NEWUSER) { + ret = create_user_ns(new); + if (ret < 0) + goto error_put; + } + #ifdef CONFIG_KEYS /* new threads get their own thread keyrings if their parent already * had one */ @@ -309,8 +316,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) if (!(clone_flags & CLONE_THREAD)) { tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); if (!tgcred) { - put_cred(new); - return -ENOMEM; + ret = -ENOMEM; + goto error_put; } atomic_set(&tgcred->usage, 1); spin_lock_init(&tgcred->lock); @@ -325,6 +332,10 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags) atomic_inc(&new->user->processes); p->cred = p->real_cred = get_cred(new); return 0; + +error_put: + put_cred(new); + return ret; } /** diff --git a/kernel/fork.c b/kernel/fork.c index 29c18c14812..1dd89451fae 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -976,7 +976,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (atomic_read(&p->real_cred->user->processes) >= p->signal->rlim[RLIMIT_NPROC].rlim_cur) { if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && - 
p->real_cred->user != current->nsproxy->user_ns->root_user) + p->real_cred->user != INIT_USER) goto bad_fork_free; } @@ -1334,6 +1334,20 @@ long do_fork(unsigned long clone_flags, int trace = 0; long nr; + /* + * Do some preliminary argument and permissions checking before we + * actually start allocating stuff + */ + if (clone_flags & CLONE_NEWUSER) { + if (clone_flags & CLONE_THREAD) + return -EINVAL; + /* hopefully this check will go away when userns support is + * complete + */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } + /* * We hope to recycle these flags after 2.6.26 */ @@ -1581,8 +1595,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) err = -EINVAL; if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| - CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER| - CLONE_NEWNET)) + CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET)) goto bad_unshare_out; /* diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 1d3ef29a258..63598dca2d0 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -80,12 +80,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, goto out_pid; } - new_nsp->user_ns = copy_user_ns(flags, tsk->nsproxy->user_ns); - if (IS_ERR(new_nsp->user_ns)) { - err = PTR_ERR(new_nsp->user_ns); - goto out_user; - } - new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); if (IS_ERR(new_nsp->net_ns)) { err = PTR_ERR(new_nsp->net_ns); @@ -95,9 +89,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, return new_nsp; out_net: - if (new_nsp->user_ns) - put_user_ns(new_nsp->user_ns); -out_user: if (new_nsp->pid_ns) put_pid_ns(new_nsp->pid_ns); out_pid: @@ -130,7 +121,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) get_nsproxy(old_ns); if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | - CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET))) + CLONE_NEWPID | CLONE_NEWNET))) return 0; if (!capable(CAP_SYS_ADMIN)) { @@ -173,8 +164,6 @@ void free_nsproxy(struct nsproxy *ns) put_ipc_ns(ns->ipc_ns); if (ns->pid_ns) put_pid_ns(ns->pid_ns); - if (ns->user_ns) - put_user_ns(ns->user_ns); put_net(ns->net_ns); kmem_cache_free(nsproxy_cachep, ns); } @@ -189,7 +178,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, int err = 0; if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | - CLONE_NEWUSER | CLONE_NEWNET))) + CLONE_NEWNET))) return 0; if (!capable(CAP_SYS_ADMIN)) diff --git a/kernel/sys.c b/kernel/sys.c index ab735040468..ebe65c2c987 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -565,13 +565,13 @@ static int set_user(struct cred *new) { struct user_struct *new_user; - new_user = alloc_uid(current->nsproxy->user_ns, new->uid); + new_user = alloc_uid(current_user_ns(), new->uid); if (!new_user) return -EAGAIN; if (atomic_read(&new_user->processes) >= current->signal->rlim[RLIMIT_NPROC].rlim_cur && - new_user != current->nsproxy->user_ns->root_user) { + new_user != INIT_USER) { free_uid(new_user); return -EAGAIN; } diff --git a/kernel/user.c b/kernel/user.c index d476307dd4b..c0ef3a46443 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -20,9 +20,9 @@ struct user_namespace init_user_ns = { .kref = { - .refcount = ATOMIC_INIT(2), + .refcount = ATOMIC_INIT(1), }, - .root_user = &root_user, + .creator = &root_user, }; EXPORT_SYMBOL_GPL(init_user_ns); @@ -48,12 +48,14 @@ static struct kmem_cache *uid_cachep; */ static DEFINE_SPINLOCK(uidhash_lock); +/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */ struct user_struct root_user = { 
- .__count = ATOMIC_INIT(1), + .__count = ATOMIC_INIT(2), .processes = ATOMIC_INIT(1), .files = ATOMIC_INIT(0), .sigpending = ATOMIC_INIT(0), .locked_shm = 0, + .user_ns = &init_user_ns, #ifdef CONFIG_USER_SCHED .tg = &init_task_group, #endif @@ -314,12 +316,13 @@ done: * IRQ state (as stored in flags) is restored and uidhash_lock released * upon function exit. */ -static inline void free_user(struct user_struct *up, unsigned long flags) +static void free_user(struct user_struct *up, unsigned long flags) { /* restore back the count */ atomic_inc(&up->__count); spin_unlock_irqrestore(&uidhash_lock, flags); + put_user_ns(up->user_ns); INIT_WORK(&up->work, remove_user_sysfs_dir); schedule_work(&up->work); } @@ -335,13 +338,14 @@ static inline void uids_mutex_unlock(void) { } * IRQ state (as stored in flags) is restored and uidhash_lock released * upon function exit. */ -static inline void free_user(struct user_struct *up, unsigned long flags) +static void free_user(struct user_struct *up, unsigned long flags) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); sched_destroy_user(up); key_put(up->uid_keyring); key_put(up->session_keyring); + put_user_ns(up->user_ns); kmem_cache_free(uid_cachep, up); } @@ -357,7 +361,7 @@ struct user_struct *find_user(uid_t uid) { struct user_struct *ret; unsigned long flags; - struct user_namespace *ns = current->nsproxy->user_ns; + struct user_namespace *ns = current_user()->user_ns; spin_lock_irqsave(&uidhash_lock, flags); ret = uid_hash_find(uid, uidhashentry(ns, uid)); @@ -404,6 +408,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) if (sched_create_user(new) < 0) goto out_free_user; + new->user_ns = get_user_ns(ns); + if (uids_user_create(new)) goto out_destoy_sched; @@ -427,7 +433,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) up = new; } spin_unlock_irq(&uidhash_lock); - } uids_mutex_unlock(); @@ -436,6 +441,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) out_destoy_sched: sched_destroy_user(new); + put_user_ns(new->user_ns); out_free_user: kmem_cache_free(uid_cachep, new); out_unlock: @@ -443,33 +449,6 @@ out_unlock: return NULL; } -#ifdef CONFIG_USER_NS -void release_uids(struct user_namespace *ns) -{ - int i; - unsigned long flags; - struct hlist_head *head; - struct hlist_node *nd; - - spin_lock_irqsave(&uidhash_lock, flags); - /* - * collapse the chains so that the user_struct-s will - * be still alive, but not in hashes. subsequent free_uid() - * will free them. - */ - for (i = 0; i < UIDHASH_SZ; i++) { - head = ns->uidhash_table + i; - while (!hlist_empty(head)) { - nd = head->first; - hlist_del_init(nd); - } - } - spin_unlock_irqrestore(&uidhash_lock, flags); - - free_uid(ns->root_user); -} -#endif - static int __init uid_cache_init(void) { int n; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 0d9c51d6733..79084311ee5 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -9,70 +9,55 @@ #include #include #include +#include /* - * Clone a new ns copying an original user ns, setting refcount to 1 - * @old_ns: namespace to clone - * Return NULL on error (failure to kmalloc), new ns otherwise + * Create a new user namespace, deriving the creator from the user in the + * passed credentials, and replacing that user with the new root user for the + * new namespace. + * + * This is called by copy_creds(), which will finish setting the target task's + * credentials. 
*/ -static struct user_namespace *clone_user_ns(struct user_namespace *old_ns) +int create_user_ns(struct cred *new) { struct user_namespace *ns; - struct user_struct *new_user; - struct cred *new; + struct user_struct *root_user; int n; ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); if (!ns) - return ERR_PTR(-ENOMEM); + return -ENOMEM; kref_init(&ns->kref); for (n = 0; n < UIDHASH_SZ; ++n) INIT_HLIST_HEAD(ns->uidhash_table + n); - /* Insert new root user. */ - ns->root_user = alloc_uid(ns, 0); - if (!ns->root_user) { + /* Alloc new root user. */ + root_user = alloc_uid(ns, 0); + if (!root_user) { kfree(ns); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } - /* Reset current->user with a new one */ - new_user = alloc_uid(ns, current_uid()); - if (!new_user) { - free_uid(ns->root_user); - kfree(ns); - return ERR_PTR(-ENOMEM); - } - - /* Install the new user */ - new = prepare_creds(); - if (!new) { - free_uid(new_user); - free_uid(ns->root_user); - kfree(ns); - } - free_uid(new->user); - new->user = new_user; - commit_creds(new); - return ns; -} - -struct user_namespace * copy_user_ns(int flags, struct user_namespace *old_ns) -{ - struct user_namespace *new_ns; - - BUG_ON(!old_ns); - get_user_ns(old_ns); - - if (!(flags & CLONE_NEWUSER)) - return old_ns; + /* set the new root user in the credentials under preparation */ + ns->creator = new->user; + new->user = root_user; + new->uid = new->euid = new->suid = new->fsuid = 0; + new->gid = new->egid = new->sgid = new->fsgid = 0; + put_group_info(new->group_info); + new->group_info = get_group_info(&init_groups); +#ifdef CONFIG_KEYS + key_put(new->request_key_auth); + new->request_key_auth = NULL; +#endif + /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */ - new_ns = clone_user_ns(old_ns); + /* alloc_uid() incremented the userns refcount. Just set it to 1 */ + kref_set(&ns->kref, 1); - put_user_ns(old_ns); - return new_ns; + return 0; } void free_user_ns(struct kref *kref) @@ -80,7 +65,7 @@ void free_user_ns(struct kref *kref) struct user_namespace *ns; ns = container_of(kref, struct user_namespace, kref); - release_uids(ns); + free_uid(ns->creator); kfree(ns); } EXPORT_SYMBOL(free_user_ns); -- cgit v1.2.3-70-g09d2 From 180b65df7ba1e700e28aabfbddbad84b7beebe4b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 25 Nov 2008 16:51:45 -0800 Subject: fix warning in fs/dlm/netlink.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this warning: fs/dlm/netlink.c: In function ‘dlm_timeout_warn’: fs/dlm/netlink.c:131: warning: ‘send_skb’ may be used uninitialized in this function triggers because GCC does not recognize the (correct) error flow between prepare_data() and send_skb. Annotate it. Signed-off-by: Ingo Molnar Signed-off-by: David S. 
Miller --- fs/dlm/netlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index 18bda83cc89..aa2a5775a02 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -127,8 +127,8 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) void dlm_timeout_warn(struct dlm_lkb *lkb) { + struct sk_buff *uninitialized_var(send_skb); struct dlm_lock_data *data; - struct sk_buff *send_skb; size_t size; int rv; -- cgit v1.2.3-70-g09d2 From 5f3ea37c7716db4e894a480e0c18b24399595b6b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 30 Oct 2008 08:34:33 +0100 Subject: blktrace: port to tracepoints This was a forward port of work done by Mathieu Desnoyers, I changed it to encode the 'what' parameter on the tracepoint name, so that one can register interest in specific events and not on classes of events to then check the 'what' parameter. Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Jens Axboe Signed-off-by: Ingo Molnar --- block/Kconfig | 1 + block/blk-core.c | 33 ++--- block/blktrace.c | 332 ++++++++++++++++++++++++++++++++++++++++++- block/elevator.c | 7 +- drivers/md/dm.c | 6 +- fs/bio.c | 3 +- include/linux/blktrace_api.h | 172 +--------------------- include/trace/block.h | 60 ++++++++ mm/bounce.c | 3 +- 9 files changed, 418 insertions(+), 199 deletions(-) create mode 100644 include/trace/block.h (limited to 'fs') diff --git a/block/Kconfig b/block/Kconfig index 1ab7c15c8d7..290b219fad9 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -47,6 +47,7 @@ config BLK_DEV_IO_TRACE depends on SYSFS select RELAY select DEBUG_FS + select TRACEPOINTS help Say Y here if you want to be able to trace the block layer actions on a given queue. Tracing allows you to see any traffic happening diff --git a/block/blk-core.c b/block/blk-core.c index 10e8a64a5a5..04267d66a2b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "blk.h" @@ -205,7 +206,7 @@ void blk_plug_device(struct request_queue *q) if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); - blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); + trace_block_plug(q); } } EXPORT_SYMBOL(blk_plug_device); @@ -292,9 +293,7 @@ void blk_unplug_work(struct work_struct *work) struct request_queue *q = container_of(work, struct request_queue, unplug_work); - blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, - q->rq.count[READ] + q->rq.count[WRITE]); - + trace_block_unplug_io(q); q->unplug_fn(q); } @@ -302,9 +301,7 @@ void blk_unplug_timeout(unsigned long data) { struct request_queue *q = (struct request_queue *)data; - blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, - q->rq.count[READ] + q->rq.count[WRITE]); - + trace_block_unplug_timer(q); kblockd_schedule_work(q, &q->unplug_work); } @@ -314,9 +311,7 @@ void blk_unplug(struct request_queue *q) * devices don't necessarily have an ->unplug_fn defined */ if (q->unplug_fn) { - blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, - q->rq.count[READ] + q->rq.count[WRITE]); - + trace_block_unplug_io(q); q->unplug_fn(q); } } @@ -822,7 +817,7 @@ rq_starved: if (ioc_batching(q, ioc)) ioc->nr_batch_requests--; - blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); + trace_block_getrq(q, bio, rw); out: return rq; } @@ -848,7 +843,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags, prepare_to_wait_exclusive(&rl->wait[rw], &wait, TASK_UNINTERRUPTIBLE); - blk_add_trace_generic(q, 
bio, rw, BLK_TA_SLEEPRQ); + trace_block_sleeprq(q, bio, rw); __generic_unplug_device(q); spin_unlock_irq(q->queue_lock); @@ -928,7 +923,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) { blk_delete_timer(rq); blk_clear_rq_complete(rq); - blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); + trace_block_rq_requeue(q, rq); if (blk_rq_tagged(rq)) blk_queue_end_tag(q, rq); @@ -1167,7 +1162,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) if (!ll_back_merge_fn(q, req, bio)) break; - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); + trace_block_bio_backmerge(q, bio); req->biotail->bi_next = bio; req->biotail = bio; @@ -1186,7 +1181,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) if (!ll_front_merge_fn(q, req, bio)) break; - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); + trace_block_bio_frontmerge(q, bio); bio->bi_next = req->bio; req->bio = bio; @@ -1269,7 +1264,7 @@ static inline void blk_partition_remap(struct bio *bio) bio->bi_sector += p->start_sect; bio->bi_bdev = bdev->bd_contains; - blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio, + trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, bdev->bd_dev, bio->bi_sector, bio->bi_sector - p->start_sect); } @@ -1441,10 +1436,10 @@ end_io: goto end_io; if (old_sector != -1) - blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, + trace_block_remap(q, bio, old_dev, bio->bi_sector, old_sector); - blk_add_trace_bio(q, bio, BLK_TA_QUEUE); + trace_block_bio_queue(q, bio); old_sector = bio->bi_sector; old_dev = bio->bi_bdev->bd_dev; @@ -1656,7 +1651,7 @@ static int __end_that_request_first(struct request *req, int error, int total_bytes, bio_nbytes, next_idx = 0; struct bio *bio; - blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); + trace_block_rq_complete(req->q, req); /* * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual diff --git a/block/blktrace.c b/block/blktrace.c index 85049a7e7a1..b0a2cae886d 100644 --- a/block/blktrace.c +++ b/block/blktrace.c @@ -23,10 +23,18 @@ #include #include #include +#include #include static unsigned int blktrace_seq __read_mostly = 1; +/* Global reference count of probes */ +static DEFINE_MUTEX(blk_probe_mutex); +static atomic_t blk_probes_ref = ATOMIC_INIT(0); + +static int blk_register_tracepoints(void); +static void blk_unregister_tracepoints(void); + /* * Send out a notify message. */ @@ -119,7 +127,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK * The worker for the various blk_add_trace*() types. Fills out a * blk_io_trace structure and places it in a per-cpu subbuffer. 
*/ -void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, +static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, int rw, u32 what, int error, int pdu_len, void *pdu_data) { struct task_struct *tsk = current; @@ -177,8 +185,6 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(__blk_add_trace); - static struct dentry *blk_tree_root; static DEFINE_MUTEX(blk_tree_mutex); static unsigned int root_users; @@ -237,6 +243,10 @@ static void blk_trace_cleanup(struct blk_trace *bt) free_percpu(bt->sequence); free_percpu(bt->msg_data); kfree(bt); + mutex_lock(&blk_probe_mutex); + if (atomic_dec_and_test(&blk_probes_ref)) + blk_unregister_tracepoints(); + mutex_unlock(&blk_probe_mutex); } int blk_trace_remove(struct request_queue *q) @@ -428,6 +438,14 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, bt->pid = buts->pid; bt->trace_state = Blktrace_setup; + mutex_lock(&blk_probe_mutex); + if (atomic_add_return(1, &blk_probes_ref) == 1) { + ret = blk_register_tracepoints(); + if (ret) + goto probe_err; + } + mutex_unlock(&blk_probe_mutex); + ret = -EBUSY; old_bt = xchg(&q->blk_trace, bt); if (old_bt) { @@ -436,6 +454,9 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, } return 0; +probe_err: + atomic_dec(&blk_probes_ref); + mutex_unlock(&blk_probe_mutex); err: if (dir) blk_remove_tree(dir); @@ -562,3 +583,308 @@ void blk_trace_shutdown(struct request_queue *q) blk_trace_remove(q); } } + +/* + * blktrace probes + */ + +/** + * blk_add_trace_rq - Add a trace for a request oriented action + * @q: queue the io is for + * @rq: the source request + * @what: the action + * + * Description: + * Records an action against a request. Will log the bio offset + size. + * + **/ +static void blk_add_trace_rq(struct request_queue *q, struct request *rq, + u32 what) +{ + struct blk_trace *bt = q->blk_trace; + int rw = rq->cmd_flags & 0x03; + + if (likely(!bt)) + return; + + if (blk_discard_rq(rq)) + rw |= (1 << BIO_RW_DISCARD); + + if (blk_pc_request(rq)) { + what |= BLK_TC_ACT(BLK_TC_PC); + __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, + sizeof(rq->cmd), rq->cmd); + } else { + what |= BLK_TC_ACT(BLK_TC_FS); + __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, + rw, what, rq->errors, 0, NULL); + } +} + +static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(q, rq, BLK_TA_ABORT); +} + +static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(q, rq, BLK_TA_INSERT); +} + +static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(q, rq, BLK_TA_ISSUE); +} + +static void blk_add_trace_rq_requeue(struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); +} + +static void blk_add_trace_rq_complete(struct request_queue *q, struct request *rq) +{ + blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); +} + +/** + * blk_add_trace_bio - Add a trace for a bio oriented action + * @q: queue the io is for + * @bio: the source bio + * @what: the action + * + * Description: + * Records an action against a bio. Will log the bio offset + size. 
+ * + **/ +static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, + u32 what) +{ + struct blk_trace *bt = q->blk_trace; + + if (likely(!bt)) + return; + + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, + !bio_flagged(bio, BIO_UPTODATE), 0, NULL); +} + +static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio) +{ + blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); +} + +static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio) +{ + blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); +} + +static void blk_add_trace_bio_backmerge(struct request_queue *q, struct bio *bio) +{ + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); +} + +static void blk_add_trace_bio_frontmerge(struct request_queue *q, struct bio *bio) +{ + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); +} + +static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio) +{ + blk_add_trace_bio(q, bio, BLK_TA_QUEUE); +} + +static void blk_add_trace_getrq(struct request_queue *q, struct bio *bio, int rw) +{ + if (bio) + blk_add_trace_bio(q, bio, BLK_TA_GETRQ); + else { + struct blk_trace *bt = q->blk_trace; + + if (bt) + __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL); + } +} + + +static void blk_add_trace_sleeprq(struct request_queue *q, struct bio *bio, int rw) +{ + if (bio) + blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ); + else { + struct blk_trace *bt = q->blk_trace; + + if (bt) + __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, 0, 0, NULL); + } +} + +static void blk_add_trace_plug(struct request_queue *q) +{ + struct blk_trace *bt = q->blk_trace; + + if (bt) + __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); +} + +static void blk_add_trace_unplug_io(struct request_queue *q) +{ + struct blk_trace *bt = q->blk_trace; + + if (bt) { + unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; + __be64 rpdu = cpu_to_be64(pdu); + + __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, + sizeof(rpdu), &rpdu); + } +} + +static void blk_add_trace_unplug_timer(struct request_queue *q) +{ + struct blk_trace *bt = q->blk_trace; + + if (bt) { + unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; + __be64 rpdu = cpu_to_be64(pdu); + + __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, + sizeof(rpdu), &rpdu); + } +} + +static void blk_add_trace_split(struct request_queue *q, struct bio *bio, + unsigned int pdu) +{ + struct blk_trace *bt = q->blk_trace; + + if (bt) { + __be64 rpdu = cpu_to_be64(pdu); + + __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, + BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE), + sizeof(rpdu), &rpdu); + } +} + +/** + * blk_add_trace_remap - Add a trace for a remap operation + * @q: queue the io is for + * @bio: the source bio + * @dev: target device + * @from: source sector + * @to: target sector + * + * Description: + * Device mapper or raid target sometimes need to split a bio because + * it spans a stripe (or similar). Add a trace for that action. 
+ * + **/ +static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, + dev_t dev, sector_t from, sector_t to) +{ + struct blk_trace *bt = q->blk_trace; + struct blk_io_trace_remap r; + + if (likely(!bt)) + return; + + r.device = cpu_to_be32(dev); + r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev); + r.sector = cpu_to_be64(to); + + __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, + !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); +} + +/** + * blk_add_driver_data - Add binary message with driver-specific data + * @q: queue the io is for + * @rq: io request + * @data: driver-specific data + * @len: length of driver-specific data + * + * Description: + * Some drivers might want to write driver-specific data per request. + * + **/ +void blk_add_driver_data(struct request_queue *q, + struct request *rq, + void *data, size_t len) +{ + struct blk_trace *bt = q->blk_trace; + + if (likely(!bt)) + return; + + if (blk_pc_request(rq)) + __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, + rq->errors, len, data); + else + __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, + 0, BLK_TA_DRV_DATA, rq->errors, len, data); +} +EXPORT_SYMBOL_GPL(blk_add_driver_data); + +static int blk_register_tracepoints(void) +{ + int ret; + + ret = register_trace_block_rq_abort(blk_add_trace_rq_abort); + WARN_ON(ret); + ret = register_trace_block_rq_insert(blk_add_trace_rq_insert); + WARN_ON(ret); + ret = register_trace_block_rq_issue(blk_add_trace_rq_issue); + WARN_ON(ret); + ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue); + WARN_ON(ret); + ret = register_trace_block_rq_complete(blk_add_trace_rq_complete); + WARN_ON(ret); + ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce); + WARN_ON(ret); + ret = register_trace_block_bio_complete(blk_add_trace_bio_complete); + WARN_ON(ret); + ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); + WARN_ON(ret); + ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); + WARN_ON(ret); + ret = register_trace_block_bio_queue(blk_add_trace_bio_queue); + WARN_ON(ret); + ret = register_trace_block_getrq(blk_add_trace_getrq); + WARN_ON(ret); + ret = register_trace_block_sleeprq(blk_add_trace_sleeprq); + WARN_ON(ret); + ret = register_trace_block_plug(blk_add_trace_plug); + WARN_ON(ret); + ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer); + WARN_ON(ret); + ret = register_trace_block_unplug_io(blk_add_trace_unplug_io); + WARN_ON(ret); + ret = register_trace_block_split(blk_add_trace_split); + WARN_ON(ret); + ret = register_trace_block_remap(blk_add_trace_remap); + WARN_ON(ret); + return 0; +} + +static void blk_unregister_tracepoints(void) +{ + unregister_trace_block_remap(blk_add_trace_remap); + unregister_trace_block_split(blk_add_trace_split); + unregister_trace_block_unplug_io(blk_add_trace_unplug_io); + unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer); + unregister_trace_block_plug(blk_add_trace_plug); + unregister_trace_block_sleeprq(blk_add_trace_sleeprq); + unregister_trace_block_getrq(blk_add_trace_getrq); + unregister_trace_block_bio_queue(blk_add_trace_bio_queue); + unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); + unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); + unregister_trace_block_bio_complete(blk_add_trace_bio_complete); + unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce); + unregister_trace_block_rq_complete(blk_add_trace_rq_complete); + 
unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue); + unregister_trace_block_rq_issue(blk_add_trace_rq_issue); + unregister_trace_block_rq_insert(blk_add_trace_rq_insert); + unregister_trace_block_rq_abort(blk_add_trace_rq_abort); + + tracepoint_synchronize_unregister(); +} diff --git a/block/elevator.c b/block/elevator.c index 9ac82dde99d..530fcfe2ef0 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -586,7 +587,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) unsigned ordseq; int unplug_it = 1; - blk_add_trace_rq(q, rq, BLK_TA_INSERT); + trace_block_rq_insert(q, rq); rq->q = q; @@ -772,7 +773,7 @@ struct request *elv_next_request(struct request_queue *q) * not be passed by new incoming requests */ rq->cmd_flags |= REQ_STARTED; - blk_add_trace_rq(q, rq, BLK_TA_ISSUE); + trace_block_rq_issue(q, rq); } if (!q->boundary_rq || q->boundary_rq == rq) { @@ -921,7 +922,7 @@ void elv_abort_queue(struct request_queue *q) while (!list_empty(&q->queue_head)) { rq = list_entry_rq(q->queue_head.next); rq->cmd_flags |= REQ_QUIET; - blk_add_trace_rq(q, rq, BLK_TA_ABORT); + trace_block_rq_abort(q, rq); __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); } } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index c99e4728ff4..d23fda17816 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -21,6 +21,7 @@ #include #include #include +#include #define DM_MSG_PREFIX "core" @@ -504,8 +505,7 @@ static void dec_pending(struct dm_io *io, int error) end_io_acct(io); if (io->error != DM_ENDIO_REQUEUE) { - blk_add_trace_bio(io->md->queue, io->bio, - BLK_TA_COMPLETE); + trace_block_bio_complete(io->md->queue, io->bio); bio_endio(io->bio, io->error); } @@ -598,7 +598,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ - blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, + trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, tio->io->bio->bi_bdev->bd_dev, clone->bi_sector, sector); diff --git a/fs/bio.c b/fs/bio.c index 77a55bcceed..060859c6909 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -26,6 +26,7 @@ #include #include #include +#include #include /* for struct sg_iovec */ static struct kmem_cache *bio_slab __read_mostly; @@ -1263,7 +1264,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) if (!bp) return bp; - blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi, + trace_block_split(bdev_get_queue(bi->bi_bdev), bi, bi->bi_sector + first_sectors); BUG_ON(bi->bi_vcnt != 1); diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index bdf505d33e7..1dba3493d52 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h @@ -160,7 +160,6 @@ struct blk_trace { extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); extern void blk_trace_shutdown(struct request_queue *); -extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *); extern int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, struct blk_user_trace_setup *buts); extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); @@ -186,168 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); } while (0) #define BLK_TN_MAX_MSG 128 -/** - * blk_add_trace_rq - Add a trace for a request oriented action - * @q: queue the io is for - * @rq: the source request - * @what: the action - * - * 
Description: - * Records an action against a request. Will log the bio offset + size. - * - **/ -static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq, - u32 what) -{ - struct blk_trace *bt = q->blk_trace; - int rw = rq->cmd_flags & 0x03; - - if (likely(!bt)) - return; - - if (blk_discard_rq(rq)) - rw |= (1 << BIO_RW_DISCARD); - - if (blk_pc_request(rq)) { - what |= BLK_TC_ACT(BLK_TC_PC); - __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd); - } else { - what |= BLK_TC_ACT(BLK_TC_FS); - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL); - } -} - -/** - * blk_add_trace_bio - Add a trace for a bio oriented action - * @q: queue the io is for - * @bio: the source bio - * @what: the action - * - * Description: - * Records an action against a bio. Will log the bio offset + size. - * - **/ -static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio, - u32 what) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL); -} - -/** - * blk_add_trace_generic - Add a trace for a generic action - * @q: queue the io is for - * @bio: the source bio - * @rw: the data direction - * @what: the action - * - * Description: - * Records a simple trace - * - **/ -static inline void blk_add_trace_generic(struct request_queue *q, - struct bio *bio, int rw, u32 what) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - if (bio) - blk_add_trace_bio(q, bio, what); - else - __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL); -} - -/** - * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload - * @q: queue the io is for - * @what: the action - * @bio: the source bio - * @pdu: the integer payload - * - * Description: - * Adds a trace with some integer payload. This might be an unplug - * option given as the action, with the depth at unplug time given - * as the payload - * - **/ -static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what, - struct bio *bio, unsigned int pdu) -{ - struct blk_trace *bt = q->blk_trace; - __be64 rpdu = cpu_to_be64(pdu); - - if (likely(!bt)) - return; - - if (bio) - __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu); - else - __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu); -} - -/** - * blk_add_trace_remap - Add a trace for a remap operation - * @q: queue the io is for - * @bio: the source bio - * @dev: target device - * @from: source sector - * @to: target sector - * - * Description: - * Device mapper or raid target sometimes need to split a bio because - * it spans a stripe (or similar). Add a trace for that action. 
- * - **/ -static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, - dev_t dev, sector_t from, sector_t to) -{ - struct blk_trace *bt = q->blk_trace; - struct blk_io_trace_remap r; - - if (likely(!bt)) - return; - - r.device = cpu_to_be32(dev); - r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev); - r.sector = cpu_to_be64(to); - - __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); -} - -/** - * blk_add_driver_data - Add binary message with driver-specific data - * @q: queue the io is for - * @rq: io request - * @data: driver-specific data - * @len: length of driver-specific data - * - * Description: - * Some drivers might want to write driver-specific data per request. - * - **/ -static inline void blk_add_driver_data(struct request_queue *q, - struct request *rq, - void *data, size_t len) -{ - struct blk_trace *bt = q->blk_trace; - - if (likely(!bt)) - return; - - if (blk_pc_request(rq)) - __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, - rq->errors, len, data); - else - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, - 0, BLK_TA_DRV_DATA, rq->errors, len, data); -} - +extern void blk_add_driver_data(struct request_queue *q, struct request *rq, + void *data, size_t len); extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, char __user *arg); extern int blk_trace_startstop(struct request_queue *q, int start); @@ -356,13 +195,8 @@ extern int blk_trace_remove(struct request_queue *q); #else /* !CONFIG_BLK_DEV_IO_TRACE */ #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) #define blk_trace_shutdown(q) do { } while (0) -#define blk_add_trace_rq(q, rq, what) do { } while (0) -#define blk_add_trace_bio(q, rq, what) do { } while (0) -#define blk_add_trace_generic(q, rq, rw, what) do { } while (0) -#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0) -#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0) -#define blk_add_driver_data(q, rq, data, len) do {} while (0) #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) +#define blk_add_driver_data(q, rq, data, len) do {} while (0) #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) #define blk_trace_startstop(q, start) (-ENOTTY) #define blk_trace_remove(q) (-ENOTTY) diff --git a/include/trace/block.h b/include/trace/block.h new file mode 100644 index 00000000000..3cc2675ebf0 --- /dev/null +++ b/include/trace/block.h @@ -0,0 +1,60 @@ +#ifndef _TRACE_BLOCK_H +#define _TRACE_BLOCK_H + +#include +#include + +DEFINE_TRACE(block_rq_abort, + TPPROTO(struct request_queue *q, struct request *rq), + TPARGS(q, rq)); +DEFINE_TRACE(block_rq_insert, + TPPROTO(struct request_queue *q, struct request *rq), + TPARGS(q, rq)); +DEFINE_TRACE(block_rq_issue, + TPPROTO(struct request_queue *q, struct request *rq), + TPARGS(q, rq)); +DEFINE_TRACE(block_rq_requeue, + TPPROTO(struct request_queue *q, struct request *rq), + TPARGS(q, rq)); +DEFINE_TRACE(block_rq_complete, + TPPROTO(struct request_queue *q, struct request *rq), + TPARGS(q, rq)); +DEFINE_TRACE(block_bio_bounce, + TPPROTO(struct request_queue *q, struct bio *bio), + TPARGS(q, bio)); +DEFINE_TRACE(block_bio_complete, + TPPROTO(struct request_queue *q, struct bio *bio), + TPARGS(q, bio)); +DEFINE_TRACE(block_bio_backmerge, + TPPROTO(struct request_queue *q, struct bio *bio), + TPARGS(q, bio)); +DEFINE_TRACE(block_bio_frontmerge, + TPPROTO(struct request_queue *q, struct bio *bio), + TPARGS(q, bio)); +DEFINE_TRACE(block_bio_queue, + TPPROTO(struct 
request_queue *q, struct bio *bio), + TPARGS(q, bio)); +DEFINE_TRACE(block_getrq, + TPPROTO(struct request_queue *q, struct bio *bio, int rw), + TPARGS(q, bio, rw)); +DEFINE_TRACE(block_sleeprq, + TPPROTO(struct request_queue *q, struct bio *bio, int rw), + TPARGS(q, bio, rw)); +DEFINE_TRACE(block_plug, + TPPROTO(struct request_queue *q), + TPARGS(q)); +DEFINE_TRACE(block_unplug_timer, + TPPROTO(struct request_queue *q), + TPARGS(q)); +DEFINE_TRACE(block_unplug_io, + TPPROTO(struct request_queue *q), + TPARGS(q)); +DEFINE_TRACE(block_split, + TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), + TPARGS(q, bio, pdu)); +DEFINE_TRACE(block_remap, + TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, + sector_t from, sector_t to), + TPARGS(q, bio, dev, from, to)); + +#endif diff --git a/mm/bounce.c b/mm/bounce.c index 06722c40305..bd1caaa582b 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #define POOL_SIZE 64 @@ -222,7 +223,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, if (!bio) return; - blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE); + trace_block_bio_bounce(q, *bio_orig); /* * at least one page was bounced, fill in possible non-highmem -- cgit v1.2.3-70-g09d2 From 0bfc24559d7945506184d86739fe365a181f06b7 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Wed, 26 Nov 2008 11:59:56 +0100 Subject: blktrace: port to tracepoints, update Port to the new tracepoints API: split DEFINE_TRACE() and DECLARE_TRACE() sites. Spread them out to the usage sites, as suggested by Mathieu Desnoyers. Signed-off-by: Ingo Molnar Acked-by: Mathieu Desnoyers --- block/blk-core.c | 13 ++++++++ block/elevator.c | 5 +++ drivers/md/dm.c | 2 ++ fs/bio.c | 2 ++ include/trace/block.h | 84 ++++++++++++++++++++++++++++++--------------------- mm/bounce.c | 2 ++ 6 files changed, 74 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/block/blk-core.c b/block/blk-core.c index 04267d66a2b..0c06cf5aaaf 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -32,6 +32,19 @@ #include "blk.h" +DEFINE_TRACE(block_plug); +DEFINE_TRACE(block_unplug_io); +DEFINE_TRACE(block_unplug_timer); +DEFINE_TRACE(block_getrq); +DEFINE_TRACE(block_sleeprq); +DEFINE_TRACE(block_rq_requeue); +DEFINE_TRACE(block_bio_backmerge); +DEFINE_TRACE(block_bio_frontmerge); +DEFINE_TRACE(block_bio_queue); +DEFINE_TRACE(block_rq_complete); +DEFINE_TRACE(block_remap); /* Also used in drivers/md/dm.c */ +EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); + static int __make_request(struct request_queue *q, struct bio *bio); /* diff --git a/block/elevator.c b/block/elevator.c index 530fcfe2ef0..e5677fe4f41 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -42,6 +42,8 @@ static DEFINE_SPINLOCK(elv_list_lock); static LIST_HEAD(elv_list); +DEFINE_TRACE(block_rq_abort); + /* * Merge hash stuff. */ @@ -53,6 +55,9 @@ static const int elv_hash_shift = 6; #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) +DEFINE_TRACE(block_rq_insert); +DEFINE_TRACE(block_rq_issue); + /* * Query io scheduler to see if the current process issuing bio may be * merged with rq. 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d23fda17816..343094c3fee 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -52,6 +52,8 @@ struct dm_target_io { union map_info info; }; +DEFINE_TRACE(block_bio_complete); + union map_info *dm_get_mapinfo(struct bio *bio) { if (bio && bio->bi_private) diff --git a/fs/bio.c b/fs/bio.c index 060859c6909..df99c882b80 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -29,6 +29,8 @@ #include #include /* for struct sg_iovec */ +DEFINE_TRACE(block_split); + static struct kmem_cache *bio_slab __read_mostly; static mempool_t *bio_split_pool __read_mostly; diff --git a/include/trace/block.h b/include/trace/block.h index 3cc2675ebf0..25c6a1fd5b7 100644 --- a/include/trace/block.h +++ b/include/trace/block.h @@ -4,57 +4,73 @@ #include #include -DEFINE_TRACE(block_rq_abort, +DECLARE_TRACE(block_rq_abort, TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); -DEFINE_TRACE(block_rq_insert, + TPARGS(q, rq)); + +DECLARE_TRACE(block_rq_insert, TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); -DEFINE_TRACE(block_rq_issue, + TPARGS(q, rq)); + +DECLARE_TRACE(block_rq_issue, TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); -DEFINE_TRACE(block_rq_requeue, + TPARGS(q, rq)); + +DECLARE_TRACE(block_rq_requeue, TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); -DEFINE_TRACE(block_rq_complete, + TPARGS(q, rq)); + +DECLARE_TRACE(block_rq_complete, TPPROTO(struct request_queue *q, struct request *rq), - TPARGS(q, rq)); -DEFINE_TRACE(block_bio_bounce, + TPARGS(q, rq)); + +DECLARE_TRACE(block_bio_bounce, TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); -DEFINE_TRACE(block_bio_complete, + TPARGS(q, bio)); + +DECLARE_TRACE(block_bio_complete, TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); -DEFINE_TRACE(block_bio_backmerge, + TPARGS(q, bio)); + +DECLARE_TRACE(block_bio_backmerge, TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); -DEFINE_TRACE(block_bio_frontmerge, + TPARGS(q, bio)); + +DECLARE_TRACE(block_bio_frontmerge, TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); -DEFINE_TRACE(block_bio_queue, + TPARGS(q, bio)); + +DECLARE_TRACE(block_bio_queue, TPPROTO(struct request_queue *q, struct bio *bio), - TPARGS(q, bio)); -DEFINE_TRACE(block_getrq, + TPARGS(q, bio)); + +DECLARE_TRACE(block_getrq, TPPROTO(struct request_queue *q, struct bio *bio, int rw), - TPARGS(q, bio, rw)); -DEFINE_TRACE(block_sleeprq, + TPARGS(q, bio, rw)); + +DECLARE_TRACE(block_sleeprq, TPPROTO(struct request_queue *q, struct bio *bio, int rw), - TPARGS(q, bio, rw)); -DEFINE_TRACE(block_plug, + TPARGS(q, bio, rw)); + +DECLARE_TRACE(block_plug, TPPROTO(struct request_queue *q), - TPARGS(q)); -DEFINE_TRACE(block_unplug_timer, + TPARGS(q)); + +DECLARE_TRACE(block_unplug_timer, TPPROTO(struct request_queue *q), - TPARGS(q)); -DEFINE_TRACE(block_unplug_io, + TPARGS(q)); + +DECLARE_TRACE(block_unplug_io, TPPROTO(struct request_queue *q), - TPARGS(q)); -DEFINE_TRACE(block_split, + TPARGS(q)); + +DECLARE_TRACE(block_split, TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), - TPARGS(q, bio, pdu)); -DEFINE_TRACE(block_remap, + TPARGS(q, bio, pdu)); + +DECLARE_TRACE(block_remap, TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, sector_t from, sector_t to), - TPARGS(q, bio, dev, from, to)); + TPARGS(q, bio, dev, from, to)); #endif diff --git a/mm/bounce.c b/mm/bounce.c index bd1caaa582b..bf0cf7c8387 100644 --- a/mm/bounce.c 
+++ b/mm/bounce.c @@ -22,6 +22,8 @@ static mempool_t *page_pool, *isa_page_pool; +DEFINE_TRACE(block_bio_bounce); + #ifdef CONFIG_HIGHMEM static __init int init_emergency_pool(void) { -- cgit v1.2.3-70-g09d2 From 604094f4615180f71da799e7e5b191f5c2a42a28 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 28 Nov 2008 18:03:22 +0100 Subject: vfs, seqfile: export mangle_path() generally mangle_path() is trivial enough to make export restrictions on it pointless - so change the export from EXPORT_SYMBOL_GPL to EXPORT_SYMBOL. Signed-off-by: Ingo Molnar Acked-by: Al Viro --- fs/seq_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index f03220d7891..16c211558c2 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -387,7 +387,7 @@ char *mangle_path(char *s, char *p, char *esc) } return NULL; } -EXPORT_SYMBOL_GPL(mangle_path); +EXPORT_SYMBOL(mangle_path); /* * return the absolute path of 'dentry' residing in mount 'mnt'. -- cgit v1.2.3-70-g09d2 From bac8dca9f9b1dfcf9c4ecb4f9ca17185b828cc20 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:31 +1100 Subject: [XFS] fix NULL pointer dereference in xfs_log_force_umount xfs_log_force_umount may be called very early during log recovery. If we fail a buffer read in xlog_recover_do_inode_trans we abort the mount. But at that point log recovery has started delayed writeback of inode buffers. As part of the aborted mount we try to flush out all delwri buffers, but at that point we have already freed the superblock, and set mp->m_sb_bp to NULL, and xfs_log_force_umount, which gets called after the inode buffer writeback, trips over it. Make xfs_log_force_umount a little more careful when accessing mp->m_sb_bp to avoid this. Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Signed-off-by: Niv Sardi --- fs/xfs/xfs_log.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 4bf44aef644..8a5b05536a2 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -3569,7 +3569,8 @@ xfs_log_force_umount( if (!log || log->l_flags & XLOG_ACTIVE_RECOVERY) { mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; - XFS_BUF_DONE(mp->m_sb_bp); + if (mp->m_sb_bp) + XFS_BUF_DONE(mp->m_sb_bp); return 0; } @@ -3590,7 +3591,9 @@ xfs_log_force_umount( spin_lock(&log->l_icloglock); spin_lock(&log->l_grant_lock); mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; - XFS_BUF_DONE(mp->m_sb_bp); + if (mp->m_sb_bp) + XFS_BUF_DONE(mp->m_sb_bp); + /* * This flag is sort of redundant because of the mount flag, but * it's good to maintain the separation between the log and the rest -- cgit v1.2.3-70-g09d2 From f999a5bf3fa6b3d11334c3ba1e9dcfed5ff9f8a6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:32 +1100 Subject: [XFS] wire up ->open for directories Currently there's no ->open method set for directories on XFS. That means we don't perform any check for opening too large directories without O_LARGEFILE, we don't check for shut down filesystems, and we don't actually do the readahead for the first block in the directory. Instead of just setting the directories' open routine to xfs_file_open we merge the shutdown check directly into xfs_file_open and create a new xfs_dir_open that first calls xfs_file_open and then performs the readahead for block 0.
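A minimal userspace sketch of the behaviour described above (not part of the patch; the mount-point path is hypothetical, and the kernel-side names in the comments refer to the diff that follows): opening a directory now performs the O_LARGEFILE and shutdown checks and primes the cache for the readdir that almost always comes next.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	/* opendir() reaches ->open = xfs_dir_open(), which calls
	 * xfs_file_open() for the checks and then reads ahead
	 * directory block 0. */
	DIR *dir = opendir("/mnt/xfs/some-directory");
	struct dirent *de;

	if (!dir)
		return 1;

	/* By the time the first getdents() is issued here, block 0 is
	 * usually already cached thanks to the readahead. */
	while ((de = readdir(dir)) != NULL)
		printf("%s\n", de->d_name);

	closedir(dir);
	return 0;
}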
(First sent on September 29th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_file.c | 34 +++++++++++++++++++++++++++++++--- fs/xfs/xfs_vnodeops.c | 22 ---------------------- fs/xfs/xfs_vnodeops.h | 1 - 3 files changed, 31 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 3fee790f138..72fc8d8c8bc 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -38,6 +38,7 @@ #include "xfs_rw.h" #include "xfs_ioctl32.h" #include "xfs_vnodeops.h" +#include "xfs_da_btree.h" #include #include @@ -169,11 +170,37 @@ xfs_file_splice_write_invis( STATIC int xfs_file_open( struct inode *inode, - struct file *filp) + struct file *file) { - if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) + if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EFBIG; - return -xfs_open(XFS_I(inode)); + if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) + return -EIO; + return 0; +} + +STATIC int +xfs_dir_open( + struct inode *inode, + struct file *file) +{ + struct xfs_inode *ip = XFS_I(inode); + int mode; + int error; + + error = xfs_file_open(inode, file); + if (error) + return error; + + /* + * If there are any blocks, read-ahead block 0 as we're almost + * certain to have the next operation be a read there. + */ + mode = xfs_ilock_map_shared(ip); + if (ip->i_d.di_nextents > 0) + xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); + xfs_iunlock(ip, mode); + return 0; } STATIC int @@ -345,6 +372,7 @@ const struct file_operations xfs_invis_file_operations = { const struct file_operations xfs_dir_file_operations = { + .open = xfs_dir_open, .read = generic_read_dir, .readdir = xfs_file_readdir, .llseek = generic_file_llseek, diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 0574aadc4d3..c055bdb11cb 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -53,28 +53,6 @@ #include "xfs_filestream.h" #include "xfs_vnodeops.h" -int -xfs_open( - xfs_inode_t *ip) -{ - int mode; - - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) - return XFS_ERROR(EIO); - - /* - * If it's a directory with any blocks, read-ahead block 0 - * as we're almost certain to have the next operation be a read there. - */ - if (S_ISDIR(ip->i_d.di_mode) && ip->i_d.di_nextents > 0) { - mode = xfs_ilock_map_shared(ip); - if (ip->i_d.di_nextents > 0) - (void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); - xfs_iunlock(ip, mode); - } - return 0; -} - int xfs_setattr( struct xfs_inode *ip, diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index b1ae8e3f404..a559400aeae 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -14,7 +14,6 @@ struct xfs_inode; struct xfs_iomap; -int xfs_open(struct xfs_inode *ip); int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags); #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ -- cgit v1.2.3-70-g09d2 From 6c31b93a14a453c8756ffd228e24910ffdf30c5d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:32 +1100 Subject: [XFS] allow inode64 mount option on 32 bit systems Now that we've stopped using the Linux inode cache we can trivially support the inode64 mount option on 32bit architectures.
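A small, hypothetical userspace sketch of the compatibility caveat discussed in the next paragraph (the target path is an assumption): an application built against the legacy 32-bit stat interface gets EOVERFLOW once an inode number no longer fits in 32 bits, while the same program built with -D_FILE_OFFSET_BITS=64 prints the full number.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2)
		return 2;

	if (stat(argv[1], &st) < 0) {
		/* With the old 32-bit stat ABI this fails with EOVERFLOW
		 * when st_ino needs more than 32 bits. */
		fprintf(stderr, "stat: %s\n", strerror(errno));
		return 1;
	}

	printf("inode %llu\n", (unsigned long long)st.st_ino);
	return 0;
}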
As far as the kernel and most userspace is concerned this works perfectly, but applications still using really old stat and readdir interfaces will get an EOVERFLOW error when hitting an inode number not fitting into 32 bits (that problem of course also exists when using these applications on a 64bit kernel). Note that because inode64 is simply a mount option we can currently mount a filesystem having > 32 bit inode numbers and cause a variety of problems; all this is solved by this patch, which enables XFS_BIG_INUMS even when inode64 is not used. (First sent on October 18th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_linux.h | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 77d6ddcaf54..cfe16a36a1d 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -21,18 +21,12 @@ #include /* - * Some types are conditional depending on the target system. * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. - * XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well - * as requiring XFS_BIG_BLKNOS to be set. + * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set. */ #if defined(CONFIG_LBD) || (BITS_PER_LONG == 64) # define XFS_BIG_BLKNOS 1 -# if BITS_PER_LONG == 64 -# define XFS_BIG_INUMS 1 -# else -# define XFS_BIG_INUMS 0 -# endif +# define XFS_BIG_INUMS 1 #else # define XFS_BIG_BLKNOS 0 # define XFS_BIG_INUMS 0 -- cgit v1.2.3-70-g09d2 From 65795910c1b798f8a47181b48cf6eb163a15e778 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:33 +1100 Subject: [XFS] fix spurious gcc warnings Some recent gcc warnings don't like passing string variables to printf-like functions without using at least a "%s" format string. Change the two occurrences of that in xfs to please gcc. Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_stats.c | 2 +- fs/xfs/linux-2.6/xfs_super.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c index 64f4ec90b8b..c3526d445f6 100644 --- a/fs/xfs/linux-2.6/xfs_stats.c +++ b/fs/xfs/linux-2.6/xfs_stats.c @@ -61,7 +61,7 @@ xfs_read_xfsstats( /* Loop over all stats groups */ for (i=j=len = 0; i < ARRAY_SIZE(xstats); i++) { - len += sprintf(buffer + len, xstats[i].desc); + len += sprintf(buffer + len, "%s", xstats[i].desc); /* inner loop does each group */ while (j < xstats[i].endpoint) { val = 0; diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c3d004bc462..cc5e07e3e7a 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1848,10 +1848,9 @@ STATIC int __init init_xfs_fs(void) { int error; - static char message[] __initdata = KERN_INFO \ XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"; - printk(message); + printk(KERN_INFO XFS_VERSION_STRING " with " + XFS_BUILD_OPTIONS " enabled\n"); ktrace_init(64); vn_init(); -- cgit v1.2.3-70-g09d2 From 2e6560929d8ab4b650fecc3a87013852b34f0922 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 28 Nov 2008 14:23:33 +1100 Subject: [XFS] fix error inversion problems with data flushing XFS gets the sign of the error wrong in several places when gathering the error from generic linux functions. These functions return negative error values, while the core XFS code returns positive error values.
Hence when XFS inverts the error to be returned to the VFS, it can incorrectly invert a negative error and this error will be ignored by the syscall return. Fix all the problems related to calling filemap_* functions. Problem initially identified by Nick Piggin in xfs_fsync(). Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_fs_subr.c | 23 ++++++++++++++++++++--- fs/xfs/linux-2.6/xfs_lrw.c | 2 +- fs/xfs/linux-2.6/xfs_super.c | 13 +++++++++---- fs/xfs/xfs_vnodeops.c | 2 +- fs/xfs/xfs_vnodeops.h | 1 + 5 files changed, 32 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c index 36caa6d957d..5aeb7777696 100644 --- a/fs/xfs/linux-2.6/xfs_fs_subr.c +++ b/fs/xfs/linux-2.6/xfs_fs_subr.c @@ -24,6 +24,10 @@ int fs_noerr(void) { return 0; } int fs_nosys(void) { return ENOSYS; } void fs_noval(void) { return; } +/* + * note: all filemap functions return negative error codes. These + * need to be inverted before returning to the xfs core functions. + */ void xfs_tosspages( xfs_inode_t *ip, @@ -53,7 +57,7 @@ xfs_flushinval_pages( if (!ret) truncate_inode_pages(mapping, first); } - return ret; + return -ret; } int @@ -72,10 +76,23 @@ xfs_flush_pages( xfs_iflags_clear(ip, XFS_ITRUNCATED); ret = filemap_fdatawrite(mapping); if (flags & XFS_B_ASYNC) - return ret; + return -ret; ret2 = filemap_fdatawait(mapping); if (!ret) ret = ret2; } - return ret; + return -ret; +} + +int +xfs_wait_on_pages( + xfs_inode_t *ip, + xfs_off_t first, + xfs_off_t last) +{ + struct address_space *mapping = VFS_I(ip)->i_mapping; + + if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) + return -filemap_fdatawait(mapping); + return 0; } diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 1957e5357d0..4959c874499 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -243,7 +243,7 @@ xfs_read( if (unlikely(ioflags & IO_ISDIRECT)) { if (inode->i_mapping->nrpages) - ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), + ret = -xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); mutex_unlock(&inode->i_mutex); if (ret) { diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index cc5e07e3e7a..ae92290e3c1 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -990,21 +990,26 @@ xfs_fs_write_inode( struct inode *inode, int sync) { + struct xfs_inode *ip = XFS_I(inode); int error = 0; int flags = 0; - xfs_itrace_entry(XFS_I(inode)); + xfs_itrace_entry(ip); if (sync) { - filemap_fdatawait(inode->i_mapping); + error = xfs_wait_on_pages(ip, 0, -1); + if (error) + goto out_error; flags |= FLUSH_SYNC; } - error = xfs_inode_flush(XFS_I(inode), flags); + error = xfs_inode_flush(ip, flags); + +out_error: /* * if we failed to write out the inode then mark * it dirty again so we'll try again later. */ if (error) - xfs_mark_inode_dirty_sync(XFS_I(inode)); + xfs_mark_inode_dirty_sync(ip); return -error; } diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index c055bdb11cb..f26b038004a 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -681,7 +681,7 @@ xfs_fsync( return XFS_ERROR(EIO); /* capture size updates in I/O completion before writing the inode. 
*/ - error = filemap_fdatawait(VFS_I(ip)->i_mapping); + error = xfs_wait_on_pages(ip, 0, -1); if (error) return XFS_ERROR(error); diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index a559400aeae..2a45b00ad32 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -75,5 +75,6 @@ int xfs_flushinval_pages(struct xfs_inode *ip, xfs_off_t first, xfs_off_t last, int fiopt); int xfs_flush_pages(struct xfs_inode *ip, xfs_off_t first, xfs_off_t last, uint64_t flags, int fiopt); +int xfs_wait_on_pages(struct xfs_inode *ip, xfs_off_t first, xfs_off_t last); #endif /* _XFS_VNODEOPS_H */ -- cgit v1.2.3-70-g09d2 From 0924b585fc49bf371bc700c23e516a538bf589af Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 28 Nov 2008 14:23:34 +1100 Subject: [XFS] fix uninitialised variable bug in dquot release. gcc is warning about an uninitialised variable in xfs_growfs_rt(). This is a false positive. Fix it by changing the scope of the transaction pointer to wholly within the internal loop inside the function. While there, preemptively change xfs_growfs_rt_alloc() in the same way as it has exactly the same structure as xfs_growfs_rt() but gcc is not warning about it. Yet. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/xfs_rtalloc.c | 39 ++++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index e2f68de1615..f18b9b28179 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -85,7 +85,6 @@ xfs_growfs_rt_alloc( { xfs_fileoff_t bno; /* block number in file */ xfs_buf_t *bp; /* temporary buffer for zeroing */ - int cancelflags; /* flags for xfs_trans_cancel */ int committed; /* transaction committed flag */ xfs_daddr_t d; /* disk block address */ int error; /* error return value */ @@ -96,15 +95,16 @@ xfs_growfs_rt_alloc( xfs_bmbt_irec_t map; /* block map output */ int nmap; /* number of block maps */ int resblks; /* space reservation */ - xfs_trans_t *tp; /* transaction pointer */ /* * Allocate space to the file, as necessary. */ while (oblocks < nblocks) { + int cancelflags = 0; + xfs_trans_t *tp; + tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC); resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks); - cancelflags = 0; /* * Reserve space & log for one extent added to the file. */ @@ -171,7 +171,9 @@ xfs_growfs_rt_alloc( mp->m_bsize, 0); if (bp == NULL) { error = XFS_ERROR(EIO); - goto error_cancel; +error_cancel: + xfs_trans_cancel(tp, cancelflags); + goto error; } memset(XFS_BUF_PTR(bp), 0, mp->m_sb.sb_blocksize); xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1); @@ -188,8 +190,6 @@ xfs_growfs_rt_alloc( oblocks = map.br_startoff + map.br_blockcount; } return 0; -error_cancel: - xfs_trans_cancel(tp, cancelflags); error: return error; } @@ -1856,7 +1856,6 @@ xfs_growfs_rt( { xfs_rtblock_t bmbno; /* bitmap block number */ xfs_buf_t *bp; /* temporary buffer */ - int cancelflags; /* flags for xfs_trans_cancel */ int error; /* error return value */ xfs_inode_t *ip; /* bitmap inode, used as lock */ xfs_mount_t *nmp; /* new (fake) mount structure */ @@ -1872,10 +1871,8 @@ xfs_growfs_rt( xfs_extlen_t rsumblocks; /* current number of rt summary blks */ xfs_sb_t *sbp; /* old superblock */ xfs_fsblock_t sumbno; /* summary block number */ - xfs_trans_t *tp; /* transaction pointer */ sbp = &mp->m_sb; - cancelflags = 0; /* * Initial error checking. 
*/ @@ -1942,6 +1939,9 @@ xfs_growfs_rt( ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0); bmbno < nrbmblocks; bmbno++) { + xfs_trans_t *tp; + int cancelflags = 0; + *nmp = *mp; nsbp = &nmp->m_sb; /* @@ -1967,16 +1967,15 @@ xfs_growfs_rt( * Start a transaction, get the log reservation. */ tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE); - cancelflags = 0; if ((error = xfs_trans_reserve(tp, 0, XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0))) - break; + goto error_cancel; /* * Lock out other callers by grabbing the bitmap inode lock. */ if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, XFS_ILOCK_EXCL, &ip))) - break; + goto error_cancel; ASSERT(ip == mp->m_rbmip); /* * Update the bitmap inode's size. @@ -1990,7 +1989,7 @@ xfs_growfs_rt( */ if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0, XFS_ILOCK_EXCL, &ip))) - break; + goto error_cancel; ASSERT(ip == mp->m_rsumip); /* * Update the summary inode's size. @@ -2005,7 +2004,7 @@ xfs_growfs_rt( mp->m_rsumlevels != nmp->m_rsumlevels) { error = xfs_rtcopy_summary(mp, nmp, tp); if (error) - break; + goto error_cancel; } /* * Update superblock fields. @@ -2031,8 +2030,11 @@ xfs_growfs_rt( bp = NULL; error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents, nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno); - if (error) + if (error) { +error_cancel: + xfs_trans_cancel(tp, cancelflags); break; + } /* * Mark more blocks free in the superblock. */ @@ -2045,15 +2047,10 @@ xfs_growfs_rt( mp->m_rsumsize = nrsumsize; error = xfs_trans_commit(tp, 0); - if (error) { - tp = NULL; + if (error) break; - } } - if (error && tp) - xfs_trans_cancel(tp, cancelflags); - /* * Free the fake mp structure. */ -- cgit v1.2.3-70-g09d2 From 8a7141a8b931d60d42830432b82078cd6dace83b Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Fri, 28 Nov 2008 14:23:35 +1100 Subject: [XFS] convert xfs_getbmap to take formatter functions Preliminary work to hook up fiemap, this allows us to pass in an arbitrary formatter to copy extent data back to userspace. The formatter takes info for 1 extent, a pointer to the user "thing*" and a pointer to a "filled" variable to indicate whether a userspace buffer did get filled in (for fiemap, hole "extents" are skipped). I'm just using the getbmapx struct as a "common denominator" because as far as I can see, it holds all info that any formatters will care about. 
("*thing" because fiemap doesn't pass the user pointer around, but rather has a pointer to a fiemap info structure, and helpers associated with it) Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_ioctl.c | 64 +++++++++++++++++++++++++++----------------- fs/xfs/xfs_bmap.c | 62 +++++++++++++++++------------------------- fs/xfs/xfs_bmap.h | 11 +++++--- fs/xfs/xfs_fs.h | 12 --------- 4 files changed, 71 insertions(+), 78 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index f1bd6c36e6f..d5970599953 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -1249,6 +1249,19 @@ xfs_ioc_setxflags( return -xfs_ioctl_setattr(ip, &fa, mask); } +STATIC int +xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full) +{ + struct getbmap __user *base = *ap; + + /* copy only getbmap portion (not getbmapx) */ + if (copy_to_user(base, bmv, sizeof(struct getbmap))) + return XFS_ERROR(EFAULT); + + *ap += sizeof(struct getbmap); + return 0; +} + STATIC int xfs_ioc_getbmap( struct xfs_inode *ip, @@ -1256,37 +1269,48 @@ xfs_ioc_getbmap( unsigned int cmd, void __user *arg) { - struct getbmap bm; - int iflags; + struct getbmapx bmx; int error; - if (copy_from_user(&bm, arg, sizeof(bm))) + if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) return -XFS_ERROR(EFAULT); - if (bm.bmv_count < 2) + if (bmx.bmv_count < 2) return -XFS_ERROR(EINVAL); - iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); + bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); if (ioflags & IO_INVIS) - iflags |= BMV_IF_NO_DMAPI_READ; + bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; - error = xfs_getbmap(ip, &bm, (struct getbmap __user *)arg+1, iflags); + error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, + (struct getbmap *)arg+1); if (error) return -error; - if (copy_to_user(arg, &bm, sizeof(bm))) + /* copy back header - only size of getbmap */ + if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) return -XFS_ERROR(EFAULT); return 0; } +STATIC int +xfs_getbmapx_format(void **ap, struct getbmapx *bmv, int *full) +{ + struct getbmapx __user *base = *ap; + + if (copy_to_user(base, bmv, sizeof(struct getbmapx))) + return XFS_ERROR(EFAULT); + + *ap += sizeof(struct getbmapx); + return 0; +} + STATIC int xfs_ioc_getbmapx( struct xfs_inode *ip, void __user *arg) { struct getbmapx bmx; - struct getbmap bm; - int iflags; int error; if (copy_from_user(&bmx, arg, sizeof(bmx))) @@ -1295,26 +1319,16 @@ xfs_ioc_getbmapx( if (bmx.bmv_count < 2) return -XFS_ERROR(EINVAL); - /* - * Map input getbmapx structure to a getbmap - * structure for xfs_getbmap. 
- */ - GETBMAP_CONVERT(bmx, bm); - - iflags = bmx.bmv_iflags; - - if (iflags & (~BMV_IF_VALID)) + if (bmx.bmv_iflags & (~BMV_IF_VALID)) return -XFS_ERROR(EINVAL); - iflags |= BMV_IF_EXTENDED; - - error = xfs_getbmap(ip, &bm, (struct getbmapx __user *)arg+1, iflags); + error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, + (struct getbmapx *)arg+1); if (error) return -error; - GETBMAP_CONVERT(bm, bmx); - - if (copy_to_user(arg, &bmx, sizeof(bmx))) + /* copy back header */ + if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) return -XFS_ERROR(EFAULT); return 0; diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index c3912213645..8077580a719 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -5811,9 +5811,9 @@ error0: STATIC int xfs_getbmapx_fix_eof_hole( xfs_inode_t *ip, /* xfs incore inode pointer */ - struct getbmap *out, /* output structure */ + struct getbmapx *out, /* output structure */ int prealloced, /* this is a file with - * preallocated data space */ + * preallocated data space */ __int64_t end, /* last block requested */ xfs_fsblock_t startblock) { @@ -5839,14 +5839,18 @@ xfs_getbmapx_fix_eof_hole( } /* - * Fcntl interface to xfs_bmapi. + * Get inode's extents as described in bmv, and format for output. + * Calls formatter to fill the user's buffer until all extents + * are mapped, until the passed-in bmv->bmv_count slots have + * been filled, or until the formatter short-circuits the loop, + * if it is tracking filled-in extents on its own. */ int /* error code */ xfs_getbmap( xfs_inode_t *ip, - struct getbmap *bmv, /* user bmap structure */ - void __user *ap, /* pointer to user's array */ - int interface) /* interface flags */ + struct getbmapx *bmv, /* user bmap structure */ + xfs_bmap_format_t formatter, /* format to user */ + void *arg) /* formatter arg */ { __int64_t bmvend; /* last block requested */ int error; /* return value */ @@ -5859,19 +5863,20 @@ xfs_getbmap( int nexleft; /* # of user extents left */ int subnex; /* # of bmapi's can do */ int nmap; /* number of map entries */ - struct getbmap out; /* output structure */ + struct getbmapx out; /* output structure */ int whichfork; /* data or attr fork */ int prealloced; /* this is a file with * preallocated data space */ int sh_unwritten; /* true, if unwritten */ /* extents listed separately */ + int iflags; /* interface flags */ int bmapi_flags; /* flags for xfs_bmapi */ - __int32_t oflags; /* getbmapx bmv_oflags field */ mp = ip->i_mount; + iflags = bmv->bmv_iflags; - whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; - sh_unwritten = (interface & BMV_IF_PREALLOC) != 0; + whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; + sh_unwritten = (iflags & BMV_IF_PREALLOC) != 0; /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ @@ -5886,7 +5891,7 @@ xfs_getbmap( * could misinterpret holes in a DMAPI file as true holes, * when in fact they may represent offline user data. */ - if ((interface & BMV_IF_NO_DMAPI_READ) == 0 && + if ((iflags & BMV_IF_NO_DMAPI_READ) == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_READ) && whichfork == XFS_DATA_FORK) { error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); @@ -5993,52 +5998,35 @@ xfs_getbmap( ASSERT(nmap <= subnex); for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { - nexleft--; - oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ? + out.bmv_oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ? 
BMV_OF_PREALLOC : 0; out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); + out.bmv_unused1 = out.bmv_unused2 = 0; ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); if (map[i].br_startblock == HOLESTARTBLOCK && whichfork == XFS_ATTR_FORK) { /* came to the end of attribute fork */ goto unlock_and_return; } else { + int full = 0; /* user array is full */ + if (!xfs_getbmapx_fix_eof_hole(ip, &out, prealloced, bmvend, map[i].br_startblock)) { goto unlock_and_return; } - /* return either getbmap/getbmapx structure. */ - if (interface & BMV_IF_EXTENDED) { - struct getbmapx outx; - - GETBMAP_CONVERT(out,outx); - outx.bmv_oflags = oflags; - outx.bmv_unused1 = outx.bmv_unused2 = 0; - if (copy_to_user(ap, &outx, - sizeof(outx))) { - error = XFS_ERROR(EFAULT); - goto unlock_and_return; - } - } else { - if (copy_to_user(ap, &out, - sizeof(out))) { - error = XFS_ERROR(EFAULT); - goto unlock_and_return; - } - } + /* format results & advance arg */ + error = formatter(&arg, &out, &full); + if (error || full) + goto unlock_and_return; + nexleft--; bmv->bmv_offset = out.bmv_offset + out.bmv_length; bmv->bmv_length = MAX((__int64_t)0, (__int64_t)(bmvend - bmv->bmv_offset)); bmv->bmv_entries++; - ap = (interface & BMV_IF_EXTENDED) ? - (void __user *) - ((struct getbmapx __user *)ap + 1) : - (void __user *) - ((struct getbmap __user *)ap + 1); } } } while (nmap && nexleft && bmv->bmv_length); diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 7c9d12cd7a4..284571c05ed 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h @@ -356,15 +356,18 @@ xfs_bmap_finish( xfs_bmap_free_t *flist, /* i/o: list extents to free */ int *committed); /* xact committed or not */ +/* bmap to userspace formatter - copy to user & advance pointer */ +typedef int (*xfs_bmap_format_t)(void **, struct getbmapx *, int *); + /* - * Fcntl interface to xfs_bmapi. + * Get inode's extents as described in bmv, and format for output. */ int /* error code */ xfs_getbmap( xfs_inode_t *ip, - struct getbmap *bmv, /* user bmap structure */ - void __user *ap, /* pointer to user's array */ - int iflags); /* interface flags */ + struct getbmapx *bmv, /* user bmap structure */ + xfs_bmap_format_t formatter, /* format to user */ + void *arg); /* formatter arg */ /* * Check if the endoff is outside the last extent. If so the caller will grow diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 01c0cc88d3f..df859d62a16 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -114,22 +114,10 @@ struct getbmapx { #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ #define BMV_IF_VALID (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC) -#ifdef __KERNEL__ -#define BMV_IF_EXTENDED 0x40000000 /* getpmapx if set */ -#endif /* bmv_oflags values - returned for for each non-header segment */ #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ -/* Convert getbmap <-> getbmapx - move fields from p1 to p2. */ -#define GETBMAP_CONVERT(p1,p2) { \ - p2.bmv_offset = p1.bmv_offset; \ - p2.bmv_block = p1.bmv_block; \ - p2.bmv_length = p1.bmv_length; \ - p2.bmv_count = p1.bmv_count; \ - p2.bmv_entries = p1.bmv_entries; } - - /* * Structure for XFS_IOC_FSSETDM. 
* For use by backup and restore programs to set the XFS on-disk inode -- cgit v1.2.3-70-g09d2 From 5af317c942aebc928ab244eb69581bd8e5333215 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Fri, 28 Nov 2008 14:23:35 +1100 Subject: [XFS] Add new getbmap flags. This adds a new output flag, BMV_OF_LAST to indicate if we've hit the last extent in the inode. This potentially saves an extra call from userspace to see when the whole mapping is done. It also adds BMV_IF_DELALLOC and BMV_OF_DELALLOC to request, and indicate, delayed-allocation extents. In this case bmv_block is set to -2 (-1 was already taken for HOLESTARTBLOCK; unfortunately these are the reverse of the in-kernel constants.) These new flags facilitate addition of the new fiemap interface. Rather than adding sh_delalloc, remove sh_unwritten & just test the flags directly. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/xfs_bmap.c | 46 ++++++++++++++++++++++++++++++++-------------- fs/xfs/xfs_fs.h | 6 +++++- 2 files changed, 37 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 8077580a719..138308e70d1 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -5819,6 +5819,9 @@ xfs_getbmapx_fix_eof_hole( { __int64_t fixlen; xfs_mount_t *mp; /* file system mount point */ + xfs_ifork_t *ifp; /* inode fork pointer */ + xfs_extnum_t lastx; /* last extent pointer */ + xfs_fileoff_t fileblock; if (startblock == HOLESTARTBLOCK) { mp = ip->i_mount; @@ -5832,7 +5835,15 @@ xfs_getbmapx_fix_eof_hole( out->bmv_length = fixlen; } } else { - out->bmv_block = XFS_FSB_TO_DB(ip, startblock); + if (startblock == DELAYSTARTBLOCK) + out->bmv_block = -2; + else + out->bmv_block = XFS_FSB_TO_DB(ip, startblock); + fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset); + ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) && + (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1)) + out->bmv_oflags |= BMV_OF_LAST; } return 1; @@ -5867,8 +5878,6 @@ xfs_getbmap( int whichfork; /* data or attr fork */ int prealloced; /* this is a file with * preallocated data space */ - int sh_unwritten; /* true, if unwritten */ - /* extents listed separately */ int iflags; /* interface flags */ int bmapi_flags; /* flags for xfs_bmapi */ @@ -5876,7 +5885,6 @@ xfs_getbmap( iflags = bmv->bmv_iflags; whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK; - sh_unwritten = (iflags & BMV_IF_PREALLOC) != 0; /* If the BMV_IF_NO_DMAPI_READ interface bit specified, do not * generate a DMAPI read event. Otherwise, if the DM_EVENT_READ @@ -5947,8 +5955,9 @@ xfs_getbmap( xfs_ilock(ip, XFS_IOLOCK_SHARED); - if (whichfork == XFS_DATA_FORK && - (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) { + if (((iflags & BMV_IF_DELALLOC) == 0) && + (whichfork == XFS_DATA_FORK) && + (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size)) { /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */ error = xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); @@ -5958,7 +5967,8 @@ xfs_getbmap( } } - ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0); + ASSERT(whichfork == XFS_ATTR_FORK || (iflags & BMV_IF_DELALLOC) || + ip->i_delayed_blks == 0); lock = xfs_ilock_map_shared(ip); @@ -5970,7 +5980,7 @@ xfs_getbmap( nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1; bmapi_flags = XFS_BMAPI_AFLAG(whichfork) | - ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE); + ((iflags & BMV_IF_PREALLOC) ? 
0 : XFS_BMAPI_IGSTATE); /* * Allocate enough space to handle "subnex" maps at a time. @@ -5980,9 +5990,12 @@ xfs_getbmap( bmv->bmv_entries = 0; - if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) { - error = 0; - goto unlock_and_return; + if ((XFS_IFORK_NEXTENTS(ip, whichfork) == 0)) { + if (((iflags & BMV_IF_DELALLOC) == 0) || + whichfork == XFS_ATTR_FORK) { + error = 0; + goto unlock_and_return; + } } nexleft = nex; @@ -5998,15 +6011,20 @@ xfs_getbmap( ASSERT(nmap <= subnex); for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) { - out.bmv_oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ? - BMV_OF_PREALLOC : 0; + out.bmv_oflags = 0; + if (map[i].br_state == XFS_EXT_UNWRITTEN) + out.bmv_oflags |= BMV_OF_PREALLOC; + else if (map[i].br_startblock == DELAYSTARTBLOCK) + out.bmv_oflags |= BMV_OF_DELALLOC; out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff); out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount); out.bmv_unused1 = out.bmv_unused2 = 0; - ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); + ASSERT(((iflags & BMV_IF_DELALLOC) != 0) || + (map[i].br_startblock != DELAYSTARTBLOCK)); if (map[i].br_startblock == HOLESTARTBLOCK && whichfork == XFS_ATTR_FORK) { /* came to the end of attribute fork */ + out.bmv_oflags |= BMV_OF_LAST; goto unlock_and_return; } else { int full = 0; /* user array is full */ diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index df859d62a16..4ae03bae992 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -113,10 +113,14 @@ struct getbmapx { #define BMV_IF_ATTRFORK 0x1 /* return attr fork rather than data */ #define BMV_IF_NO_DMAPI_READ 0x2 /* Do not generate DMAPI read event */ #define BMV_IF_PREALLOC 0x4 /* rtn status BMV_OF_PREALLOC if req */ -#define BMV_IF_VALID (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC) +#define BMV_IF_DELALLOC 0x8 /* rtn status BMV_OF_DELALLOC if req */ +#define BMV_IF_VALID \ + (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC|BMV_IF_DELALLOC) /* bmv_oflags values - returned for for each non-header segment */ #define BMV_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */ +#define BMV_OF_DELALLOC 0x2 /* segment = delayed allocation */ +#define BMV_OF_LAST 0x4 /* segment is the last in the file */ /* * Structure for XFS_IOC_FSSETDM. -- cgit v1.2.3-70-g09d2 From f35642e2f89f2b0379e929bd9027342365abc839 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Fri, 28 Nov 2008 14:23:35 +1100 Subject: [XFS] Hook up the fiemap ioctl. This adds the fiemap inode_operation, which for us converts the fiemap values & flags into a getbmapx structure which can be sent to xfs_getbmap. The formatter then copies the bmv array back into the user's fiemap buffer via the fiemap helpers. If we wanted to be more clever, we could also return mapping data for in-inode attributes, but I'm not terribly motivated to do that just yet. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_iops.c | 84 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 69c98cf3233..2903faf6a26 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -53,6 +53,7 @@ #include #include #include +#include /* * Bring the atime in the XFS inode uptodate. @@ -661,6 +662,88 @@ out_error: return error; } +#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) + +/* + * Call fiemap helper to fill in user data. + * Returns positive errors to xfs_getbmap. 
+ */ +STATIC int +xfs_fiemap_format( + void **arg, + struct getbmapx *bmv, + int *full) +{ + int error; + struct fiemap_extent_info *fieinfo = *arg; + u32 fiemap_flags = 0; + u64 logical, physical, length; + + /* Do nothing for a hole */ + if (bmv->bmv_block == -1LL) + return 0; + + logical = BBTOB(bmv->bmv_offset); + physical = BBTOB(bmv->bmv_block); + length = BBTOB(bmv->bmv_length); + + if (bmv->bmv_oflags & BMV_OF_PREALLOC) + fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; + else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { + fiemap_flags |= FIEMAP_EXTENT_DELALLOC; + physical = 0; /* no block yet */ + } + if (bmv->bmv_oflags & BMV_OF_LAST) + fiemap_flags |= FIEMAP_EXTENT_LAST; + + error = fiemap_fill_next_extent(fieinfo, logical, physical, + length, fiemap_flags); + if (error > 0) { + error = 0; + *full = 1; /* user array now full */ + } + + return -error; +} + +STATIC int +xfs_vn_fiemap( + struct inode *inode, + struct fiemap_extent_info *fieinfo, + u64 start, + u64 length) +{ + xfs_inode_t *ip = XFS_I(inode); + struct getbmapx bm; + int error; + + error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); + if (error) + return error; + + /* Set up bmap header for xfs internal routine */ + bm.bmv_offset = BTOBB(start); + /* Special case for whole file */ + if (length == FIEMAP_MAX_OFFSET) + bm.bmv_length = -1LL; + else + bm.bmv_length = BTOBB(length); + + /* our formatter will tell xfs_getbmap when to stop. */ + bm.bmv_count = MAXEXTNUM; + bm.bmv_iflags = BMV_IF_PREALLOC; + if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) + bm.bmv_iflags |= BMV_IF_ATTRFORK; + if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) + bm.bmv_iflags |= BMV_IF_DELALLOC; + + error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); + if (error) + return -error; + + return 0; +} + static const struct inode_operations xfs_inode_operations = { .permission = xfs_vn_permission, .truncate = xfs_vn_truncate, @@ -671,6 +754,7 @@ static const struct inode_operations xfs_inode_operations = { .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .fallocate = xfs_vn_fallocate, + .fiemap = xfs_vn_fiemap, }; static const struct inode_operations xfs_dir_inode_operations = { -- cgit v1.2.3-70-g09d2 From 00dd4029e9afa642c2b26dc3aac834322ac29b4a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:36 +1100 Subject: [XFS] remove bhv_statvfs_t typedef Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_vfs.h | 2 -- fs/xfs/quota/xfs_qm_bhv.c | 4 ++-- fs/xfs/xfs_mount.h | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h index 0ab60bc2e76..8de5c4c95cb 100644 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ b/fs/xfs/linux-2.6/xfs_vfs.h @@ -31,8 +31,6 @@ struct xfs_inode; struct xfs_mount; struct xfs_mount_args; -typedef struct kstatfs bhv_statvfs_t; - #define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */ #define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ #define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c index 9556df9f7da..bc6c5cca3e1 100644 --- a/fs/xfs/quota/xfs_qm_bhv.c +++ b/fs/xfs/quota/xfs_qm_bhv.c @@ -50,7 +50,7 @@ STATIC void xfs_fill_statvfs_from_dquot( - bhv_statvfs_t *statp, + struct kstatfs *statp, xfs_disk_dquot_t *dp) { __uint64_t limit; @@ -87,7 +87,7 @@ xfs_fill_statvfs_from_dquot( STATIC void xfs_qm_statvfs( xfs_inode_t *ip, - 
bhv_statvfs_t *statp) + struct kstatfs *statp) { xfs_mount_t *mp = ip->i_mount; xfs_dquot_t *dqp; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index e3f618c84e4..d7031430252 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -134,7 +134,7 @@ typedef struct xfs_dquot * (*xfs_dqvopchown_t)( struct xfs_dquot **, struct xfs_dquot *); typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *, struct xfs_dquot *, struct xfs_dquot *, uint); -typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, bhv_statvfs_t *); +typedef void (*xfs_dqstatvfs_t)(struct xfs_inode *, struct kstatfs *); typedef int (*xfs_dqsync_t)(struct xfs_mount *, int flags); typedef int (*xfs_quotactl_t)(struct xfs_mount *, int, int, xfs_caddr_t); -- cgit v1.2.3-70-g09d2 From 2b5decd09e9f98c4e361f97f3e32d80164774f75 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:36 +1100 Subject: [XFS] remove xfs_vfs.h The only thing left are the forced shutdown flags and freeze macros which fit into xfs_mount.h much better. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_ioctl32.c | 1 - fs/xfs/linux-2.6/xfs_linux.h | 1 - fs/xfs/linux-2.6/xfs_vfs.h | 44 ------------------------------------------ fs/xfs/linux-2.6/xfs_vnode.h | 3 +++ fs/xfs/xfs_mount.h | 10 ++++++++++ 5 files changed, 13 insertions(+), 46 deletions(-) delete mode 100644 fs/xfs/linux-2.6/xfs_vfs.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index a4b254eb43b..6ce9c9c7cdd 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -36,7 +36,6 @@ #include "xfs_bmap_btree.h" #include "xfs_attr_sf.h" #include "xfs_dir2_sf.h" -#include "xfs_vfs.h" #include "xfs_vnode.h" #include "xfs_dinode.h" #include "xfs_inode.h" diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index cfe16a36a1d..507492d6dcc 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -80,7 +80,6 @@ #include #include -#include #include #include #include diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h deleted file mode 100644 index 8de5c4c95cb..00000000000 --- a/fs/xfs/linux-2.6/xfs_vfs.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2000-2006 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_VFS_H__ -#define __XFS_VFS_H__ - -#include -#include "xfs_fs.h" - -struct inode; - -struct fid; -struct cred; -struct seq_file; -struct super_block; -struct xfs_inode; -struct xfs_mount; -struct xfs_mount_args; - -#define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */ -#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ -#define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ -#define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */ -#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */ -#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */ - -#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen) -#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l)) - -#endif /* __XFS_VFS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index bf89e41c3b8..fb49e0f42d3 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -18,7 +18,10 @@ #ifndef __XFS_VNODE_H__ #define __XFS_VNODE_H__ +#include "xfs_fs.h" + struct file; +struct xfs_inode; struct xfs_iomap; struct attrlist_cursor_kern; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index d7031430252..4fce22a8c35 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -428,6 +428,16 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, #define xfs_force_shutdown(m,f) \ xfs_do_force_shutdown(m, f, __FILE__, __LINE__) +#define SHUTDOWN_META_IO_ERROR 0x0001 /* write attempt to metadata failed */ +#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */ +#define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */ +#define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */ +#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */ +#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */ + +#define xfs_test_for_freeze(mp) ((mp)->m_super->s_frozen) +#define xfs_wait_for_freeze(mp,l) vfs_check_frozen((mp)->m_super, (l)) + /* * Flags for xfs_mountfs */ -- cgit v1.2.3-70-g09d2 From 207fcfad58482c7c9f92939a1f6df9f7e8873a34 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:37 +1100 Subject: [XFS] remove xfs_vfsops.h The only thing left is xfs_do_force_shutdown which already has a defintion in xfs_mount.h. 
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_export.c | 1 - fs/xfs/linux-2.6/xfs_super.c | 1 - fs/xfs/xfs_vfsops.c | 1 - fs/xfs/xfs_vfsops.h | 14 -------------- 4 files changed, 17 deletions(-) delete mode 100644 fs/xfs/xfs_vfsops.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 7f7abec25e1..595751f7835 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -29,7 +29,6 @@ #include "xfs_vnodeops.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" -#include "xfs_vfsops.h" /* * Note that we only accept fileids which are long enough rather than allow diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index ae92290e3c1..5389f077874 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -48,7 +48,6 @@ #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" -#include "xfs_vfsops.h" #include "xfs_version.h" #include "xfs_log_priv.h" #include "xfs_trans_priv.h" diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index 305d9f3948e..cad3da36fb2 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -53,7 +53,6 @@ #include "xfs_filestream.h" #include "xfs_fsops.h" #include "xfs_vnodeops.h" -#include "xfs_vfsops.h" #include "xfs_utils.h" #include "xfs_sync.h" diff --git a/fs/xfs/xfs_vfsops.h b/fs/xfs/xfs_vfsops.h deleted file mode 100644 index 6b8e0b52b95..00000000000 --- a/fs/xfs/xfs_vfsops.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef _XFS_VFSOPS_H -#define _XFS_VFSOPS_H 1 - -struct cred; -struct xfs_fid; -struct inode; -struct kstatfs; -struct xfs_mount; -struct xfs_mount_args; - -void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, - int lnnum); - -#endif /* _XFS_VFSOPS_H */ -- cgit v1.2.3-70-g09d2 From 26c5295135d10fc90cbf160adfda392d91f58279 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 28 Nov 2008 14:23:37 +1100 Subject: [XFS] remove i_gen from incore inode i_gen is incremented in directory operations when the directory is changed. It is never read or otherwise used so it should be removed to help reduce the size of the struct xfs_inode. The patch also removes a duplicate logging of the directory inode core. We only need to do this once per transaction so kill the one associated with the i_gen increment. 
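For illustration, the pattern being removed looks roughly like this in the directory operations (a sketch only, paraphrasing the hunks below, which remain authoritative):

	xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	dp->i_gen++;					/* removed: nothing ever reads i_gen */
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);	/* kept: log the inode core once per transaction */
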
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Niv Sardi --- fs/xfs/xfs_inode.h | 1 - fs/xfs/xfs_rename.c | 12 ++---------- fs/xfs/xfs_vnodeops.c | 29 ++--------------------------- 3 files changed, 4 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 7f007ef4bbb..ea691c738f2 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -261,7 +261,6 @@ typedef struct xfs_inode { unsigned short i_flags; /* see defined flags below */ unsigned char i_update_core; /* timestamps/size is dirty */ unsigned char i_update_size; /* di_size field is dirty */ - unsigned int i_gen; /* generation count */ unsigned int i_delayed_blks; /* count of delay alloc blks */ xfs_icdinode_t i_d; /* most of ondisk inode */ diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index d700dacdb10..02f0e8f53a9 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c @@ -367,19 +367,11 @@ xfs_rename( &first_block, &free_list, spaceres); if (error) goto abort_return; - xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - /* - * Update the generation counts on all the directory inodes - * that we're modifying. - */ - src_dp->i_gen++; + xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); - - if (new_parent) { - target_dp->i_gen++; + if (new_parent) xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); - } /* * If this is a synchronous mount, make sure that the diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index f26b038004a..b29a0eb9c0f 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -1599,8 +1599,6 @@ xfs_create( xfs_trans_set_sync(tp); } - dp->i_gen++; - /* * Attach the dquot(s) to the inodes and modify them incore. * These ids of the inode couldn't have changed since the new @@ -1967,13 +1965,6 @@ xfs_remove( } xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - /* - * Bump the in memory generation count on the parent - * directory so that other can know that it has changed. - */ - dp->i_gen++; - xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); - if (is_dir) { /* * Drop the link from ip's "..". @@ -1991,8 +1982,8 @@ xfs_remove( } else { /* * When removing a non-directory we need to log the parent - * inode here for the i_gen update. For a directory this is - * done implicitly by the xfs_droplink call for the ".." entry. + * inode here. For a directory this is done implicitly + * by the xfs_droplink call for the ".." entry. */ xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); } @@ -2152,7 +2143,6 @@ xfs_link( if (error) goto abort_return; xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - tdp->i_gen++; xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE); error = xfs_bumplink(tp, sip); @@ -2329,18 +2319,10 @@ xfs_mkdir( } xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - /* - * Bump the in memory version number of the parent directory - * so that other processes accessing it will recognize that - * the directory has changed. - */ - dp->i_gen++; - error = xfs_dir_init(tp, cdp, dp); if (error) goto error2; - cdp->i_gen = 1; error = xfs_bumplink(tp, dp); if (error) goto error2; @@ -2626,13 +2608,6 @@ xfs_symlink( xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); - /* - * Bump the in memory version number of the parent directory - * so that other processes accessing it will recognize that - * the directory has changed. 
- */ - dp->i_gen++; - /* * If this is a synchronous mount, make sure that the * symlink transaction goes to disk before returning to -- cgit v1.2.3-70-g09d2 From 5e1be0fb1a3950597aeda448698e85b0595a2e92 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:37 +1100 Subject: [XFS] factor out xfs_read_agi helper Add a helper to read the AGI header and perform basic verification. Based on hunks from a larger patch from Dave Chinner. (First sent on Juli 23rd) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ag.h | 3 ++ fs/xfs/xfs_ialloc.c | 101 +++++++++++++++++++++++++++++------------------ fs/xfs/xfs_inode.c | 57 +++----------------------- fs/xfs/xfs_log_recover.c | 72 ++++++++++++--------------------- 4 files changed, 98 insertions(+), 135 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 2bfd8632914..f4a315390c6 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -142,6 +142,9 @@ typedef struct xfs_agi { #define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) #define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)XFS_BUF_PTR(bp)) +extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_agnumber_t agno, struct xfs_buf **bpp); + /* * The third a.g. block contains the a.g. freelist, an array * of block pointers to blocks owned by the allocation btree code. diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index c8a56c52964..efb65fea203 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1462,70 +1462,95 @@ xfs_ialloc_log_agi( xfs_trans_log_buf(tp, bp, first, last); } +#ifdef DEBUG +STATIC void +xfs_check_agi_unlinked( + struct xfs_agi *agi) +{ + int i; + + for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) + ASSERT(agi->agi_unlinked[i]); +} +#else +#define xfs_check_agi_unlinked(agi) +#endif + /* * Read in the allocation group header (inode allocation section) */ int -xfs_ialloc_read_agi( - xfs_mount_t *mp, /* file system mount structure */ - xfs_trans_t *tp, /* transaction pointer */ - xfs_agnumber_t agno, /* allocation group number */ - xfs_buf_t **bpp) /* allocation group hdr buf */ +xfs_read_agi( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + struct xfs_buf **bpp) /* allocation group hdr buf */ { - xfs_agi_t *agi; /* allocation group header */ - int agi_ok; /* agi is consistent */ - xfs_buf_t *bp; /* allocation group hdr buf */ - xfs_perag_t *pag; /* per allocation group data */ - int error; + struct xfs_agi *agi; /* allocation group header */ + int agi_ok; /* agi is consistent */ + int error; ASSERT(agno != NULLAGNUMBER); - error = xfs_trans_read_buf( - mp, tp, mp->m_ddev_targp, + + error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0, &bp); + XFS_FSS_TO_BB(mp, 1), 0, bpp); if (error) return error; - ASSERT(bp && !XFS_BUF_GETERROR(bp)); + + ASSERT(*bpp && !XFS_BUF_GETERROR(*bpp)); + agi = XFS_BUF_TO_AGI(*bpp); /* * Validate the magic number of the agi block. 
*/ - agi = XFS_BUF_TO_AGI(bp); - agi_ok = - be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && - XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); + agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && + XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) && + be32_to_cpu(agi->agi_seqno) == agno; if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, XFS_RANDOM_IALLOC_READ_AGI))) { - XFS_CORRUPTION_ERROR("xfs_ialloc_read_agi", XFS_ERRLEVEL_LOW, + XFS_CORRUPTION_ERROR("xfs_read_agi", XFS_ERRLEVEL_LOW, mp, agi); - xfs_trans_brelse(tp, bp); + xfs_trans_brelse(tp, *bpp); return XFS_ERROR(EFSCORRUPTED); } + + XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGI, XFS_AGI_REF); + + xfs_check_agi_unlinked(agi); + return 0; +} + +int +xfs_ialloc_read_agi( + struct xfs_mount *mp, /* file system mount structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + struct xfs_buf **bpp) /* allocation group hdr buf */ +{ + struct xfs_agi *agi; /* allocation group header */ + struct xfs_perag *pag; /* per allocation group data */ + int error; + + error = xfs_read_agi(mp, tp, agno, bpp); + if (error) + return error; + + agi = XFS_BUF_TO_AGI(*bpp); pag = &mp->m_perag[agno]; + if (!pag->pagi_init) { pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_count = be32_to_cpu(agi->agi_count); pag->pagi_init = 1; - } else { - /* - * It's possible for these to be out of sync if - * we are in the middle of a forced shutdown. - */ - ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || - XFS_FORCED_SHUTDOWN(mp)); - } - -#ifdef DEBUG - { - int i; - - for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) - ASSERT(agi->agi_unlinked[i]); } -#endif - XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF); - *bpp = bp; + /* + * It's possible for these to be out of sync if + * we are in the middle of a forced shutdown. + */ + ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || + XFS_FORCED_SHUTDOWN(mp)); return 0; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b9771004706..46b0526acd4 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1843,13 +1843,10 @@ xfs_iunlink( xfs_dinode_t *dip; xfs_buf_t *agibp; xfs_buf_t *ibp; - xfs_agnumber_t agno; - xfs_daddr_t agdaddr; xfs_agino_t agino; short bucket_index; int offset; int error; - int agi_ok; ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_mode != 0); @@ -1857,31 +1854,15 @@ xfs_iunlink( mp = tp->t_mountp; - agno = XFS_INO_TO_AGNO(mp, ip->i_ino); - agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); - /* * Get the agi buffer first. It ensures lock ordering * on the list. */ - error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, - XFS_FSS_TO_BB(mp, 1), 0, &agibp); + error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp); if (error) return error; - - /* - * Validate the magic number of the agi block. - */ agi = XFS_BUF_TO_AGI(agibp); - agi_ok = - be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && - XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); - if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK, - XFS_RANDOM_IUNLINK))) { - XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi); - xfs_trans_brelse(tp, agibp); - return XFS_ERROR(EFSCORRUPTED); - } + /* * Get the index into the agi hash table for the * list this inode will go on. 
@@ -1941,7 +1922,6 @@ xfs_iunlink_remove( xfs_buf_t *agibp; xfs_buf_t *ibp; xfs_agnumber_t agno; - xfs_daddr_t agdaddr; xfs_agino_t agino; xfs_agino_t next_agino; xfs_buf_t *last_ibp; @@ -1949,45 +1929,20 @@ xfs_iunlink_remove( short bucket_index; int offset, last_offset = 0; int error; - int agi_ok; - /* - * First pull the on-disk inode from the AGI unlinked list. - */ mp = tp->t_mountp; - agno = XFS_INO_TO_AGNO(mp, ip->i_ino); - agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); /* * Get the agi buffer first. It ensures lock ordering * on the list. */ - error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr, - XFS_FSS_TO_BB(mp, 1), 0, &agibp); - if (error) { - cmn_err(CE_WARN, - "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.", - error, mp->m_fsname); + error = xfs_read_agi(mp, tp, agno, &agibp); + if (error) return error; - } - /* - * Validate the magic number of the agi block. - */ + agi = XFS_BUF_TO_AGI(agibp); - agi_ok = - be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && - XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)); - if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE, - XFS_RANDOM_IUNLINK_REMOVE))) { - XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW, - mp, agi); - xfs_trans_brelse(tp, agibp); - cmn_err(CE_WARN, - "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.", - mp->m_fsname); - return XFS_ERROR(EFSCORRUPTED); - } + /* * Get the index into the agi hash table for the * list this inode will go on. diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b411d494731..b552676ca5c 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3117,19 +3117,16 @@ xlog_recover_clear_agi_bucket( int error; tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET); - error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), 0, 0, 0); - if (!error) - error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, - XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0, &agibp); + error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp), + 0, 0, 0); if (error) goto out_abort; - error = EINVAL; - agi = XFS_BUF_TO_AGI(agibp); - if (be32_to_cpu(agi->agi_magicnum) != XFS_AGI_MAGIC) + error = xfs_read_agi(mp, tp, agno, &agibp); + if (error) goto out_abort; + agi = XFS_BUF_TO_AGI(agibp); agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); offset = offsetof(xfs_agi_t, agi_unlinked) + (sizeof(xfs_agino_t) * bucket); @@ -3190,16 +3187,17 @@ xlog_recover_process_iunlinks( /* * Find the agi for this ag. */ - agibp = xfs_buf_read(mp->m_ddev_targp, - XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0); - if (XFS_BUF_ISERROR(agibp)) { - xfs_ioerror_alert("xlog_recover_process_iunlinks(#1)", - log->l_mp, agibp, - XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp))); + error = xfs_read_agi(mp, NULL, agno, &agibp); + if (error) { + /* + * AGI is b0rked. Don't process it. + * + * We should probably mark the filesystem as corrupt + * after we've recovered all the ag's we can.... + */ + continue; } agi = XFS_BUF_TO_AGI(agibp); - ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agi->agi_magicnum)); for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { @@ -3278,22 +3276,12 @@ xlog_recover_process_iunlinks( /* * Reacquire the agibuffer and continue around - * the loop. + * the loop. This should never fail as we know + * the buffer was good earlier on. 
*/ - agibp = xfs_buf_read(mp->m_ddev_targp, - XFS_AG_DADDR(mp, agno, - XFS_AGI_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), 0); - if (XFS_BUF_ISERROR(agibp)) { - xfs_ioerror_alert( - "xlog_recover_process_iunlinks(#2)", - log->l_mp, agibp, - XFS_AG_DADDR(mp, agno, - XFS_AGI_DADDR(mp))); - } + error = xfs_read_agi(mp, NULL, agno, &agibp); + ASSERT(error == 0); agi = XFS_BUF_TO_AGI(agibp); - ASSERT(XFS_AGI_MAGIC == be32_to_cpu( - agi->agi_magicnum)); } } @@ -3980,11 +3968,9 @@ xlog_recover_check_summary( { xfs_mount_t *mp; xfs_agf_t *agfp; - xfs_agi_t *agip; xfs_buf_t *agfbp; xfs_buf_t *agibp; xfs_daddr_t agfdaddr; - xfs_daddr_t agidaddr; xfs_buf_t *sbbp; #ifdef XFS_LOUD_RECOVERY xfs_sb_t *sbp; @@ -3993,6 +3979,7 @@ xlog_recover_check_summary( __uint64_t freeblks; __uint64_t itotal; __uint64_t ifree; + int error; mp = log->l_mp; @@ -4016,21 +4003,14 @@ xlog_recover_check_summary( be32_to_cpu(agfp->agf_flcount); xfs_buf_relse(agfbp); - agidaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)); - agibp = xfs_buf_read(mp->m_ddev_targp, agidaddr, - XFS_FSS_TO_BB(mp, 1), 0); - if (XFS_BUF_ISERROR(agibp)) { - xfs_ioerror_alert("xlog_recover_check_summary(agi)", - mp, agibp, agidaddr); - } - agip = XFS_BUF_TO_AGI(agibp); - ASSERT(XFS_AGI_MAGIC == be32_to_cpu(agip->agi_magicnum)); - ASSERT(XFS_AGI_GOOD_VERSION(be32_to_cpu(agip->agi_versionnum))); - ASSERT(be32_to_cpu(agip->agi_seqno) == agno); + error = xfs_read_agi(mp, NULL, agno, &agibp); + if (!error) { + struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); - itotal += be32_to_cpu(agip->agi_count); - ifree += be32_to_cpu(agip->agi_freecount); - xfs_buf_relse(agibp); + itotal += be32_to_cpu(agi->agi_count); + ifree += be32_to_cpu(agi->agi_freecount); + xfs_buf_relse(agibp); + } } sbbp = xfs_getsb(mp, 0); -- cgit v1.2.3-70-g09d2 From 4805621a37d9b2b16641b5c68597651419e9e252 Mon Sep 17 00:00:00 2001 From: "From: Christoph Hellwig" Date: Fri, 28 Nov 2008 14:23:38 +1100 Subject: [XFS] factor out xfs_read_agf helper Add a helper to read the AGF header and perform basic verification. Based on hunks from a larger patch from Dave Chinner. (First sent on Juli 23rd) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ag.h | 2 ++ fs/xfs/xfs_alloc.c | 69 +++++++++++++++++++++++++++++++++--------------- fs/xfs/xfs_log_recover.c | 26 ++++++++---------- 3 files changed, 61 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index f4a315390c6..f2e21817a22 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -91,6 +91,8 @@ typedef struct xfs_agf { #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) #define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)XFS_BUF_PTR(bp)) +extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_agnumber_t agno, int flags, struct xfs_buf **bpp); /* * Size of the unlinked inode hash table in the agi. diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index c47ce907572..028e44e58ea 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -2233,44 +2233,41 @@ xfs_alloc_put_freelist( * Read in the allocation group header (free/alloc section). */ int /* error */ -xfs_alloc_read_agf( - xfs_mount_t *mp, /* mount point structure */ - xfs_trans_t *tp, /* transaction pointer */ - xfs_agnumber_t agno, /* allocation group number */ - int flags, /* XFS_ALLOC_FLAG_... 
*/ - xfs_buf_t **bpp) /* buffer for the ag freelist header */ +xfs_read_agf( + struct xfs_mount *mp, /* mount point structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags, /* XFS_BUF_ */ + struct xfs_buf **bpp) /* buffer for the ag freelist header */ { - xfs_agf_t *agf; /* ag freelist header */ + struct xfs_agf *agf; /* ag freelist header */ int agf_ok; /* set if agf is consistent */ - xfs_buf_t *bp; /* return value */ - xfs_perag_t *pag; /* per allocation group data */ int error; ASSERT(agno != NULLAGNUMBER); error = xfs_trans_read_buf( mp, tp, mp->m_ddev_targp, XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)), - XFS_FSS_TO_BB(mp, 1), - (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0U, - &bp); + XFS_FSS_TO_BB(mp, 1), flags, bpp); if (error) return error; - ASSERT(!bp || !XFS_BUF_GETERROR(bp)); - if (!bp) { - *bpp = NULL; + if (!*bpp) return 0; - } + + ASSERT(!XFS_BUF_GETERROR(*bpp)); + agf = XFS_BUF_TO_AGF(*bpp); + /* * Validate the magic number of the agf block. */ - agf = XFS_BUF_TO_AGF(bp); agf_ok = be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) && - be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp); + be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) && + be32_to_cpu(agf->agf_seqno) == agno; if (xfs_sb_version_haslazysbcount(&mp->m_sb)) agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <= be32_to_cpu(agf->agf_length); @@ -2278,9 +2275,41 @@ xfs_alloc_read_agf( XFS_RANDOM_ALLOC_READ_AGF))) { XFS_CORRUPTION_ERROR("xfs_alloc_read_agf", XFS_ERRLEVEL_LOW, mp, agf); - xfs_trans_brelse(tp, bp); + xfs_trans_brelse(tp, *bpp); return XFS_ERROR(EFSCORRUPTED); } + + XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF); + return 0; +} + +/* + * Read in the allocation group header (free/alloc section). + */ +int /* error */ +xfs_alloc_read_agf( + struct xfs_mount *mp, /* mount point structure */ + struct xfs_trans *tp, /* transaction pointer */ + xfs_agnumber_t agno, /* allocation group number */ + int flags, /* XFS_ALLOC_FLAG_... */ + struct xfs_buf **bpp) /* buffer for the ag freelist header */ +{ + struct xfs_agf *agf; /* ag freelist header */ + struct xfs_perag *pag; /* per allocation group data */ + int error; + + ASSERT(agno != NULLAGNUMBER); + + error = xfs_read_agf(mp, tp, agno, + (flags & XFS_ALLOC_FLAG_TRYLOCK) ? 
XFS_BUF_TRYLOCK : 0, + bpp); + if (error) + return error; + if (!*bpp) + return 0; + ASSERT(!XFS_BUF_GETERROR(*bpp)); + + agf = XFS_BUF_TO_AGF(*bpp); pag = &mp->m_perag[agno]; if (!pag->pagf_init) { pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); @@ -2308,8 +2337,6 @@ xfs_alloc_read_agf( be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); } #endif - XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGF, XFS_AGF_REF); - *bpp = bp; return 0; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b552676ca5c..d94987915d5 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3970,7 +3970,6 @@ xlog_recover_check_summary( xfs_agf_t *agfp; xfs_buf_t *agfbp; xfs_buf_t *agibp; - xfs_daddr_t agfdaddr; xfs_buf_t *sbbp; #ifdef XFS_LOUD_RECOVERY xfs_sb_t *sbp; @@ -3987,21 +3986,18 @@ xlog_recover_check_summary( itotal = 0LL; ifree = 0LL; for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { - agfdaddr = XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)); - agfbp = xfs_buf_read(mp->m_ddev_targp, agfdaddr, - XFS_FSS_TO_BB(mp, 1), 0); - if (XFS_BUF_ISERROR(agfbp)) { - xfs_ioerror_alert("xlog_recover_check_summary(agf)", - mp, agfbp, agfdaddr); + error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); + if (error) { + xfs_fs_cmn_err(CE_ALERT, mp, + "xlog_recover_check_summary(agf)" + "agf read failed agno %d error %d", + agno, error); + } else { + agfp = XFS_BUF_TO_AGF(agfbp); + freeblks += be32_to_cpu(agfp->agf_freeblks) + + be32_to_cpu(agfp->agf_flcount); + xfs_buf_relse(agfbp); } - agfp = XFS_BUF_TO_AGF(agfbp); - ASSERT(XFS_AGF_MAGIC == be32_to_cpu(agfp->agf_magicnum)); - ASSERT(XFS_AGF_GOOD_VERSION(be32_to_cpu(agfp->agf_versionnum))); - ASSERT(be32_to_cpu(agfp->agf_seqno) == agno); - - freeblks += be32_to_cpu(agfp->agf_freeblks) + - be32_to_cpu(agfp->agf_flcount); - xfs_buf_relse(agfbp); error = xfs_read_agi(mp, NULL, agno, &agibp); if (!error) { -- cgit v1.2.3-70-g09d2 From b28708d6a0a3ed65a68f0dcd8e6d1c09f14e5cf3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:38 +1100 Subject: [XFS] sanitize xlog_in_core_t definition Move all fields from xlog_iclog_fields_t into xlog_in_core_t instead of having them in a substructure and the using #defines to make it look like they were directly in xlog_in_core_t. Also document that xlog_in_core_2_t is grossly misnamed, and make all references to it typesafe. (First sent on Semptember 15th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_log.c | 12 +++--------- fs/xfs/xfs_log_priv.h | 46 +++++++++++++--------------------------------- fs/xfs/xfs_log_recover.c | 7 +++---- 3 files changed, 19 insertions(+), 46 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 8a5b05536a2..aadaa1472f6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1029,12 +1029,6 @@ xlog_iodone(xfs_buf_t *bp) ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); aborted = 0; - - /* - * Some versions of cpp barf on the recursive definition of - * ic_log -> hic_fields.ic_log and expand ic_log twice when - * it is passed through two macros. Workaround broken cpp. 
- */ l = iclog->ic_log; /* @@ -1301,7 +1295,7 @@ xlog_alloc_log(xfs_mount_t *mp, XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); iclog->ic_bp = bp; - iclog->hic_data = bp->b_addr; + iclog->ic_data = bp->b_addr; #ifdef DEBUG log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); #endif @@ -1321,7 +1315,7 @@ xlog_alloc_log(xfs_mount_t *mp, atomic_set(&iclog->ic_refcnt, 0); spin_lock_init(&iclog->ic_callback_lock); iclog->ic_callback_tail = &(iclog->ic_callback); - iclog->ic_datap = (char *)iclog->hic_data + log->l_iclog_hsize; + iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); @@ -3463,7 +3457,7 @@ xlog_verify_iclog(xlog_t *log, ptr = iclog->ic_datap; base_ptr = ptr; ophead = (xlog_op_header_t *)ptr; - xhdr = (xlog_in_core_2_t *)&iclog->ic_header; + xhdr = iclog->ic_data; for (i = 0; i < len; i++) { ophead = (xlog_op_header_t *)ptr; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index b39a1980e82..654167be0ef 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -310,6 +310,16 @@ typedef struct xlog_rec_ext_header { } xlog_rec_ext_header_t; #ifdef __KERNEL__ + +/* + * Quite misnamed, because this union lays out the actual on-disk log buffer. + */ +typedef union xlog_in_core2 { + xlog_rec_header_t hic_header; + xlog_rec_ext_header_t hic_xheader; + char hic_sector[XLOG_HEADER_SIZE]; +} xlog_in_core_2_t; + /* * - A log record header is 512 bytes. There is plenty of room to grow the * xlog_rec_header_t into the reserved space. @@ -339,7 +349,7 @@ typedef struct xlog_rec_ext_header { * We'll put all the read-only and l_icloglock fields in the first cacheline, * and move everything else out to subsequent cachelines. */ -typedef struct xlog_iclog_fields { +typedef struct xlog_in_core { sv_t ic_force_wait; sv_t ic_write_wait; struct xlog_in_core *ic_next; @@ -362,40 +372,10 @@ typedef struct xlog_iclog_fields { /* reference counts need their own cacheline */ atomic_t ic_refcnt ____cacheline_aligned_in_smp; -} xlog_iclog_fields_t; - -typedef union xlog_in_core2 { - xlog_rec_header_t hic_header; - xlog_rec_ext_header_t hic_xheader; - char hic_sector[XLOG_HEADER_SIZE]; -} xlog_in_core_2_t; - -typedef struct xlog_in_core { - xlog_iclog_fields_t hic_fields; - xlog_in_core_2_t *hic_data; + xlog_in_core_2_t *ic_data; +#define ic_header ic_data->hic_header } xlog_in_core_t; -/* - * Defines to save our code from this glop. - */ -#define ic_force_wait hic_fields.ic_force_wait -#define ic_write_wait hic_fields.ic_write_wait -#define ic_next hic_fields.ic_next -#define ic_prev hic_fields.ic_prev -#define ic_bp hic_fields.ic_bp -#define ic_log hic_fields.ic_log -#define ic_callback hic_fields.ic_callback -#define ic_callback_lock hic_fields.ic_callback_lock -#define ic_callback_tail hic_fields.ic_callback_tail -#define ic_trace hic_fields.ic_trace -#define ic_size hic_fields.ic_size -#define ic_offset hic_fields.ic_offset -#define ic_refcnt hic_fields.ic_refcnt -#define ic_bwritecnt hic_fields.ic_bwritecnt -#define ic_state hic_fields.ic_state -#define ic_datap hic_fields.ic_datap -#define ic_header hic_data->hic_header - /* * The reservation head lsn is not made up of a cycle number and block number. * Instead, it uses a cycle number and byte number. 
Logs don't expect to diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index d94987915d5..9abb96a7674 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3332,7 +3332,6 @@ xlog_pack_data( int size = iclog->ic_offset + roundoff; __be32 cycle_lsn; xfs_caddr_t dp; - xlog_in_core_2_t *xhdr; xlog_pack_data_checksum(log, iclog, size); @@ -3347,7 +3346,8 @@ xlog_pack_data( } if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { - xhdr = (xlog_in_core_2_t *)&iclog->ic_header; + xlog_in_core_2_t *xhdr = iclog->ic_data; + for ( ; i < BTOBB(size); i++) { j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); @@ -3405,7 +3405,6 @@ xlog_unpack_data( xlog_t *log) { int i, j, k; - xlog_in_core_2_t *xhdr; for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { @@ -3414,7 +3413,7 @@ xlog_unpack_data( } if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { - xhdr = (xlog_in_core_2_t *)rhead; + xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead; for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); -- cgit v1.2.3-70-g09d2 From d42f08f61c5e7f0ed4c6b6df4c9987ddb85ec66e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:38 +1100 Subject: [XFS] kill xfs_ialloc_log_di xfs_ialloc_log_di is only used to log the full inode core + di_next_unlinked. That means all the offset magic is not nessecary and we can simply use xfs_trans_log_buf directly. Also add a comment describing what we should do here instead. (First sent on October 7th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_dinode.h | 34 ------------------------ fs/xfs/xfs_ialloc.c | 75 +++++++---------------------------------------------- 2 files changed, 10 insertions(+), 99 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index d7cf392cc85..10f9204b81e 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -105,40 +105,6 @@ typedef struct xfs_dinode #define XFS_MAXLINK ((1U << 31) - 1U) #define XFS_MAXLINK_1 65535U -/* - * Bit names for logging disk inodes only - */ -#define XFS_DI_MAGIC 0x0000001 -#define XFS_DI_MODE 0x0000002 -#define XFS_DI_VERSION 0x0000004 -#define XFS_DI_FORMAT 0x0000008 -#define XFS_DI_ONLINK 0x0000010 -#define XFS_DI_UID 0x0000020 -#define XFS_DI_GID 0x0000040 -#define XFS_DI_NLINK 0x0000080 -#define XFS_DI_PROJID 0x0000100 -#define XFS_DI_PAD 0x0000200 -#define XFS_DI_ATIME 0x0000400 -#define XFS_DI_MTIME 0x0000800 -#define XFS_DI_CTIME 0x0001000 -#define XFS_DI_SIZE 0x0002000 -#define XFS_DI_NBLOCKS 0x0004000 -#define XFS_DI_EXTSIZE 0x0008000 -#define XFS_DI_NEXTENTS 0x0010000 -#define XFS_DI_NAEXTENTS 0x0020000 -#define XFS_DI_FORKOFF 0x0040000 -#define XFS_DI_AFORMAT 0x0080000 -#define XFS_DI_DMEVMASK 0x0100000 -#define XFS_DI_DMSTATE 0x0200000 -#define XFS_DI_FLAGS 0x0400000 -#define XFS_DI_GEN 0x0800000 -#define XFS_DI_NEXT_UNLINKED 0x1000000 -#define XFS_DI_U 0x2000000 -#define XFS_DI_A 0x4000000 -#define XFS_DI_NUM_BITS 27 -#define XFS_DI_ALL_BITS ((1 << XFS_DI_NUM_BITS) - 1) -#define XFS_DI_CORE_BITS (XFS_DI_ALL_BITS & ~(XFS_DI_U|XFS_DI_A)) - /* * Values for di_format */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index efb65fea203..86d463ee7b9 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -41,68 +41,6 @@ #include "xfs_error.h" #include "xfs_bmap.h" -/* - * Log specified fields for the inode given 
by bp and off. - */ -STATIC void -xfs_ialloc_log_di( - xfs_trans_t *tp, /* transaction pointer */ - xfs_buf_t *bp, /* inode buffer */ - int off, /* index of inode in buffer */ - int fields) /* bitmask of fields to log */ -{ - int first; /* first byte number */ - int ioffset; /* off in bytes */ - int last; /* last byte number */ - xfs_mount_t *mp; /* mount point structure */ - static const short offsets[] = { /* field offsets */ - /* keep in sync with bits */ - offsetof(xfs_dinode_core_t, di_magic), - offsetof(xfs_dinode_core_t, di_mode), - offsetof(xfs_dinode_core_t, di_version), - offsetof(xfs_dinode_core_t, di_format), - offsetof(xfs_dinode_core_t, di_onlink), - offsetof(xfs_dinode_core_t, di_uid), - offsetof(xfs_dinode_core_t, di_gid), - offsetof(xfs_dinode_core_t, di_nlink), - offsetof(xfs_dinode_core_t, di_projid), - offsetof(xfs_dinode_core_t, di_pad), - offsetof(xfs_dinode_core_t, di_atime), - offsetof(xfs_dinode_core_t, di_mtime), - offsetof(xfs_dinode_core_t, di_ctime), - offsetof(xfs_dinode_core_t, di_size), - offsetof(xfs_dinode_core_t, di_nblocks), - offsetof(xfs_dinode_core_t, di_extsize), - offsetof(xfs_dinode_core_t, di_nextents), - offsetof(xfs_dinode_core_t, di_anextents), - offsetof(xfs_dinode_core_t, di_forkoff), - offsetof(xfs_dinode_core_t, di_aformat), - offsetof(xfs_dinode_core_t, di_dmevmask), - offsetof(xfs_dinode_core_t, di_dmstate), - offsetof(xfs_dinode_core_t, di_flags), - offsetof(xfs_dinode_core_t, di_gen), - offsetof(xfs_dinode_t, di_next_unlinked), - offsetof(xfs_dinode_t, di_u), - offsetof(xfs_dinode_t, di_a), - sizeof(xfs_dinode_t) - }; - - - ASSERT(offsetof(xfs_dinode_t, di_core) == 0); - ASSERT((fields & (XFS_DI_U|XFS_DI_A)) == 0); - mp = tp->t_mountp; - /* - * Get the inode-relative first and last bytes for these fields - */ - xfs_btree_offsets(fields, offsets, XFS_DI_NUM_BITS, &first, &last); - /* - * Convert to buffer offsets and log it. - */ - ioffset = off << mp->m_sb.sb_inodelog; - first += ioffset; - last += ioffset; - xfs_trans_log_buf(tp, bp, first, last); -} /* * Allocation group level functions. @@ -406,18 +344,25 @@ xfs_ialloc_ag_alloc( XFS_BUF_LOCK); ASSERT(fbuf); ASSERT(!XFS_BUF_GETERROR(fbuf)); + /* - * Set initial values for the inodes in this buffer. + * Initialize all inodes in this buffer and then log them. + * + * XXX: It would be much better if we had just one transaction to + * log a whole cluster of inodes instead of all the indivdual + * transactions causing a lot of log traffic. 
*/ xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); for (i = 0; i < ninodes; i++) { + int ioffset = i << args.mp->m_sb.sb_inodelog; + uint isize = sizeof(xfs_dinode_t) + sizeof(__be32); + free = XFS_MAKE_IPTR(args.mp, fbuf, i); free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC); free->di_core.di_version = version; free->di_core.di_gen = cpu_to_be32(gen); free->di_next_unlinked = cpu_to_be32(NULLAGINO); - xfs_ialloc_log_di(tp, fbuf, i, - XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); + xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1); } xfs_trans_inode_alloc_buf(tp, fbuf); } -- cgit v1.2.3-70-g09d2 From 81591fe2db19d0fc1ec2aaaa6a790a5ab97ac3ab Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:39 +1100 Subject: [XFS] kill xfs_dinode_core_t Now that we have a separate xfs_icdinode_t for the in-core inode which gets logged there is no need anymore for the xfs_dinode vs xfs_dinode_core split - the fact that part of the structure gets logged through the inode log item and a small part not can better be described in a comment. All sizeof operations on the dinode_core either really wanted the icdinode and are switched to that one, or had already added the size of the agi unlinked list pointer. Later both will be replaced with helpers once we get the larger CRC-enabled dinode. Removing the data and attribute fork unions also has the advantage that xfs_dinode.h doesn't need to pull in every header under the sun. While we're at it also add some more comments describing the dinode structure. (First sent on October 7th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_dinode.h | 104 +++++++++++++++++++++++------------------------ fs/xfs/xfs_dir2_sf.h | 7 ---- fs/xfs/xfs_ialloc.c | 8 ++-- fs/xfs/xfs_inode.c | 85 +++++++++++++++++++------------------- fs/xfs/xfs_inode.h | 7 ++-- fs/xfs/xfs_inode_item.c | 2 +- fs/xfs/xfs_itable.c | 20 ++++----- fs/xfs/xfs_log_recover.c | 29 ++++++------- fs/xfs/xfs_mount.c | 3 +- 9 files changed, 124 insertions(+), 141 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index 10f9204b81e..0b7ebf922af 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -18,32 +18,32 @@ #ifndef __XFS_DINODE_H__ #define __XFS_DINODE_H__ -struct xfs_buf; -struct xfs_mount; - #define XFS_DINODE_VERSION_1 1 #define XFS_DINODE_VERSION_2 2 #define XFS_DINODE_GOOD_VERSION(v) \ (((v) == XFS_DINODE_VERSION_1 || (v) == XFS_DINODE_VERSION_2)) #define XFS_DINODE_MAGIC 0x494e /* 'IN' */ -/* - * Disk inode structure. - * This is just the header; the inode is expanded to fill a variable size - * with the last field expanding. It is split into the core and "other" - * because we only need the core part in the in-core inode. - */ typedef struct xfs_timestamp { __be32 t_sec; /* timestamp seconds */ __be32 t_nsec; /* timestamp nanoseconds */ } xfs_timestamp_t; /* - * Note: Coordinate changes to this structure with the XFS_DI_* #defines - * below, the offsets table in xfs_ialloc_log_di() and struct xfs_icdinode - * in xfs_inode.h. + * On-disk inode structure. + * + * This is just the header or "dinode core", the inode is expanded to fill a + * variable size the leftover area split into a data and an attribute fork. + * The format of the data and attribute fork depends on the format of the + * inode as indicated by di_format and di_aformat. To access the data and + * attribute use the XFS_DFORK_PTR, XFS_DFORK_DPTR, and XFS_DFORK_PTR macros + * below. 
+ * + * There is a very similar struct icdinode in xfs_inode which matches the + * layout of the first 96 bytes of this structure, but is kept in native + * format instead of big endian. */ -typedef struct xfs_dinode_core { +typedef struct xfs_dinode { __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */ __be16 di_mode; /* mode and type of file */ __u8 di_version; /* inode version */ @@ -69,33 +69,12 @@ typedef struct xfs_dinode_core { __be16 di_dmstate; /* DMIG state info */ __be16 di_flags; /* random flags, XFS_DIFLAG_... */ __be32 di_gen; /* generation number */ -} xfs_dinode_core_t; -#define DI_MAX_FLUSH 0xffff + /* di_next_unlinked is the only non-core field in the old dinode */ + __be32 di_next_unlinked;/* agi unlinked list ptr */ +} __attribute__((packed)) xfs_dinode_t; -typedef struct xfs_dinode -{ - xfs_dinode_core_t di_core; - /* - * In adding anything between the core and the union, be - * sure to update the macros like XFS_LITINO below. - */ - __be32 di_next_unlinked;/* agi unlinked list ptr */ - union { - xfs_bmdr_block_t di_bmbt; /* btree root block */ - xfs_bmbt_rec_32_t di_bmx[1]; /* extent list */ - xfs_dir2_sf_t di_dir2sf; /* shortform directory v2 */ - char di_c[1]; /* local contents */ - __be32 di_dev; /* device for S_IFCHR/S_IFBLK */ - uuid_t di_muuid; /* mount point value */ - char di_symlink[1]; /* local symbolic link */ - } di_u; - union { - xfs_bmdr_block_t di_abmbt; /* btree root block */ - xfs_bmbt_rec_32_t di_abmx[1]; /* extent list */ - xfs_attr_shortform_t di_attrsf; /* shortform attribute list */ - } di_a; -} xfs_dinode_t; +#define DI_MAX_FLUSH 0xffff /* * The 32 bit link count in the inode theoretically maxes out at UINT_MAX. @@ -108,14 +87,12 @@ typedef struct xfs_dinode /* * Values for di_format */ -typedef enum xfs_dinode_fmt -{ - XFS_DINODE_FMT_DEV, /* CHR, BLK: di_dev */ - XFS_DINODE_FMT_LOCAL, /* DIR, REG: di_c */ - /* LNK: di_symlink */ - XFS_DINODE_FMT_EXTENTS, /* DIR, REG, LNK: di_bmx */ - XFS_DINODE_FMT_BTREE, /* DIR, REG, LNK: di_bmbt */ - XFS_DINODE_FMT_UUID /* MNT: di_uuid */ +typedef enum xfs_dinode_fmt { + XFS_DINODE_FMT_DEV, /* xfs_dev_t */ + XFS_DINODE_FMT_LOCAL, /* bulk data */ + XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */ + XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */ + XFS_DINODE_FMT_UUID /* uuid_t */ } xfs_dinode_fmt_t; /* @@ -136,8 +113,8 @@ typedef enum xfs_dinode_fmt /* * Inode data & attribute fork sizes, per inode. */ -#define XFS_DFORK_Q(dip) ((dip)->di_core.di_forkoff != 0) -#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_core.di_forkoff << 3)) +#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0) +#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3)) #define XFS_DFORK_DSIZE(dip,mp) \ (XFS_DFORK_Q(dip) ? \ @@ -152,22 +129,41 @@ typedef enum xfs_dinode_fmt XFS_DFORK_DSIZE(dip, mp) : \ XFS_DFORK_ASIZE(dip, mp)) -#define XFS_DFORK_DPTR(dip) ((dip)->di_u.di_c) +/* + * Return pointers to the data or attribute forks. + */ +#define XFS_DFORK_DPTR(dip) \ + ((char *)(dip) + sizeof(struct xfs_dinode)) #define XFS_DFORK_APTR(dip) \ - ((dip)->di_u.di_c + XFS_DFORK_BOFF(dip)) + (XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip)) #define XFS_DFORK_PTR(dip,w) \ ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip)) + #define XFS_DFORK_FORMAT(dip,w) \ ((w) == XFS_DATA_FORK ? \ - (dip)->di_core.di_format : \ - (dip)->di_core.di_aformat) + (dip)->di_format : \ + (dip)->di_aformat) #define XFS_DFORK_NEXTENTS(dip,w) \ ((w) == XFS_DATA_FORK ? 
\ - be32_to_cpu((dip)->di_core.di_nextents) : \ - be16_to_cpu((dip)->di_core.di_anextents)) + be32_to_cpu((dip)->di_nextents) : \ + be16_to_cpu((dip)->di_anextents)) #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) +/* + * For block and character special files the 32bit dev_t is stored at the + * beginning of the data fork. + */ +static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip) +{ + return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip)); +} + +static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev) +{ + *(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev); +} + /* * Values for di_flags * There should be a one-to-one correspondence between these flags and the diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index deecc9d238f..6ac44b550d3 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -33,13 +33,6 @@ struct xfs_inode; struct xfs_mount; struct xfs_trans; -/* - * Maximum size of a shortform directory. - */ -#define XFS_DIR2_SF_MAX_SIZE \ - (XFS_DINODE_MAX_SIZE - (uint)sizeof(xfs_dinode_core_t) - \ - (uint)sizeof(xfs_agino_t)) - /* * Inode number stored as 8 8-bit values. */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 86d463ee7b9..47e94288fcb 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -355,12 +355,12 @@ xfs_ialloc_ag_alloc( xfs_biozero(fbuf, 0, ninodes << args.mp->m_sb.sb_inodelog); for (i = 0; i < ninodes; i++) { int ioffset = i << args.mp->m_sb.sb_inodelog; - uint isize = sizeof(xfs_dinode_t) + sizeof(__be32); + uint isize = sizeof(struct xfs_dinode); free = XFS_MAKE_IPTR(args.mp, fbuf, i); - free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC); - free->di_core.di_version = version; - free->di_core.di_gen = cpu_to_be32(gen); + free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC); + free->di_version = version; + free->di_gen = cpu_to_be32(gen); free->di_next_unlinked = cpu_to_be32(NULLAGINO); xfs_trans_log_buf(tp, fbuf, ioffset, ioffset + isize - 1); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 46b0526acd4..1d2912dc522 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -174,8 +174,8 @@ xfs_imap_to_bp( dip = (xfs_dinode_t *)xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); - di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC && - XFS_DINODE_GOOD_VERSION(dip->di_core.di_version); + di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC && + XFS_DINODE_GOOD_VERSION(dip->di_version); if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, XFS_RANDOM_ITOBP_INOTOBP))) { @@ -191,7 +191,7 @@ xfs_imap_to_bp( "daddr %lld #%d (magic=%x)", XFS_BUFTARG_NAME(mp->m_ddev_targp), (unsigned long long)imap->im_blkno, i, - be16_to_cpu(dip->di_core.di_magic)); + be16_to_cpu(dip->di_magic)); #endif xfs_trans_brelse(tp, bp); return XFS_ERROR(EFSCORRUPTED); @@ -350,26 +350,26 @@ xfs_iformat( XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); error = 0; - if (unlikely(be32_to_cpu(dip->di_core.di_nextents) + - be16_to_cpu(dip->di_core.di_anextents) > - be64_to_cpu(dip->di_core.di_nblocks))) { + if (unlikely(be32_to_cpu(dip->di_nextents) + + be16_to_cpu(dip->di_anextents) > + be64_to_cpu(dip->di_nblocks))) { xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", (unsigned long long)ip->i_ino, - (int)(be32_to_cpu(dip->di_core.di_nextents) + - be16_to_cpu(dip->di_core.di_anextents)), + (int)(be32_to_cpu(dip->di_nextents) + + be16_to_cpu(dip->di_anextents)), (unsigned long long) - be64_to_cpu(dip->di_core.di_nblocks)); + 
be64_to_cpu(dip->di_nblocks)); XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } - if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { + if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.", (unsigned long long)ip->i_ino, - dip->di_core.di_forkoff); + dip->di_forkoff); XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); @@ -380,25 +380,25 @@ xfs_iformat( case S_IFCHR: case S_IFBLK: case S_IFSOCK: - if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) { + if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, ip->i_mount, dip); return XFS_ERROR(EFSCORRUPTED); } ip->i_d.di_size = 0; ip->i_size = 0; - ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev); + ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); break; case S_IFREG: case S_IFLNK: case S_IFDIR: - switch (dip->di_core.di_format) { + switch (dip->di_format) { case XFS_DINODE_FMT_LOCAL: /* * no local regular files yet */ - if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) { + if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) { xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, "corrupt inode %Lu " "(local format for regular file).", @@ -409,7 +409,7 @@ xfs_iformat( return XFS_ERROR(EFSCORRUPTED); } - di_size = be64_to_cpu(dip->di_core.di_size); + di_size = be64_to_cpu(dip->di_size); if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, "corrupt inode %Lu " @@ -451,7 +451,7 @@ xfs_iformat( ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP); ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t); - switch (dip->di_core.di_aformat) { + switch (dip->di_aformat) { case XFS_DINODE_FMT_LOCAL: atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip); size = be16_to_cpu(atp->hdr.totsize); @@ -663,7 +663,7 @@ xfs_iformat_btree( void xfs_dinode_from_disk( xfs_icdinode_t *to, - xfs_dinode_core_t *from) + xfs_dinode_t *from) { to->di_magic = be16_to_cpu(from->di_magic); to->di_mode = be16_to_cpu(from->di_mode); @@ -697,7 +697,7 @@ xfs_dinode_from_disk( void xfs_dinode_to_disk( - xfs_dinode_core_t *to, + xfs_dinode_t *to, xfs_icdinode_t *from) { to->di_magic = cpu_to_be16(from->di_magic); @@ -784,9 +784,7 @@ uint xfs_dic2xflags( xfs_dinode_t *dip) { - xfs_dinode_core_t *dic = &dip->di_core; - - return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) | + return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) | (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); } @@ -905,12 +903,12 @@ xfs_iread( * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. */ - if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) { + if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { #ifdef DEBUG xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " - "dip->di_core.di_magic (0x%x) != " + "dip->di_magic (0x%x) != " "XFS_DINODE_MAGIC (0x%x)", - be16_to_cpu(dip->di_core.di_magic), + be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC); #endif /* DEBUG */ error = XFS_ERROR(EINVAL); @@ -924,8 +922,8 @@ xfs_iread( * specific information. * Otherwise, just get the truly permanent information. 
*/ - if (dip->di_core.di_mode) { - xfs_dinode_from_disk(&ip->i_d, &dip->di_core); + if (dip->di_mode) { + xfs_dinode_from_disk(&ip->i_d, dip); error = xfs_iformat(ip, dip); if (error) { #ifdef DEBUG @@ -936,10 +934,10 @@ xfs_iread( goto out_brelse; } } else { - ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic); - ip->i_d.di_version = dip->di_core.di_version; - ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen); - ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter); + ip->i_d.di_magic = be16_to_cpu(dip->di_magic); + ip->i_d.di_version = dip->di_version; + ip->i_d.di_gen = be32_to_cpu(dip->di_gen); + ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter); /* * Make sure to pull in the mode here as well in * case the inode is released without being used. @@ -2295,7 +2293,7 @@ xfs_ifree( * This is a temporary hack that would require a proper fix * in the future. */ - dip->di_core.di_mode = 0; + dip->di_mode = 0; if (delete) { xfs_ifree_cluster(ip, tp, first_ino); @@ -2909,15 +2907,16 @@ xfs_iflush_fork( case XFS_DINODE_FMT_DEV: if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) { ASSERT(whichfork == XFS_DATA_FORK); - dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev); + xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev); } break; case XFS_DINODE_FMT_UUID: if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) { ASSERT(whichfork == XFS_DATA_FORK); - memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid, - sizeof(uuid_t)); + memcpy(XFS_DFORK_DPTR(dip), + &ip->i_df.if_u2.if_uuid, + sizeof(uuid_t)); } break; @@ -3295,11 +3294,11 @@ xfs_iflush_int( */ xfs_synchronize_atime(ip); - if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC, + if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", - ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip); + ip->i_ino, be16_to_cpu(dip->di_magic), dip); goto corrupt_out; } if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, @@ -3362,7 +3361,7 @@ xfs_iflush_int( * because if the inode is dirty at all the core must * be. */ - xfs_dinode_to_disk(&dip->di_core, &ip->i_d); + xfs_dinode_to_disk(dip, &ip->i_d); /* Wrap, we never let the log put out DI_MAX_FLUSH */ if (ip->i_d.di_flushiter == DI_MAX_FLUSH) @@ -3382,7 +3381,7 @@ xfs_iflush_int( * Convert it back. */ ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1); - dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink); + dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink); } else { /* * The superblock version has already been bumped, @@ -3390,12 +3389,12 @@ xfs_iflush_int( * format permanent. 
*/ ip->i_d.di_version = XFS_DINODE_VERSION_2; - dip->di_core.di_version = XFS_DINODE_VERSION_2; + dip->di_version = XFS_DINODE_VERSION_2; ip->i_d.di_onlink = 0; - dip->di_core.di_onlink = 0; + dip->di_onlink = 0; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); - memset(&(dip->di_core.di_pad[0]), 0, - sizeof(dip->di_core.di_pad)); + memset(&(dip->di_pad[0]), 0, + sizeof(dip->di_pad)); ASSERT(ip->i_d.di_projid == 0); } } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ea691c738f2..705083a8ffa 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -19,7 +19,6 @@ #define __XFS_INODE_H__ struct xfs_dinode; -struct xfs_dinode_core; struct xfs_inode; /* @@ -112,7 +111,7 @@ typedef struct xfs_ictimestamp { } xfs_ictimestamp_t; /* - * NOTE: This structure must be kept identical to struct xfs_dinode_core + * NOTE: This structure must be kept identical to struct xfs_dinode * in xfs_dinode.h except for the endianess annotations. */ typedef struct xfs_icdinode { @@ -553,8 +552,8 @@ int xfs_itobp(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, struct xfs_dinode **, struct xfs_buf **, xfs_daddr_t, uint, uint); void xfs_dinode_from_disk(struct xfs_icdinode *, - struct xfs_dinode_core *); -void xfs_dinode_to_disk(struct xfs_dinode_core *, + struct xfs_dinode *); +void xfs_dinode_to_disk(struct xfs_dinode *, struct xfs_icdinode *); void xfs_idestroy_fork(struct xfs_inode *, int); void xfs_idata_realloc(struct xfs_inode *, int, int); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index aa9bf05060c..27f18c15e16 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -281,7 +281,7 @@ xfs_inode_item_format( xfs_mark_inode_dirty_sync(ip); vecp->i_addr = (xfs_caddr_t)&ip->i_d; - vecp->i_len = sizeof(xfs_dinode_core_t); + vecp->i_len = sizeof(struct xfs_icdinode); XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); vecp++; nvecs++; diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 35118032a5d..b3578cdc33d 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -125,13 +125,9 @@ STATIC void xfs_bulkstat_one_dinode( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ - xfs_dinode_t *dip, /* dinode inode pointer */ + xfs_dinode_t *dic, /* dinode inode pointer */ xfs_bstat_t *buf) /* return buffer */ { - xfs_dinode_core_t *dic; /* dinode core info pointer */ - - dic = &dip->di_core; - /* * The inode format changed when we moved the link count and * made it 32 bits long. If this is an old format inode, @@ -162,7 +158,7 @@ xfs_bulkstat_one_dinode( buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); - buf->bs_xflags = xfs_dic2xflags(dip); + buf->bs_xflags = xfs_dic2xflags(dic); buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; buf->bs_extents = be32_to_cpu(dic->di_nextents); buf->bs_gen = be32_to_cpu(dic->di_gen); @@ -173,7 +169,7 @@ xfs_bulkstat_one_dinode( switch (dic->di_format) { case XFS_DINODE_FMT_DEV: - buf->bs_rdev = be32_to_cpu(dip->di_u.di_dev); + buf->bs_rdev = xfs_dinode_get_rdev(dic); buf->bs_blksize = BLKDEV_IOSIZE; buf->bs_blocks = 0; break; @@ -287,19 +283,19 @@ xfs_bulkstat_use_dinode( * to disk yet. This is a temporary hack that would require a proper * fix in the future. 
*/ - if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC || - !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) || - !dip->di_core.di_mode) + if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC || + !XFS_DINODE_GOOD_VERSION(dip->di_version) || + !dip->di_mode) return 0; if (flags & BULKSTAT_FG_QUICK) { *dipp = dip; return 1; } /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ - aformat = dip->di_core.di_aformat; + aformat = dip->di_aformat; if ((XFS_DFORK_Q(dip) == 0) || (aformat == XFS_DINODE_FMT_LOCAL) || - (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) { + (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_anextents)) { *dipp = dip; return 1; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 9abb96a7674..4099618f5fa 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2320,7 +2320,7 @@ xlog_recover_do_inode_trans( * Make sure the place we're flushing out to really looks * like an inode! */ - if (unlikely(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC)) { + if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", @@ -2343,12 +2343,12 @@ xlog_recover_do_inode_trans( } /* Skip replay when the on disk inode is newer than the log one */ - if (dicp->di_flushiter < be16_to_cpu(dip->di_core.di_flushiter)) { + if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) { /* * Deal with the wrap case, DI_MAX_FLUSH is less * than smaller numbers */ - if (be16_to_cpu(dip->di_core.di_flushiter) == DI_MAX_FLUSH && + if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH && dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) { /* do nothing */ } else { @@ -2408,7 +2408,7 @@ xlog_recover_do_inode_trans( error = EFSCORRUPTED; goto error; } - if (unlikely(item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t))) { + if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) { XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); @@ -2420,23 +2420,24 @@ xlog_recover_do_inode_trans( } /* The core is in in-core format */ - xfs_dinode_to_disk(&dip->di_core, - (xfs_icdinode_t *)item->ri_buf[1].i_addr); + xfs_dinode_to_disk(dip, (xfs_icdinode_t *)item->ri_buf[1].i_addr); /* the rest is in on-disk format */ - if (item->ri_buf[1].i_len > sizeof(xfs_dinode_core_t)) { - memcpy((xfs_caddr_t) dip + sizeof(xfs_dinode_core_t), - item->ri_buf[1].i_addr + sizeof(xfs_dinode_core_t), - item->ri_buf[1].i_len - sizeof(xfs_dinode_core_t)); + if (item->ri_buf[1].i_len > sizeof(struct xfs_icdinode)) { + memcpy((xfs_caddr_t) dip + sizeof(struct xfs_icdinode), + item->ri_buf[1].i_addr + sizeof(struct xfs_icdinode), + item->ri_buf[1].i_len - sizeof(struct xfs_icdinode)); } fields = in_f->ilf_fields; switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) { case XFS_ILOG_DEV: - dip->di_u.di_dev = cpu_to_be32(in_f->ilf_u.ilfu_rdev); + xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev); break; case XFS_ILOG_UUID: - dip->di_u.di_muuid = in_f->ilf_u.ilfu_uuid; + memcpy(XFS_DFORK_DPTR(dip), + &in_f->ilf_u.ilfu_uuid, + sizeof(uuid_t)); break; } @@ -2452,12 +2453,12 @@ xlog_recover_do_inode_trans( switch (fields & XFS_ILOG_DFORK) { case XFS_ILOG_DDATA: case XFS_ILOG_DEXT: - memcpy(&dip->di_u, src, len); + memcpy(XFS_DFORK_DPTR(dip), src, len); break; case XFS_ILOG_DBROOT: xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, - &dip->di_u.di_bmbt, + (xfs_bmdr_block_t 
*)XFS_DFORK_DPTR(dip), XFS_DFORK_DSIZE(dip, mp)); break; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 177976dfea0..3f999b1c038 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -575,8 +575,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT; mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; - mp->m_litino = sbp->sb_inodesize - - ((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t)); + mp->m_litino = sbp->sb_inodesize - sizeof(struct xfs_dinode); mp->m_blockmask = sbp->sb_blocksize - 1; mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG; mp->m_blockwmask = mp->m_blockwsize - 1; -- cgit v1.2.3-70-g09d2 From 51ce16d519da0bc3c548e0facef7cb3aab1ac8cc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:39 +1100 Subject: [XFS] kill XFS_DINODE_VERSION_ defines These names don't add any value at all over just using the numerical values. (First sent on October 9th) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_ioctl.c | 2 +- fs/xfs/xfs_dinode.h | 7 ++----- fs/xfs/xfs_ialloc.c | 4 ++-- fs/xfs/xfs_inode.c | 17 ++++++++--------- fs/xfs/xfs_inode_item.c | 7 +++---- fs/xfs/xfs_itable.c | 2 +- fs/xfs/xfs_utils.c | 6 +++--- 7 files changed, 20 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index d5970599953..534b175f3a4 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -1131,7 +1131,7 @@ xfs_ioctl_setattr( * the superblock version number since projids didn't * exist before DINODE_VERSION_2 and SB_VERSION_NLINK. */ - if (ip->i_d.di_version == XFS_DINODE_VERSION_1) + if (ip->i_d.di_version == 1) xfs_bump_ino_vers2(tp, ip); } diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h index 0b7ebf922af..162e8726df5 100644 --- a/fs/xfs/xfs_dinode.h +++ b/fs/xfs/xfs_dinode.h @@ -18,11 +18,8 @@ #ifndef __XFS_DINODE_H__ #define __XFS_DINODE_H__ -#define XFS_DINODE_VERSION_1 1 -#define XFS_DINODE_VERSION_2 2 -#define XFS_DINODE_GOOD_VERSION(v) \ - (((v) == XFS_DINODE_VERSION_1 || (v) == XFS_DINODE_VERSION_2)) -#define XFS_DINODE_MAGIC 0x494e /* 'IN' */ +#define XFS_DINODE_MAGIC 0x494e /* 'IN' */ +#define XFS_DINODE_GOOD_VERSION(v) (((v) == 1 || (v) == 2)) typedef struct xfs_timestamp { __be32 t_sec; /* timestamp seconds */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 47e94288fcb..16eda31fe79 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -321,9 +321,9 @@ xfs_ialloc_ag_alloc( * able to use the file system. */ if (xfs_sb_version_hasnlink(&args.mp->m_sb)) - version = XFS_DINODE_VERSION_2; + version = 2; else - version = XFS_DINODE_VERSION_1; + version = 1; /* * Seed the new inode cluster with a random generation number. This diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1d2912dc522..083395cd675 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -965,7 +965,7 @@ xfs_iread( * the new format. We don't change the version number so that we * can distinguish this from a real new format inode. */ - if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + if (ip->i_d.di_version == 1) { ip->i_d.di_nlink = ip->i_d.di_onlink; ip->i_d.di_onlink = 0; ip->i_d.di_projid = 0; @@ -1139,8 +1139,8 @@ xfs_ialloc( * here rather than here and in the flush/logging code. 
*/ if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) && - ip->i_d.di_version == XFS_DINODE_VERSION_1) { - ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_version == 1) { + ip->i_d.di_version = 2; /* * We've already zeroed the old link count, the projid field, * and the pad field. @@ -1150,7 +1150,7 @@ xfs_ialloc( /* * Project ids won't be stored on disk if we are using a version 1 inode. */ - if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) + if ((prid != 0) && (ip->i_d.di_version == 1)) xfs_bump_ino_vers2(tp, ip); if (pip && XFS_INHERIT_GID(pip)) { @@ -3373,9 +3373,8 @@ xfs_iflush_int( * convert back to the old inode format. If the superblock version * has been updated, then make the conversion permanent. */ - ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || - xfs_sb_version_hasnlink(&mp->m_sb)); - if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); + if (ip->i_d.di_version == 1) { if (!xfs_sb_version_hasnlink(&mp->m_sb)) { /* * Convert it back. @@ -3388,8 +3387,8 @@ xfs_iflush_int( * so just make the conversion to the new inode * format permanent. */ - ip->i_d.di_version = XFS_DINODE_VERSION_2; - dip->di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_version = 2; + dip->di_version = 2; ip->i_d.di_onlink = 0; dip->di_onlink = 0; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 27f18c15e16..c43118148e6 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -296,9 +296,8 @@ xfs_inode_item_format( * has a new version number, then we don't bother converting back. */ mp = ip->i_mount; - ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 || - xfs_sb_version_hasnlink(&mp->m_sb)); - if (ip->i_d.di_version == XFS_DINODE_VERSION_1) { + ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb)); + if (ip->i_d.di_version == 1) { if (!xfs_sb_version_hasnlink(&mp->m_sb)) { /* * Convert it back. @@ -311,7 +310,7 @@ xfs_inode_item_format( * so just make the conversion to the new inode * format permanent. */ - ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_version = 2; ip->i_d.di_onlink = 0; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); } diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index b3578cdc33d..2cf16f4695e 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -139,7 +139,7 @@ xfs_bulkstat_one_dinode( * the new format. We don't change the version number so that we * can distinguish this from a real new format inode. 
*/ - if (dic->di_version == XFS_DINODE_VERSION_1) { + if (dic->di_version == 1) { buf->bs_nlink = be16_to_cpu(dic->di_onlink); buf->bs_projid = 0; } else { diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 771144932ab..fcc2285d03e 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c @@ -274,9 +274,9 @@ xfs_bump_ino_vers2( xfs_mount_t *mp; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); + ASSERT(ip->i_d.di_version == 1); - ip->i_d.di_version = XFS_DINODE_VERSION_2; + ip->i_d.di_version = 2; ip->i_d.di_onlink = 0; memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); mp = tp->t_mountp; @@ -308,7 +308,7 @@ xfs_bumplink( ASSERT(ip->i_d.di_nlink > 0); ip->i_d.di_nlink++; inc_nlink(VFS_I(ip)); - if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && + if ((ip->i_d.di_version == 1) && (ip->i_d.di_nlink > XFS_MAXLINK_1)) { /* * The inode has increased its number of links beyond -- cgit v1.2.3-70-g09d2 From 23fac50f959a87febf7ce4ae9d47525121f10c7a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:40 +1100 Subject: [XFS] split up xlog_recover_process_iunlinks Split out the body of the main loop into a separate helper to make the code readable. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_log_recover.c | 134 +++++++++++++++++++++++------------------------ 1 file changed, 66 insertions(+), 68 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 4099618f5fa..841398d2421 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3147,6 +3147,70 @@ out_error: return; } +STATIC xfs_agino_t +xlog_recover_process_one_iunlink( + struct xfs_mount *mp, + xfs_agnumber_t agno, + xfs_agino_t agino, + int bucket) +{ + struct xfs_buf *ibp; + struct xfs_dinode *dip; + struct xfs_inode *ip; + xfs_ino_t ino; + int error; + + ino = XFS_AGINO_TO_INO(mp, agno, agino); + error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); + if (error) + goto fail; + + /* + * Get the on disk inode to find the next inode in the bucket. + */ + ASSERT(ip != NULL); + error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + if (error) + goto fail; + + ASSERT(dip != NULL); + ASSERT(ip->i_d.di_nlink == 0); + + /* setup for the next pass */ + agino = be32_to_cpu(dip->di_next_unlinked); + xfs_buf_relse(ibp); + + /* + * Prevent any DMAPI event from being sent when the reference on + * the inode is dropped. + */ + ip->i_d.di_dmevmask = 0; + + /* + * If this is a new inode, handle it specially. Otherwise, just + * drop our reference to the inode. If there are no other + * references, this will send the inode to xfs_inactive() which + * will truncate the file and free the inode. + */ + if (ip->i_d.di_mode == 0) + xfs_iput_new(ip, 0); + else + IRELE(ip); + return agino; + + fail: + /* + * We can't read in the inode this bucket points to, or this inode + * is messed up. Just ditch this bucket of inodes. We will lose + * some inodes and space, but at least we won't hang. + * + * Call xlog_recover_clear_agi_bucket() to perform a transaction to + * clear the inode pointer in the bucket. 
+ */ + xlog_recover_clear_agi_bucket(mp, agno, bucket); + return NULLAGINO; +} + /* * xlog_iunlink_recover * @@ -3167,11 +3231,7 @@ xlog_recover_process_iunlinks( xfs_agnumber_t agno; xfs_agi_t *agi; xfs_buf_t *agibp; - xfs_buf_t *ibp; - xfs_dinode_t *dip; - xfs_inode_t *ip; xfs_agino_t agino; - xfs_ino_t ino; int bucket; int error; uint mp_dmevmask; @@ -3201,10 +3261,8 @@ xlog_recover_process_iunlinks( agi = XFS_BUF_TO_AGI(agibp); for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { - agino = be32_to_cpu(agi->agi_unlinked[bucket]); while (agino != NULLAGINO) { - /* * Release the agi buffer so that it can * be acquired in the normal course of the @@ -3212,68 +3270,8 @@ xlog_recover_process_iunlinks( */ xfs_buf_relse(agibp); - ino = XFS_AGINO_TO_INO(mp, agno, agino); - error = xfs_iget(mp, NULL, ino, 0, 0, &ip, 0); - ASSERT(error || (ip != NULL)); - - if (!error) { - /* - * Get the on disk inode to find the - * next inode in the bucket. - */ - error = xfs_itobp(mp, NULL, ip, &dip, - &ibp, 0, 0, - XFS_BUF_LOCK); - ASSERT(error || (dip != NULL)); - } - - if (!error) { - ASSERT(ip->i_d.di_nlink == 0); - - /* setup for the next pass */ - agino = be32_to_cpu( - dip->di_next_unlinked); - xfs_buf_relse(ibp); - /* - * Prevent any DMAPI event from - * being sent when the - * reference on the inode is - * dropped. - */ - ip->i_d.di_dmevmask = 0; - - /* - * If this is a new inode, handle - * it specially. Otherwise, - * just drop our reference to the - * inode. If there are no - * other references, this will - * send the inode to - * xfs_inactive() which will - * truncate the file and free - * the inode. - */ - if (ip->i_d.di_mode == 0) - xfs_iput_new(ip, 0); - else - IRELE(ip); - } else { - /* - * We can't read in the inode - * this bucket points to, or - * this inode is messed up. Just - * ditch this bucket of inodes. We - * will lose some inodes and space, - * but at least we won't hang. Call - * xlog_recover_clear_agi_bucket() - * to perform a transaction to clear - * the inode pointer in the bucket. - */ - xlog_recover_clear_agi_bucket(mp, agno, - bucket); - - agino = NULLAGINO; - } + agino = xlog_recover_process_one_iunlink(mp, + agno, agino, bucket); /* * Reacquire the agibuffer and continue around -- cgit v1.2.3-70-g09d2 From 76d8b277f7b715f78ee3cb09ee112563639693a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:40 +1100 Subject: [XFS] stop using xfs_itobp in xfs_iread The only caller of xfs_itobp that doesn't have i_blkno setup is now the initial inode read. It needs access to the whole xfs_imap so using xfs_inotobp is not an option. Instead opencode the buffer lookup in xfs_iread and kill all the functionality for the initial map from xfs_itobp. (First sent on October 21st) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_inode.c | 80 ++++++++++++++++++++---------------------------- fs/xfs/xfs_inode.h | 4 +-- fs/xfs/xfs_log_recover.c | 2 +- 3 files changed, 37 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 083395cd675..1d5c28b144a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -262,15 +262,11 @@ xfs_inotobp( * If a non-zero error is returned, then the contents of bpp and * dipp are undefined. * - * If the inode is new and has not yet been initialized, use xfs_imap() - * to determine the size and location of the buffer to read from disk. 
- * If the inode has already been mapped to its buffer and read in once, - * then use the mapping information stored in the inode rather than - * calling xfs_imap(). This allows us to avoid the overhead of looking - * at the inode btree for small block file systems (see xfs_dilocate()). - * We can tell whether the inode has been mapped in before by comparing - * its disk block address to 0. Only uninitialized inodes will have - * 0 for the disk block address. + * The inode is expected to already been mapped to its buffer and read + * in once, thus we can use the mapping information stored in the inode + * rather than calling xfs_imap(). This allows us to avoid the overhead + * of looking at the inode btree for small block file systems + * (see xfs_dilocate()). */ int xfs_itobp( @@ -279,40 +275,19 @@ xfs_itobp( xfs_inode_t *ip, xfs_dinode_t **dipp, xfs_buf_t **bpp, - xfs_daddr_t bno, - uint imap_flags, uint buf_flags) { xfs_imap_t imap; xfs_buf_t *bp; int error; - if (ip->i_blkno == (xfs_daddr_t)0) { - imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &imap, - XFS_IMAP_LOOKUP | imap_flags); - if (error) - return error; + ASSERT(ip->i_blkno != 0); - /* - * Fill in the fields in the inode that will be used to - * map the inode to its buffer from now on. - */ - ip->i_blkno = imap.im_blkno; - ip->i_len = imap.im_len; - ip->i_boffset = imap.im_boffset; - } else { - /* - * We've already mapped the inode once, so just use the - * mapping that we saved the first time. - */ - imap.im_blkno = ip->i_blkno; - imap.im_len = ip->i_len; - imap.im_boffset = ip->i_boffset; - } - ASSERT(bno == 0 || bno == imap.im_blkno); + imap.im_blkno = ip->i_blkno; + imap.im_len = ip->i_len; + imap.im_boffset = ip->i_boffset; - error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags); + error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, 0); if (error) return error; @@ -882,6 +857,7 @@ xfs_iread( xfs_buf_t *bp; xfs_dinode_t *dip; xfs_inode_t *ip; + xfs_imap_t imap; int error; ip = xfs_inode_alloc(mp, ino); @@ -889,15 +865,27 @@ xfs_iread( return ENOMEM; /* - * Get pointer's to the on-disk inode and the buffer containing it. - * If the inode number refers to a block outside the file system - * then xfs_itobp() will return NULL. In this case we should - * return NULL as well. Set i_blkno to 0 so that xfs_itobp() will - * know that this is a new incore inode. + * Get pointers to the on-disk inode and the buffer containing it. */ - error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK); + imap.im_blkno = bno; + error = xfs_imap(mp, tp, ip->i_ino, &imap, + XFS_IMAP_LOOKUP | imap_flags); + if (error) + goto out_destroy_inode; + + /* + * Fill in the fields in the inode that will be used to + * map the inode to its buffer from now on. + */ + ip->i_blkno = imap.im_blkno; + ip->i_len = imap.im_len; + ip->i_boffset = imap.im_boffset; + ASSERT(bno == 0 || bno == imap.im_blkno); + + error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags); if (error) goto out_destroy_inode; + dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); /* * If we got something that isn't an inode it means someone @@ -1878,7 +1866,7 @@ xfs_iunlink( * Here we put the head pointer into our next pointer, * and then we fall through to point the head at us. 
*/ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) return error; @@ -1960,7 +1948,7 @@ xfs_iunlink_remove( * of dealing with the buffer when there is no need to * change it. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2022,7 +2010,7 @@ xfs_iunlink_remove( * Now last_ibp points to the buffer previous to us on * the unlinked list. Pull us from the list. */ - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) { cmn_err(CE_WARN, "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", @@ -2277,7 +2265,7 @@ xfs_ifree( xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) return error; @@ -3191,7 +3179,7 @@ xfs_iflush( /* * Get the buffer containing the on-disk inode. */ - error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0, + error = xfs_itobp(mp, NULL, ip, &dip, &bp, noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK); if (error || !bp) { xfs_ifunlock(ip); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 705083a8ffa..ec8b539439d 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -157,7 +157,7 @@ typedef struct xfs_icdinode { #define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ /* - * Flags for xfs_inotobp, xfs_itobp(), xfs_imap() and xfs_dilocate(). + * Flags for xfs_inotobp, xfs_imap() and xfs_dilocate(). */ #define XFS_IMAP_LOOKUP 0x1 #define XFS_IMAP_BULKSTAT 0x2 @@ -550,7 +550,7 @@ int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, struct xfs_buf **, int *, uint); int xfs_itobp(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, struct xfs_dinode **, - struct xfs_buf **, xfs_daddr_t, uint, uint); + struct xfs_buf **, uint); void xfs_dinode_from_disk(struct xfs_icdinode *, struct xfs_dinode *); void xfs_dinode_to_disk(struct xfs_dinode *, diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 841398d2421..48bdfa4dc29 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3169,7 +3169,7 @@ xlog_recover_process_one_iunlink( * Get the on disk inode to find the next inode in the bucket. */ - error = xfs_itobp(mp, NULL, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK); + error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) goto fail; -- cgit v1.2.3-70-g09d2 From a1941895034cda2bffa23ba845607c82138ccf52 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:40 +1100 Subject: [XFS] remove dead code for old inode item recovery We removed support for old-style inode items a while ago, and xlog_recover_do_inode_trans is now only called for XFS_LI_INODE items. That means we can remove the call to xfs_imap there, and with it the XFS_IMAP_LOOKUP flag that is set by all other callers. We can also mark xfs_imap static now.
(First sent on October 21st) Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ialloc.c | 3 +-- fs/xfs/xfs_inode.c | 5 ++--- fs/xfs/xfs_inode.h | 3 +-- fs/xfs/xfs_log_recover.c | 32 ++++++-------------------------- 4 files changed, 10 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 16eda31fe79..0ce56d6a492 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1262,8 +1262,7 @@ xfs_dilocate( #endif /* DEBUG */ return XFS_ERROR(EINVAL); } - if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) || - !(flags & XFS_IMAP_LOOKUP)) { + if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp))) { offset = XFS_INO_TO_OFFSET(mp, ino); ASSERT(offset < mp->m_sb.sb_inopblock); *bno = XFS_AGB_TO_FSB(mp, agno, agbno); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1d5c28b144a..663ff9e517f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -237,7 +237,7 @@ xfs_inotobp( int error; imap.im_blkno = 0; - error = xfs_imap(mp, tp, ino, &imap, imap_flags | XFS_IMAP_LOOKUP); + error = xfs_imap(mp, tp, ino, &imap, imap_flags); if (error) return error; @@ -868,8 +868,7 @@ xfs_iread( * Get pointers to the on-disk inode and the buffer containing it. */ imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &imap, - XFS_IMAP_LOOKUP | imap_flags); + error = xfs_imap(mp, tp, ip->i_ino, &imap, imap_flags); if (error) goto out_destroy_inode; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ec8b539439d..db1f6884072 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -159,8 +159,7 @@ typedef struct xfs_icdinode { /* * Flags for xfs_inotobp, xfs_imap() and xfs_dilocate(). */ -#define XFS_IMAP_LOOKUP 0x1 -#define XFS_IMAP_BULKSTAT 0x2 +#define XFS_IMAP_BULKSTAT 0x1 /* * Fork handling. diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 48bdfa4dc29..bf8573b5a7d 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2245,7 +2245,6 @@ xlog_recover_do_inode_trans( xfs_inode_log_format_t *in_f; xfs_mount_t *mp; xfs_buf_t *bp; - xfs_imap_t imap; xfs_dinode_t *dip; xfs_ino_t ino; int len; @@ -2273,48 +2272,29 @@ xlog_recover_do_inode_trans( } ino = in_f->ilf_ino; mp = log->l_mp; - if (ITEM_TYPE(item) == XFS_LI_INODE) { - imap.im_blkno = (xfs_daddr_t)in_f->ilf_blkno; - imap.im_len = in_f->ilf_len; - imap.im_boffset = in_f->ilf_boffset; - } else { - /* - * It's an old inode format record. We don't know where - * its cluster is located on disk, and we can't allow - * xfs_imap() to figure it out because the inode btrees - * are not ready to be used. Therefore do not pass the - * XFS_IMAP_LOOKUP flag to xfs_imap(). This will give - * us only the single block in which the inode lives - * rather than its cluster, so we must make sure to - * invalidate the buffer when we write it out below. - */ - imap.im_blkno = 0; - error = xfs_imap(log->l_mp, NULL, ino, &imap, 0); - if (error) - goto error; - } /* * Inode buffers can be freed, look out for it, * and do not replay the inode. 
*/ - if (xlog_check_buffer_cancelled(log, imap.im_blkno, imap.im_len, 0)) { + if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno, + in_f->ilf_len, 0)) { error = 0; goto error; } - bp = xfs_buf_read_flags(mp->m_ddev_targp, imap.im_blkno, imap.im_len, - XFS_BUF_LOCK); + bp = xfs_buf_read_flags(mp->m_ddev_targp, in_f->ilf_blkno, + in_f->ilf_len, XFS_BUF_LOCK); if (XFS_BUF_ISERROR(bp)) { xfs_ioerror_alert("xlog_recover_do..(read#2)", mp, - bp, imap.im_blkno); + bp, in_f->ilf_blkno); error = XFS_BUF_GETERROR(bp); xfs_buf_relse(bp); goto error; } error = 0; ASSERT(in_f->ilf_fields & XFS_ILOG_CORE); - dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset); /* * Make sure the place we're flushing out to really looks -- cgit v1.2.3-70-g09d2 From 94e1b69d1abd108d306e926c3012ec89e481c0da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:41 +1100 Subject: [XFS] merge xfs_imap into xfs_dilocate xfs_imap is the only caller of xfs_dilocate and doesn't add any significant value. Merge the two functions and document the various cases we have for inode cluster lookup in the new xfs_imap. Also remove the unused im_agblkno and im_ioffset fields from struct xfs_imap while we're at it. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ialloc.c | 129 +++++++++++++++++++++++++++++++++------------------- fs/xfs/xfs_ialloc.h | 10 ++-- fs/xfs/xfs_imap.h | 7 --- fs/xfs/xfs_inode.c | 60 +----------------------- fs/xfs/xfs_inode.h | 2 +- 5 files changed, 88 insertions(+), 120 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 0ce56d6a492..348ac30174c 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -40,6 +40,7 @@ #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_bmap.h" +#include "xfs_imap.h" /* @@ -1196,36 +1197,28 @@ error0: } /* - * Return the location of the inode in bno/off, for mapping it into a buffer. + * Return the location of the inode in imap, for mapping it into a buffer. */ -/*ARGSUSED*/ int -xfs_dilocate( - xfs_mount_t *mp, /* file system mount structure */ - xfs_trans_t *tp, /* transaction pointer */ +xfs_imap( + xfs_mount_t *mp, /* file system mount structure */ + xfs_trans_t *tp, /* transaction pointer */ xfs_ino_t ino, /* inode to locate */ - xfs_fsblock_t *bno, /* output: block containing inode */ - int *len, /* output: num blocks in inode cluster */ - int *off, /* output: index in block of inode */ - uint flags) /* flags concerning inode lookup */ + struct xfs_imap *imap, /* location map structure */ + uint flags) /* flags for inode btree lookup */ { xfs_agblock_t agbno; /* block number of inode in the alloc group */ - xfs_buf_t *agbp; /* agi buffer */ xfs_agino_t agino; /* inode number within alloc group */ xfs_agnumber_t agno; /* allocation group number */ int blks_per_cluster; /* num blocks per inode cluster */ xfs_agblock_t chunk_agbno; /* first block in inode chunk */ - xfs_agino_t chunk_agino; /* first agino in inode chunk */ - __int32_t chunk_cnt; /* count of free inodes in chunk */ - xfs_inofree_t chunk_free; /* mask of free inodes in chunk */ xfs_agblock_t cluster_agbno; /* first block in inode cluster */ - xfs_btree_cur_t *cur; /* inode btree cursor */ int error; /* error code */ - int i; /* temp state */ int offset; /* index of inode in its buffer */ int offset_agbno; /* blks from chunk start to inode */ ASSERT(ino != NULLFSINO); + /* * Split up the inode number into its parts. 
*/ @@ -1240,20 +1233,20 @@ xfs_dilocate( return XFS_ERROR(EINVAL); if (agno >= mp->m_sb.sb_agcount) { xfs_fs_cmn_err(CE_ALERT, mp, - "xfs_dilocate: agno (%d) >= " + "xfs_imap: agno (%d) >= " "mp->m_sb.sb_agcount (%d)", agno, mp->m_sb.sb_agcount); } if (agbno >= mp->m_sb.sb_agblocks) { xfs_fs_cmn_err(CE_ALERT, mp, - "xfs_dilocate: agbno (0x%llx) >= " + "xfs_imap: agbno (0x%llx) >= " "mp->m_sb.sb_agblocks (0x%lx)", (unsigned long long) agbno, (unsigned long) mp->m_sb.sb_agblocks); } if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { xfs_fs_cmn_err(CE_ALERT, mp, - "xfs_dilocate: ino (0x%llx) != " + "xfs_imap: ino (0x%llx) != " "XFS_AGINO_TO_INO(mp, agno, agino) " "(0x%llx)", ino, XFS_AGINO_TO_INO(mp, agno, agino)); @@ -1262,63 +1255,89 @@ xfs_dilocate( #endif /* DEBUG */ return XFS_ERROR(EINVAL); } - if ((mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp))) { + + /* + * If the inode cluster size is the same as the blocksize or + * smaller we get to the buffer by simple arithmetics. + */ + if (XFS_INODE_CLUSTER_SIZE(mp) <= mp->m_sb.sb_blocksize) { offset = XFS_INO_TO_OFFSET(mp, ino); ASSERT(offset < mp->m_sb.sb_inopblock); - *bno = XFS_AGB_TO_FSB(mp, agno, agbno); - *off = offset; - *len = 1; + + imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno); + imap->im_len = XFS_FSB_TO_BB(mp, 1); + imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); return 0; } + blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_blocklog; - if (*bno != NULLFSBLOCK) { + + /* + * If we get a block number passed from bulkstat we can use it to + * find the buffer easily. + */ + if (imap->im_blkno) { offset = XFS_INO_TO_OFFSET(mp, ino); ASSERT(offset < mp->m_sb.sb_inopblock); - cluster_agbno = XFS_FSB_TO_AGBNO(mp, *bno); - *off = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + - offset; - *len = blks_per_cluster; + + cluster_agbno = XFS_DADDR_TO_AGBNO(mp, imap->im_blkno); + offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock; + + imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); + imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); return 0; } + + /* + * If the inode chunks are aligned then use simple maths to + * find the location. Otherwise we have to do a btree + * lookup to find the location. 
+ */ if (mp->m_inoalign_mask) { offset_agbno = agbno & mp->m_inoalign_mask; chunk_agbno = agbno - offset_agbno; } else { + xfs_btree_cur_t *cur; /* inode btree cursor */ + xfs_agino_t chunk_agino; /* first agino in inode chunk */ + __int32_t chunk_cnt; /* count of free inodes in chunk */ + xfs_inofree_t chunk_free; /* mask of free inodes in chunk */ + xfs_buf_t *agbp; /* agi buffer */ + int i; /* temp state */ + down_read(&mp->m_peraglock); error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); up_read(&mp->m_peraglock); if (error) { -#ifdef DEBUG - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_ialloc_read_agi() returned " "error %d, agno %d", error, agno); -#endif /* DEBUG */ return error; } + cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); - if ((error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i))) { -#ifdef DEBUG - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + error = xfs_inobt_lookup_le(cur, agino, 0, 0, &i); + if (error) { + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_inobt_lookup_le() failed"); -#endif /* DEBUG */ goto error0; } - if ((error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt, - &chunk_free, &i))) { -#ifdef DEBUG - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + + error = xfs_inobt_get_rec(cur, &chunk_agino, &chunk_cnt, + &chunk_free, &i); + if (error) { + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_inobt_get_rec() failed"); -#endif /* DEBUG */ goto error0; } if (i == 0) { #ifdef DEBUG - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_dilocate: " + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " "xfs_inobt_get_rec() failed"); #endif /* DEBUG */ error = XFS_ERROR(EINVAL); } + error0: xfs_trans_brelse(tp, agbp); xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); if (error) @@ -1326,19 +1345,35 @@ xfs_dilocate( chunk_agbno = XFS_AGINO_TO_AGBNO(mp, chunk_agino); offset_agbno = agbno - chunk_agbno; } + ASSERT(agbno >= chunk_agbno); cluster_agbno = chunk_agbno + ((offset_agbno / blks_per_cluster) * blks_per_cluster); offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) + XFS_INO_TO_OFFSET(mp, ino); - *bno = XFS_AGB_TO_FSB(mp, agno, cluster_agbno); - *off = offset; - *len = blks_per_cluster; + + imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno); + imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster); + imap->im_boffset = (ushort)(offset << mp->m_sb.sb_inodelog); + + /* + * If the inode number maps to a block outside the bounds + * of the file system then return NULL rather than calling + * read_buf and panicing when we get an error from the + * driver. + */ + if ((imap->im_blkno + imap->im_len) > + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { + xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " + "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > " + " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)", + (unsigned long long) imap->im_blkno, + (unsigned long long) imap->im_len, + XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); + return XFS_ERROR(EINVAL); + } + return 0; -error0: - xfs_trans_brelse(tp, agbp); - xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); - return error; } /* diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h index ccf554a6e0a..50f558a4e0a 100644 --- a/fs/xfs/xfs_ialloc.h +++ b/fs/xfs/xfs_ialloc.h @@ -20,6 +20,7 @@ struct xfs_buf; struct xfs_dinode; +struct xfs_imap; struct xfs_mount; struct xfs_trans; @@ -104,17 +105,14 @@ xfs_difree( xfs_ino_t *first_ino); /* first inode in deleted cluster */ /* - * Return the location of the inode in bno/len/off, - * for mapping it into a buffer. 
+ * Return the location of the inode in imap, for mapping it into a buffer. */ int -xfs_dilocate( +xfs_imap( struct xfs_mount *mp, /* file system mount structure */ struct xfs_trans *tp, /* transaction pointer */ xfs_ino_t ino, /* inode to locate */ - xfs_fsblock_t *bno, /* output: block containing inode */ - int *len, /* output: num blocks in cluster*/ - int *off, /* output: index in block of inode */ + struct xfs_imap *imap, /* location map structure */ uint flags); /* flags for inode btree lookup */ /* diff --git a/fs/xfs/xfs_imap.h b/fs/xfs/xfs_imap.h index f9ce62890ea..08690005739 100644 --- a/fs/xfs/xfs_imap.h +++ b/fs/xfs/xfs_imap.h @@ -25,14 +25,7 @@ typedef struct xfs_imap { xfs_daddr_t im_blkno; /* starting BB of inode chunk */ uint im_len; /* length in BBs of inode chunk */ - xfs_agblock_t im_agblkno; /* logical block of inode chunk in ag */ - ushort im_ioffset; /* inode offset in block in "inodes" */ ushort im_boffset; /* inode offset in block in bytes */ } xfs_imap_t; -struct xfs_mount; -struct xfs_trans; -int xfs_imap(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, - xfs_imap_t *, uint); - #endif /* __XFS_IMAP_H__ */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 663ff9e517f..bf528b725ae 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -266,7 +266,7 @@ xfs_inotobp( * in once, thus we can use the mapping information stored in the inode * rather than calling xfs_imap(). This allows us to avoid the overhead * of looking at the inode btree for small block file systems - * (see xfs_dilocate()). + * (see xfs_imap()). */ int xfs_itobp( @@ -2508,64 +2508,6 @@ xfs_idata_realloc( ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork)); } - - - -/* - * Map inode to disk block and offset. - * - * mp -- the mount point structure for the current file system - * tp -- the current transaction - * ino -- the inode number of the inode to be located - * imap -- this structure is filled in with the information necessary - * to retrieve the given inode from disk - * flags -- flags to pass to xfs_dilocate indicating whether or not - * lookups in the inode btree were OK or not - */ -int -xfs_imap( - xfs_mount_t *mp, - xfs_trans_t *tp, - xfs_ino_t ino, - xfs_imap_t *imap, - uint flags) -{ - xfs_fsblock_t fsbno; - int len; - int off; - int error; - - fsbno = imap->im_blkno ? - XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK; - error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags); - if (error) - return error; - - imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno); - imap->im_len = XFS_FSB_TO_BB(mp, len); - imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno); - imap->im_ioffset = (ushort)off; - imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog); - - /* - * If the inode number maps to a block outside the bounds - * of the file system then return NULL rather than calling - * read_buf and panicing when we get an error from the - * driver. 
- */ - if ((imap->im_blkno + imap->im_len) > - XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " - "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > " - " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)", - (unsigned long long) imap->im_blkno, - (unsigned long long) imap->im_len, - XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); - return EINVAL; - } - return 0; -} - void xfs_idestroy_fork( xfs_inode_t *ip, diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index db1f6884072..0a9ad1c56a8 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -157,7 +157,7 @@ typedef struct xfs_icdinode { #define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ /* - * Flags for xfs_inotobp, xfs_imap() and xfs_dilocate(). + * Flags for xfs_inotobp and xfs_imap(). */ #define XFS_IMAP_BULKSTAT 0x1 -- cgit v1.2.3-70-g09d2 From 92bfc6e7c4eabbbd15e7d6d49123b296d05dcfd1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:41 +1100 Subject: [XFS] embed struct xfs_imap into xfs_inode Most uses of struct xfs_imap are to map an inode to a buffer. To avoid copying around the inode location information we should just embed a struct xfs_imap into the xfs_inode. To make sure it doesn't bloat an inode, the im_len is changed to a ushort, which is fine as that's what the users expect anyway. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ialloc.c | 1 - fs/xfs/xfs_imap.h | 31 ------------------------------ fs/xfs/xfs_inode.c | 49 ++++++++++++++++++------------------------------ fs/xfs/xfs_inode.h | 14 +++++++++++--- fs/xfs/xfs_inode_item.c | 6 +++--- fs/xfs/xfs_itable.c | 2 +- fs/xfs/xfs_log_recover.c | 1 - 7 files changed, 33 insertions(+), 71 deletions(-) delete mode 100644 fs/xfs/xfs_imap.h (limited to 'fs') diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 348ac30174c..4f4557262d9 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -40,7 +40,6 @@ #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_bmap.h" -#include "xfs_imap.h" /* diff --git a/fs/xfs/xfs_imap.h b/fs/xfs/xfs_imap.h deleted file mode 100644 index 08690005739..00000000000 --- a/fs/xfs/xfs_imap.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2000,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_IMAP_H__ -#define __XFS_IMAP_H__ - -/* - * This is the structure passed to xfs_imap() to map - * an inode number to its on disk location.
- */ -typedef struct xfs_imap { - xfs_daddr_t im_blkno; /* starting BB of inode chunk */ - uint im_len; /* length in BBs of inode chunk */ - ushort im_boffset; /* inode offset in block in bytes */ -} xfs_imap_t; - -#endif /* __XFS_IMAP_H__ */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index bf528b725ae..72dc7a87a14 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -23,7 +23,6 @@ #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" -#include "xfs_imap.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" #include "xfs_sb.h" @@ -134,7 +133,7 @@ STATIC int xfs_imap_to_bp( xfs_mount_t *mp, xfs_trans_t *tp, - xfs_imap_t *imap, + struct xfs_imap *imap, xfs_buf_t **bpp, uint buf_flags, uint imap_flags) @@ -232,7 +231,7 @@ xfs_inotobp( int *offset, uint imap_flags) { - xfs_imap_t imap; + struct xfs_imap imap; xfs_buf_t *bp; int error; @@ -277,17 +276,12 @@ xfs_itobp( xfs_buf_t **bpp, uint buf_flags) { - xfs_imap_t imap; xfs_buf_t *bp; int error; - ASSERT(ip->i_blkno != 0); + ASSERT(ip->i_imap.im_blkno != 0); - imap.im_blkno = ip->i_blkno; - imap.im_len = ip->i_len; - imap.im_boffset = ip->i_boffset; - - error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, 0); + error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0); if (error) return error; @@ -298,7 +292,7 @@ xfs_itobp( return EAGAIN; } - *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); *bpp = bp; return 0; } @@ -799,9 +793,7 @@ xfs_inode_alloc( /* initialise the xfs inode */ ip->i_ino = ino; ip->i_mount = mp; - ip->i_blkno = 0; - ip->i_len = 0; - ip->i_boffset =0; + memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); ip->i_afp = NULL; memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); ip->i_flags = 0; @@ -857,7 +849,6 @@ xfs_iread( xfs_buf_t *bp; xfs_dinode_t *dip; xfs_inode_t *ip; - xfs_imap_t imap; int error; ip = xfs_inode_alloc(mp, ino); @@ -865,26 +856,22 @@ xfs_iread( return ENOMEM; /* - * Get pointers to the on-disk inode and the buffer containing it. + * Fill in the location information in the in-core inode. */ - imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &imap, imap_flags); + ip->i_imap.im_blkno = bno; + error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, imap_flags); if (error) goto out_destroy_inode; + ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); /* - * Fill in the fields in the inode that will be used to - * map the inode to its buffer from now on. + * Get pointers to the on-disk inode and the buffer containing it. 
*/ - ip->i_blkno = imap.im_blkno; - ip->i_len = imap.im_len; - ip->i_boffset = imap.im_boffset; - ASSERT(bno == 0 || bno == imap.im_blkno); - - error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags); + error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, + XFS_BUF_LOCK, imap_flags); if (error) goto out_destroy_inode; - dip = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset); + dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); /* * If we got something that isn't an inode it means someone @@ -1872,7 +1859,7 @@ xfs_iunlink( ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); /* both on-disk, don't endian flip twice */ dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; - offset = ip->i_boffset + + offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, @@ -1958,7 +1945,7 @@ xfs_iunlink_remove( ASSERT(next_agino != 0); if (next_agino != NULLAGINO) { dip->di_next_unlinked = cpu_to_be32(NULLAGINO); - offset = ip->i_boffset + + offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, @@ -2021,7 +2008,7 @@ xfs_iunlink_remove( ASSERT(next_agino != agino); if (next_agino != NULLAGINO) { dip->di_next_unlinked = cpu_to_be32(NULLAGINO); - offset = ip->i_boffset + + offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, @@ -3201,7 +3188,7 @@ xfs_iflush_int( } /* set *dip = inode's place in the buffer */ - dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset); + dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); /* * Clear i_update_core before copying out the data. diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 0a9ad1c56a8..d5c3aa1b18e 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -82,6 +82,16 @@ typedef struct xfs_ifork { } if_u2; } xfs_ifork_t; +/* + * Inode location information. Stored in the inode and passed to + * xfs_imap_to_bp() to get a buffer and dinode for a given inode. + */ +struct xfs_imap { + xfs_daddr_t im_blkno; /* starting BB of inode chunk */ + ushort im_len; /* length in BBs of inode chunk */ + ushort im_boffset; /* inode offset in block in bytes */ +}; + /* * This is the xfs in-core inode structure. * Most of the on-disk inode is embedded in the i_d field. @@ -238,9 +248,7 @@ typedef struct xfs_inode { /* Inode location stuff */ xfs_ino_t i_ino; /* inode number (agno/agino)*/ - xfs_daddr_t i_blkno; /* blkno of inode buffer */ - ushort i_len; /* len of inode buffer */ - ushort i_boffset; /* off of inode in buffer */ + struct xfs_imap i_imap; /* location for xfs_imap() */ /* Extent information. 
*/ xfs_ifork_t *i_afp; /* attribute fork pointer */ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index c43118148e6..977c4aec587 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -942,9 +942,9 @@ xfs_inode_item_init( iip->ili_format.ilf_type = XFS_LI_INODE; iip->ili_format.ilf_ino = ip->i_ino; - iip->ili_format.ilf_blkno = ip->i_blkno; - iip->ili_format.ilf_len = ip->i_len; - iip->ili_format.ilf_boffset = ip->i_boffset; + iip->ili_format.ilf_blkno = ip->i_imap.im_blkno; + iip->ili_format.ilf_len = ip->i_imap.im_len; + iip->ili_format.ilf_boffset = ip->i_imap.im_boffset; } /* diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 2cf16f4695e..4315ce642b4 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -69,7 +69,7 @@ xfs_bulkstat_one_iget( } ASSERT(ip != NULL); - ASSERT(ip->i_blkno != (xfs_daddr_t)0); + ASSERT(ip->i_imap.im_blkno != 0); dic = &ip->i_d; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index bf8573b5a7d..ce6e907bec6 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -36,7 +36,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" -#include "xfs_imap.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_log_priv.h" -- cgit v1.2.3-70-g09d2 From b48d8d64377f39913663a06f4757f3b8c6fc6d87 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:41 +1100 Subject: [XFS] kill the XFS_IMAP_BULKSTAT flag Just pass down the XFS_IGET_* flags all the way down to xfs_imap instead of translating them mid-way. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_ialloc.c | 2 +- fs/xfs/xfs_iget.c | 3 +-- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_inode.h | 5 ----- fs/xfs/xfs_itable.c | 2 +- 5 files changed, 5 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 4f4557262d9..e6ebbaeb4dc 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1228,7 +1228,7 @@ xfs_imap( ino != XFS_AGINO_TO_INO(mp, agno, agino)) { #ifdef DEBUG /* no diagnostics for bulkstat, ino comes from userspace */ - if (flags & XFS_IMAP_BULKSTAT) + if (flags & XFS_IGET_BULKSTAT) return XFS_ERROR(EINVAL); if (agno >= mp->m_sb.sb_agcount) { xfs_fs_cmn_err(CE_ALERT, mp, diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index bf4dc5eb4cf..d60522a73a1 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -159,8 +159,7 @@ xfs_iget_cache_miss( * Read the disk inode attributes into a new inode structure and get * a new vnode for it. This should also initialize i_ino and i_mount. */ - error = xfs_iread(mp, tp, ino, &ip, bno, - (flags & XFS_IGET_BULKSTAT) ? 
XFS_IMAP_BULKSTAT : 0); + error = xfs_iread(mp, tp, ino, &ip, bno, flags); if (error) return error; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 72dc7a87a14..4290760473f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -136,7 +136,7 @@ xfs_imap_to_bp( struct xfs_imap *imap, xfs_buf_t **bpp, uint buf_flags, - uint imap_flags) + uint iget_flags) { int error; int i; @@ -178,7 +178,7 @@ xfs_imap_to_bp( if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, XFS_RANDOM_ITOBP_INOTOBP))) { - if (imap_flags & XFS_IMAP_BULKSTAT) { + if (iget_flags & XFS_IGET_BULKSTAT) { xfs_trans_brelse(tp, bp); return XFS_ERROR(EINVAL); } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index d5c3aa1b18e..49e609c39d7 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -166,11 +166,6 @@ typedef struct xfs_icdinode { #define XFS_IFBROOT 0x04 /* i_broot points to the bmap b-tree root */ #define XFS_IFEXTIREC 0x08 /* Indirection array of extent blocks */ -/* - * Flags for xfs_inotobp and xfs_imap(). - */ -#define XFS_IMAP_BULKSTAT 0x1 - /* * Fork handling. */ diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 4315ce642b4..d4c0de86012 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -595,7 +595,7 @@ xfs_bulkstat( error = xfs_inotobp(mp, NULL, ino, &dip, &bp, &offset, - XFS_IMAP_BULKSTAT); + XFS_IGET_BULKSTAT); if (!error) clustidx = offset / mp->m_sb.sb_inodesize; -- cgit v1.2.3-70-g09d2 From 24f211bad09a31f19dda0c3faffe0244f4f235f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:42 +1100 Subject: [XFS] move inode allocation out xfs_iread Allocate the inode in xfs_iget_cache_miss and pass it into xfs_iread. This simplifies the error handling and allows xfs_iread to be shared with userspace which already uses these semantics. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_iget.c | 88 +++++++++++++++++++++++++++++++++++++++++---- fs/xfs/xfs_inode.c | 103 ++++------------------------------------------------- fs/xfs/xfs_inode.h | 4 +-- 3 files changed, 91 insertions(+), 104 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index d60522a73a1..27ec2417b85 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -40,6 +40,82 @@ #include "xfs_utils.h" #include "xfs_trans_priv.h" #include "xfs_inode_item.h" +#include "xfs_bmap.h" +#include "xfs_btree_trace.h" +#include "xfs_dir2_trace.h" + + +/* + * Allocate and initialise an xfs_inode. + */ +STATIC struct xfs_inode * +xfs_inode_alloc( + struct xfs_mount *mp, + xfs_ino_t ino) +{ + struct xfs_inode *ip; + + /* + * if this didn't occur in transactions, we could use + * KM_MAYFAIL and return NULL here on ENOMEM. Set the + * code up to do this anyway. + */ + ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); + if (!ip) + return NULL; + + ASSERT(atomic_read(&ip->i_iocount) == 0); + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!spin_is_locked(&ip->i_flags_lock)); + ASSERT(completion_done(&ip->i_flush)); + + /* + * initialise the VFS inode here to get failures + * out of the way early. 
+ */ + if (!inode_init_always(mp->m_super, VFS_I(ip))) { + kmem_zone_free(xfs_inode_zone, ip); + return NULL; + } + + /* initialise the xfs inode */ + ip->i_ino = ino; + ip->i_mount = mp; + memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); + ip->i_afp = NULL; + memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); + ip->i_flags = 0; + ip->i_update_core = 0; + ip->i_update_size = 0; + ip->i_delayed_blks = 0; + memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); + ip->i_size = 0; + ip->i_new_size = 0; + + /* + * Initialize inode's trace buffers. + */ +#ifdef XFS_INODE_TRACE + ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_BMAP_TRACE + ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_BTREE_TRACE + ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_RW_TRACE + ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_ILOCK_TRACE + ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); +#endif +#ifdef XFS_DIR2_TRACE + ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); +#endif + + return ip; +} /* * Check the validity of the inode we just found it the cache @@ -155,13 +231,13 @@ xfs_iget_cache_miss( unsigned long first_index, mask; xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); - /* - * Read the disk inode attributes into a new inode structure and get - * a new vnode for it. This should also initialize i_ino and i_mount. - */ - error = xfs_iread(mp, tp, ino, &ip, bno, flags); + ip = xfs_inode_alloc(mp, ino); + if (!ip) + return ENOMEM; + + error = xfs_iread(mp, tp, ip, bno, flags); if (error) - return error; + goto out_destroy; xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 4290760473f..872191b4678 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -758,119 +758,36 @@ xfs_dic2xflags( } /* - * Allocate and initialise an xfs_inode. - */ -STATIC struct xfs_inode * -xfs_inode_alloc( - struct xfs_mount *mp, - xfs_ino_t ino) -{ - struct xfs_inode *ip; - - /* - * if this didn't occur in transactions, we could use - * KM_MAYFAIL and return NULL here on ENOMEM. Set the - * code up to do this anyway. - */ - ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); - if (!ip) - return NULL; - - ASSERT(atomic_read(&ip->i_iocount) == 0); - ASSERT(atomic_read(&ip->i_pincount) == 0); - ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(completion_done(&ip->i_flush)); - - /* - * initialise the VFS inode here to get failures - * out of the way early. - */ - if (!inode_init_always(mp->m_super, VFS_I(ip))) { - kmem_zone_free(xfs_inode_zone, ip); - return NULL; - } - - /* initialise the xfs inode */ - ip->i_ino = ino; - ip->i_mount = mp; - memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); - ip->i_afp = NULL; - memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); - ip->i_flags = 0; - ip->i_update_core = 0; - ip->i_update_size = 0; - ip->i_delayed_blks = 0; - memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); - ip->i_size = 0; - ip->i_new_size = 0; - - /* - * Initialize inode's trace buffers. 
- */ -#ifdef XFS_INODE_TRACE - ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BMAP_TRACE - ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_BTREE_TRACE - ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_RW_TRACE - ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_ILOCK_TRACE - ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS); -#endif -#ifdef XFS_DIR2_TRACE - ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS); -#endif - - return ip; -} - -/* - * Given a mount structure and an inode number, return a pointer - * to a newly allocated in-core inode corresponding to the given - * inode number. - * - * Initialize the inode's attributes and extent pointers if it - * already has them (it will not if the inode has no links). + * Read the disk inode attributes into the in-core inode structure. */ int xfs_iread( xfs_mount_t *mp, xfs_trans_t *tp, - xfs_ino_t ino, - xfs_inode_t **ipp, + xfs_inode_t *ip, xfs_daddr_t bno, - uint imap_flags) + uint iget_flags) { xfs_buf_t *bp; xfs_dinode_t *dip; - xfs_inode_t *ip; int error; - ip = xfs_inode_alloc(mp, ino); - if (!ip) - return ENOMEM; - /* * Fill in the location information in the in-core inode. */ ip->i_imap.im_blkno = bno; - error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, imap_flags); + error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags); if (error) - goto out_destroy_inode; + return error; ASSERT(bno == 0 || bno == ip->i_imap.im_blkno); /* * Get pointers to the on-disk inode and the buffer containing it. */ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, - XFS_BUF_LOCK, imap_flags); + XFS_BUF_LOCK, iget_flags); if (error) - goto out_destroy_inode; + return error; dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset); /* @@ -968,14 +885,8 @@ xfs_iread( * to worry about the inode being changed just because we released * the buffer. */ - xfs_trans_brelse(tp, bp); - *ipp = ip; - return 0; - out_brelse: xfs_trans_brelse(tp, bp); - out_destroy_inode: - xfs_destroy_inode(ip); return error; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 49e609c39d7..ae5800e7d39 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -516,8 +516,8 @@ void xfs_ireclaim(xfs_inode_t *); /* * xfs_inode.c prototypes. */ -int xfs_iread(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, - xfs_inode_t **, xfs_daddr_t, uint); +int xfs_iread(struct xfs_mount *, struct xfs_trans *, + struct xfs_inode *, xfs_daddr_t, uint); int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t, int, struct xfs_buf **, boolean_t *, xfs_inode_t **); -- cgit v1.2.3-70-g09d2 From 0e446673a15a4e9c336b67c1a638eb12c21d0993 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 28 Nov 2008 14:23:42 +1100 Subject: [XFS] fix error handling in xlog_recover_process_one_iunlink If we fail after xfs_iget we have to drop the reference count, as spotted by Dave Chinner. Also remove some useless asserts and stop trying to deal with di_mode == 0 inodes, because we never get those without passing the IGET_CREATE flag to xfs_iget.
Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Signed-off-by: Niv Sardi --- fs/xfs/xfs_log_recover.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ce6e907bec6..51412cced01 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3147,13 +3147,12 @@ xlog_recover_process_one_iunlink( /* * Get the on disk inode to find the next inode in the bucket. */ - ASSERT(ip != NULL); error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK); if (error) - goto fail; + goto fail_iput; - ASSERT(dip != NULL); ASSERT(ip->i_d.di_nlink == 0); + ASSERT(ip->i_d.di_mode != 0); /* setup for the next pass */ agino = be32_to_cpu(dip->di_next_unlinked); @@ -3165,18 +3164,11 @@ xlog_recover_process_one_iunlink( */ ip->i_d.di_dmevmask = 0; - /* - * If this is a new inode, handle it specially. Otherwise, just - * drop our reference to the inode. If there are no other - * references, this will send the inode to xfs_inactive() which - * will truncate the file and free the inode. - */ - if (ip->i_d.di_mode == 0) - xfs_iput_new(ip, 0); - else - IRELE(ip); + IRELE(ip); return agino; + fail_iput: + IRELE(ip); fail: /* * We can't read in the inode this bucket points to, or this inode -- cgit v1.2.3-70-g09d2 From 743bb4650da9e2595d6cedd01c680b5b9398c74a Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:06 -0600 Subject: [XFS] Move copy_from_user calls out of ioctl helpers into ioctl switch. Moving the copy_from_user out of some of the ioctl helpers will make it easier for the compat ioctl switch to copy in the right struct, then just pass to the underlying helper. Also, move common access checks into the helpers themselves, and out of the native ioctl switch code, to reduce code duplication between native & compat ioctl callers. 
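The resulting split, roughly (mirroring the reworked native switch below): only the ioctl switch touches the user pointer, and the helper takes a kernel-space struct, so a compat caller can later do its own translating copyin and reuse the same helper.

	case XFS_IOC_OPEN_BY_HANDLE: {
		xfs_fsop_handlereq_t	hreq;

		if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
			return -XFS_ERROR(EFAULT);
		return xfs_open_by_handle(mp, &hreq, filp, inode);
	}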
Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl.c | 109 +++++++++++++++++++++---------------------- fs/xfs/xfs_dfrag.c | 8 +--- fs/xfs/xfs_dfrag.h | 2 +- fs/xfs/xfs_fsops.c | 6 +++ fs/xfs/xfs_rtalloc.c | 2 + 5 files changed, 63 insertions(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 534b175f3a4..03c55b8fa37 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -71,23 +71,19 @@ STATIC int xfs_find_handle( unsigned int cmd, - void __user *arg) + xfs_fsop_handlereq_t *hreq) { int hsize; xfs_handle_t handle; - xfs_fsop_handlereq_t hreq; struct inode *inode; - if (copy_from_user(&hreq, arg, sizeof(hreq))) - return -XFS_ERROR(EFAULT); - memset((char *)&handle, 0, sizeof(handle)); switch (cmd) { case XFS_IOC_PATH_TO_FSHANDLE: case XFS_IOC_PATH_TO_HANDLE: { struct path path; - int error = user_lpath((const char __user *)hreq.path, &path); + int error = user_lpath((const char __user *)hreq->path, &path); if (error) return error; @@ -101,7 +97,7 @@ xfs_find_handle( case XFS_IOC_FD_TO_HANDLE: { struct file *file; - file = fget(hreq.fd); + file = fget(hreq->fd); if (!file) return -EBADF; @@ -158,8 +154,8 @@ xfs_find_handle( } /* now copy our handle into the user buffer & write out the size */ - if (copy_to_user(hreq.ohandle, &handle, hsize) || - copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) { + if (copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) { iput(inode); return -XFS_ERROR(EFAULT); } @@ -252,7 +248,7 @@ xfs_vget_fsop_handlereq( STATIC int xfs_open_by_handle( xfs_mount_t *mp, - void __user *arg, + xfs_fsop_handlereq_t *hreq, struct file *parfilp, struct inode *parinode) { @@ -262,14 +258,11 @@ xfs_open_by_handle( struct file *filp; struct inode *inode; struct dentry *dentry; - xfs_fsop_handlereq_t hreq; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); - if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) - return -XFS_ERROR(EFAULT); - error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); + error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); if (error) return -error; @@ -280,10 +273,10 @@ xfs_open_by_handle( } #if BITS_PER_LONG != 32 - hreq.oflags |= O_LARGEFILE; + hreq->oflags |= O_LARGEFILE; #endif /* Put open permission in namei format. */ - permflag = hreq.oflags; + permflag = hreq->oflags; if ((permflag+1) & O_ACCMODE) permflag++; if (permflag & O_TRUNC) @@ -321,7 +314,7 @@ xfs_open_by_handle( mntget(parfilp->f_path.mnt); /* Create file pointer. 
*/ - filp = dentry_open(dentry, parfilp->f_path.mnt, hreq.oflags); + filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags); if (IS_ERR(filp)) { put_unused_fd(new_fd); return -XFS_ERROR(-PTR_ERR(filp)); @@ -365,21 +358,18 @@ do_readlink( STATIC int xfs_readlink_by_handle( xfs_mount_t *mp, - void __user *arg, + xfs_fsop_handlereq_t *hreq, struct inode *parinode) { struct inode *inode; - xfs_fsop_handlereq_t hreq; __u32 olen; void *link; int error; if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); - if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) - return -XFS_ERROR(EFAULT); - error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); + error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode); if (error) return -error; @@ -389,7 +379,7 @@ xfs_readlink_by_handle( goto out_iput; } - if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) { + if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { error = -XFS_ERROR(EFAULT); goto out_iput; } @@ -401,7 +391,7 @@ xfs_readlink_by_handle( error = -xfs_readlink(XFS_I(inode), link); if (error) goto out_kfree; - error = do_readlink(hreq.ohandle, olen, link); + error = do_readlink(hreq->ohandle, olen, link); if (error) goto out_kfree; @@ -668,12 +658,19 @@ xfs_ioc_space( struct file *filp, int ioflags, unsigned int cmd, - void __user *arg) + xfs_flock64_t *bf) { - xfs_flock64_t bf; int attr_flags = 0; int error; + /* + * Only allow the sys admin to reserve space unless + * unwritten extents are enabled. + */ + if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && + !capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) return -XFS_ERROR(EPERM); @@ -683,15 +680,12 @@ xfs_ioc_space( if (!S_ISREG(inode->i_mode)) return -XFS_ERROR(EINVAL); - if (copy_from_user(&bf, arg, sizeof(bf))) - return -XFS_ERROR(EFAULT); - if (filp->f_flags & (O_NDELAY|O_NONBLOCK)) attr_flags |= XFS_ATTR_NONBLOCK; if (ioflags & IO_INVIS) attr_flags |= XFS_ATTR_DMI; - error = xfs_change_file_space(ip, cmd, &bf, filp->f_pos, attr_flags); + error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags); return -error; } @@ -1356,17 +1350,13 @@ xfs_ioctl( case XFS_IOC_ALLOCSP64: case XFS_IOC_FREESP64: case XFS_IOC_RESVSP64: - case XFS_IOC_UNRESVSP64: - /* - * Only allow the sys admin to reserve space unless - * unwritten extents are enabled. 
- */ - if (!xfs_sb_version_hasextflgbit(&mp->m_sb) && - !capable(CAP_SYS_ADMIN)) - return -EPERM; - - return xfs_ioc_space(ip, inode, filp, ioflags, cmd, arg); + case XFS_IOC_UNRESVSP64: { + xfs_flock64_t bf; + if (copy_from_user(&bf, arg, sizeof(bf))) + return -XFS_ERROR(EFAULT); + return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); + } case XFS_IOC_DIOINFO: { struct dioattr da; xfs_buftarg_t *target = @@ -1426,18 +1416,30 @@ xfs_ioctl( case XFS_IOC_FD_TO_HANDLE: case XFS_IOC_PATH_TO_HANDLE: - case XFS_IOC_PATH_TO_FSHANDLE: - return xfs_find_handle(cmd, arg); + case XFS_IOC_PATH_TO_FSHANDLE: { + xfs_fsop_handlereq_t hreq; - case XFS_IOC_OPEN_BY_HANDLE: - return xfs_open_by_handle(mp, arg, filp, inode); + if (copy_from_user(&hreq, arg, sizeof(hreq))) + return -XFS_ERROR(EFAULT); + return xfs_find_handle(cmd, &hreq); + } + case XFS_IOC_OPEN_BY_HANDLE: { + xfs_fsop_handlereq_t hreq; + if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + return xfs_open_by_handle(mp, &hreq, filp, inode); + } case XFS_IOC_FSSETDM_BY_HANDLE: return xfs_fssetdm_by_handle(mp, arg, inode); - case XFS_IOC_READLINK_BY_HANDLE: - return xfs_readlink_by_handle(mp, arg, inode); + case XFS_IOC_READLINK_BY_HANDLE: { + xfs_fsop_handlereq_t hreq; + if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + return xfs_readlink_by_handle(mp, &hreq, inode); + } case XFS_IOC_ATTRLIST_BY_HANDLE: return xfs_attrlist_by_handle(mp, arg, inode); @@ -1445,7 +1447,11 @@ xfs_ioctl( return xfs_attrmulti_by_handle(mp, arg, filp, inode); case XFS_IOC_SWAPEXT: { - error = xfs_swapext((struct xfs_swapext __user *)arg); + struct xfs_swapext sxp; + + if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) + return -XFS_ERROR(EFAULT); + error = xfs_swapext(&sxp); return -error; } @@ -1501,9 +1507,6 @@ xfs_ioctl( case XFS_IOC_FSGROWFSDATA: { xfs_growfs_data_t in; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); @@ -1514,9 +1517,6 @@ xfs_ioctl( case XFS_IOC_FSGROWFSLOG: { xfs_growfs_log_t in; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); @@ -1527,9 +1527,6 @@ xfs_ioctl( case XFS_IOC_FSGROWFSRT: { xfs_growfs_rt_t in; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user(&in, arg, sizeof(in))) return -XFS_ERROR(EFAULT); diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 75b0cd4da0e..b4c1ee71349 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -49,9 +49,8 @@ */ int xfs_swapext( - xfs_swapext_t __user *sxu) + xfs_swapext_t *sxp) { - xfs_swapext_t *sxp; xfs_inode_t *ip, *tip; struct file *file, *target_file; int error = 0; @@ -62,11 +61,6 @@ xfs_swapext( goto out; } - if (copy_from_user(sxp, sxu, sizeof(xfs_swapext_t))) { - error = XFS_ERROR(EFAULT); - goto out_free_sxp; - } - /* Pull information for the target fd */ file = fget((int)sxp->sx_fdtarget); if (!file) { diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h index da178205be6..4f55a630655 100644 --- a/fs/xfs/xfs_dfrag.h +++ b/fs/xfs/xfs_dfrag.h @@ -46,7 +46,7 @@ typedef struct xfs_swapext /* * Syscall interface for xfs_swapext */ -int xfs_swapext(struct xfs_swapext __user *sx); +int xfs_swapext(struct xfs_swapext *sx); int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip, struct xfs_swapext *sxp); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index f1d0585041b..852b6d32e8d 100644 --- a/fs/xfs/xfs_fsops.c +++ 
b/fs/xfs/xfs_fsops.c @@ -435,6 +435,9 @@ xfs_growfs_data( xfs_growfs_data_t *in) { int error; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); if (!mutex_trylock(&mp->m_growlock)) return XFS_ERROR(EWOULDBLOCK); error = xfs_growfs_data_private(mp, in); @@ -448,6 +451,9 @@ xfs_growfs_log( xfs_growfs_log_t *in) { int error; + + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); if (!mutex_trylock(&mp->m_growlock)) return XFS_ERROR(EWOULDBLOCK); error = xfs_growfs_log_private(mp, in); diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index f18b9b28179..edf12c7b834 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -1876,6 +1876,8 @@ xfs_growfs_rt( /* * Initial error checking. */ + if (!capable(CAP_SYS_ADMIN)) + return XFS_ERROR(EPERM); if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || (nrblocks = in->newblocks) <= sbp->sb_rblocks || (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) -- cgit v1.2.3-70-g09d2 From ffae263a640b736a7206a0d7bd14ab44eb58cd28 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:07 -0600 Subject: [XFS] Move compat ioctl structs & numbers into xfs_ioctl32.h This makes the c file less cluttered and a bit more readable. Consistently name the ioctl number macros with "_32" and the compatibility stuctures with "_compat." Rename the helpers which simply copy in the arg with "_copyin" for easy identification. Finally, for a few of the existing helpers, modify them so that they directly call the native ioctl helper after userspace argument fixup. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 140 ++------------------------------------- fs/xfs/linux-2.6/xfs_ioctl32.h | 145 +++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_fs.h | 4 -- 3 files changed, 150 insertions(+), 139 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 6ce9c9c7cdd..620bb2d6d26 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -48,36 +48,12 @@ #define _NATIVE_IOC(cmd, type) \ _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) -#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) -#define BROKEN_X86_ALIGNMENT -#define _PACKED __attribute__((packed)) -/* on ia32 l_start is on a 32-bit boundary */ -typedef struct xfs_flock64_32 { - __s16 l_type; - __s16 l_whence; - __s64 l_start __attribute__((packed)); - /* len == 0 means until end of file */ - __s64 l_len __attribute__((packed)); - __s32 l_sysid; - __u32 l_pid; - __s32 l_pad[4]; /* reserve area */ -} xfs_flock64_32_t; - -#define XFS_IOC_ALLOCSP_32 _IOW ('X', 10, struct xfs_flock64_32) -#define XFS_IOC_FREESP_32 _IOW ('X', 11, struct xfs_flock64_32) -#define XFS_IOC_ALLOCSP64_32 _IOW ('X', 36, struct xfs_flock64_32) -#define XFS_IOC_FREESP64_32 _IOW ('X', 37, struct xfs_flock64_32) -#define XFS_IOC_RESVSP_32 _IOW ('X', 40, struct xfs_flock64_32) -#define XFS_IOC_UNRESVSP_32 _IOW ('X', 41, struct xfs_flock64_32) -#define XFS_IOC_RESVSP64_32 _IOW ('X', 42, struct xfs_flock64_32) -#define XFS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct xfs_flock64_32) - -/* just account for different alignment */ +#ifdef BROKEN_X86_ALIGNMENT STATIC unsigned long xfs_ioctl32_flock( unsigned long arg) { - xfs_flock64_32_t __user *p32 = (void __user *)arg; + compat_xfs_flock64_t __user *p32 = (void __user *)arg; xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p)); if (copy_in_user(&p->l_type, &p32->l_type, 
sizeof(s16)) || @@ -92,32 +68,6 @@ xfs_ioctl32_flock( return (unsigned long)p; } -typedef struct compat_xfs_fsop_geom_v1 { - __u32 blocksize; /* filesystem (data) block size */ - __u32 rtextsize; /* realtime extent size */ - __u32 agblocks; /* fsblocks in an AG */ - __u32 agcount; /* number of allocation groups */ - __u32 logblocks; /* fsblocks in the log */ - __u32 sectsize; /* (data) sector size, bytes */ - __u32 inodesize; /* inode size in bytes */ - __u32 imaxpct; /* max allowed inode space(%) */ - __u64 datablocks; /* fsblocks in data subvolume */ - __u64 rtblocks; /* fsblocks in realtime subvol */ - __u64 rtextents; /* rt extents in realtime subvol*/ - __u64 logstart; /* starting fsblock of the log */ - unsigned char uuid[16]; /* unique id of the filesystem */ - __u32 sunit; /* stripe unit, fsblocks */ - __u32 swidth; /* stripe width, fsblocks */ - __s32 version; /* structure version */ - __u32 flags; /* superblock version flags */ - __u32 logsectsize; /* log sector size, bytes */ - __u32 rtsectsize; /* realtime sector size, bytes */ - __u32 dirblocksize; /* directory block size, bytes */ -} __attribute__((packed)) compat_xfs_fsop_geom_v1_t; - -#define XFS_IOC_FSGEOMETRY_V1_32 \ - _IOR ('X', 100, struct compat_xfs_fsop_geom_v1) - STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg) { compat_xfs_fsop_geom_v1_t __user *p32 = (void __user *)arg; @@ -128,12 +78,6 @@ STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg) return (unsigned long)p; } -typedef struct compat_xfs_inogrp { - __u64 xi_startino; /* starting inode number */ - __s32 xi_alloccount; /* # bits set in allocmask */ - __u64 xi_allocmask; /* mask of allocated inodes */ -} __attribute__((packed)) compat_xfs_inogrp_t; - STATIC int xfs_inumbers_fmt_compat( void __user *ubuffer, const xfs_inogrp_t *buffer, @@ -154,19 +98,11 @@ STATIC int xfs_inumbers_fmt_compat( } #else - #define xfs_inumbers_fmt_compat xfs_inumbers_fmt -#define _PACKED - #endif /* XFS_IOC_FSBULKSTAT and friends */ -typedef struct compat_xfs_bstime { - __s32 tv_sec; /* seconds */ - __s32 tv_nsec; /* and nanoseconds */ -} compat_xfs_bstime_t; - STATIC int xfs_bstime_store_compat( compat_xfs_bstime_t __user *p32, const xfs_bstime_t *p) @@ -180,30 +116,6 @@ STATIC int xfs_bstime_store_compat( return 0; } -typedef struct compat_xfs_bstat { - __u64 bs_ino; /* inode number */ - __u16 bs_mode; /* type and mode */ - __u16 bs_nlink; /* number of links */ - __u32 bs_uid; /* user id */ - __u32 bs_gid; /* group id */ - __u32 bs_rdev; /* device value */ - __s32 bs_blksize; /* block size */ - __s64 bs_size; /* file size */ - compat_xfs_bstime_t bs_atime; /* access time */ - compat_xfs_bstime_t bs_mtime; /* modify time */ - compat_xfs_bstime_t bs_ctime; /* inode change time */ - int64_t bs_blocks; /* number of blocks */ - __u32 bs_xflags; /* extended flags */ - __s32 bs_extsize; /* extent size */ - __s32 bs_extents; /* number of extents */ - __u32 bs_gen; /* generation count */ - __u16 bs_projid; /* project id */ - unsigned char bs_pad[14]; /* pad space, unused */ - __u32 bs_dmevmask; /* DMIG event mask */ - __u16 bs_dmstate; /* DMIG state info */ - __u16 bs_aextents; /* attribute number of extents */ -} _PACKED compat_xfs_bstat_t; - STATIC int xfs_bulkstat_one_fmt_compat( void __user *ubuffer, const xfs_bstat_t *buffer) @@ -234,22 +146,6 @@ STATIC int xfs_bulkstat_one_fmt_compat( return sizeof(*p32); } - - -typedef struct compat_xfs_fsop_bulkreq { - compat_uptr_t lastip; /* last inode # pointer */ - __s32 icount; /* count of entries in buffer */ - compat_uptr_t 
ubuffer; /* user buffer for inode desc. */ - compat_uptr_t ocount; /* output count pointer */ -} compat_xfs_fsop_bulkreq_t; - -#define XFS_IOC_FSBULKSTAT_32 \ - _IOWR('X', 101, struct compat_xfs_fsop_bulkreq) -#define XFS_IOC_FSBULKSTAT_SINGLE_32 \ - _IOWR('X', 102, struct compat_xfs_fsop_bulkreq) -#define XFS_IOC_FSINUMBERS_32 \ - _IOWR('X', 103, struct compat_xfs_fsop_bulkreq) - /* copied from xfs_ioctl.c */ STATIC int xfs_ioc_bulkstat_compat( @@ -320,29 +216,6 @@ xfs_ioc_bulkstat_compat( return 0; } - - -typedef struct compat_xfs_fsop_handlereq { - __u32 fd; /* fd for FD_TO_HANDLE */ - compat_uptr_t path; /* user pathname */ - __u32 oflags; /* open flags */ - compat_uptr_t ihandle; /* user supplied handle */ - __u32 ihandlen; /* user supplied length */ - compat_uptr_t ohandle; /* user buffer for handle */ - compat_uptr_t ohandlen; /* user buffer length */ -} compat_xfs_fsop_handlereq_t; - -#define XFS_IOC_PATH_TO_FSHANDLE_32 \ - _IOWR('X', 104, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_PATH_TO_HANDLE_32 \ - _IOWR('X', 105, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_FD_TO_HANDLE_32 \ - _IOWR('X', 106, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_OPEN_BY_HANDLE_32 \ - _IOWR('X', 107, struct compat_xfs_fsop_handlereq) -#define XFS_IOC_READLINK_BY_HANDLE_32 \ - _IOWR('X', 108, struct compat_xfs_fsop_handlereq) - STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg) { compat_xfs_fsop_handlereq_t __user *p32 = (void __user *)arg; @@ -365,7 +238,6 @@ STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg) return (unsigned long)p; } - STATIC long xfs_compat_ioctl( int mode, @@ -404,9 +276,9 @@ xfs_compat_ioctl( case XFS_IOC_ERROR_CLEARALL: break; - case XFS_IOC32_GETXFLAGS: - case XFS_IOC32_SETXFLAGS: - case XFS_IOC32_GETVERSION: + case XFS_IOC_GETXFLAGS_32: + case XFS_IOC_SETXFLAGS_32: + case XFS_IOC_GETVERSION_32: cmd = _NATIVE_IOC(cmd, long); break; #ifdef BROKEN_X86_ALIGNMENT @@ -426,7 +298,6 @@ xfs_compat_ioctl( arg = xfs_ioctl32_geom_v1(arg); cmd = _NATIVE_IOC(cmd, struct xfs_fsop_geom_v1); break; - #else /* These are handled fine if no alignment issues */ case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: @@ -464,7 +335,6 @@ xfs_compat_ioctl( error = xfs_ioctl(XFS_I(inode), file, mode, cmd, (void __user *)arg); xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED); - return error; } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 02de6e62ee3..003a10e695b 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -18,7 +18,152 @@ #ifndef __XFS_IOCTL32_H__ #define __XFS_IOCTL32_H__ +#include + extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long); extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long); +/* + * on 32-bit arches, ioctl argument structures may have different sizes + * and/or alignment. We define compat structures which match the + * 32-bit sizes/alignments here, and their associated ioctl numbers. + * + * xfs_ioctl32.c contains routines to copy these structures in and out. + */ + +/* stock kernel-level ioctls we support */ +#define XFS_IOC_GETXFLAGS_32 FS_IOC32_GETFLAGS +#define XFS_IOC_SETXFLAGS_32 FS_IOC32_SETFLAGS +#define XFS_IOC_GETVERSION_32 FS_IOC32_GETVERSION + +/* + * On intel, even if sizes match, alignment and/or padding may differ. 
+ */ +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) +#define BROKEN_X86_ALIGNMENT +#define __compat_packed __attribute__((packed)) +#else +#define __compat_packed +#endif + +typedef struct compat_xfs_bstime { + compat_time_t tv_sec; /* seconds */ + __s32 tv_nsec; /* and nanoseconds */ +} compat_xfs_bstime_t; + +typedef struct compat_xfs_bstat { + __u64 bs_ino; /* inode number */ + __u16 bs_mode; /* type and mode */ + __u16 bs_nlink; /* number of links */ + __u32 bs_uid; /* user id */ + __u32 bs_gid; /* group id */ + __u32 bs_rdev; /* device value */ + __s32 bs_blksize; /* block size */ + __s64 bs_size; /* file size */ + compat_xfs_bstime_t bs_atime; /* access time */ + compat_xfs_bstime_t bs_mtime; /* modify time */ + compat_xfs_bstime_t bs_ctime; /* inode change time */ + int64_t bs_blocks; /* number of blocks */ + __u32 bs_xflags; /* extended flags */ + __s32 bs_extsize; /* extent size */ + __s32 bs_extents; /* number of extents */ + __u32 bs_gen; /* generation count */ + __u16 bs_projid; /* project id */ + unsigned char bs_pad[14]; /* pad space, unused */ + __u32 bs_dmevmask; /* DMIG event mask */ + __u16 bs_dmstate; /* DMIG state info */ + __u16 bs_aextents; /* attribute number of extents */ +} __compat_packed compat_xfs_bstat_t; + +typedef struct compat_xfs_fsop_bulkreq { + compat_uptr_t lastip; /* last inode # pointer */ + __s32 icount; /* count of entries in buffer */ + compat_uptr_t ubuffer; /* user buffer for inode desc. */ + compat_uptr_t ocount; /* output count pointer */ +} compat_xfs_fsop_bulkreq_t; + +#define XFS_IOC_FSBULKSTAT_32 \ + _IOWR('X', 101, struct compat_xfs_fsop_bulkreq) +#define XFS_IOC_FSBULKSTAT_SINGLE_32 \ + _IOWR('X', 102, struct compat_xfs_fsop_bulkreq) +#define XFS_IOC_FSINUMBERS_32 \ + _IOWR('X', 103, struct compat_xfs_fsop_bulkreq) + +typedef struct compat_xfs_fsop_handlereq { + __u32 fd; /* fd for FD_TO_HANDLE */ + compat_uptr_t path; /* user pathname */ + __u32 oflags; /* open flags */ + compat_uptr_t ihandle; /* user supplied handle */ + __u32 ihandlen; /* user supplied length */ + compat_uptr_t ohandle; /* user buffer for handle */ + compat_uptr_t ohandlen; /* user buffer length */ +} compat_xfs_fsop_handlereq_t; + +#define XFS_IOC_PATH_TO_FSHANDLE_32 \ + _IOWR('X', 104, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_PATH_TO_HANDLE_32 \ + _IOWR('X', 105, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_FD_TO_HANDLE_32 \ + _IOWR('X', 106, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_OPEN_BY_HANDLE_32 \ + _IOWR('X', 107, struct compat_xfs_fsop_handlereq) +#define XFS_IOC_READLINK_BY_HANDLE_32 \ + _IOWR('X', 108, struct compat_xfs_fsop_handlereq) + +#ifdef BROKEN_X86_ALIGNMENT +/* on ia32 l_start is on a 32-bit boundary */ +typedef struct compat_xfs_flock64 { + __s16 l_type; + __s16 l_whence; + __s64 l_start __attribute__((packed)); + /* len == 0 means until end of file */ + __s64 l_len __attribute__((packed)); + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserve area */ +} compat_xfs_flock64_t; + +#define XFS_IOC_ALLOCSP_32 _IOW('X', 10, struct compat_xfs_flock64) +#define XFS_IOC_FREESP_32 _IOW('X', 11, struct compat_xfs_flock64) +#define XFS_IOC_ALLOCSP64_32 _IOW('X', 36, struct compat_xfs_flock64) +#define XFS_IOC_FREESP64_32 _IOW('X', 37, struct compat_xfs_flock64) +#define XFS_IOC_RESVSP_32 _IOW('X', 40, struct compat_xfs_flock64) +#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64) +#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64) +#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct 
compat_xfs_flock64) + +typedef struct compat_xfs_fsop_geom_v1 { + __u32 blocksize; /* filesystem (data) block size */ + __u32 rtextsize; /* realtime extent size */ + __u32 agblocks; /* fsblocks in an AG */ + __u32 agcount; /* number of allocation groups */ + __u32 logblocks; /* fsblocks in the log */ + __u32 sectsize; /* (data) sector size, bytes */ + __u32 inodesize; /* inode size in bytes */ + __u32 imaxpct; /* max allowed inode space(%) */ + __u64 datablocks; /* fsblocks in data subvolume */ + __u64 rtblocks; /* fsblocks in realtime subvol */ + __u64 rtextents; /* rt extents in realtime subvol*/ + __u64 logstart; /* starting fsblock of the log */ + unsigned char uuid[16]; /* unique id of the filesystem */ + __u32 sunit; /* stripe unit, fsblocks */ + __u32 swidth; /* stripe width, fsblocks */ + __s32 version; /* structure version */ + __u32 flags; /* superblock version flags */ + __u32 logsectsize; /* log sector size, bytes */ + __u32 rtsectsize; /* realtime sector size, bytes */ + __u32 dirblocksize; /* directory block size, bytes */ +} __attribute__((packed)) compat_xfs_fsop_geom_v1_t; + +#define XFS_IOC_FSGEOMETRY_V1_32 \ + _IOR('X', 100, struct compat_xfs_fsop_geom_v1) + +typedef struct compat_xfs_inogrp { + __u64 xi_startino; /* starting inode number */ + __s32 xi_alloccount; /* # bits set in allocmask */ + __u64 xi_allocmask; /* mask of allocated inodes */ +} __attribute__((packed)) compat_xfs_inogrp_t; + +#endif /* BROKEN_X86_ALIGNMENT */ + #endif /* __XFS_IOCTL32_H__ */ diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 4ae03bae992..589c41c3844 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -418,10 +418,6 @@ typedef struct xfs_handle { #define XFS_IOC_GETXFLAGS FS_IOC_GETFLAGS #define XFS_IOC_SETXFLAGS FS_IOC_SETFLAGS #define XFS_IOC_GETVERSION FS_IOC_GETVERSION -/* 32-bit compat counterparts */ -#define XFS_IOC32_GETXFLAGS FS_IOC32_GETFLAGS -#define XFS_IOC32_SETXFLAGS FS_IOC32_SETFLAGS -#define XFS_IOC32_GETVERSION FS_IOC32_GETVERSION /* * ioctl commands that replace IRIX fcntl()'s -- cgit v1.2.3-70-g09d2 From d5547f9feea459dfc9e7313bd1d561394e2c129f Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:08 -0600 Subject: [XFS] Clean up some existing compat ioctl calls Create a new xfs_ioctl.h file which has prototypes for ioctl helpers that may be called in compat mode. Change several compat ioctl cases which are IOW to simply copy in the userspace argument, then call the common ioctl helper. This also fixes xfs_compat_ioc_fsgeometry_v1(), which had it backwards before; it copied in an (empty) arg, then copied out the native result, which probably corrupted userspace. It should be translating on the copyout. Also, a bit of formatting cleanup for consistency, and conversion of all error returns to use XFS_ERROR(). 
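Concretely, XFS_IOC_FSGEOMETRY_V1 is a read-only (_IOR) ioctl, so there is nothing to copy in; the corrected helper builds the native geometry and translates on the copyout, roughly as xfs_compat_ioc_fsgeometry_v1() below does:

	xfs_fsop_geom_t		fsgeo;
	int			error;

	error = xfs_fs_geometry(mp, &fsgeo, 3);
	if (error)
		return -error;
	/* the 32-bit variant simply has some padding at the end */
	if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1)))
		return -XFS_ERROR(EFAULT);
	return 0;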
Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl.c | 8 +- fs/xfs/linux-2.6/xfs_ioctl.h | 47 ++++++++++ fs/xfs/linux-2.6/xfs_ioctl32.c | 207 ++++++++++++++++++++++++----------------- 3 files changed, 171 insertions(+), 91 deletions(-) create mode 100644 fs/xfs/linux-2.6/xfs_ioctl.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 03c55b8fa37..498d3e15843 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -68,7 +68,7 @@ * XFS_IOC_PATH_TO_HANDLE * returns full handle for a path */ -STATIC int +int xfs_find_handle( unsigned int cmd, xfs_fsop_handlereq_t *hreq) @@ -245,7 +245,7 @@ xfs_vget_fsop_handlereq( return 0; } -STATIC int +int xfs_open_by_handle( xfs_mount_t *mp, xfs_fsop_handlereq_t *hreq, @@ -355,7 +355,7 @@ do_readlink( } -STATIC int +int xfs_readlink_by_handle( xfs_mount_t *mp, xfs_fsop_handlereq_t *hreq, @@ -651,7 +651,7 @@ xfs_attrmulti_by_handle( return -error; } -STATIC int +int xfs_ioc_space( struct xfs_inode *ip, struct inode *inode, diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h new file mode 100644 index 00000000000..2df849f49a5 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2008 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_IOCTL_H__ +#define __XFS_IOCTL_H__ + +extern int +xfs_ioc_space( + struct xfs_inode *ip, + struct inode *inode, + struct file *filp, + int ioflags, + unsigned int cmd, + xfs_flock64_t *bf); + +extern int +xfs_find_handle( + unsigned int cmd, + xfs_fsop_handlereq_t *hreq); + +extern int +xfs_open_by_handle( + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, + struct file *parfilp, + struct inode *parinode); + +extern int +xfs_readlink_by_handle( + xfs_mount_t *mp, + xfs_fsop_handlereq_t *hreq, + struct inode *parinode); +#endif diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 620bb2d6d26..35edc054fb9 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -43,55 +43,62 @@ #include "xfs_error.h" #include "xfs_dfrag.h" #include "xfs_vnodeops.h" +#include "xfs_fsops.h" +#include "xfs_ioctl.h" #include "xfs_ioctl32.h" #define _NATIVE_IOC(cmd, type) \ _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) #ifdef BROKEN_X86_ALIGNMENT -STATIC unsigned long -xfs_ioctl32_flock( - unsigned long arg) +STATIC int +xfs_compat_flock64_copyin( + xfs_flock64_t *bf, + compat_xfs_flock64_t __user *arg32) { - compat_xfs_flock64_t __user *p32 = (void __user *)arg; - xfs_flock64_t __user *p = compat_alloc_user_space(sizeof(*p)); - - if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) || - copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) || - copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) || - copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || - copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || - copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) - return -EFAULT; - - return (unsigned long)p; + if (get_user(bf->l_type, &arg32->l_type) || + get_user(bf->l_whence, &arg32->l_whence) || + get_user(bf->l_start, &arg32->l_start) || + get_user(bf->l_len, &arg32->l_len) || + get_user(bf->l_sysid, &arg32->l_sysid) || + get_user(bf->l_pid, &arg32->l_pid) || + copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) + return -XFS_ERROR(EFAULT); + return 0; } -STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg) +STATIC int +xfs_compat_ioc_fsgeometry_v1( + struct xfs_mount *mp, + compat_xfs_fsop_geom_v1_t __user *arg32) { - compat_xfs_fsop_geom_v1_t __user *p32 = (void __user *)arg; - xfs_fsop_geom_v1_t __user *p = compat_alloc_user_space(sizeof(*p)); + xfs_fsop_geom_t fsgeo; + int error; - if (copy_in_user(p, p32, sizeof(*p32))) - return -EFAULT; - return (unsigned long)p; + error = xfs_fs_geometry(mp, &fsgeo, 3); + if (error) + return -error; + /* The 32-bit variant simply has some padding at the end */ + if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) + return -XFS_ERROR(EFAULT); + return 0; } -STATIC int xfs_inumbers_fmt_compat( - void __user *ubuffer, - const xfs_inogrp_t *buffer, - long count, - long *written) +STATIC int +xfs_inumbers_fmt_compat( + void __user *ubuffer, + const xfs_inogrp_t *buffer, + long count, + long *written) { - compat_xfs_inogrp_t __user *p32 = ubuffer; - long i; + compat_xfs_inogrp_t __user *p32 = ubuffer; + long i; for (i = 0; i < count; i++) { if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || 
put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) - return -EFAULT; + return -XFS_ERROR(EFAULT); } *written = count * sizeof(*p32); return 0; @@ -103,24 +110,26 @@ STATIC int xfs_inumbers_fmt_compat( /* XFS_IOC_FSBULKSTAT and friends */ -STATIC int xfs_bstime_store_compat( - compat_xfs_bstime_t __user *p32, - const xfs_bstime_t *p) +STATIC int +xfs_bstime_store_compat( + compat_xfs_bstime_t __user *p32, + const xfs_bstime_t *p) { - __s32 sec32; + __s32 sec32; sec32 = p->tv_sec; if (put_user(sec32, &p32->tv_sec) || put_user(p->tv_nsec, &p32->tv_nsec)) - return -EFAULT; + return -XFS_ERROR(EFAULT); return 0; } -STATIC int xfs_bulkstat_one_fmt_compat( +STATIC int +xfs_bulkstat_one_fmt_compat( void __user *ubuffer, const xfs_bstat_t *buffer) { - compat_xfs_bstat_t __user *p32 = ubuffer; + compat_xfs_bstat_t __user *p32 = ubuffer; if (put_user(buffer->bs_ino, &p32->bs_ino) || put_user(buffer->bs_mode, &p32->bs_mode) || @@ -142,7 +151,7 @@ STATIC int xfs_bulkstat_one_fmt_compat( put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || put_user(buffer->bs_dmstate, &p32->bs_dmstate) || put_user(buffer->bs_aextents, &p32->bs_aextents)) - return -EFAULT; + return -XFS_ERROR(EFAULT); return sizeof(*p32); } @@ -165,20 +174,20 @@ xfs_ioc_bulkstat_compat( /* should be called again (unused here, but used in dmapi) */ if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + return -XFS_ERROR(EPERM); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); if (get_user(addr, &p32->lastip)) - return -EFAULT; + return -XFS_ERROR(EFAULT); bulkreq.lastip = compat_ptr(addr); if (get_user(bulkreq.icount, &p32->icount) || get_user(addr, &p32->ubuffer)) - return -EFAULT; + return -XFS_ERROR(EFAULT); bulkreq.ubuffer = compat_ptr(addr); if (get_user(addr, &p32->ocount)) - return -EFAULT; + return -XFS_ERROR(EFAULT); bulkreq.ocount = compat_ptr(addr); if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) @@ -216,38 +225,40 @@ xfs_ioc_bulkstat_compat( return 0; } -STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg) +STATIC int +xfs_compat_handlereq_copyin( + xfs_fsop_handlereq_t *hreq, + compat_xfs_fsop_handlereq_t __user *arg32) { - compat_xfs_fsop_handlereq_t __user *p32 = (void __user *)arg; - xfs_fsop_handlereq_t __user *p = compat_alloc_user_space(sizeof(*p)); - u32 addr; - - if (copy_in_user(&p->fd, &p32->fd, sizeof(__u32)) || - get_user(addr, &p32->path) || - put_user(compat_ptr(addr), &p->path) || - copy_in_user(&p->oflags, &p32->oflags, sizeof(__u32)) || - get_user(addr, &p32->ihandle) || - put_user(compat_ptr(addr), &p->ihandle) || - copy_in_user(&p->ihandlen, &p32->ihandlen, sizeof(__u32)) || - get_user(addr, &p32->ohandle) || - put_user(compat_ptr(addr), &p->ohandle) || - get_user(addr, &p32->ohandlen) || - put_user(compat_ptr(addr), &p->ohandlen)) - return -EFAULT; - - return (unsigned long)p; + compat_xfs_fsop_handlereq_t hreq32; + + if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) + return -XFS_ERROR(EFAULT); + + hreq->fd = hreq32.fd; + hreq->path = compat_ptr(hreq32.path); + hreq->oflags = hreq32.oflags; + hreq->ihandle = compat_ptr(hreq32.ihandle); + hreq->ihandlen = hreq32.ihandlen; + hreq->ohandle = compat_ptr(hreq32.ohandle); + hreq->ohandlen = compat_ptr(hreq32.ohandlen); + + return 0; } STATIC long xfs_compat_ioctl( - int mode, - struct file *file, + xfs_inode_t *ip, + struct file *filp, + int ioflags, unsigned cmd, - unsigned long arg) + void __user *arg) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = filp->f_path.dentry->d_inode; + 
xfs_mount_t *mp = ip->i_mount; int error; + xfs_itrace_entry(XFS_I(inode)); switch (cmd) { case XFS_IOC_DIOINFO: case XFS_IOC_FSGEOMETRY: @@ -290,14 +301,16 @@ xfs_compat_ioctl( case XFS_IOC_RESVSP_32: case XFS_IOC_UNRESVSP_32: case XFS_IOC_RESVSP64_32: - case XFS_IOC_UNRESVSP64_32: - arg = xfs_ioctl32_flock(arg); + case XFS_IOC_UNRESVSP64_32: { + struct xfs_flock64 bf; + + if (xfs_compat_flock64_copyin(&bf, arg)) + return -XFS_ERROR(EFAULT); cmd = _NATIVE_IOC(cmd, struct xfs_flock64); - break; + return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); + } case XFS_IOC_FSGEOMETRY_V1_32: - arg = xfs_ioctl32_geom_v1(arg); - cmd = _NATIVE_IOC(cmd, struct xfs_fsop_geom_v1); - break; + return xfs_compat_ioc_fsgeometry_v1(mp, arg); #else /* These are handled fine if no alignment issues */ case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: @@ -323,35 +336,55 @@ xfs_compat_ioctl( cmd, (void __user*)arg); case XFS_IOC_FD_TO_HANDLE_32: case XFS_IOC_PATH_TO_HANDLE_32: - case XFS_IOC_PATH_TO_FSHANDLE_32: - case XFS_IOC_OPEN_BY_HANDLE_32: - case XFS_IOC_READLINK_BY_HANDLE_32: - arg = xfs_ioctl32_fshandle(arg); + case XFS_IOC_PATH_TO_FSHANDLE_32: { + struct xfs_fsop_handlereq hreq; + + if (xfs_compat_handlereq_copyin(&hreq, arg)) + return -XFS_ERROR(EFAULT); cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq); - break; + return xfs_find_handle(cmd, &hreq); + } + case XFS_IOC_OPEN_BY_HANDLE_32: { + struct xfs_fsop_handlereq hreq; + + if (xfs_compat_handlereq_copyin(&hreq, arg)) + return -XFS_ERROR(EFAULT); + return xfs_open_by_handle(mp, &hreq, filp, inode); + } + case XFS_IOC_READLINK_BY_HANDLE_32: { + struct xfs_fsop_handlereq hreq; + + if (xfs_compat_handlereq_copyin(&hreq, arg)) + return -XFS_ERROR(EFAULT); + return xfs_readlink_by_handle(mp, &hreq, inode); + } default: - return -ENOIOCTLCMD; + return -XFS_ERROR(ENOIOCTLCMD); } - error = xfs_ioctl(XFS_I(inode), file, mode, cmd, (void __user *)arg); - xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED); + error = xfs_ioctl(ip, filp, ioflags, cmd, arg); return error; } long xfs_file_compat_ioctl( - struct file *file, - unsigned cmd, - unsigned long arg) + struct file *filp, + unsigned int cmd, + unsigned long p) { - return xfs_compat_ioctl(0, file, cmd, arg); + struct inode *inode = filp->f_path.dentry->d_inode; + + return xfs_compat_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p); } long xfs_file_compat_invis_ioctl( - struct file *file, - unsigned cmd, - unsigned long arg) + struct file *filp, + unsigned int cmd, + unsigned long p) { - return xfs_compat_ioctl(IO_INVIS, file, cmd, arg); + struct inode *inode = filp->f_path.dentry->d_inode; + + return xfs_compat_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, + (void __user *)p); } -- cgit v1.2.3-70-g09d2 From e94fc4a43e5c39f689e83caf6d2f0939081f5e6b Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:09 -0600 Subject: [XFS] Add compat handlers for swapext ioctl The big hitter here was the bstat field, which contains different sized time_t on 32 vs. 64 bit. Add a copyin function to translate the 32-bit arg to 64-bit, and call the swapext ioctl helper. 
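Everything in the swapext argument up to sx_stat has the same layout on both ABIs; only the trailing bstat (with its 32-bit timestamps) needs translation. The new XFS_IOC_SWAPEXT case therefore bulk-copies the leading fields and converts the bstat by hand, roughly:

	struct xfs_swapext		sxp;
	struct compat_xfs_swapext __user *sxu = arg;

	/* bulk copy in up to the sx_stat field, then translate the bstat */
	if (copy_from_user(&sxp, sxu, offsetof(xfs_swapext_t, sx_stat)) ||
	    xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat))
		return -XFS_ERROR(EFAULT);
	error = xfs_swapext(&sxp);
	return -error;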
Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 61 ++++++++++++++++++++++++++++++++++++++---- fs/xfs/linux-2.6/xfs_ioctl32.h | 13 +++++++++ 2 files changed, 69 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 35edc054fb9..132f3dd2c7e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -108,6 +108,50 @@ xfs_inumbers_fmt_compat( #define xfs_inumbers_fmt_compat xfs_inumbers_fmt #endif +STATIC int +xfs_ioctl32_bstime_copyin( + xfs_bstime_t *bstime, + compat_xfs_bstime_t __user *bstime32) +{ + compat_time_t sec32; /* tv_sec differs on 64 vs. 32 */ + + if (get_user(sec32, &bstime32->tv_sec) || + get_user(bstime->tv_nsec, &bstime32->tv_nsec)) + return -XFS_ERROR(EFAULT); + bstime->tv_sec = sec32; + return 0; +} + +/* xfs_bstat_t has differing alignment on intel, & bstime_t sizes everywhere */ +STATIC int +xfs_ioctl32_bstat_copyin( + xfs_bstat_t *bstat, + compat_xfs_bstat_t __user *bstat32) +{ + if (get_user(bstat->bs_ino, &bstat32->bs_ino) || + get_user(bstat->bs_mode, &bstat32->bs_mode) || + get_user(bstat->bs_nlink, &bstat32->bs_nlink) || + get_user(bstat->bs_uid, &bstat32->bs_uid) || + get_user(bstat->bs_gid, &bstat32->bs_gid) || + get_user(bstat->bs_rdev, &bstat32->bs_rdev) || + get_user(bstat->bs_blksize, &bstat32->bs_blksize) || + get_user(bstat->bs_size, &bstat32->bs_size) || + xfs_ioctl32_bstime_copyin(&bstat->bs_atime, &bstat32->bs_atime) || + xfs_ioctl32_bstime_copyin(&bstat->bs_mtime, &bstat32->bs_mtime) || + xfs_ioctl32_bstime_copyin(&bstat->bs_ctime, &bstat32->bs_ctime) || + get_user(bstat->bs_blocks, &bstat32->bs_size) || + get_user(bstat->bs_xflags, &bstat32->bs_size) || + get_user(bstat->bs_extsize, &bstat32->bs_extsize) || + get_user(bstat->bs_extents, &bstat32->bs_extents) || + get_user(bstat->bs_gen, &bstat32->bs_gen) || + get_user(bstat->bs_projid, &bstat32->bs_projid) || + get_user(bstat->bs_dmevmask, &bstat32->bs_dmevmask) || + get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || + get_user(bstat->bs_aextents, &bstat32->bs_aextents)) + return -XFS_ERROR(EFAULT); + return 0; +} + /* XFS_IOC_FSBULKSTAT and friends */ STATIC int @@ -292,6 +336,18 @@ xfs_compat_ioctl( case XFS_IOC_GETVERSION_32: cmd = _NATIVE_IOC(cmd, long); break; + case XFS_IOC_SWAPEXT: { + struct xfs_swapext sxp; + struct compat_xfs_swapext __user *sxu = arg; + + /* Bulk copy in up to the sx_stat field, then grab bstat */ + if (copy_from_user(&sxp, sxu, + offsetof(xfs_swapext_t, sx_stat)) || + xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) + return -XFS_ERROR(EFAULT); + error = xfs_swapext(&sxp); + return -error; + } #ifdef BROKEN_X86_ALIGNMENT /* xfs_flock_t has wrong u32 vs u64 alignment */ case XFS_IOC_ALLOCSP_32: @@ -322,11 +378,6 @@ xfs_compat_ioctl( case XFS_IOC_UNRESVSP64: case XFS_IOC_FSGEOMETRY_V1: break; - - /* xfs_bstat_t still has wrong u32 vs u64 alignment */ - case XFS_IOC_SWAPEXT: - break; - #endif case XFS_IOC_FSBULKSTAT_32: case XFS_IOC_FSBULKSTAT_SINGLE_32: diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 003a10e695b..bf08ab12a6f 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -110,6 +110,19 @@ typedef struct compat_xfs_fsop_handlereq { #define XFS_IOC_READLINK_BY_HANDLE_32 \ _IOWR('X', 108, struct compat_xfs_fsop_handlereq) +/* The bstat field in the swapext struct needs translation */ +typedef struct compat_xfs_swapext { + 
__int64_t sx_version; /* version */ + __int64_t sx_fdtarget; /* fd of target file */ + __int64_t sx_fdtmp; /* fd of tmp file */ + xfs_off_t sx_offset; /* offset into file */ + xfs_off_t sx_length; /* leng from offset */ + char sx_pad[16]; /* pad space, unused */ + compat_xfs_bstat_t sx_stat; /* stat of target b4 copy */ +} __compat_packed compat_xfs_swapext_t; + +#define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext) + #ifdef BROKEN_X86_ALIGNMENT /* on ia32 l_start is on a 32-bit boundary */ typedef struct compat_xfs_flock64 { -- cgit v1.2.3-70-g09d2 From 471d59103167c84f17b9bcfee22ed10b44ff206e Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:10 -0600 Subject: [XFS] Add compat handlers for data & rt growfs ioctls The args for XFS_IOC_FSGROWFSDATA and XFS_IOC_FSGROWFSRTA have padding on the end on intel, so add arg copyin functions, and then just call the growfs ioctl helpers. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 40 ++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_ioctl32.h | 14 ++++++++++++++ 2 files changed, 54 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 132f3dd2c7e..d1ac5d5c009 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -44,6 +44,8 @@ #include "xfs_dfrag.h" #include "xfs_vnodeops.h" #include "xfs_fsops.h" +#include "xfs_alloc.h" +#include "xfs_rtalloc.h" #include "xfs_ioctl.h" #include "xfs_ioctl32.h" @@ -84,6 +86,28 @@ xfs_compat_ioc_fsgeometry_v1( return 0; } +STATIC int +xfs_compat_growfs_data_copyin( + struct xfs_growfs_data *in, + compat_xfs_growfs_data_t __user *arg32) +{ + if (get_user(in->newblocks, &arg32->newblocks) || + get_user(in->imaxpct, &arg32->imaxpct)) + return -XFS_ERROR(EFAULT); + return 0; +} + +STATIC int +xfs_compat_growfs_rt_copyin( + struct xfs_growfs_rt *in, + compat_xfs_growfs_rt_t __user *arg32) +{ + if (get_user(in->newblocks, &arg32->newblocks) || + get_user(in->extsize, &arg32->extsize)) + return -XFS_ERROR(EFAULT); + return 0; +} + STATIC int xfs_inumbers_fmt_compat( void __user *ubuffer, @@ -367,6 +391,22 @@ xfs_compat_ioctl( } case XFS_IOC_FSGEOMETRY_V1_32: return xfs_compat_ioc_fsgeometry_v1(mp, arg); + case XFS_IOC_FSGROWFSDATA_32: { + struct xfs_growfs_data in; + + if (xfs_compat_growfs_data_copyin(&in, arg)) + return -XFS_ERROR(EFAULT); + error = xfs_growfs_data(mp, &in); + return -error; + } + case XFS_IOC_FSGROWFSRT_32: { + struct xfs_growfs_rt in; + + if (xfs_compat_growfs_rt_copyin(&in, arg)) + return -XFS_ERROR(EFAULT); + error = xfs_growfs_rt(mp, &in); + return -error; + } #else /* These are handled fine if no alignment issues */ case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index bf08ab12a6f..8b2a12a8e96 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -177,6 +177,20 @@ typedef struct compat_xfs_inogrp { __u64 xi_allocmask; /* mask of allocated inodes */ } __attribute__((packed)) compat_xfs_inogrp_t; +/* These growfs input structures have padding on the end, so must translate */ +typedef struct compat_xfs_growfs_data { + __u64 newblocks; /* new data subvol size, fsblocks */ + __u32 imaxpct; /* new inode space percentage limit */ +} __attribute__((packed)) compat_xfs_growfs_data_t; + +typedef struct compat_xfs_growfs_rt { + __u64 newblocks; /* new realtime size, fsblocks */ + 
__u32 extsize; /* new realtime extent size, fsblocks */ +} __attribute__((packed)) compat_xfs_growfs_rt_t; + +#define XFS_IOC_FSGROWFSDATA_32 _IOW('X', 110, struct compat_xfs_growfs_data) +#define XFS_IOC_FSGROWFSRT_32 _IOW('X', 112, struct compat_xfs_growfs_rt) + #endif /* BROKEN_X86_ALIGNMENT */ #endif /* __XFS_IOCTL32_H__ */ -- cgit v1.2.3-70-g09d2 From 2ee4fa5cb716eba104a4ef8efe159e1007a2aef6 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:11 -0600 Subject: [XFS] Make the bulkstat_one compat ioctl handling more sane Currently the compat formatter was handled by passing in "private_data" for the xfs_bulkstat_one formatter, which was really just another formatter... IMHO this got confusing. Instead, just make a new xfs_bulkstat_one_compat formatter for xfs_bulkstat, and call it via a wrapper. Also, don't translate the ioctl nrs into their native counterparts, that just clouds the issue; we're in a compat handler anyway, just switch on the 32-bit cmds. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 36 ++++++++++++++++++++++++------------ fs/xfs/xfs_itable.c | 24 ++++++++++++++++++++---- fs/xfs/xfs_itable.h | 12 ++++++++++++ 3 files changed, 56 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index d1ac5d5c009..a97022f2d9b 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -223,14 +223,30 @@ xfs_bulkstat_one_fmt_compat( return sizeof(*p32); } +STATIC int +xfs_bulkstat_one_compat( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_ino_t ino, /* inode number to get data for */ + void __user *buffer, /* buffer to place output in */ + int ubsize, /* size of buffer */ + void *private_data, /* my private data */ + xfs_daddr_t bno, /* starting bno of inode cluster */ + int *ubused, /* bytes used by me */ + void *dibuff, /* on-disk inode buffer */ + int *stat) /* BULKSTAT_RV_... 
*/ +{ + return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, + xfs_bulkstat_one_fmt_compat, bno, + ubused, dibuff, stat); +} + /* copied from xfs_ioctl.c */ STATIC int -xfs_ioc_bulkstat_compat( - xfs_mount_t *mp, - unsigned int cmd, - void __user *arg) +xfs_compat_ioc_bulkstat( + xfs_mount_t *mp, + unsigned int cmd, + compat_xfs_fsop_bulkreq_t __user *p32) { - compat_xfs_fsop_bulkreq_t __user *p32 = (void __user *)arg; u32 addr; xfs_fsop_bulkreq_t bulkreq; int count; /* # of records returned */ @@ -267,14 +283,12 @@ xfs_ioc_bulkstat_compat( if (bulkreq.ubuffer == NULL) return -XFS_ERROR(EINVAL); - if (cmd == XFS_IOC_FSINUMBERS) + if (cmd == XFS_IOC_FSINUMBERS_32) error = xfs_inumbers(mp, &inlast, &count, bulkreq.ubuffer, xfs_inumbers_fmt_compat); else { - /* declare a var to get a warning in case the type changes */ - bulkstat_one_fmt_pf formatter = xfs_bulkstat_one_fmt_compat; error = xfs_bulkstat(mp, &inlast, &count, - xfs_bulkstat_one, formatter, + xfs_bulkstat_one_compat, NULL, sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, BULKSTAT_FG_QUICK, &done); } @@ -422,9 +436,7 @@ xfs_compat_ioctl( case XFS_IOC_FSBULKSTAT_32: case XFS_IOC_FSBULKSTAT_SINGLE_32: case XFS_IOC_FSINUMBERS_32: - cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq); - return xfs_ioc_bulkstat_compat(XFS_I(inode)->i_mount, - cmd, (void __user*)arg); + return xfs_compat_ioc_bulkstat(mp, cmd, arg); case XFS_IOC_FD_TO_HANDLE_32: case XFS_IOC_PATH_TO_HANDLE_32: case XFS_IOC_PATH_TO_FSHANDLE_32: { diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index d4c0de86012..7bd49b87160 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -202,13 +202,13 @@ xfs_bulkstat_one_fmt( * Return stat information for one inode. * Return 0 if ok, else errno. */ -int /* error status */ -xfs_bulkstat_one( +int /* error status */ +xfs_bulkstat_one_int( xfs_mount_t *mp, /* mount point for filesystem */ xfs_ino_t ino, /* inode number to get data for */ void __user *buffer, /* buffer to place output in */ int ubsize, /* size of buffer */ - void *private_data, /* my private data */ + bulkstat_one_fmt_pf formatter, /* formatter, copy to user */ xfs_daddr_t bno, /* starting bno of inode cluster */ int *ubused, /* bytes used by me */ void *dibuff, /* on-disk inode buffer */ @@ -217,7 +217,6 @@ xfs_bulkstat_one( xfs_bstat_t *buf; /* return buffer */ int error = 0; /* error value */ xfs_dinode_t *dip; /* dinode inode pointer */ - bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt; dip = (xfs_dinode_t *)dibuff; *stat = BULKSTAT_RV_NOTHING; @@ -255,6 +254,23 @@ xfs_bulkstat_one( return error; } +int +xfs_bulkstat_one( + xfs_mount_t *mp, /* mount point for filesystem */ + xfs_ino_t ino, /* inode number to get data for */ + void __user *buffer, /* buffer to place output in */ + int ubsize, /* size of buffer */ + void *private_data, /* my private data */ + xfs_daddr_t bno, /* starting bno of inode cluster */ + int *ubused, /* bytes used by me */ + void *dibuff, /* on-disk inode buffer */ + int *stat) /* BULKSTAT_RV_... */ +{ + return xfs_bulkstat_one_int(mp, ino, buffer, ubsize, + xfs_bulkstat_one_fmt, bno, + ubused, dibuff, stat); +} + /* * Test to see whether we can use the ondisk inode directly, based * on the given bulkstat flags, filling in dipp accordingly. 
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index a1f18fce9b7..e210a4c0656 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -73,6 +73,18 @@ typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ void __user *ubuffer, /* buffer to write to */ const xfs_bstat_t *buffer); /* buffer to read from */ +int +xfs_bulkstat_one_int( + xfs_mount_t *mp, + xfs_ino_t ino, + void __user *buffer, + int ubsize, + bulkstat_one_fmt_pf formatter, + xfs_daddr_t bno, + int *ubused, + void *dibuff, + int *stat); + int xfs_bulkstat_one( xfs_mount_t *mp, -- cgit v1.2.3-70-g09d2 From 65fbaf2489c667bf79ae1f20403f30c66568d445 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:12 -0600 Subject: [XFS] Fix xfs_bulkstat_one size checks & error handling The 32-bit xfs_blkstat_one handler was failing because a size check checked whether the remaining (32-bit) user buffer was less than the (64-bit) bulkstat buffer, and failed with ENOMEM if so. Move this check into the respective handlers so that they check the correct sizes. Also, the formatters were returning negative errors or positive bytes copied; this was odd in the positive error value world of xfs, and handled wrong by at least some of the callers, which treated the bytes returned as an error value. Move the bytes-used assignment into the formatters. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 44 +++++++++++++++++++++++++----------------- fs/xfs/xfs_itable.c | 21 ++++++++++---------- fs/xfs/xfs_itable.h | 2 ++ 3 files changed, 39 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index a97022f2d9b..2d336062831 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -192,35 +192,43 @@ xfs_bstime_store_compat( return 0; } +/* Return 0 on success or positive error (to xfs_bulkstat()) */ STATIC int xfs_bulkstat_one_fmt_compat( void __user *ubuffer, + int ubsize, + int *ubused, const xfs_bstat_t *buffer) { compat_xfs_bstat_t __user *p32 = ubuffer; - if (put_user(buffer->bs_ino, &p32->bs_ino) || - put_user(buffer->bs_mode, &p32->bs_mode) || - put_user(buffer->bs_nlink, &p32->bs_nlink) || - put_user(buffer->bs_uid, &p32->bs_uid) || - put_user(buffer->bs_gid, &p32->bs_gid) || - put_user(buffer->bs_rdev, &p32->bs_rdev) || - put_user(buffer->bs_blksize, &p32->bs_blksize) || - put_user(buffer->bs_size, &p32->bs_size) || + if (ubsize < sizeof(*p32)) + return XFS_ERROR(ENOMEM); + + if (put_user(buffer->bs_ino, &p32->bs_ino) || + put_user(buffer->bs_mode, &p32->bs_mode) || + put_user(buffer->bs_nlink, &p32->bs_nlink) || + put_user(buffer->bs_uid, &p32->bs_uid) || + put_user(buffer->bs_gid, &p32->bs_gid) || + put_user(buffer->bs_rdev, &p32->bs_rdev) || + put_user(buffer->bs_blksize, &p32->bs_blksize) || + put_user(buffer->bs_size, &p32->bs_size) || xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) || xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) || xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) || - put_user(buffer->bs_blocks, &p32->bs_blocks) || - put_user(buffer->bs_xflags, &p32->bs_xflags) || - put_user(buffer->bs_extsize, &p32->bs_extsize) || - put_user(buffer->bs_extents, &p32->bs_extents) || - put_user(buffer->bs_gen, &p32->bs_gen) || - put_user(buffer->bs_projid, &p32->bs_projid) || - put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || - put_user(buffer->bs_dmstate, 
&p32->bs_dmstate) || + put_user(buffer->bs_blocks, &p32->bs_blocks) || + put_user(buffer->bs_xflags, &p32->bs_xflags) || + put_user(buffer->bs_extsize, &p32->bs_extsize) || + put_user(buffer->bs_extents, &p32->bs_extents) || + put_user(buffer->bs_gen, &p32->bs_gen) || + put_user(buffer->bs_projid, &p32->bs_projid) || + put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || + put_user(buffer->bs_dmstate, &p32->bs_dmstate) || put_user(buffer->bs_aextents, &p32->bs_aextents)) - return -XFS_ERROR(EFAULT); - return sizeof(*p32); + return XFS_ERROR(EFAULT); + if (ubused) + *ubused = sizeof(*p32); + return 0; } STATIC int diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index 7bd49b87160..e19d0a8d561 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -188,14 +188,21 @@ xfs_bulkstat_one_dinode( } } +/* Return 0 on success or positive error */ STATIC int xfs_bulkstat_one_fmt( void __user *ubuffer, + int ubsize, + int *ubused, const xfs_bstat_t *buffer) { + if (ubsize < sizeof(*buffer)) + return XFS_ERROR(ENOMEM); if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) - return -EFAULT; - return sizeof(*buffer); + return XFS_ERROR(EFAULT); + if (ubused) + *ubused = sizeof(*buffer); + return 0; } /* @@ -223,8 +230,6 @@ xfs_bulkstat_one_int( if (!buffer || xfs_internal_inum(mp, ino)) return XFS_ERROR(EINVAL); - if (ubsize < sizeof(*buf)) - return XFS_ERROR(ENOMEM); buf = kmem_alloc(sizeof(*buf), KM_SLEEP); @@ -239,15 +244,11 @@ xfs_bulkstat_one_int( xfs_bulkstat_one_dinode(mp, ino, dip, buf); } - error = formatter(buffer, buf); - if (error < 0) { - error = EFAULT; + error = formatter(buffer, ubsize, ubused, buf); + if (error) goto out_free; - } *stat = BULKSTAT_RV_DIDONE; - if (ubused) - *ubused = error; out_free: kmem_free(buf); diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h index e210a4c0656..1fb04e7deb6 100644 --- a/fs/xfs/xfs_itable.h +++ b/fs/xfs/xfs_itable.h @@ -71,6 +71,8 @@ xfs_bulkstat_single( typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ void __user *ubuffer, /* buffer to write to */ + int ubsize, /* remaining user buffer sz */ + int *ubused, /* bytes used by formatter */ const xfs_bstat_t *buffer); /* buffer to read from */ int -- cgit v1.2.3-70-g09d2 From af819d27637119105213433881f158931e29620b Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:13 -0600 Subject: [XFS] Fix compat XFS_IOC_FSBULKSTAT_SINGLE ioctl The XFS_IOC_FSBULKSTAT_SINGLE ioctl passes in the desired inode number, while XFS_IOC_FSBULKSTAT passes in the previous/last-stat'd inode number. The compat handler wasn't differentiating these, so when a XFS_IOC_FSBULKSTAT_SINGLE request for inode 128 was sent in, stat information for 131 was sent out. 
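With the fix, the compat handler keys off the 32-bit command to decide how the inode number is meant to be read, roughly as in xfs_compat_ioc_bulkstat() below (abridged):

	if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
		int res;

		/* inlast is the inode the caller asked about */
		error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
				sizeof(compat_xfs_bstat_t),
				NULL, 0, NULL, NULL, &res);
	} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
		/* inlast is a cursor: the last inode already stat'd */
		error = xfs_bulkstat(mp, &inlast, &count,
				xfs_bulkstat_one_compat, NULL,
				sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
				BULKSTAT_FG_QUICK, &done);
	} else
		error = XFS_ERROR(EINVAL);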
Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 2d336062831..cc1fe96efab 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -291,15 +291,22 @@ xfs_compat_ioc_bulkstat( if (bulkreq.ubuffer == NULL) return -XFS_ERROR(EINVAL); - if (cmd == XFS_IOC_FSINUMBERS_32) + if (cmd == XFS_IOC_FSINUMBERS_32) { error = xfs_inumbers(mp, &inlast, &count, bulkreq.ubuffer, xfs_inumbers_fmt_compat); - else { + } else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) { + int res; + + error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer, + sizeof(compat_xfs_bstat_t), + NULL, 0, NULL, NULL, &res); + } else if (cmd == XFS_IOC_FSBULKSTAT_32) { error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one_compat, NULL, sizeof(compat_xfs_bstat_t), bulkreq.ubuffer, BULKSTAT_FG_QUICK, &done); - } + } else + error = XFS_ERROR(EINVAL); if (error) return -error; -- cgit v1.2.3-70-g09d2 From ebeecd2b04645a4b79e1bc00d69cf4f98e03a684 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:14 -0600 Subject: [XFS] Hook up compat XFS_IOC_ATTRLIST_BY_HANDLE ioctl handler Add a compat handler for XFS_IOC_ATTRLIST_BY_HANDLE Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 136 ++++++++++++++++++++++++++++++++++++++++- fs/xfs/linux-2.6/xfs_ioctl32.h | 12 ++++ 2 files changed, 147 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index cc1fe96efab..e7eb0d7e0d5 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -46,6 +46,7 @@ #include "xfs_fsops.h" #include "xfs_alloc.h" #include "xfs_rtalloc.h" +#include "xfs_attr.h" #include "xfs_ioctl.h" #include "xfs_ioctl32.h" @@ -343,6 +344,138 @@ xfs_compat_handlereq_copyin( return 0; } +/* + * Convert userspace handle data into inode. + * + * We use the fact that all the fsop_handlereq ioctl calls have a data + * structure argument whose first component is always a xfs_fsop_handlereq_t, + * so we can pass that sub structure into this handy, shared routine. + * + * If no error, caller must always iput the returned inode. + */ +STATIC int +xfs_vget_fsop_handlereq_compat( + xfs_mount_t *mp, + struct inode *parinode, /* parent inode pointer */ + compat_xfs_fsop_handlereq_t *hreq, + struct inode **inode) +{ + void __user *hanp; + size_t hlen; + xfs_fid_t *xfid; + xfs_handle_t *handlep; + xfs_handle_t handle; + xfs_inode_t *ip; + xfs_ino_t ino; + __u32 igen; + int error; + + /* + * Only allow handle opens under a directory. 
+ */ + if (!S_ISDIR(parinode->i_mode)) + return XFS_ERROR(ENOTDIR); + + hanp = compat_ptr(hreq->ihandle); + hlen = hreq->ihandlen; + handlep = &handle; + + if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep)) + return XFS_ERROR(EINVAL); + if (copy_from_user(handlep, hanp, hlen)) + return XFS_ERROR(EFAULT); + if (hlen < sizeof(*handlep)) + memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen); + if (hlen > sizeof(handlep->ha_fsid)) { + if (handlep->ha_fid.fid_len != + (hlen - sizeof(handlep->ha_fsid) - + sizeof(handlep->ha_fid.fid_len)) || + handlep->ha_fid.fid_pad) + return XFS_ERROR(EINVAL); + } + + /* + * Crack the handle, obtain the inode # & generation # + */ + xfid = (struct xfs_fid *)&handlep->ha_fid; + if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) { + ino = xfid->fid_ino; + igen = xfid->fid_gen; + } else { + return XFS_ERROR(EINVAL); + } + + /* + * Get the XFS inode, building a Linux inode to go with it. + */ + error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); + if (error) + return error; + if (ip == NULL) + return XFS_ERROR(EIO); + if (ip->i_d.di_gen != igen) { + xfs_iput_new(ip, XFS_ILOCK_SHARED); + return XFS_ERROR(ENOENT); + } + + xfs_iunlock(ip, XFS_ILOCK_SHARED); + + *inode = VFS_I(ip); + return 0; +} + +STATIC int +xfs_compat_attrlist_by_handle( + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) +{ + int error; + attrlist_cursor_kern_t *cursor; + compat_xfs_fsop_attrlist_handlereq_t al_hreq; + struct inode *inode; + char *kbuf; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&al_hreq, arg, + sizeof(compat_xfs_fsop_attrlist_handlereq_t))) + return -XFS_ERROR(EFAULT); + if (al_hreq.buflen > XATTR_LIST_MAX) + return -XFS_ERROR(EINVAL); + + /* + * Reject flags, only allow namespaces. 
+ */ + if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) + return -XFS_ERROR(EINVAL); + + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq, + &inode); + if (error) + goto out; + + kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL); + if (!kbuf) + goto out_vn_rele; + + cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; + error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen, + al_hreq.flags, cursor); + if (error) + goto out_kfree; + + if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen)) + error = -EFAULT; + + out_kfree: + kfree(kbuf); + out_vn_rele: + iput(inode); + out: + return -error; +} + STATIC long xfs_compat_ioctl( xfs_inode_t *ip, @@ -368,7 +501,6 @@ xfs_compat_ioctl( case XFS_IOC_GETBMAPX: /* not handled case XFS_IOC_FSSETDM_BY_HANDLE: - case XFS_IOC_ATTRLIST_BY_HANDLE: case XFS_IOC_ATTRMULTI_BY_HANDLE: */ case XFS_IOC_FSCOUNTS: @@ -476,6 +608,8 @@ xfs_compat_ioctl( return -XFS_ERROR(EFAULT); return xfs_readlink_by_handle(mp, &hreq, inode); } + case XFS_IOC_ATTRLIST_BY_HANDLE_32: + return xfs_compat_attrlist_by_handle(mp, arg, inode); default: return -XFS_ERROR(ENOIOCTLCMD); } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 8b2a12a8e96..bbf3a2cd8a7 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -123,6 +123,18 @@ typedef struct compat_xfs_swapext { #define XFS_IOC_SWAPEXT_32 _IOWR('X', 109, struct compat_xfs_swapext) +typedef struct compat_xfs_fsop_attrlist_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ + struct xfs_attrlist_cursor pos; /* opaque cookie, list offset */ + __u32 flags; /* which namespace to use */ + __u32 buflen; /* length of buffer supplied */ + compat_uptr_t buffer; /* returned names */ +} __compat_packed compat_xfs_fsop_attrlist_handlereq_t; + +/* Note: actually this is read/write */ +#define XFS_IOC_ATTRLIST_BY_HANDLE_32 \ + _IOW('X', 122, struct compat_xfs_fsop_attrlist_handlereq) + #ifdef BROKEN_X86_ALIGNMENT /* on ia32 l_start is on a 32-bit boundary */ typedef struct compat_xfs_flock64 { -- cgit v1.2.3-70-g09d2 From 28750975ace79c547407a84d3969cbed516be8f8 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:15 -0600 Subject: [XFS] Hook up compat XFS_IOC_ATTRMULTI_BY_HANDLE ioctl handler Add a compat handler for XFS_IOC_ATTRMULTI_BY_HANDLE Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl.c | 6 +-- fs/xfs/linux-2.6/xfs_ioctl.h | 23 +++++++++++ fs/xfs/linux-2.6/xfs_ioctl32.c | 94 ++++++++++++++++++++++++++++++++++++++++-- fs/xfs/linux-2.6/xfs_ioctl32.h | 20 +++++++++ 4 files changed, 136 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 498d3e15843..c8f1e632ba9 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -490,7 +490,7 @@ xfs_attrlist_by_handle( return -error; } -STATIC int +int xfs_attrmulti_attr_get( struct inode *inode, char *name, @@ -519,7 +519,7 @@ xfs_attrmulti_attr_get( return error; } -STATIC int +int xfs_attrmulti_attr_set( struct inode *inode, char *name, @@ -549,7 +549,7 @@ xfs_attrmulti_attr_set( return error; } -STATIC int +int xfs_attrmulti_attr_remove( struct inode *inode, char *name, diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index 2df849f49a5..f67dc69381e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -44,4 +44,27 @@ xfs_readlink_by_handle( 
xfs_mount_t *mp, xfs_fsop_handlereq_t *hreq, struct inode *parinode); + +extern int +xfs_attrmulti_attr_get( + struct inode *inode, + char *name, + char __user *ubuf, + __uint32_t *len, + __uint32_t flags); + +extern int + xfs_attrmulti_attr_set( + struct inode *inode, + char *name, + const char __user *ubuf, + __uint32_t len, + __uint32_t flags); + +extern int +xfs_attrmulti_attr_remove( + struct inode *inode, + char *name, + __uint32_t flags); + #endif diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index e7eb0d7e0d5..17993352aa6 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -476,6 +476,93 @@ xfs_compat_attrlist_by_handle( return -error; } +STATIC int +xfs_compat_attrmulti_by_handle( + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) +{ + int error; + compat_xfs_attr_multiop_t *ops; + compat_xfs_fsop_attrmulti_handlereq_t am_hreq; + struct inode *inode; + unsigned int i, size; + char *attr_name; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&am_hreq, arg, + sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) + return -XFS_ERROR(EFAULT); + + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq, + &inode); + if (error) + goto out; + + error = E2BIG; + size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); + if (!size || size > 16 * PAGE_SIZE) + goto out_vn_rele; + + error = ENOMEM; + ops = kmalloc(size, GFP_KERNEL); + if (!ops) + goto out_vn_rele; + + error = EFAULT; + if (copy_from_user(ops, compat_ptr(am_hreq.ops), size)) + goto out_kfree_ops; + + attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); + if (!attr_name) + goto out_kfree_ops; + + + error = 0; + for (i = 0; i < am_hreq.opcount; i++) { + ops[i].am_error = strncpy_from_user(attr_name, + compat_ptr(ops[i].am_attrname), + MAXNAMELEN); + if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) + error = -ERANGE; + if (ops[i].am_error < 0) + break; + + switch (ops[i].am_opcode) { + case ATTR_OP_GET: + ops[i].am_error = xfs_attrmulti_attr_get(inode, + attr_name, + compat_ptr(ops[i].am_attrvalue), + &ops[i].am_length, ops[i].am_flags); + break; + case ATTR_OP_SET: + ops[i].am_error = xfs_attrmulti_attr_set(inode, + attr_name, + compat_ptr(ops[i].am_attrvalue), + ops[i].am_length, ops[i].am_flags); + break; + case ATTR_OP_REMOVE: + ops[i].am_error = xfs_attrmulti_attr_remove(inode, + attr_name, ops[i].am_flags); + break; + default: + ops[i].am_error = EINVAL; + } + } + + if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) + error = XFS_ERROR(EFAULT); + + kfree(attr_name); + out_kfree_ops: + kfree(ops); + out_vn_rele: + iput(inode); + out: + return -error; +} + STATIC long xfs_compat_ioctl( xfs_inode_t *ip, @@ -499,10 +586,7 @@ xfs_compat_ioctl( case XFS_IOC_GETBMAP: case XFS_IOC_GETBMAPA: case XFS_IOC_GETBMAPX: -/* not handled - case XFS_IOC_FSSETDM_BY_HANDLE: - case XFS_IOC_ATTRMULTI_BY_HANDLE: -*/ +/* case XFS_IOC_FSSETDM_BY_HANDLE: not handled */ case XFS_IOC_FSCOUNTS: case XFS_IOC_SET_RESBLKS: case XFS_IOC_GET_RESBLKS: @@ -610,6 +694,8 @@ xfs_compat_ioctl( } case XFS_IOC_ATTRLIST_BY_HANDLE_32: return xfs_compat_attrlist_by_handle(mp, arg, inode); + case XFS_IOC_ATTRMULTI_BY_HANDLE_32: + return xfs_compat_attrmulti_by_handle(mp, arg, inode); default: return -XFS_ERROR(ENOIOCTLCMD); } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index bbf3a2cd8a7..785e7ec318c 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -135,6 +135,26 @@ typedef struct 
compat_xfs_fsop_attrlist_handlereq { #define XFS_IOC_ATTRLIST_BY_HANDLE_32 \ _IOW('X', 122, struct compat_xfs_fsop_attrlist_handlereq) +/* am_opcodes defined in xfs_fs.h */ +typedef struct compat_xfs_attr_multiop { + __u32 am_opcode; + __s32 am_error; + compat_uptr_t am_attrname; + compat_uptr_t am_attrvalue; + __u32 am_length; + __u32 am_flags; +} compat_xfs_attr_multiop_t; + +typedef struct compat_xfs_fsop_attrmulti_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle interface structure */ + __u32 opcount;/* count of following multiop */ + /* ptr to compat_xfs_attr_multiop */ + compat_uptr_t ops; /* attr_multi data */ +} compat_xfs_fsop_attrmulti_handlereq_t; + +#define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \ + _IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq) + #ifdef BROKEN_X86_ALIGNMENT /* on ia32 l_start is on a 32-bit boundary */ typedef struct compat_xfs_flock64 { -- cgit v1.2.3-70-g09d2 From 710d62aaaf17c841b8bdbc7a775f8910a7160248 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:16 -0600 Subject: [XFS] Hook up compat XFS_IOC_FSSETDM_BY_HANDLE ioctl handler Add a compat handler for XFS_IOC_FSSETDM_BY_HANDLE. I haven't tested this, lacking dmapi tools to do so (unless xfsqa magically gets this somehow?) Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 43 +++++++++++++++++++++++++++++++++++++++++- fs/xfs/linux-2.6/xfs_ioctl32.h | 9 +++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 17993352aa6..9234360910f 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -563,6 +563,46 @@ xfs_compat_attrmulti_by_handle( return -error; } +STATIC int +xfs_compat_fssetdm_by_handle( + xfs_mount_t *mp, + void __user *arg, + struct inode *parinode) +{ + int error; + struct fsdmidata fsd; + compat_xfs_fsop_setdm_handlereq_t dmhreq; + struct inode *inode; + + if (!capable(CAP_MKNOD)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&dmhreq, arg, + sizeof(compat_xfs_fsop_setdm_handlereq_t))) + return -XFS_ERROR(EFAULT); + + error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq, + &inode); + if (error) + return -error; + + if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { + error = -XFS_ERROR(EPERM); + goto out; + } + + if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { + error = -XFS_ERROR(EFAULT); + goto out; + } + + error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, + fsd.fsd_dmstate); + +out: + iput(inode); + return error; +} + STATIC long xfs_compat_ioctl( xfs_inode_t *ip, @@ -586,7 +626,6 @@ xfs_compat_ioctl( case XFS_IOC_GETBMAP: case XFS_IOC_GETBMAPA: case XFS_IOC_GETBMAPX: -/* case XFS_IOC_FSSETDM_BY_HANDLE: not handled */ case XFS_IOC_FSCOUNTS: case XFS_IOC_SET_RESBLKS: case XFS_IOC_GET_RESBLKS: @@ -696,6 +735,8 @@ xfs_compat_ioctl( return xfs_compat_attrlist_by_handle(mp, arg, inode); case XFS_IOC_ATTRMULTI_BY_HANDLE_32: return xfs_compat_attrmulti_by_handle(mp, arg, inode); + case XFS_IOC_FSSETDM_BY_HANDLE_32: + return xfs_compat_fssetdm_by_handle(mp, arg, inode); default: return -XFS_ERROR(ENOIOCTLCMD); } diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index 785e7ec318c..af918749d18 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -155,6 +155,15 @@ typedef struct compat_xfs_fsop_attrmulti_handlereq { #define XFS_IOC_ATTRMULTI_BY_HANDLE_32 \ 
_IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq) +typedef struct compat_xfs_fsop_setdm_handlereq { + struct compat_xfs_fsop_handlereq hreq; /* handle information */ + /* ptr to struct fsdmidata */ + compat_uptr_t data; /* DMAPI data */ +} compat_xfs_fsop_setdm_handlereq_t; + +#define XFS_IOC_FSSETDM_BY_HANDLE_32 \ + _IOW('X', 121, struct compat_xfs_fsop_setdm_handlereq) + #ifdef BROKEN_X86_ALIGNMENT /* on ia32 l_start is on a 32-bit boundary */ typedef struct compat_xfs_flock64 { -- cgit v1.2.3-70-g09d2 From e5d412f17846b0aea9e5250926f994ab2e4e1006 Mon Sep 17 00:00:00 2001 From: "sandeen@sandeen.net" Date: Tue, 25 Nov 2008 21:20:17 -0600 Subject: [XFS] Reorder xfs_ioctl32.c for some tidiness Put things in IMHO a more readable order, now that it's all done; add some comments. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl32.c | 78 +++++++++++++++++++----------------------- 1 file changed, 36 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 9234360910f..b34b3d8892a 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -16,11 +16,7 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include -#include #include -#include -#include -#include #include #include "xfs.h" #include "xfs_fs.h" @@ -131,7 +127,7 @@ xfs_inumbers_fmt_compat( #else #define xfs_inumbers_fmt_compat xfs_inumbers_fmt -#endif +#endif /* BROKEN_X86_ALIGNMENT */ STATIC int xfs_ioctl32_bstime_copyin( @@ -617,6 +613,7 @@ xfs_compat_ioctl( xfs_itrace_entry(XFS_I(inode)); switch (cmd) { + /* No size or alignment issues on any arch */ case XFS_IOC_DIOINFO: case XFS_IOC_FSGEOMETRY: case XFS_IOC_FSGETXATTR: @@ -629,35 +626,28 @@ xfs_compat_ioctl( case XFS_IOC_FSCOUNTS: case XFS_IOC_SET_RESBLKS: case XFS_IOC_GET_RESBLKS: - case XFS_IOC_FSGROWFSDATA: case XFS_IOC_FSGROWFSLOG: - case XFS_IOC_FSGROWFSRT: case XFS_IOC_FREEZE: case XFS_IOC_THAW: case XFS_IOC_GOINGDOWN: case XFS_IOC_ERROR_INJECTION: case XFS_IOC_ERROR_CLEARALL: - break; - - case XFS_IOC_GETXFLAGS_32: - case XFS_IOC_SETXFLAGS_32: - case XFS_IOC_GETVERSION_32: - cmd = _NATIVE_IOC(cmd, long); - break; - case XFS_IOC_SWAPEXT: { - struct xfs_swapext sxp; - struct compat_xfs_swapext __user *sxu = arg; - - /* Bulk copy in up to the sx_stat field, then grab bstat */ - if (copy_from_user(&sxp, sxu, - offsetof(xfs_swapext_t, sx_stat)) || - xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) - return -XFS_ERROR(EFAULT); - error = xfs_swapext(&sxp); - return -error; - } -#ifdef BROKEN_X86_ALIGNMENT - /* xfs_flock_t has wrong u32 vs u64 alignment */ + return xfs_ioctl(ip, filp, ioflags, cmd, arg); +#ifndef BROKEN_X86_ALIGNMENT + /* These are handled fine if no alignment issues */ + case XFS_IOC_ALLOCSP: + case XFS_IOC_FREESP: + case XFS_IOC_RESVSP: + case XFS_IOC_UNRESVSP: + case XFS_IOC_ALLOCSP64: + case XFS_IOC_FREESP64: + case XFS_IOC_RESVSP64: + case XFS_IOC_UNRESVSP64: + case XFS_IOC_FSGEOMETRY_V1: + case XFS_IOC_FSGROWFSDATA: + case XFS_IOC_FSGROWFSRT: + return xfs_ioctl(ip, filp, ioflags, cmd, arg); +#else case XFS_IOC_ALLOCSP_32: case XFS_IOC_FREESP_32: case XFS_IOC_ALLOCSP64_32: @@ -691,18 +681,25 @@ xfs_compat_ioctl( error = xfs_growfs_rt(mp, &in); return -error; } -#else /* These are handled fine if no alignment issues */ - case XFS_IOC_ALLOCSP: - case XFS_IOC_FREESP: - case XFS_IOC_RESVSP: - case XFS_IOC_UNRESVSP: - case XFS_IOC_ALLOCSP64: - case XFS_IOC_FREESP64: 
- case XFS_IOC_RESVSP64: - case XFS_IOC_UNRESVSP64: - case XFS_IOC_FSGEOMETRY_V1: - break; #endif + /* long changes size, but xfs only copiese out 32 bits */ + case XFS_IOC_GETXFLAGS_32: + case XFS_IOC_SETXFLAGS_32: + case XFS_IOC_GETVERSION_32: + cmd = _NATIVE_IOC(cmd, long); + return xfs_ioctl(ip, filp, ioflags, cmd, arg); + case XFS_IOC_SWAPEXT: { + struct xfs_swapext sxp; + struct compat_xfs_swapext __user *sxu = arg; + + /* Bulk copy in up to the sx_stat field, then copy bstat */ + if (copy_from_user(&sxp, sxu, + offsetof(struct xfs_swapext, sx_stat)) || + xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) + return -XFS_ERROR(EFAULT); + error = xfs_swapext(&sxp); + return -error; + } case XFS_IOC_FSBULKSTAT_32: case XFS_IOC_FSBULKSTAT_SINGLE_32: case XFS_IOC_FSINUMBERS_32: @@ -740,9 +737,6 @@ xfs_compat_ioctl( default: return -XFS_ERROR(ENOIOCTLCMD); } - - error = xfs_ioctl(ip, filp, ioflags, cmd, arg); - return error; } long -- cgit v1.2.3-70-g09d2 From 062e4fee4400f283307cf8ac1b7931c939010229 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 26 Oct 2008 16:58:25 +0200 Subject: UBIFS: slight compression optimization If data does not compress, it is better to leave it uncompressed because we'll read it faster then. So do not compress data if we save less than 64 bytes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 6 +++--- fs/ubifs/ubifs-media.h | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index a0ada596b17..6414d50780e 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -119,10 +119,10 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, } /* - * Presently, we just require that compression results in less data, - * rather than any defined minimum compression ratio or amount. + * If the data compressed only slightly, it is better to leave it + * uncompressed to improve read speed. */ - if (ALIGN(*out_len, 8) >= ALIGN(in_len, 8)) + if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) goto no_compr; return; diff --git a/fs/ubifs/ubifs-media.h b/fs/ubifs/ubifs-media.h index 0b378042a3a..b25fc36cf72 100644 --- a/fs/ubifs/ubifs-media.h +++ b/fs/ubifs/ubifs-media.h @@ -51,6 +51,13 @@ */ #define UBIFS_MIN_COMPR_LEN 128 +/* + * If compressed data length is less than %UBIFS_MIN_COMPRESS_DIFF bytes + * shorter than uncompressed data length, UBIFS preferes to leave this data + * node uncompress, because it'll be read faster. + */ +#define UBIFS_MIN_COMPRESS_DIFF 64 + /* Root inode number */ #define UBIFS_ROOT_INO 1 -- cgit v1.2.3-70-g09d2 From a1dc080c27ec8ea7ca1c8a9b499362a71ebff792 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 1 Nov 2008 14:20:50 +0200 Subject: UBIFS: use bit-fields to store compression type Save a 4 bytes of RAM per 'struct inode' by stroring inode compression type in bit-filed, instead of using 'int'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 8 ++++++++ fs/ubifs/ubifs.h | 6 +++--- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index d80b2aef42b..21b4103271e 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2020,6 +2020,14 @@ static int __init ubifs_init(void) BUILD_BUG_ON(UBIFS_INO_NODE_SZ != 160); BUILD_BUG_ON(UBIFS_REF_NODE_SZ != 64); + /* + * We use 2 bit wide bit-fields to store compression type, which should + * be amended if more compressors are added. 
The bit-fields are: + * @compr_type in 'struct ubifs_inode' and @default_compr in + * 'struct ubifs_info'. + */ + BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); + /* * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 46b172560a0..4d76aba57ee 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -386,12 +386,12 @@ struct ubifs_inode { unsigned int dirty:1; unsigned int xattr:1; unsigned int bulk_read:1; + unsigned int compr_type:2; struct mutex ui_mutex; spinlock_t ui_lock; loff_t synced_i_size; loff_t ui_size; int flags; - int compr_type; pgoff_t last_page_read; pgoff_t read_in_a_row; int data_len; @@ -946,6 +946,7 @@ struct ubifs_mount_opts { * @no_chk_data_crc: do not check CRCs when reading data nodes (except during * recovery) * @bulk_read: enable bulk-reads + * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) * * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and * @calc_idx_sz @@ -986,7 +987,6 @@ struct ubifs_mount_opts { * @main_lebs: count of LEBs in the main area * @main_first: first LEB of the main area * @main_bytes: main area size in bytes - * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc) * * @key_hash_type: type of the key hash * @key_hash: direntry key hash function @@ -1196,6 +1196,7 @@ struct ubifs_info { unsigned int big_lpt:1; unsigned int no_chk_data_crc:1; unsigned int bulk_read:1; + unsigned int default_compr:2; struct mutex tnc_mutex; struct ubifs_zbranch zroot; @@ -1237,7 +1238,6 @@ struct ubifs_info { int main_lebs; int main_first; long long main_bytes; - int default_compr; uint8_t key_hash_type; uint32_t (*key_hash)(const char *str, int len); -- cgit v1.2.3-70-g09d2 From 553dea4dd531562688ba01c641c7f8fc7abaaf8c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 1 Nov 2008 14:57:49 +0200 Subject: UBIFS: introduce compression mount options It is very handy to be able to change default UBIFS compressor via mount options. Introduce -o compr= mount option support. Currently only "none", "lzo" and "zlib" compressors are supported. Signed-off-by: Artem Bityutskiy --- Documentation/filesystems/ubifs.txt | 3 +++ fs/ubifs/compress.c | 6 ++--- fs/ubifs/sb.c | 10 +++++---- fs/ubifs/super.c | 44 +++++++++++++++++++++++++++++++------ fs/ubifs/ubifs.h | 12 ++++++++-- 5 files changed, 59 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index dd84ea3c10d..2d0db5482d2 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -95,6 +95,9 @@ no_chk_data_crc skip checking of CRCs on data nodes in order to of this option is that corruption of the contents of a file can go unnoticed. 
chk_data_crc (*) do not skip checking CRCs on data nodes +compr=none override defoult comressor and set it to "none" +compr=lzo override defoult comressor and set it to "lzo" +compr=zlib override defoult comressor and set it to "zlib" Quick usage instructions diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 6414d50780e..4afb3ea24d4 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -33,7 +33,7 @@ /* Fake description object for the "none" compressor */ static struct ubifs_compressor none_compr = { .compr_type = UBIFS_COMPR_NONE, - .name = "no compression", + .name = "none", .capi_name = "", }; @@ -43,13 +43,13 @@ static DEFINE_MUTEX(lzo_mutex); static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, .comp_mutex = &lzo_mutex, - .name = "LZO", + .name = "lzo", .capi_name = "lzo", }; #else static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, - .name = "LZO", + .name = "lzo", }; #endif diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 0f392351dc5..c5da201ab54 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -179,8 +179,11 @@ static int create_default_filesystem(struct ubifs_info *c) sup->fanout = cpu_to_le32(DEFAULT_FANOUT); sup->lsave_cnt = cpu_to_le32(c->lsave_cnt); sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION); - sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN); + if (c->mount_opts.override_compr) + sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); + else + sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO); generate_random_uuid(sup->uuid); @@ -582,16 +585,15 @@ int ubifs_read_superblock(struct ubifs_info *c) c->jhead_cnt = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT; c->fanout = le32_to_cpu(sup->fanout); c->lsave_cnt = le32_to_cpu(sup->lsave_cnt); - c->default_compr = le16_to_cpu(sup->default_compr); c->rp_size = le64_to_cpu(sup->rp_size); c->rp_uid = le32_to_cpu(sup->rp_uid); c->rp_gid = le32_to_cpu(sup->rp_gid); sup_flags = le32_to_cpu(sup->flags); + if (!c->mount_opts.override_compr) + c->default_compr = le16_to_cpu(sup->default_compr); c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran); - memcpy(&c->uuid, &sup->uuid, 16); - c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT); /* Automatically increase file system size to the maximum size */ diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 21b4103271e..fc81022cc26 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -417,6 +417,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) else if (c->mount_opts.chk_data_crc == 1) seq_printf(s, ",no_chk_data_crc"); + if (c->mount_opts.override_compr) { + seq_printf(s, ",compr="); + seq_printf(s, ubifs_compr_name(c->mount_opts.compr_type)); + } + return 0; } @@ -878,6 +883,7 @@ static int check_volume_empty(struct ubifs_info *c) * Opt_no_bulk_read: disable bulk-reads * Opt_chk_data_crc: check CRCs when reading data nodes * Opt_no_chk_data_crc: do not check CRCs when reading data nodes + * Opt_override_compr: override default compressor * Opt_err: just end of array marker */ enum { @@ -887,6 +893,7 @@ enum { Opt_no_bulk_read, Opt_chk_data_crc, Opt_no_chk_data_crc, + Opt_override_compr, Opt_err, }; @@ -897,6 +904,7 @@ static const match_table_t tokens = { {Opt_no_bulk_read, "no_bulk_read"}, {Opt_chk_data_crc, "chk_data_crc"}, {Opt_no_chk_data_crc, "no_chk_data_crc"}, + {Opt_override_compr, "compr=%s"}, {Opt_err, NULL}, }; @@ -950,6 +958,28 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options, c->mount_opts.chk_data_crc 
= 1; c->no_chk_data_crc = 1; break; + case Opt_override_compr: + { + char *name = match_strdup(&args[0]); + + if (!name) + return -ENOMEM; + if (!strcmp(name, "none")) + c->mount_opts.compr_type = UBIFS_COMPR_NONE; + else if (!strcmp(name, "lzo")) + c->mount_opts.compr_type = UBIFS_COMPR_LZO; + else if (!strcmp(name, "zlib")) + c->mount_opts.compr_type = UBIFS_COMPR_ZLIB; + else { + ubifs_err("unknown compressor \"%s\"", name); + kfree(name); + return -EINVAL; + } + kfree(name); + c->mount_opts.override_compr = 1; + c->default_compr = c->mount_opts.compr_type; + break; + } default: ubifs_err("unrecognized mount option \"%s\" " "or missing value", p); @@ -1100,13 +1130,13 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; /* - * Make sure the compressor which is set as the default on in the - * superblock was actually compiled in. + * Make sure the compressor which is set as default in the superblock + * or overriden by mount options is actually compiled in. */ if (!ubifs_compr_present(c->default_compr)) { - ubifs_warn("'%s' compressor is set by superblock, but not " - "compiled in", ubifs_compr_name(c->default_compr)); - c->default_compr = UBIFS_COMPR_NONE; + ubifs_err("'compressor \"%s\" is not compiled in", + ubifs_compr_name(c->default_compr)); + goto out_free; } dbg_failure_mode_registration(c); @@ -2023,8 +2053,8 @@ static int __init ubifs_init(void) /* * We use 2 bit wide bit-fields to store compression type, which should * be amended if more compressors are added. The bit-fields are: - * @compr_type in 'struct ubifs_inode' and @default_compr in - * 'struct ubifs_info'. + * @compr_type in 'struct ubifs_inode', @default_compr in + * 'struct ubifs_info' and @compr_type in 'struct ubifs_mount_opts'. */ BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4d76aba57ee..16840e099ef 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -893,13 +893,21 @@ struct ubifs_orphan { /** * struct ubifs_mount_opts - UBIFS-specific mount options information. 
* @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast) - * @bulk_read: enable bulk-reads - * @chk_data_crc: check CRCs when reading data nodes + * @bulk_read: enable/disable bulk-reads (%0 default, %1 disabe, %2 enable) + * @chk_data_crc: enable/disable CRC data checking when reading data nodes + * (%0 default, %1 disabe, %2 enable) + * @override_compr: override default compressor (%0 - do not override and use + * superblock compressor, %1 - override and use compressor + * specified in @compr_type) + * @compr_type: compressor type to override the superblock compressor with + * (%UBIFS_COMPR_NONE, etc) */ struct ubifs_mount_opts { unsigned int unmount_mode:2; unsigned int bulk_read:2; unsigned int chk_data_crc:2; + unsigned int override_compr:1; + unsigned int compr_type:2; }; /** -- cgit v1.2.3-70-g09d2 From 5dd7cbc083f3a91fa7454125fe992826701b67bc Mon Sep 17 00:00:00 2001 From: Kukkonen Mika Date: Tue, 2 Dec 2008 11:32:49 +0200 Subject: UBIFS: avoid unnecessary checks I have a habit of compiling kernel with EXTRA_CFLAGS="-Wextra -Wno-unused -Wno-sign-compare -Wno-missing-field-initializers" and so fs/ubifs/key.h give lots (~10) of these every time: CC fs/ubifs/tnc_misc.o In file included from fs/ubifs/ubifs.h:1725, from fs/ubifs/tnc_misc.c:30: fs/ubifs/key.h: In function 'key_r5_hash': fs/ubifs/key.h:64: warning: comparison of unsigned expression >= 0 is always true fs/ubifs/key.h: In function 'key_test_hash': fs/ubifs/key.h:81: warning: comparison of unsigned expression >= 0 is always true This patch fixes the warnings. Signed-off-by: Artem Bityutskiy --- fs/ubifs/key.h | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/key.h b/fs/ubifs/key.h index 3f1f16bc25c..efb3430a258 100644 --- a/fs/ubifs/key.h +++ b/fs/ubifs/key.h @@ -37,6 +37,22 @@ #ifndef __UBIFS_KEY_H__ #define __UBIFS_KEY_H__ +/** + * key_mask_hash - mask a valid hash value. + * @val: value to be masked + * + * We use hash values as offset in directories, so values %0 and %1 are + * reserved for "." and "..". %2 is reserved for "end of readdir" marker. This + * function makes sure the reserved values are not used. + */ +static inline uint32_t key_mask_hash(uint32_t hash) +{ + hash &= UBIFS_S_KEY_HASH_MASK; + if (unlikely(hash <= 2)) + hash += 3; + return hash; +} + /** * key_r5_hash - R5 hash function (borrowed from reiserfs). * @s: direntry name @@ -54,16 +70,7 @@ static inline uint32_t key_r5_hash(const char *s, int len) str++; } - a &= UBIFS_S_KEY_HASH_MASK; - - /* - * We use hash values as offset in directories, so values %0 and %1 are - * reserved for "." and "..". %2 is reserved for "end of readdir" - * marker. - */ - if (unlikely(a >= 0 && a <= 2)) - a += 3; - return a; + return key_mask_hash(a); } /** @@ -77,10 +84,7 @@ static inline uint32_t key_test_hash(const char *str, int len) len = min_t(uint32_t, len, 4); memcpy(&a, str, len); - a &= UBIFS_S_KEY_HASH_MASK; - if (unlikely(a >= 0 && a <= 2)) - a += 3; - return a; + return key_mask_hash(a); } /** -- cgit v1.2.3-70-g09d2 From 17c2f9f85c896b48a5d74a9155d99ec5b241a0e6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 17 Oct 2008 13:31:39 +0300 Subject: UBIFS: separate debugging fields out Introduce a new data structure which contains all debugging stuff inside. This is cleaner than having debugging stuff directly in 'c'. 
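In outline, only an opaque pointer stays in 'c' and non-debug builds compile the whole thing away; a condensed sketch of that shape (the full hunks follow below):

	#ifdef CONFIG_UBIFS_FS_DEBUG
	struct ubifs_debug_info;		/* all debugging state lives here */
	#endif

	struct ubifs_info {
		/* ... regular file-system state ... */
	#ifdef CONFIG_UBIFS_FS_DEBUG
		struct ubifs_debug_info *dbg;	/* allocated in ubifs_debugging_init() */
	#endif
	};

	/* Non-debug builds reduce the setup/teardown to no-ops (see debug.h): */
	#define ubifs_debugging_init(c) 0
	#define ubifs_debugging_exit(c) ({})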
Signed-off-by: Artem Bityutskiy --- fs/ubifs/commit.c | 25 +++++++++--------- fs/ubifs/debug.c | 71 +++++++++++++++++++++++++++++++++++++++------------ fs/ubifs/debug.h | 51 ++++++++++++++++++++++++++++++------ fs/ubifs/lprops.c | 2 +- fs/ubifs/lpt_commit.c | 55 +++++++++++++++++++-------------------- fs/ubifs/orphan.c | 2 +- fs/ubifs/super.c | 22 +++++----------- fs/ubifs/tnc_commit.c | 7 ++--- fs/ubifs/ubifs.h | 34 +++--------------------- 9 files changed, 156 insertions(+), 113 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index b49884c8c10..f3a7945527f 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -470,12 +470,12 @@ int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot) { struct ubifs_idx_node *idx; int lnum, offs, len, err = 0; + struct ubifs_debug_info *d = c->dbg; - c->old_zroot = *zroot; - - lnum = c->old_zroot.lnum; - offs = c->old_zroot.offs; - len = c->old_zroot.len; + d->old_zroot = *zroot; + lnum = d->old_zroot.lnum; + offs = d->old_zroot.offs; + len = d->old_zroot.len; idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); if (!idx) @@ -485,8 +485,8 @@ int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot) if (err) goto out; - c->old_zroot_level = le16_to_cpu(idx->level); - c->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); + d->old_zroot_level = le16_to_cpu(idx->level); + d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); out: kfree(idx); return err; @@ -509,6 +509,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) { int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt; int first = 1, iip; + struct ubifs_debug_info *d = c->dbg; union ubifs_key lower_key, upper_key, l_key, u_key; unsigned long long uninitialized_var(last_sqnum); struct ubifs_idx_node *idx; @@ -525,9 +526,9 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) UBIFS_IDX_NODE_SZ; /* Start at the old zroot */ - lnum = c->old_zroot.lnum; - offs = c->old_zroot.offs; - len = c->old_zroot.len; + lnum = d->old_zroot.lnum; + offs = d->old_zroot.offs; + len = d->old_zroot.len; iip = 0; /* @@ -560,11 +561,11 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) if (first) { first = 0; /* Check root level and sqnum */ - if (le16_to_cpu(idx->level) != c->old_zroot_level) { + if (le16_to_cpu(idx->level) != d->old_zroot_level) { err = 2; goto out_dump; } - if (le64_to_cpu(idx->ch.sqnum) != c->old_zroot_sqnum) { + if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) { err = 3; goto out_dump; } diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 510ffa0bbda..0332a856a08 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -705,7 +705,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum); - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { ubifs_err("scan error %d", (int)PTR_ERR(sleb)); return; @@ -2097,7 +2097,7 @@ static int simple_rand(void) return (next >> 16) & 32767; } -void dbg_failure_mode_registration(struct ubifs_info *c) +static void failure_mode_init(struct ubifs_info *c) { struct failure_mode_info *fmi; @@ -2112,7 +2112,7 @@ void dbg_failure_mode_registration(struct ubifs_info *c) spin_unlock(&fmi_lock); } -void dbg_failure_mode_deregistration(struct ubifs_info *c) +static void failure_mode_exit(struct ubifs_info *c) { struct failure_mode_info *fmi, *tmp; @@ -2146,42 +2146,44 @@ static int 
in_failure_mode(struct ubi_volume_desc *desc) struct ubifs_info *c = dbg_find_info(desc); if (c && dbg_failure_mode) - return c->failure_mode; + return c->dbg->failure_mode; return 0; } static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) { struct ubifs_info *c = dbg_find_info(desc); + struct ubifs_debug_info *d; if (!c || !dbg_failure_mode) return 0; - if (c->failure_mode) + d = c->dbg; + if (d->failure_mode) return 1; - if (!c->fail_cnt) { + if (!d->fail_cnt) { /* First call - decide delay to failure */ if (chance(1, 2)) { unsigned int delay = 1 << (simple_rand() >> 11); if (chance(1, 2)) { - c->fail_delay = 1; - c->fail_timeout = jiffies + + d->fail_delay = 1; + d->fail_timeout = jiffies + msecs_to_jiffies(delay); dbg_rcvry("failing after %ums", delay); } else { - c->fail_delay = 2; - c->fail_cnt_max = delay; + d->fail_delay = 2; + d->fail_cnt_max = delay; dbg_rcvry("failing after %u calls", delay); } } - c->fail_cnt += 1; + d->fail_cnt += 1; } /* Determine if failure delay has expired */ - if (c->fail_delay == 1) { - if (time_before(jiffies, c->fail_timeout)) + if (d->fail_delay == 1) { + if (time_before(jiffies, d->fail_timeout)) return 0; - } else if (c->fail_delay == 2) - if (c->fail_cnt++ < c->fail_cnt_max) + } else if (d->fail_delay == 2) + if (d->fail_cnt++ < d->fail_cnt_max) return 0; if (lnum == UBIFS_SB_LNUM) { if (write) { @@ -2239,7 +2241,7 @@ static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) dbg_rcvry("failing in bud LEB %d commit not running", lnum); } ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum); - c->failure_mode = 1; + d->failure_mode = 1; dump_stack(); return 1; } @@ -2344,4 +2346,41 @@ int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) return 0; } +/** + * ubifs_debugging_init - initialize UBIFS debugging. + * @c: UBIFS file-system description object + * + * This function initializes debugging-related data for the file system. + * Returns zero in case of success and a negative error code in case of + * failure. + */ +int ubifs_debugging_init(struct ubifs_info *c) +{ + c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); + if (!c->dbg) + return -ENOMEM; + + c->dbg->buf = vmalloc(c->leb_size); + if (!c->dbg->buf) + goto out; + + failure_mode_init(c); + return 0; + +out: + kfree(c->dbg); + return -ENOMEM; +} + +/** + * ubifs_debugging_exit - free debugging data. + * @c: UBIFS file-system description object + */ +void ubifs_debugging_exit(struct ubifs_info *c) +{ + failure_mode_exit(c); + vfree(c->dbg->buf); + kfree(c->dbg); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 33d6b95071e..d6ea1362d56 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -25,7 +25,43 @@ #ifdef CONFIG_UBIFS_FS_DEBUG -#define UBIFS_DBG(op) op +/** + * ubifs_debug_info - per-FS debugging information. 
+ * @buf: a buffer of LEB size, used for various purposes + * @old_zroot: old index root - used by 'dbg_check_old_index()' + * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' + * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' + * @failure_mode: failure mode for recovery testing + * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls + * @fail_timeout: time in jiffies when delay of failure mode expires + * @fail_cnt: current number of calls to failure mode I/O functions + * @fail_cnt_max: number of calls by which to delay failure mode + * @chk_lpt_sz: used by LPT tree size checker + * @chk_lpt_sz2: used by LPT tree size checker + * @chk_lpt_wastage: used by LPT tree size checker + * @chk_lpt_lebs: used by LPT tree size checker + * @new_nhead_offs: used by LPT tree size checker + * @new_ihead_lnum: used by debugging to check ihead_lnum + * @new_ihead_offs: used by debugging to check ihead_offs + */ +struct ubifs_debug_info { + void *buf; + struct ubifs_zbranch old_zroot; + int old_zroot_level; + unsigned long long old_zroot_sqnum; + int failure_mode; + int fail_delay; + unsigned long fail_timeout; + unsigned int fail_cnt; + unsigned int fail_cnt_max; + long long chk_lpt_sz; + long long chk_lpt_sz2; + long long chk_lpt_wastage; + int chk_lpt_lebs; + int new_nhead_offs; + int new_ihead_lnum; + int new_ihead_offs; +}; #define ubifs_assert(expr) do { \ if (unlikely(!(expr))) { \ @@ -211,6 +247,9 @@ extern unsigned int ubifs_msg_flags; extern unsigned int ubifs_chk_flags; extern unsigned int ubifs_tst_flags; +int ubifs_debugging_init(struct ubifs_info *c); +void ubifs_debugging_exit(struct ubifs_info *c); + /* Dump functions */ const char *dbg_ntype(int type); @@ -274,9 +313,6 @@ int dbg_force_in_the_gaps(void); #define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY) -void dbg_failure_mode_registration(struct ubifs_info *c); -void dbg_failure_mode_deregistration(struct ubifs_info *c); - #ifndef UBIFS_DBG_PRESERVE_UBI #define ubi_leb_read dbg_leb_read @@ -320,8 +356,6 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #else /* !CONFIG_UBIFS_FS_DEBUG */ -#define UBIFS_DBG(op) - /* Use "if (0)" to make compiler check arguments even if debugging is off */ #define ubifs_assert(expr) do { \ if (0 && (expr)) \ @@ -360,6 +394,9 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) +#define ubifs_debugging_init(c) 0 +#define ubifs_debugging_exit(c) ({}) + #define dbg_ntype(type) "" #define dbg_cstate(cmt_state) "" #define dbg_get_key_dump(c, key) ({}) @@ -396,8 +433,6 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_force_in_the_gaps_enabled 0 #define dbg_force_in_the_gaps() 0 #define dbg_failure_mode 0 -#define dbg_failure_mode_registration(c) ({}) -#define dbg_failure_mode_deregistration(c) ({}) #endif /* !CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index f27176e9b70..10ba663eb32 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -1088,7 +1088,7 @@ static int scan_check_cb(struct ubifs_info *c, } } - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { /* * After an unclean unmount, empty and freeable LEBs diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index a41434b4278..1aefab9f0b5 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1602,7 +1602,7 @@ static int 
dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) { int err, len = c->leb_size, dirty = 0, node_type, node_num, node_len; int ret; - void *buf = c->dbg_buf; + void *buf = c->dbg->buf; dbg_lp("LEB %d", lnum); err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); @@ -1731,15 +1731,16 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) */ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) { + struct ubifs_debug_info *d = c->dbg; long long chk_lpt_sz, lpt_sz; int err = 0; switch (action) { case 0: - c->chk_lpt_sz = 0; - c->chk_lpt_sz2 = 0; - c->chk_lpt_lebs = 0; - c->chk_lpt_wastage = 0; + d->chk_lpt_sz = 0; + d->chk_lpt_sz2 = 0; + d->chk_lpt_lebs = 0; + d->chk_lpt_wastage = 0; if (c->dirty_pn_cnt > c->pnode_cnt) { dbg_err("dirty pnodes %d exceed max %d", c->dirty_pn_cnt, c->pnode_cnt); @@ -1752,35 +1753,35 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } return err; case 1: - c->chk_lpt_sz += len; + d->chk_lpt_sz += len; return 0; case 2: - c->chk_lpt_sz += len; - c->chk_lpt_wastage += len; - c->chk_lpt_lebs += 1; + d->chk_lpt_sz += len; + d->chk_lpt_wastage += len; + d->chk_lpt_lebs += 1; return 0; case 3: chk_lpt_sz = c->leb_size; - chk_lpt_sz *= c->chk_lpt_lebs; + chk_lpt_sz *= d->chk_lpt_lebs; chk_lpt_sz += len - c->nhead_offs; - if (c->chk_lpt_sz != chk_lpt_sz) { + if (d->chk_lpt_sz != chk_lpt_sz) { dbg_err("LPT wrote %lld but space used was %lld", - c->chk_lpt_sz, chk_lpt_sz); + d->chk_lpt_sz, chk_lpt_sz); err = -EINVAL; } - if (c->chk_lpt_sz > c->lpt_sz) { + if (d->chk_lpt_sz > c->lpt_sz) { dbg_err("LPT wrote %lld but lpt_sz is %lld", - c->chk_lpt_sz, c->lpt_sz); + d->chk_lpt_sz, c->lpt_sz); err = -EINVAL; } - if (c->chk_lpt_sz2 && c->chk_lpt_sz != c->chk_lpt_sz2) { + if (d->chk_lpt_sz2 && d->chk_lpt_sz != d->chk_lpt_sz2) { dbg_err("LPT layout size %lld but wrote %lld", - c->chk_lpt_sz, c->chk_lpt_sz2); + d->chk_lpt_sz, d->chk_lpt_sz2); err = -EINVAL; } - if (c->chk_lpt_sz2 && c->new_nhead_offs != len) { + if (d->chk_lpt_sz2 && d->new_nhead_offs != len) { dbg_err("LPT new nhead offs: expected %d was %d", - c->new_nhead_offs, len); + d->new_nhead_offs, len); err = -EINVAL; } lpt_sz = (long long)c->pnode_cnt * c->pnode_sz; @@ -1788,22 +1789,22 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) lpt_sz += c->ltab_sz; if (c->big_lpt) lpt_sz += c->lsave_sz; - if (c->chk_lpt_sz - c->chk_lpt_wastage > lpt_sz) { + if (d->chk_lpt_sz - d->chk_lpt_wastage > lpt_sz) { dbg_err("LPT chk_lpt_sz %lld + waste %lld exceeds %lld", - c->chk_lpt_sz, c->chk_lpt_wastage, lpt_sz); + d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); err = -EINVAL; } if (err) dbg_dump_lpt_info(c); - c->chk_lpt_sz2 = c->chk_lpt_sz; - c->chk_lpt_sz = 0; - c->chk_lpt_wastage = 0; - c->chk_lpt_lebs = 0; - c->new_nhead_offs = len; + d->chk_lpt_sz2 = d->chk_lpt_sz; + d->chk_lpt_sz = 0; + d->chk_lpt_wastage = 0; + d->chk_lpt_lebs = 0; + d->new_nhead_offs = len; return err; case 4: - c->chk_lpt_sz += len; - c->chk_lpt_wastage += len; + d->chk_lpt_sz += len; + d->chk_lpt_wastage += len; return 0; default: return -EINVAL; diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index 9bd5a43d452..9e6f403f170 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -899,7 +899,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci) for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) { struct ubifs_scan_leb *sleb; - sleb = ubifs_scan(c, lnum, 0, c->dbg_buf); + sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; diff --git a/fs/ubifs/super.c 
b/fs/ubifs/super.c index fc81022cc26..ad44822059c 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1069,11 +1069,9 @@ static int mount_ubifs(struct ubifs_info *c) if (err) return err; -#ifdef CONFIG_UBIFS_FS_DEBUG - c->dbg_buf = vmalloc(c->leb_size); - if (!c->dbg_buf) - return -ENOMEM; -#endif + err = ubifs_debugging_init(c); + if (err) + return err; err = check_volume_empty(c); if (err) @@ -1139,18 +1137,16 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } - dbg_failure_mode_registration(c); - err = init_constants_late(c); if (err) - goto out_dereg; + goto out_free; sz = ALIGN(c->max_idx_node_sz, c->min_io_size); sz = ALIGN(sz + c->max_idx_node_sz, c->min_io_size); c->cbuf = kmalloc(sz, GFP_NOFS); if (!c->cbuf) { err = -ENOMEM; - goto out_dereg; + goto out_free; } sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); @@ -1350,14 +1346,12 @@ out_wbufs: free_wbufs(c); out_cbuf: kfree(c->cbuf); -out_dereg: - dbg_failure_mode_deregistration(c); out_free: kfree(c->bu.buf); vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); - UBIFS_DBG(vfree(c->dbg_buf)); + ubifs_debugging_exit(c); return err; } @@ -1394,8 +1388,7 @@ static void ubifs_umount(struct ubifs_info *c) vfree(c->ileb_buf); vfree(c->sbuf); kfree(c->bottom_up_buf); - UBIFS_DBG(vfree(c->dbg_buf)); - dbg_failure_mode_deregistration(c); + ubifs_debugging_exit(c); } /** @@ -1879,7 +1872,6 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) goto out_iput; mutex_unlock(&c->umount_mutex); - return 0; out_iput: diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 8ac76b1c2d5..3c0af452887 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -553,8 +553,8 @@ static int layout_in_empty_space(struct ubifs_info *c) } #ifdef CONFIG_UBIFS_FS_DEBUG - c->new_ihead_lnum = lnum; - c->new_ihead_offs = buf_offs; + c->dbg->new_ihead_lnum = lnum; + c->dbg->new_ihead_offs = buf_offs; #endif return 0; @@ -1002,7 +1002,8 @@ static int write_index(struct ubifs_info *c) } #ifdef CONFIG_UBIFS_FS_DEBUG - if (lnum != c->new_ihead_lnum || buf_offs != c->new_ihead_offs) { + if (lnum != c->dbg->new_ihead_lnum || + buf_offs != c->dbg->new_ihead_offs) { ubifs_err("inconsistent ihead"); return -EINVAL; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 16840e099ef..7e090a5e2bf 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -910,6 +910,8 @@ struct ubifs_mount_opts { unsigned int compr_type:2; }; +struct ubifs_debug_info; + /** * struct ubifs_info - UBIFS file-system description data structure * (per-superblock). 
@@ -972,8 +974,6 @@ struct ubifs_mount_opts { * @ileb_nxt: next pre-allocated index LEBs * @old_idx: tree of index nodes obsoleted since the last commit start * @bottom_up_buf: a buffer which is used by 'dirty_cow_bottom_up()' in tnc.c - * @new_ihead_lnum: used by debugging to check ihead_lnum - * @new_ihead_offs: used by debugging to check ihead_offs * * @mst_node: master node * @mst_offs: offset of valid master node @@ -1157,15 +1157,7 @@ struct ubifs_mount_opts { * @always_chk_crc: always check CRCs (while mounting and remounting rw) * @mount_opts: UBIFS-specific mount options * - * @dbg_buf: a buffer of LEB size used for debugging purposes - * @old_zroot: old index root - used by 'dbg_check_old_index()' - * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' - * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' - * @failure_mode: failure mode for recovery testing - * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls - * @fail_timeout: time in jiffies when delay of failure mode expires - * @fail_cnt: current number of calls to failure mode I/O functions - * @fail_cnt_max: number of calls by which to delay failure mode + * @dbg: debugging-related information */ struct ubifs_info { struct super_block *vfs_sb; @@ -1221,10 +1213,6 @@ struct ubifs_info { int ileb_nxt; struct rb_root old_idx; int *bottom_up_buf; -#ifdef CONFIG_UBIFS_FS_DEBUG - int new_ihead_lnum; - int new_ihead_offs; -#endif struct ubifs_mst_node *mst_node; int mst_offs; @@ -1399,21 +1387,7 @@ struct ubifs_info { struct ubifs_mount_opts mount_opts; #ifdef CONFIG_UBIFS_FS_DEBUG - void *dbg_buf; - struct ubifs_zbranch old_zroot; - int old_zroot_level; - unsigned long long old_zroot_sqnum; - int failure_mode; - int fail_delay; - unsigned long fail_timeout; - unsigned int fail_cnt; - unsigned int fail_cnt_max; - long long chk_lpt_sz; - long long chk_lpt_sz2; - long long chk_lpt_wastage; - int chk_lpt_lebs; - int new_nhead_lnum; - int new_nhead_offs; + struct ubifs_debug_info *dbg; #endif }; -- cgit v1.2.3-70-g09d2 From 552ff3179d1e93a3e982357544c059f3e9a5516e Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 23 Oct 2008 11:49:28 +0300 Subject: UBIFS: add debugfs support We need to have a possibility to see various UBIFS variables and ask UBIFS to dump various information. Debugfs is what we need. 
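Once the knobs below exist, a dump can be requested from user space with a plain write; a minimal sketch (assuming debugfs is mounted at /sys/kernel/debug and the file system sits on UBI device 0, volume 0):

	#include <fcntl.h>
	#include <unistd.h>

	static int request_lprops_dump(void)
	{
		/* The path follows the "ubifs/ubi%d_%d" naming set up in the patch. */
		int fd = open("/sys/kernel/debug/ubifs/ubi0_0/dump_lprops", O_WRONLY);

		if (fd < 0)
			return -1;
		write(fd, "1", 1);	/* any write triggers the dump to the kernel log */
		close(fd);
		return 0;
	}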
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 173 +++++++++++++++++++++++++++++++++++++++++++++++++------ fs/ubifs/debug.h | 27 ++++++++- fs/ubifs/super.c | 12 ++++ fs/ubifs/ubifs.h | 1 + 4 files changed, 193 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 0332a856a08..56842772c80 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -32,6 +32,7 @@ #include "ubifs.h" #include #include +#include #ifdef CONFIG_UBIFS_FS_DEBUG @@ -988,22 +989,20 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, err = 1; key_read(c, &dent1->key, &key); if (keys_cmp(c, &zbr1->key, &key)) { - dbg_err("1st entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); - dbg_dump_node(c, dent1); - goto out_free; + ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + ubifs_err("but it should have key %s according to tnc", + DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); + goto out_free; } key_read(c, &dent2->key, &key); if (keys_cmp(c, &zbr2->key, &key)) { - dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - dbg_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); - dbg_dump_node(c, dent2); - goto out_free; + ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + ubifs_err("but it should have key %s according to tnc", + DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); + goto out_free; } nlen1 = le16_to_cpu(dent1->nlen); @@ -1015,14 +1014,14 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, goto out_free; } if (cmp == 0 && nlen1 == nlen2) - dbg_err("2 xent/dent nodes with the same name"); + ubifs_err("2 xent/dent nodes with the same name"); else - dbg_err("bad order of colliding key %s", + ubifs_err("bad order of colliding key %s", DBGKEY(&key)); - dbg_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); + ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); dbg_dump_node(c, dent1); - dbg_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); + ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs); dbg_dump_node(c, dent2); out_free: @@ -2103,7 +2102,7 @@ static void failure_mode_init(struct ubifs_info *c) fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS); if (!fmi) { - dbg_err("Failed to register failure mode - no memory"); + ubifs_err("Failed to register failure mode - no memory"); return; } fmi->c = c; @@ -2383,4 +2382,144 @@ void ubifs_debugging_exit(struct ubifs_info *c) kfree(c->dbg); } +/* + * Root directory for UBIFS stuff in debugfs. Contains sub-directories which + * contain the stuff specific to particular file-system mounts. + */ +static struct dentry *debugfs_rootdir; + +/** + * dbg_debugfs_init - initialize debugfs file-system. + * + * UBIFS uses debugfs file-system to expose various debugging knobs to + * user-space. This function creates "ubifs" directory in the debugfs + * file-system. Returns zero in case of success and a negative error code in + * case of failure. + */ +int dbg_debugfs_init(void) +{ + debugfs_rootdir = debugfs_create_dir("ubifs", NULL); + if (IS_ERR(debugfs_rootdir)) { + int err = PTR_ERR(debugfs_rootdir); + ubifs_err("cannot create \"ubifs\" debugfs directory, " + "error %d\n", err); + return err; + } + + return 0; +} + +/** + * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system. 
+ */ +void dbg_debugfs_exit(void) +{ + debugfs_remove(debugfs_rootdir); +} + +static int open_debugfs_file(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t write_debugfs_file(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ubifs_info *c = file->private_data; + struct ubifs_debug_info *d = c->dbg; + + if (file->f_path.dentry == d->dump_lprops) + dbg_dump_lprops(c); + else if (file->f_path.dentry == d->dump_budg) { + spin_lock(&c->space_lock); + dbg_dump_budg(c); + spin_unlock(&c->space_lock); + } else if (file->f_path.dentry == d->dump_budg) { + mutex_lock(&c->tnc_mutex); + dbg_dump_tnc(c); + mutex_unlock(&c->tnc_mutex); + } else + return -EINVAL; + + *ppos += count; + return count; +} + +static const struct file_operations debugfs_fops = { + .open = open_debugfs_file, + .write = write_debugfs_file, + .owner = THIS_MODULE, +}; + +/** + * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance. + * @c: UBIFS file-system description object + * + * This function creates all debugfs files for this instance of UBIFS. Returns + * zero in case of success and a negative error code in case of failure. + * + * Note, the only reason we have not merged this function with the + * 'ubifs_debugging_init()' function is because it is better to initialize + * debugfs interfaces at the very end of the mount process, and remove them at + * the very beginning of the mount process. + */ +int dbg_debugfs_init_fs(struct ubifs_info *c) +{ + int err; + const char *fname; + struct dentry *dent; + struct ubifs_debug_info *d = c->dbg; + + sprintf(d->debugfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); + d->debugfs_dir = debugfs_create_dir(d->debugfs_dir_name, + debugfs_rootdir); + if (IS_ERR(d->debugfs_dir)) { + err = PTR_ERR(d->debugfs_dir); + ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", + d->debugfs_dir_name, err); + goto out; + } + + fname = "dump_lprops"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_lprops = dent; + + fname = "dump_budg"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_budg = dent; + + fname = "dump_tnc"; + dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c, + &debugfs_fops); + if (IS_ERR(dent)) + goto out_remove; + d->dump_tnc = dent; + + return 0; + +out_remove: + err = PTR_ERR(dent); + ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", + fname, err); + debugfs_remove_recursive(d->debugfs_dir); +out: + return err; +} + +/** + * dbg_debugfs_exit_fs - remove all debugfs files. 
+ * @c: UBIFS file-system description object + */ +void dbg_debugfs_exit_fs(struct ubifs_info *c) +{ + debugfs_remove_recursive(c->dbg->debugfs_dir); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index d6ea1362d56..a6b70f8aac9 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -43,6 +43,13 @@ * @new_nhead_offs: used by LPT tree size checker * @new_ihead_lnum: used by debugging to check ihead_lnum * @new_ihead_offs: used by debugging to check ihead_offs + * + * debugfs_dir_name: name of debugfs directory containing this file-system's + * files + * debugfs_dir: direntry object of the file-system debugfs directory + * dump_lprops: "dump lprops" debugfs knob + * dump_budg: "dump budgeting information" debugfs knob + * dump_tnc: "dump TNC" debugfs knob */ struct ubifs_debug_info { void *buf; @@ -61,6 +68,12 @@ struct ubifs_debug_info { int new_nhead_offs; int new_ihead_lnum; int new_ihead_offs; + + char debugfs_dir_name[100]; + struct dentry *debugfs_dir; + struct dentry *dump_lprops; + struct dentry *dump_budg; + struct dentry *dump_tnc; }; #define ubifs_assert(expr) do { \ @@ -251,7 +264,6 @@ int ubifs_debugging_init(struct ubifs_info *c); void ubifs_debugging_exit(struct ubifs_info *c); /* Dump functions */ - const char *dbg_ntype(int type); const char *dbg_cstate(int cmt_state); const char *dbg_get_key_dump(const struct ubifs_info *c, @@ -274,7 +286,6 @@ void dbg_dump_tnc(struct ubifs_info *c); void dbg_dump_index(struct ubifs_info *c); /* Checking helper functions */ - typedef int (*dbg_leaf_callback)(struct ubifs_info *c, struct ubifs_zbranch *zbr, void *priv); typedef int (*dbg_znode_callback)(struct ubifs_info *c, @@ -354,6 +365,12 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, return dbg_leb_change(desc, lnum, buf, len, UBI_UNKNOWN); } +/* Debugfs-related stuff */ +int dbg_debugfs_init(void); +void dbg_debugfs_exit(void); +int dbg_debugfs_init_fs(struct ubifs_info *c); +void dbg_debugfs_exit_fs(struct ubifs_info *c); + #else /* !CONFIG_UBIFS_FS_DEBUG */ /* Use "if (0)" to make compiler check arguments even if debugging is off */ @@ -434,6 +451,10 @@ static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, #define dbg_force_in_the_gaps() 0 #define dbg_failure_mode 0 -#endif /* !CONFIG_UBIFS_FS_DEBUG */ +#define dbg_debugfs_init() 0 +#define dbg_debugfs_exit() +#define dbg_debugfs_init_fs(c) 0 +#define dbg_debugfs_exit_fs(c) 0 +#endif /* !CONFIG_UBIFS_FS_DEBUG */ #endif /* !__UBIFS_DEBUG_H__ */ diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ad44822059c..2dbaa4fc2cb 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1258,6 +1258,10 @@ static int mount_ubifs(struct ubifs_info *c) } } + err = dbg_debugfs_init_fs(c); + if (err) + goto out_infos; + err = dbg_check_filesystem(c); if (err) goto out_infos; @@ -1369,6 +1373,7 @@ static void ubifs_umount(struct ubifs_info *c) dbg_gen("un-mounting UBI device %d, volume %d", c->vi.ubi_num, c->vi.vol_id); + dbg_debugfs_exit_fs(c); spin_lock(&ubifs_infos_lock); list_del(&c->infos_list); spin_unlock(&ubifs_infos_lock); @@ -2078,12 +2083,18 @@ static int __init ubifs_init(void) register_shrinker(&ubifs_shrinker_info); err = ubifs_compressors_init(); + if (err) + goto out_shrinker; + + err = dbg_debugfs_init(); if (err) goto out_compr; return 0; out_compr: + ubifs_compressors_exit(); +out_shrinker: unregister_shrinker(&ubifs_shrinker_info); kmem_cache_destroy(ubifs_inode_slab); out_reg: @@ -2098,6 +2109,7 @@ static void __exit ubifs_exit(void) 
ubifs_assert(list_empty(&ubifs_infos)); ubifs_assert(atomic_long_read(&ubifs_clean_zn_cnt) == 0); + dbg_debugfs_exit(); ubifs_compressors_exit(); unregister_shrinker(&ubifs_shrinker_info); kmem_cache_destroy(ubifs_inode_slab); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 7e090a5e2bf..4cf28e85de7 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1158,6 +1158,7 @@ struct ubifs_debug_info; * @mount_opts: UBIFS-specific mount options * * @dbg: debugging-related information + * @dfs: debugfs support-related information */ struct ubifs_info { struct super_block *vfs_sb; -- cgit v1.2.3-70-g09d2 From 45e12d901fee57bccf90f6940155724954e1aac7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 11:42:18 +0200 Subject: UBIFS: run debugging checks only if they are enabled Do not forget to check whether lpt debugging is enabled before running the check functions. This commit also makes some spelling fixes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/lpt.c | 3 +-- fs/ubifs/lpt_commit.c | 9 +++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index db8bd0e518b..93c181c742f 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -36,7 +36,7 @@ * can be written into a single eraseblock. In that case, garbage collection * consists of just writing the whole table, which therefore makes all other * eraseblocks reusable. In the case of the big model, dirty eraseblocks are - * selected for garbage collection, which consists are marking the nodes in + * selected for garbage collection, which consists of marking the clean nodes in * that LEB as dirty, and then only the dirty nodes are written out. Also, in * the case of the big model, a table of LEB numbers is saved so that the entire * LPT does not to be scanned looking for empty eraseblocks when UBIFS is first @@ -156,7 +156,6 @@ int ubifs_calc_lpt_geom(struct ubifs_info *c) } c->check_lpt_free = c->big_lpt; - return 0; } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 1aefab9f0b5..7bbf03518c7 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1604,6 +1604,9 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) int ret; void *buf = c->dbg->buf; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + dbg_lp("LEB %d", lnum); err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); if (err) { @@ -1704,6 +1707,9 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) long long free = 0; int i; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + for (i = 0; i < c->lpt_lebs; i++) { if (c->ltab[i].tgc || c->ltab[i].cmt) continue; @@ -1735,6 +1741,9 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) long long chk_lpt_sz, lpt_sz; int err = 0; + if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + return 0; + switch (action) { case 0: d->chk_lpt_sz = 0; -- cgit v1.2.3-70-g09d2 From 787845bdeadd368eedeace92d5bf53f5aa1450ba Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 12:17:42 +0200 Subject: UBIFS: dump stack in LPT check functions It is useful to know how we got to the checking function when hunting the bugs. 
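Taken together with the previous commit, the LPT check helpers now share a common shape: return early unless the UBIFS_CHK_LPROPS flag is set, and on a mismatch dump the LPT state and the call stack before failing. A rough, hedged sketch of that shape (the space accounting itself is elided; only the gating and failure path follow the patches):

	int dbg_chk_lpt_free_spc(struct ubifs_info *c)
	{
		long long free = 0;

		if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
			return 0;	/* checks compiled in but disabled */

		/* ... walk the LPT LEBs and accumulate 'free' ... */

		if (free < c->lpt_sz) {	/* assumed failure condition, for illustration */
			dbg_err("LPT space error: free %lld lpt_sz %lld",
				free, c->lpt_sz);
			dbg_dump_lpt_info(c);
			dump_stack();	/* shows how we reached the failing check */
			return -EINVAL;
		}
		return 0;
	}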
Signed-off-by: Artem Bityutskiy --- fs/ubifs/lpt_commit.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 7bbf03518c7..c5c07f9cd22 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -320,6 +320,7 @@ no_space: dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dump_stack(); return err; } @@ -548,6 +549,7 @@ no_space: dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dump_stack(); return err; } @@ -1722,6 +1724,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) dbg_err("LPT space error: free %lld lpt_sz %lld", free, c->lpt_sz); dbg_dump_lpt_info(c); + dump_stack(); return -EINVAL; } return 0; @@ -1803,8 +1806,10 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) d->chk_lpt_sz, d->chk_lpt_wastage, lpt_sz); err = -EINVAL; } - if (err) + if (err) { dbg_dump_lpt_info(c); + dump_stack(); + } d->chk_lpt_sz2 = d->chk_lpt_sz; d->chk_lpt_sz = 0; d->chk_lpt_wastage = 0; -- cgit v1.2.3-70-g09d2 From 2ba5f7ae8165b3f575dd3a7d8bb18f421fab8273 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 31 Oct 2008 17:32:30 +0200 Subject: UBIFS: introduce LPT dump function Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 28 +++++++---- fs/ubifs/debug.h | 45 +++++++++-------- fs/ubifs/lpt.c | 27 ++++++----- fs/ubifs/lpt_commit.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++-- fs/ubifs/ubifs.h | 3 ++ 5 files changed, 186 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 56842772c80..934db1855f0 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -646,7 +646,8 @@ void dbg_dump_lprops(struct ubifs_info *c) struct ubifs_lprops lp; struct ubifs_lp_stats lst; - printk(KERN_DEBUG "(pid %d) Dumping LEB properties\n", current->pid); + printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n", + current->pid); ubifs_get_lp_stats(c, &lst); dbg_dump_lstats(&lst); @@ -657,6 +658,8 @@ void dbg_dump_lprops(struct ubifs_info *c) dbg_dump_lprop(c, &lp); } + printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n", + current->pid); } void dbg_dump_lpt_info(struct ubifs_info *c) @@ -664,6 +667,7 @@ void dbg_dump_lpt_info(struct ubifs_info *c) int i; spin_lock(&dbg_lock); + printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid); printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz); printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz); printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz); @@ -704,8 +708,8 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) if (dbg_failure_mode) return; - printk(KERN_DEBUG "(pid %d) Dumping LEB %d\n", current->pid, lnum); - + printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", + current->pid, lnum); sleb = ubifs_scan(c, lnum, 0, c->dbg->buf); if (IS_ERR(sleb)) { ubifs_err("scan error %d", (int)PTR_ERR(sleb)); @@ -722,6 +726,8 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) dbg_dump_node(c, snod->node); } + printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", + current->pid, lnum); ubifs_scan_destroy(sleb); return; } @@ -769,7 +775,7 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) { int i; - printk(KERN_DEBUG "(pid %d) Dumping heap cat %d (%d elements)\n", + printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n", 
current->pid, cat, heap->cnt); for (i = 0; i < heap->cnt; i++) { struct ubifs_lprops *lprops = heap->arr[i]; @@ -778,6 +784,7 @@ void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat) "flags %d\n", i, lprops->lnum, lprops->hpos, lprops->free, lprops->dirty, lprops->flags); } + printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid); } void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, @@ -785,7 +792,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, { int i; - printk(KERN_DEBUG "(pid %d) Dumping pnode:\n", current->pid); + printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid); printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n", (size_t)pnode, (size_t)parent, (size_t)pnode->cnext); printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n", @@ -804,7 +811,7 @@ void dbg_dump_tnc(struct ubifs_info *c) int level; printk(KERN_DEBUG "\n"); - printk(KERN_DEBUG "(pid %d) Dumping the TNC tree\n", current->pid); + printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid); znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL); level = znode->level; printk(KERN_DEBUG "== Level %d ==\n", level); @@ -816,8 +823,7 @@ void dbg_dump_tnc(struct ubifs_info *c) dbg_dump_znode(c, znode); znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode); } - - printk(KERN_DEBUG "\n"); + printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid); } static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode, @@ -992,7 +998,8 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, DBGKEY(&key)); ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); + DBGKEY(&zbr1->key)); + dbg_dump_node(c, dent1); goto out_free; } @@ -1001,7 +1008,8 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, zbr1->offs, DBGKEY(&key)); ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); + DBGKEY(&zbr2->key)); + dbg_dump_node(c, dent2); goto out_free; } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index a6b70f8aac9..9820d6999f7 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -270,6 +270,8 @@ const char *dbg_get_key_dump(const struct ubifs_info *c, const union ubifs_key *key); void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode); void dbg_dump_node(const struct ubifs_info *c, const void *node); +void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum, + int offs); void dbg_dump_budget_req(const struct ubifs_budget_req *req); void dbg_dump_lstats(const struct ubifs_lp_stats *lst); void dbg_dump_budg(struct ubifs_info *c); @@ -284,6 +286,7 @@ void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, struct ubifs_nnode *parent, int iip); void dbg_dump_tnc(struct ubifs_info *c); void dbg_dump_index(struct ubifs_info *c); +void dbg_dump_lpt_lebs(const struct ubifs_info *c); /* Checking helper functions */ typedef int (*dbg_leaf_callback)(struct ubifs_info *c, @@ -411,26 +414,28 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define DBGKEY(key) ((char *)(key)) #define DBGKEY1(key) ((char *)(key)) -#define ubifs_debugging_init(c) 0 -#define ubifs_debugging_exit(c) ({}) - -#define dbg_ntype(type) "" -#define dbg_cstate(cmt_state) "" -#define dbg_get_key_dump(c, key) ({}) -#define dbg_dump_inode(c, 
inode) ({}) -#define dbg_dump_node(c, node) ({}) -#define dbg_dump_budget_req(req) ({}) -#define dbg_dump_lstats(lst) ({}) -#define dbg_dump_budg(c) ({}) -#define dbg_dump_lprop(c, lp) ({}) -#define dbg_dump_lprops(c) ({}) -#define dbg_dump_lpt_info(c) ({}) -#define dbg_dump_leb(c, lnum) ({}) -#define dbg_dump_znode(c, znode) ({}) -#define dbg_dump_heap(c, heap, cat) ({}) -#define dbg_dump_pnode(c, pnode, parent, iip) ({}) -#define dbg_dump_tnc(c) ({}) -#define dbg_dump_index(c) ({}) +#define ubifs_debugging_init(c) 0 +#define ubifs_debugging_exit(c) ({}) + +#define dbg_ntype(type) "" +#define dbg_cstate(cmt_state) "" +#define dbg_get_key_dump(c, key) ({}) +#define dbg_dump_inode(c, inode) ({}) +#define dbg_dump_node(c, node) ({}) +#define dbg_dump_lpt_node(c, node, lnum, offs) ({}) +#define dbg_dump_budget_req(req) ({}) +#define dbg_dump_lstats(lst) ({}) +#define dbg_dump_budg(c) ({}) +#define dbg_dump_lprop(c, lp) ({}) +#define dbg_dump_lprops(c) ({}) +#define dbg_dump_lpt_info(c) ({}) +#define dbg_dump_leb(c, lnum) ({}) +#define dbg_dump_znode(c, znode) ({}) +#define dbg_dump_heap(c, heap, cat) ({}) +#define dbg_dump_pnode(c, pnode, parent, iip) ({}) +#define dbg_dump_tnc(c) ({}) +#define dbg_dump_index(c) ({}) +#define dbg_dump_lpt_lebs(c) ({}) #define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0 #define dbg_old_index_check_init(c, zroot) 0 diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 93c181c742f..6d914160ec5 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -557,7 +557,7 @@ static int calc_nnode_num(int row, int col) * This function calculates and returns the nnode number based on the parent's * nnode number and the index in parent. */ -static int calc_nnode_num_from_parent(struct ubifs_info *c, +static int calc_nnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int num, shft; @@ -582,7 +582,7 @@ static int calc_nnode_num_from_parent(struct ubifs_info *c, * This function calculates and returns the pnode number based on the parent's * nnode number and the index in parent. */ -static int calc_pnode_num_from_parent(struct ubifs_info *c, +static int calc_pnode_num_from_parent(const struct ubifs_info *c, struct ubifs_nnode *parent, int iip) { int i, n = c->lpt_hght - 1, pnum = parent->num, num = 0; @@ -965,7 +965,7 @@ static int check_lpt_type(uint8_t **addr, int *pos, int type) * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_pnode(struct ubifs_info *c, void *buf, +static int unpack_pnode(const struct ubifs_info *c, void *buf, struct ubifs_pnode *pnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; @@ -995,15 +995,15 @@ static int unpack_pnode(struct ubifs_info *c, void *buf, } /** - * unpack_nnode - unpack a nnode. + * ubifs_unpack_nnode - unpack a nnode. * @c: UBIFS file-system description object * @buf: buffer containing packed nnode to unpack * @nnode: nnode structure to fill * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_nnode(struct ubifs_info *c, void *buf, - struct ubifs_nnode *nnode) +int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, + struct ubifs_nnode *nnode) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1035,7 +1035,7 @@ static int unpack_nnode(struct ubifs_info *c, void *buf, * * This function returns %0 on success and a negative error code on failure. 
*/ -static int unpack_ltab(struct ubifs_info *c, void *buf) +static int unpack_ltab(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1067,7 +1067,7 @@ static int unpack_ltab(struct ubifs_info *c, void *buf) * * This function returns %0 on success and a negative error code on failure. */ -static int unpack_lsave(struct ubifs_info *c, void *buf) +static int unpack_lsave(const struct ubifs_info *c, void *buf) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int i, pos = 0, err; @@ -1095,7 +1095,7 @@ static int unpack_lsave(struct ubifs_info *c, void *buf) * * This function returns %0 on success and a negative error code on failure. */ -static int validate_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode, +static int validate_nnode(const struct ubifs_info *c, struct ubifs_nnode *nnode, struct ubifs_nnode *parent, int iip) { int i, lvl, max_offs; @@ -1139,7 +1139,7 @@ static int validate_nnode(struct ubifs_info *c, struct ubifs_nnode *nnode, * * This function returns %0 on success and a negative error code on failure. */ -static int validate_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, +static int validate_pnode(const struct ubifs_info *c, struct ubifs_pnode *pnode, struct ubifs_nnode *parent, int iip) { int i; @@ -1173,7 +1173,8 @@ static int validate_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode, * This function calculates the LEB numbers for the LEB properties it contains * based on the pnode number. */ -static void set_pnode_lnum(struct ubifs_info *c, struct ubifs_pnode *pnode) +static void set_pnode_lnum(const struct ubifs_info *c, + struct ubifs_pnode *pnode) { int i, lnum; @@ -1226,7 +1227,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz); if (err) goto out; - err = unpack_nnode(c, buf, nnode); + err = ubifs_unpack_nnode(c, buf, nnode); if (err) goto out; } @@ -1815,7 +1816,7 @@ static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c, c->nnode_sz); if (err) return ERR_PTR(err); - err = unpack_nnode(c, buf, nnode); + err = ubifs_unpack_nnode(c, buf, nnode); if (err) return ERR_PTR(err); } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index c5c07f9cd22..da60b5a0fab 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -320,6 +320,7 @@ no_space: dbg_err("LPT out of space at LEB %d:%d needing %d, done_ltab %d, " "done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return err; } @@ -549,6 +550,7 @@ no_space: dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return err; } @@ -1027,7 +1029,7 @@ static int make_node_dirty(struct ubifs_info *c, int node_type, int node_num, * @c: UBIFS file-system description object * @node_type: LPT node type */ -static int get_lpt_node_len(struct ubifs_info *c, int node_type) +static int get_lpt_node_len(const struct ubifs_info *c, int node_type) { switch (node_type) { case UBIFS_LPT_NNODE: @@ -1048,7 +1050,7 @@ static int get_lpt_node_len(struct ubifs_info *c, int node_type) * @buf: buffer * @len: length of buffer */ -static int get_pad_len(struct ubifs_info *c, uint8_t *buf, int len) +static int get_pad_len(const struct ubifs_info *c, uint8_t *buf, int len) { int offs, pad_len; @@ -1065,7 +1067,8 @@ static int get_pad_len(struct ubifs_info *c, uint8_t *buf, 
int len) * @buf: buffer * @node_num: node number is returned here */ -static int get_lpt_node_type(struct ubifs_info *c, uint8_t *buf, int *node_num) +static int get_lpt_node_type(const struct ubifs_info *c, uint8_t *buf, + int *node_num) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int pos = 0, node_type; @@ -1083,7 +1086,7 @@ static int get_lpt_node_type(struct ubifs_info *c, uint8_t *buf, int *node_num) * * This function returns %1 if the buffer contains a node or %0 if it does not. */ -static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) +static int is_a_node(const struct ubifs_info *c, uint8_t *buf, int len) { uint8_t *addr = buf + UBIFS_LPT_CRC_BYTES; int pos = 0, node_type, node_len; @@ -1107,7 +1110,6 @@ static int is_a_node(struct ubifs_info *c, uint8_t *buf, int len) return 1; } - /** * lpt_gc_lnum - garbage collect a LPT LEB. * @c: UBIFS file-system description object @@ -1724,6 +1726,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) dbg_err("LPT space error: free %lld lpt_sz %lld", free, c->lpt_sz); dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); return -EINVAL; } @@ -1808,6 +1811,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } if (err) { dbg_dump_lpt_info(c); + dbg_dump_lpt_lebs(c); dump_stack(); } d->chk_lpt_sz2 = d->chk_lpt_sz; @@ -1825,4 +1829,121 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) } } +/** + * dbg_dump_lpt_leb - dump an LPT LEB. + * @c: UBIFS file-system description object + * @lnum: LEB number to dump + * + * This function dumps an LEB from LPT area. Nodes in this area are very + * different to nodes in the main area (e.g., they do not have common headers, + * they do not have 8-byte alignments, etc), so we have a separate function to + * dump LPT area LEBs. Note, LPT has to be locked by the coller. 
+ */ +static void dump_lpt_leb(const struct ubifs_info *c, int lnum) +{ + int err, len = c->leb_size, node_type, node_num, node_len, offs; + void *buf = c->dbg->buf; + + printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", + current->pid, lnum); + err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); + if (err) { + ubifs_err("cannot read LEB %d, error %d", lnum, err); + return; + } + while (1) { + offs = c->leb_size - len; + if (!is_a_node(c, buf, len)) { + int pad_len; + + pad_len = get_pad_len(c, buf, len); + if (pad_len) { + printk(KERN_DEBUG "LEB %d:%d, pad %d bytes\n", + lnum, offs, pad_len); + buf += pad_len; + len -= pad_len; + continue; + } + if (len) + printk(KERN_DEBUG "LEB %d:%d, free %d bytes\n", + lnum, offs, len); + break; + } + + node_type = get_lpt_node_type(c, buf, &node_num); + switch (node_type) { + case UBIFS_LPT_PNODE: + { + node_len = c->pnode_sz; + if (c->big_lpt) + printk(KERN_DEBUG "LEB %d:%d, pnode num %d\n", + lnum, offs, node_num); + else + printk(KERN_DEBUG "LEB %d:%d, pnode\n", + lnum, offs); + break; + } + case UBIFS_LPT_NNODE: + { + int i; + struct ubifs_nnode nnode; + + node_len = c->nnode_sz; + if (c->big_lpt) + printk(KERN_DEBUG "LEB %d:%d, nnode num %d, ", + lnum, offs, node_num); + else + printk(KERN_DEBUG "LEB %d:%d, nnode, ", + lnum, offs); + err = ubifs_unpack_nnode(c, buf, &nnode); + for (i = 0; i < UBIFS_LPT_FANOUT; i++) { + printk("%d:%d", nnode.nbranch[i].lnum, + nnode.nbranch[i].offs); + if (i != UBIFS_LPT_FANOUT - 1) + printk(", "); + } + printk("\n"); + break; + } + case UBIFS_LPT_LTAB: + node_len = c->ltab_sz; + printk(KERN_DEBUG "LEB %d:%d, ltab\n", + lnum, offs); + break; + case UBIFS_LPT_LSAVE: + node_len = c->lsave_sz; + printk(KERN_DEBUG "LEB %d:%d, lsave len\n", lnum, offs); + break; + default: + ubifs_err("LPT node type %d not recognized", node_type); + return; + } + + buf += node_len; + len -= node_len; + } + + printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n", + current->pid, lnum); +} + +/** + * dbg_dump_lpt_lebs - dump LPT lebs. + * @c: UBIFS file-system description object + * + * This function dumps all LPT LEBs. The caller has to make sure the LPT is + * locked. + */ +void dbg_dump_lpt_lebs(const struct ubifs_info *c) +{ + int i; + + printk(KERN_DEBUG "(pid %d) start dumping all LPT LEBs\n", + current->pid); + for (i = 0; i < c->lpt_lebs; i++) + dump_lpt_leb(c, i + c->lpt_first); + printk(KERN_DEBUG "(pid %d) finish dumping all LPT LEBs\n", + current->pid); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4cf28e85de7..e658b06fd45 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1622,6 +1622,9 @@ void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty); void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode); uint32_t ubifs_unpack_bits(uint8_t **addr, int *pos, int nrbits); struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght); +/* Needed only in debugging code in lpt_commit.c */ +int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf, + struct ubifs_nnode *nnode); /* lpt_commit.c */ int ubifs_lpt_start_commit(struct ubifs_info *c); -- cgit v1.2.3-70-g09d2 From ddcd856d81861a523d79d077facd875da1f66792 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 07:55:34 -0500 Subject: [XFS] fix compile on 32 bit systems The recent compat patches make xfs_file.c include xfs_ioctl32.h unconditional, which breaks the build on 32 bit systems which don't have the various compat defintions. 
Remove the include and move the defintion of xfs_file_compat_ioctl to xfs_ioctl.h so that we can avoid including all the compat defintions in xfs_file.c Signed-off-by: Christoph Hellwig Tested-by: Kamalesh Babulal Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_file.c | 2 +- fs/xfs/linux-2.6/xfs_ioctl.h | 12 ++++++++++++ fs/xfs/linux-2.6/xfs_ioctl32.h | 3 --- 3 files changed, 13 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 72fc8d8c8bc..d377db05d80 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -36,9 +36,9 @@ #include "xfs_inode.h" #include "xfs_error.h" #include "xfs_rw.h" -#include "xfs_ioctl32.h" #include "xfs_vnodeops.h" #include "xfs_da_btree.h" +#include "xfs_ioctl.h" #include #include diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index f67dc69381e..a3446aad070 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -67,4 +67,16 @@ xfs_attrmulti_attr_remove( char *name, __uint32_t flags); +extern long +xfs_file_compat_ioctl( + struct file *file, + unsigned int cmd, + unsigned long arg); + +extern long +xfs_file_compat_ioctl_invis( + struct file *file, + unsigned int cmd, + unsigned long arg); + #endif diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.h b/fs/xfs/linux-2.6/xfs_ioctl32.h index af918749d18..1024c4f8ba0 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.h +++ b/fs/xfs/linux-2.6/xfs_ioctl32.h @@ -20,9 +20,6 @@ #include -extern long xfs_file_compat_ioctl(struct file *, unsigned, unsigned long); -extern long xfs_file_compat_invis_ioctl(struct file *, unsigned, unsigned long); - /* * on 32-bit arches, ioctl argument structures may have different sizes * and/or alignment. We define compat structures which match the -- cgit v1.2.3-70-g09d2 From 2234d54d3d855d6ffae88a24772a9389d6755e0c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:22 +0100 Subject: remove useless mnt_want_write call in xfs_write When mnt_want_write was introduced a call to it was added around xfs_ichgtime, but there is no need for this because a file can't be open read/write on a r/o mount, and a mount can't degrade r/o while we still have files open for writing. As the mnt_want_write changes were never merged into the CVS tree this patch is for mainline only. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_lrw.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 4959c874499..59b7d5f9e64 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -51,7 +51,6 @@ #include "xfs_vnodeops.h" #include -#include #include @@ -668,15 +667,8 @@ start: if (new_size > xip->i_size) xip->i_new_size = new_size; - /* - * We're not supposed to change timestamps in readonly-mounted - * filesystems. Throw it away if anyone asks us. 
- */ - if (likely(!(ioflags & IO_INVIS) && - !mnt_want_write(file->f_path.mnt))) { + if (likely(!(ioflags & IO_INVIS))) xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - mnt_drop_write(file->f_path.mnt); - } /* * If the offset is beyond the size of the file, we have a couple -- cgit v1.2.3-70-g09d2 From 73e6335c14209e508bec8ca7985d1fbde183bd1f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:23 +0100 Subject: remove unused behvavior cruft in xfs_super.h Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_super.h | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h index 56dc48a76fa..d5d776d4cd6 100644 --- a/fs/xfs/linux-2.6/xfs_super.h +++ b/fs/xfs/linux-2.6/xfs_super.h @@ -20,24 +20,12 @@ #include -#ifdef CONFIG_XFS_DMAPI -# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops) -# define vfs_initdmapi() dmapi_init() -# define vfs_exitdmapi() dmapi_uninit() -#else -# define vfs_insertdmapi(vfs) do { } while (0) -# define vfs_initdmapi() do { } while (0) -# define vfs_exitdmapi() do { } while (0) -#endif - #ifdef CONFIG_XFS_QUOTA -# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops) extern void xfs_qm_init(void); extern void xfs_qm_exit(void); # define vfs_initquota() xfs_qm_init() # define vfs_exitquota() xfs_qm_exit() #else -# define vfs_insertquota(vfs) do { } while (0) # define vfs_initquota() do { } while (0) # define vfs_exitquota() do { } while (0) #endif -- cgit v1.2.3-70-g09d2 From ccd0be6cfc6943c4e0b3e3cdb598e0b7354a2d78 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:24 +0100 Subject: remove unused prototypes for xfs_ihash_init / xfs_ihash_free Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_inode.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ae5800e7d39..c38d4509c66 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -496,8 +496,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) /* * xfs_iget.c prototypes. */ -void xfs_ihash_init(struct xfs_mount *); -void xfs_ihash_free(struct xfs_mount *); xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, struct xfs_trans *); int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, -- cgit v1.2.3-70-g09d2 From 5cafdeb2891a415a5dbf0ad80f0afedf8369e6bb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:25 +0100 Subject: cleanup the inode reclaim path Merge xfs_iextract and xfs_idestroy into xfs_ireclaim as they are never called individually. Also rewrite most comments in this area as they were severly out of date. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_iget.c | 128 +++++++++++++++++++++++++++++++++++------------------ fs/xfs/xfs_inode.c | 72 ------------------------------ fs/xfs/xfs_inode.h | 2 - fs/xfs/xfs_mount.h | 1 - 4 files changed, 86 insertions(+), 117 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 27ec2417b85..f58e5e000fc 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -450,65 +450,109 @@ xfs_iput_new( IRELE(ip); } - /* - * This routine embodies the part of the reclaim code that pulls - * the inode from the inode hash table and the mount structure's - * inode list. 
- * This should only be called from xfs_reclaim(). + * This is called free all the memory associated with an inode. + * It must free the inode itself and any buffers allocated for + * if_extents/if_data and if_broot. It must also free the lock + * associated with the inode. + * + * Note: because we don't initialise everything on reallocation out + * of the zone, we must ensure we nullify everything correctly before + * freeing the structure. */ void -xfs_ireclaim(xfs_inode_t *ip) +xfs_ireclaim( + struct xfs_inode *ip) { - /* - * Remove from old hash list and mount list. - */ - XFS_STATS_INC(xs_ig_reclaims); + struct xfs_mount *mp = ip->i_mount; + struct xfs_perag *pag; - xfs_iextract(ip); + XFS_STATS_INC(xs_ig_reclaims); /* - * Here we do a spurious inode lock in order to coordinate with inode - * cache radix tree lookups. This is because the lookup can reference - * the inodes in the cache without taking references. We make that OK - * here by ensuring that we wait until the inode is unlocked after the - * lookup before we go ahead and free it. We get both the ilock and - * the iolock because the code may need to drop the ilock one but will - * still hold the iolock. + * Remove the inode from the per-AG radix tree. It doesn't matter + * if it was never added to it because radix_tree_delete can deal + * with that case just fine. */ - xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); + pag = xfs_get_perag(mp, ip->i_ino); + write_lock(&pag->pag_ici_lock); + radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino)); + write_unlock(&pag->pag_ici_lock); + xfs_put_perag(mp, pag); /* - * Release dquots (and their references) if any. An inode may escape - * xfs_inactive and get here via vn_alloc->vn_reclaim path. + * Here we do an (almost) spurious inode lock in order to coordinate + * with inode cache radix tree lookups. This is because the lookup + * can reference the inodes in the cache without taking references. + * + * We make that OK here by ensuring that we wait until the inode is + * unlocked after the lookup before we go ahead and free it. We get + * both the ilock and the iolock because the code may need to drop the + * ilock one but will still hold the iolock. */ - XFS_QM_DQDETACH(ip->i_mount, ip); - + xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); /* - * Free all memory associated with the inode. + * Release dquots (and their references) if any. */ + XFS_QM_DQDETACH(ip->i_mount, ip); xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); - xfs_idestroy(ip); -} -/* - * This routine removes an about-to-be-destroyed inode from - * all of the lists in which it is located with the exception - * of the behavior chain. 
- */ -void -xfs_iextract( - xfs_inode_t *ip) -{ - xfs_mount_t *mp = ip->i_mount; - xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino); + switch (ip->i_d.di_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + xfs_idestroy_fork(ip, XFS_DATA_FORK); + break; + } - write_lock(&pag->pag_ici_lock); - radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino)); - write_unlock(&pag->pag_ici_lock); - xfs_put_perag(mp, pag); + if (ip->i_afp) + xfs_idestroy_fork(ip, XFS_ATTR_FORK); - mp->m_ireclaims++; +#ifdef XFS_INODE_TRACE + ktrace_free(ip->i_trace); +#endif +#ifdef XFS_BMAP_TRACE + ktrace_free(ip->i_xtrace); +#endif +#ifdef XFS_BTREE_TRACE + ktrace_free(ip->i_btrace); +#endif +#ifdef XFS_RW_TRACE + ktrace_free(ip->i_rwtrace); +#endif +#ifdef XFS_ILOCK_TRACE + ktrace_free(ip->i_lock_trace); +#endif +#ifdef XFS_DIR2_TRACE + ktrace_free(ip->i_dir_trace); +#endif + if (ip->i_itemp) { + /* + * Only if we are shutting down the fs will we see an + * inode still in the AIL. If it is there, we should remove + * it to prevent a use-after-free from occurring. + */ + xfs_log_item_t *lip = &ip->i_itemp->ili_item; + struct xfs_ail *ailp = lip->li_ailp; + + ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || + XFS_FORCED_SHUTDOWN(ip->i_mount)); + if (lip->li_flags & XFS_LI_IN_AIL) { + spin_lock(&ailp->xa_lock); + if (lip->li_flags & XFS_LI_IN_AIL) + xfs_trans_ail_delete(ailp, lip); + else + spin_unlock(&ailp->xa_lock); + } + xfs_inode_item_destroy(ip); + ip->i_itemp = NULL; + } + /* asserts to verify all state is correct here */ + ASSERT(atomic_read(&ip->i_iocount) == 0); + ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!spin_is_locked(&ip->i_flags_lock)); + ASSERT(completion_done(&ip->i_flush)); + kmem_zone_free(xfs_inode_zone, ip); } /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 872191b4678..4e664f57860 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2449,78 +2449,6 @@ xfs_idestroy_fork( } } -/* - * This is called free all the memory associated with an inode. - * It must free the inode itself and any buffers allocated for - * if_extents/if_data and if_broot. It must also free the lock - * associated with the inode. - * - * Note: because we don't initialise everything on reallocation out - * of the zone, we must ensure we nullify everything correctly before - * freeing the structure. - */ -void -xfs_idestroy( - xfs_inode_t *ip) -{ - switch (ip->i_d.di_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - xfs_idestroy_fork(ip, XFS_DATA_FORK); - break; - } - if (ip->i_afp) - xfs_idestroy_fork(ip, XFS_ATTR_FORK); - -#ifdef XFS_INODE_TRACE - ktrace_free(ip->i_trace); -#endif -#ifdef XFS_BMAP_TRACE - ktrace_free(ip->i_xtrace); -#endif -#ifdef XFS_BTREE_TRACE - ktrace_free(ip->i_btrace); -#endif -#ifdef XFS_RW_TRACE - ktrace_free(ip->i_rwtrace); -#endif -#ifdef XFS_ILOCK_TRACE - ktrace_free(ip->i_lock_trace); -#endif -#ifdef XFS_DIR2_TRACE - ktrace_free(ip->i_dir_trace); -#endif - if (ip->i_itemp) { - /* - * Only if we are shutting down the fs will we see an - * inode still in the AIL. If it is there, we should remove - * it to prevent a use-after-free from occurring. 
- */ - xfs_log_item_t *lip = &ip->i_itemp->ili_item; - struct xfs_ail *ailp = lip->li_ailp; - - ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || - XFS_FORCED_SHUTDOWN(ip->i_mount)); - if (lip->li_flags & XFS_LI_IN_AIL) { - spin_lock(&ailp->xa_lock); - if (lip->li_flags & XFS_LI_IN_AIL) - xfs_trans_ail_delete(ailp, lip); - else - spin_unlock(&ailp->xa_lock); - } - xfs_inode_item_destroy(ip); - ip->i_itemp = NULL; - } - /* asserts to verify all state is correct here */ - ASSERT(atomic_read(&ip->i_iocount) == 0); - ASSERT(atomic_read(&ip->i_pincount) == 0); - ASSERT(!spin_is_locked(&ip->i_flags_lock)); - ASSERT(completion_done(&ip->i_flush)); - kmem_zone_free(xfs_inode_zone, ip); -} - - /* * Increment the pin count of the given buffer. * This value is protected by ipinlock spinlock in the mount structure. diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index c38d4509c66..03ae96bdae7 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -529,8 +529,6 @@ int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, xfs_fsize_t, int, int); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); -void xfs_idestroy(xfs_inode_t *); -void xfs_iextract(xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); void xfs_ipin(xfs_inode_t *); void xfs_iunpin(xfs_inode_t *); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 4fce22a8c35..38252554b1c 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -241,7 +241,6 @@ typedef struct xfs_mount { xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ spinlock_t m_agirotor_lock;/* .. and lock protecting it */ xfs_agnumber_t m_maxagi; /* highest inode alloc group */ - uint m_ireclaims; /* count of calls to reclaim*/ uint m_readio_log; /* min read size log bytes */ uint m_readio_blocks; /* min read size blocks */ uint m_writeio_log; /* min write size log bytes */ -- cgit v1.2.3-70-g09d2 From 5d765b976c3a41faf9a73718fb8cc5833990a8ef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:26 +0100 Subject: kill xfs_buf_iostart xfs_buf_iostart is a "shared" helper for xfs_buf_read_flags, xfs_bawrite, and xfs_bdwrite - except that there isn't much shared code but rather special cases for each caller. So remove this function and move the functionality to the caller. xfs_bawrite and xfs_bdwrite are now big enough to be moved out of line and the xfs_buf_read_flags is moved into a new helper called _xfs_buf_read. 
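This is also why xfs_bdwrite can be void (it only queues the buffer for delayed write and issues no I/O) while xfs_bawrite returns the submission status. The underlying cleanup is the usual one of replacing a single flag-multiplexed entry point with small dedicated helpers so each caller states its intent directly; a minimal self-contained sketch of that pattern (hypothetical names, not the actual xfs_buf API):

	#include <stdbool.h>
	#include <stdio.h>

	struct buf { const char *name; };

	static int submit_io(struct buf *bp, bool write, bool async)
	{
		printf("%s: %s %s\n", bp->name, async ? "async" : "sync",
		       write ? "write" : "read");
		return 0;
	}

	/* Dedicated helpers replace one iostart(bp, flags) that branched on flags. */
	static int buf_read_sync(struct buf *bp)   { return submit_io(bp, false, false); }
	static int buf_write_async(struct buf *bp) { return submit_io(bp, true, true); }

	int main(void)
	{
		struct buf b = { "demo" };

		buf_read_sync(&b);	/* was: iostart(&b, 0) */
		buf_write_async(&b);	/* was: iostart(&b, F_WRITE | F_ASYNC) */
		return 0;
	}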
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_buf.c | 84 ++++++++++++++++++++++++++-------------------- fs/xfs/linux-2.6/xfs_buf.h | 22 ++---------- 2 files changed, 50 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 36d5fcd3f59..cd89c56715a 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -630,6 +630,29 @@ xfs_buf_get_flags( return NULL; } +STATIC int +_xfs_buf_read( + xfs_buf_t *bp, + xfs_buf_flags_t flags) +{ + int status; + + XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags); + + ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); + ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); + + bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \ + XBF_READ_AHEAD | _XBF_RUN_QUEUES); + bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \ + XBF_READ_AHEAD | _XBF_RUN_QUEUES); + + status = xfs_buf_iorequest(bp); + if (!status && !(flags & XBF_ASYNC)) + status = xfs_buf_iowait(bp); + return status; +} + xfs_buf_t * xfs_buf_read_flags( xfs_buftarg_t *target, @@ -646,7 +669,7 @@ xfs_buf_read_flags( if (!XFS_BUF_ISDONE(bp)) { XB_TRACE(bp, "read", (unsigned long)flags); XFS_STATS_INC(xb_get_read); - xfs_buf_iostart(bp, flags); + _xfs_buf_read(bp, flags); } else if (flags & XBF_ASYNC) { XB_TRACE(bp, "read_async", (unsigned long)flags); /* @@ -1048,50 +1071,39 @@ xfs_buf_ioerror( XB_TRACE(bp, "ioerror", (unsigned long)error); } -/* - * Initiate I/O on a buffer, based on the flags supplied. - * The b_iodone routine in the buffer supplied will only be called - * when all of the subsidiary I/O requests, if any, have been completed. - */ int -xfs_buf_iostart( - xfs_buf_t *bp, - xfs_buf_flags_t flags) +xfs_bawrite( + void *mp, + struct xfs_buf *bp) { - int status = 0; + XB_TRACE(bp, "bawrite", 0); - XB_TRACE(bp, "iostart", (unsigned long)flags); + ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); - if (flags & XBF_DELWRI) { - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC); - bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC); - xfs_buf_delwri_queue(bp, 1); - return 0; - } + xfs_buf_delwri_dequeue(bp); - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \ - XBF_READ_AHEAD | _XBF_RUN_QUEUES); - bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \ - XBF_READ_AHEAD | _XBF_RUN_QUEUES); + bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD); + bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); + + bp->b_fspriv3 = mp; + bp->b_strat = xfs_bdstrat_cb; + return xfs_bdstrat_cb(bp); +} - BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL); +void +xfs_bdwrite( + void *mp, + struct xfs_buf *bp) +{ + XB_TRACE(bp, "bdwrite", 0); - /* For writes allow an alternate strategy routine to precede - * the actual I/O request (which may not be issued at all in - * a shutdown situation, for example). - */ - status = (flags & XBF_WRITE) ? - xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp); + bp->b_strat = xfs_bdstrat_cb; + bp->b_fspriv3 = mp; - /* Wait for I/O if we are not an async request. - * Note: async I/O request completion will release the buffer, - * and that can already be done by this point. So using the - * buffer pointer from here on, after async I/O, is invalid. 
- */ - if (!status && !(flags & XBF_ASYNC)) - status = xfs_buf_iowait(bp); + bp->b_flags &= ~XBF_READ; + bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); - return status; + xfs_buf_delwri_queue(bp, 1); } STATIC_INLINE void diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 456519a088c..0e2aa16f5e4 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -214,9 +214,10 @@ extern void xfs_buf_lock(xfs_buf_t *); extern void xfs_buf_unlock(xfs_buf_t *); /* Buffer Read and Write Routines */ +extern int xfs_bawrite(void *mp, xfs_buf_t *bp); +extern void xfs_bdwrite(void *mp, xfs_buf_t *bp); extern void xfs_buf_ioend(xfs_buf_t *, int); extern void xfs_buf_ioerror(xfs_buf_t *, int); -extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t); extern int xfs_buf_iorequest(xfs_buf_t *); extern int xfs_buf_iowait(xfs_buf_t *); extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t, @@ -366,14 +367,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define XFS_BUF_TARGET(bp) ((bp)->b_target) #define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target) -static inline int xfs_bawrite(void *mp, xfs_buf_t *bp) -{ - bp->b_fspriv3 = mp; - bp->b_strat = xfs_bdstrat_cb; - xfs_buf_delwri_dequeue(bp); - return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); -} - static inline void xfs_buf_relse(xfs_buf_t *bp) { if (!bp->b_relse) @@ -414,17 +407,6 @@ static inline int XFS_bwrite(xfs_buf_t *bp) return error; } -/* - * No error can be returned from xfs_buf_iostart for delwri - * buffers as they are queued and no I/O is issued. - */ -static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp) -{ - bp->b_strat = xfs_bdstrat_cb; - bp->b_fspriv3 = mp; - (void)xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC); -} - #define XFS_bdstrat(bp) xfs_buf_iorequest(bp) #define xfs_iowait(bp) xfs_buf_iowait(bp) -- cgit v1.2.3-70-g09d2 From d9424b3c4a1e96f87c6cfd4d8dd2f8d9bbb4dcc5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:27 +0100 Subject: stop using igrab in xfs_vn_link ->link is guranteed to get an already reference inode passed so we can do a simple increment of i_count instead of using igrab and thus avoid banging on the global inode_lock. This is what most filesystems already do. Also move the increment after the call to xfs_link to simplify error handling. 
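For reference, the resulting shape is the one most ->link implementations use: rely on the reference the VFS already holds on old_dentry's inode, create the on-disk link first, and only then take the extra reference owned by the new dentry. A hedged sketch for a hypothetical filesystem (myfs_add_link() stands in for whatever creates the directory entry; this is not the XFS code itself):

	#include <linux/fs.h>
	#include <linux/dcache.h>

	/* Hypothetical helper that creates the on-disk directory entry. */
	static int myfs_add_link(struct inode *dir, struct dentry *dentry,
				 struct inode *inode);

	static int myfs_link(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *dentry)
	{
		struct inode *inode = old_dentry->d_inode;
		int error;

		error = myfs_add_link(dir, dentry, inode);
		if (error)
			return error;	/* nothing to undo: no extra reference taken yet */

		atomic_inc(&inode->i_count);	/* reference for the new dentry */
		d_instantiate(dentry, inode);
		return 0;
	}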
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_iops.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 2903faf6a26..76b570dd1ab 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -367,21 +367,18 @@ xfs_vn_link( struct inode *dir, struct dentry *dentry) { - struct inode *inode; /* inode of guy being linked to */ + struct inode *inode = old_dentry->d_inode; struct xfs_name name; int error; - inode = old_dentry->d_inode; xfs_dentry_to_name(&name, dentry); - igrab(inode); error = xfs_link(XFS_I(dir), XFS_I(inode), &name); - if (unlikely(error)) { - iput(inode); + if (unlikely(error)) return -error; - } xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED); + atomic_inc(&inode->i_count); d_instantiate(dentry, inode); return 0; } -- cgit v1.2.3-70-g09d2 From 39e2defe73106ca2e1c85e5286038a0a13f49513 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:28 +0100 Subject: reduce l_icloglock roundtrips All but one caller of xlog_state_want_sync drop and re-acquire l_icloglock around the call to it, just so that xlog_state_want_sync can acquire and drop it. Move all lock operation out of l_icloglock and assert that the lock is held when it is called. Note that it would make sense to extende this scheme to xlog_state_release_iclog, but the locking in there is more complicated and we'd like to keep the atomic_dec_and_lock optmization for those callers not having l_icloglock yet. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_log.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index aadaa1472f6..f4726f702a9 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -729,8 +729,8 @@ xfs_log_unmount_write(xfs_mount_t *mp) spin_lock(&log->l_icloglock); iclog = log->l_iclog; atomic_inc(&iclog->ic_refcnt); - spin_unlock(&log->l_icloglock); xlog_state_want_sync(log, iclog); + spin_unlock(&log->l_icloglock); error = xlog_state_release_iclog(log, iclog); spin_lock(&log->l_icloglock); @@ -767,9 +767,9 @@ xfs_log_unmount_write(xfs_mount_t *mp) spin_lock(&log->l_icloglock); iclog = log->l_iclog; atomic_inc(&iclog->ic_refcnt); - spin_unlock(&log->l_icloglock); xlog_state_want_sync(log, iclog); + spin_unlock(&log->l_icloglock); error = xlog_state_release_iclog(log, iclog); spin_lock(&log->l_icloglock); @@ -1984,7 +1984,9 @@ xlog_write(xfs_mount_t * mp, if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); record_cnt = data_cnt = 0; + spin_lock(&log->l_icloglock); xlog_state_want_sync(log, iclog); + spin_unlock(&log->l_icloglock); if (commit_iclog) { ASSERT(flags & XLOG_COMMIT_TRANS); *commit_iclog = iclog; @@ -3193,7 +3195,7 @@ try_again: STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) { - spin_lock(&log->l_icloglock); + ASSERT(spin_is_locked(&log->l_icloglock)); if (iclog->ic_state == XLOG_STATE_ACTIVE) { xlog_state_switch_iclogs(log, iclog, 0); @@ -3201,10 +3203,7 @@ xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) ASSERT(iclog->ic_state & (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); } - - spin_unlock(&log->l_icloglock); -} /* xlog_state_want_sync */ - +} /***************************************************************************** -- cgit v1.2.3-70-g09d2 From 
63ad2a5c4cf37e3242142eee8a8dcd4a8515302e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:29 +0100 Subject: remove dead code from sv_t implementation Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/sv.h | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h index 351a8f454bd..4dfc7c37081 100644 --- a/fs/xfs/linux-2.6/sv.h +++ b/fs/xfs/linux-2.6/sv.h @@ -32,23 +32,15 @@ typedef struct sv_s { wait_queue_head_t waiters; } sv_t; -#define SV_FIFO 0x0 /* sv_t is FIFO type */ -#define SV_LIFO 0x2 /* sv_t is LIFO type */ -#define SV_PRIO 0x4 /* sv_t is PRIO type */ -#define SV_KEYED 0x6 /* sv_t is KEYED type */ -#define SV_DEFAULT SV_FIFO - - -static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state, - unsigned long timeout) +static inline void _sv_wait(sv_t *sv, spinlock_t *lock) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&sv->waiters, &wait); - __set_current_state(state); + __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock(lock); - schedule_timeout(timeout); + schedule(); remove_wait_queue(&sv->waiters, &wait); } @@ -58,13 +50,7 @@ static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state, #define sv_destroy(sv) \ /*NOTHING*/ #define sv_wait(sv, pri, lock, s) \ - _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) -#define sv_wait_sig(sv, pri, lock, s) \ - _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) -#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \ - _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts)) -#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \ - _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts)) + _sv_wait(sv, lock) #define sv_signal(sv) \ wake_up(&(sv)->waiters) #define sv_broadcast(sv) \ -- cgit v1.2.3-70-g09d2 From df6771bde14551eceeacf331666a92735e0773ac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:30 +0100 Subject: kill dead quota flags Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_quota.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index 12c4ec775af..48965ecaa15 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -84,11 +84,9 @@ typedef struct xfs_dqblk { #define XFS_DQ_USER 0x0001 /* a user quota */ #define XFS_DQ_PROJ 0x0002 /* project quota */ #define XFS_DQ_GROUP 0x0004 /* a group quota */ -#define XFS_DQ_FLOCKED 0x0008 /* flush lock taken */ -#define XFS_DQ_DIRTY 0x0010 /* dquot is dirty */ -#define XFS_DQ_WANT 0x0020 /* for lookup/reclaim race */ -#define XFS_DQ_INACTIVE 0x0040 /* dq off mplist & hashlist */ -#define XFS_DQ_MARKER 0x0080 /* sentinel */ +#define XFS_DQ_DIRTY 0x0008 /* dquot is dirty */ +#define XFS_DQ_WANT 0x0010 /* for lookup/reclaim race */ +#define XFS_DQ_INACTIVE 0x0020 /* dq off mplist & hashlist */ #define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) -- cgit v1.2.3-70-g09d2 From 5efcbb853bc2f051d720a191268f8dd901fea9c2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:31 +0100 Subject: cleanup xfs_sb.h feature flag helpers The various inlines in xfs_sb.h that deal with the superblock version and fature flags were converted from macros a while ago, and this show by the odd coding style full of useless braces and backslashes and the avoidance of 
conditionals. Clean these up to look like normal C code. Signed-off-by: Christoph Hellwig Reviewed-by: Donald Douwsma Signed-off-by: Niv Sardi --- fs/xfs/xfs_sb.h | 166 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 86 insertions(+), 80 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index 3f8cf1587f4..e123c1499b4 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h @@ -296,30 +296,34 @@ typedef enum { #define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS) -#ifdef __KERNEL__ static inline int xfs_sb_good_version(xfs_sb_t *sbp) { - return (((sbp->sb_versionnum >= XFS_SB_VERSION_1) && \ - (sbp->sb_versionnum <= XFS_SB_VERSION_3)) || \ - ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - !((sbp->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) || \ - ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && \ - (sbp->sb_features2 & ~XFS_SB_VERSION2_OKREALBITS))) && \ - (sbp->sb_shared_vn <= XFS_SB_MAX_SHARED_VN))); -} + /* We always support version 1-3 */ + if (sbp->sb_versionnum >= XFS_SB_VERSION_1 && + sbp->sb_versionnum <= XFS_SB_VERSION_3) + return 1; + + /* We support version 4 if all feature bits are supported */ + if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) { + if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) || + ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && + (sbp->sb_features2 & ~XFS_SB_VERSION2_OKREALBITS))) + return 0; + +#ifdef __KERNEL__ + if (sbp->sb_shared_vn > XFS_SB_MAX_SHARED_VN) + return 0; #else -static inline int xfs_sb_good_version(xfs_sb_t *sbp) -{ - return (((sbp->sb_versionnum >= XFS_SB_VERSION_1) && \ - (sbp->sb_versionnum <= XFS_SB_VERSION_3)) || \ - ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - !((sbp->sb_versionnum & ~XFS_SB_VERSION_OKREALBITS) || \ - ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) && \ - (sbp->sb_features2 & ~XFS_SB_VERSION2_OKREALBITS))) && \ - (!(sbp->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) || \ - (sbp->sb_shared_vn <= XFS_SB_MAX_SHARED_VN)))); + if ((sbp->sb_versionnum & XFS_SB_VERSION_SHAREDBIT) && + sbp->sb_shared_vn > XFS_SB_MAX_SHARED_VN) + return 0; +#endif + + return 1; + } + + return 0; } -#endif /* __KERNEL__ */ /* * Detect a mismatched features2 field. Older kernels read/wrote @@ -332,123 +336,127 @@ static inline int xfs_sb_has_mismatched_features2(xfs_sb_t *sbp) static inline unsigned xfs_sb_version_tonew(unsigned v) { - return ((((v) == XFS_SB_VERSION_1) ? \ - 0 : \ - (((v) == XFS_SB_VERSION_2) ? \ - XFS_SB_VERSION_ATTRBIT : \ - (XFS_SB_VERSION_ATTRBIT | XFS_SB_VERSION_NLINKBIT))) | \ - XFS_SB_VERSION_4); + if (v == XFS_SB_VERSION_1) + return XFS_SB_VERSION_4; + + if (v == XFS_SB_VERSION_2) + return XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT; + + return XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT | + XFS_SB_VERSION_NLINKBIT; } static inline unsigned xfs_sb_version_toold(unsigned v) { - return (((v) & (XFS_SB_VERSION_QUOTABIT | XFS_SB_VERSION_ALIGNBIT)) ? \ - 0 : \ - (((v) & XFS_SB_VERSION_NLINKBIT) ? \ - XFS_SB_VERSION_3 : \ - (((v) & XFS_SB_VERSION_ATTRBIT) ? 
\ - XFS_SB_VERSION_2 : \ - XFS_SB_VERSION_1))); + if (v & (XFS_SB_VERSION_QUOTABIT | XFS_SB_VERSION_ALIGNBIT)) + return 0; + if (v & XFS_SB_VERSION_NLINKBIT) + return XFS_SB_VERSION_3; + if (v & XFS_SB_VERSION_ATTRBIT) + return XFS_SB_VERSION_2; + return XFS_SB_VERSION_1; } static inline int xfs_sb_version_hasattr(xfs_sb_t *sbp) { - return ((sbp)->sb_versionnum == XFS_SB_VERSION_2) || \ - ((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ - ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_ATTRBIT)); + return sbp->sb_versionnum == XFS_SB_VERSION_2 || + sbp->sb_versionnum == XFS_SB_VERSION_3 || + (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT)); } static inline void xfs_sb_version_addattr(xfs_sb_t *sbp) { - (sbp)->sb_versionnum = (((sbp)->sb_versionnum == XFS_SB_VERSION_1) ? \ - XFS_SB_VERSION_2 : \ - ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) ? \ - ((sbp)->sb_versionnum | XFS_SB_VERSION_ATTRBIT) : \ - (XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT))); + if (sbp->sb_versionnum == XFS_SB_VERSION_1) + sbp->sb_versionnum = XFS_SB_VERSION_2; + else if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) + sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT; + else + sbp->sb_versionnum = XFS_SB_VERSION_4 | XFS_SB_VERSION_ATTRBIT; } static inline int xfs_sb_version_hasnlink(xfs_sb_t *sbp) { - return ((sbp)->sb_versionnum == XFS_SB_VERSION_3) || \ - ((XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_NLINKBIT)); + return sbp->sb_versionnum == XFS_SB_VERSION_3 || + (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT)); } static inline void xfs_sb_version_addnlink(xfs_sb_t *sbp) { - (sbp)->sb_versionnum = ((sbp)->sb_versionnum <= XFS_SB_VERSION_2 ? \ - XFS_SB_VERSION_3 : \ - ((sbp)->sb_versionnum | XFS_SB_VERSION_NLINKBIT)); + if (sbp->sb_versionnum <= XFS_SB_VERSION_2) + sbp->sb_versionnum = XFS_SB_VERSION_3; + else + sbp->sb_versionnum |= XFS_SB_VERSION_NLINKBIT; } static inline int xfs_sb_version_hasquota(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_QUOTABIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT); } static inline void xfs_sb_version_addquota(xfs_sb_t *sbp) { - (sbp)->sb_versionnum = \ - (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 ? 
\ - ((sbp)->sb_versionnum | XFS_SB_VERSION_QUOTABIT) : \ - (xfs_sb_version_tonew((sbp)->sb_versionnum) | \ - XFS_SB_VERSION_QUOTABIT)); + if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) + sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT; + else + sbp->sb_versionnum = xfs_sb_version_tonew(sbp->sb_versionnum) | + XFS_SB_VERSION_QUOTABIT; } static inline int xfs_sb_version_hasalign(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_ALIGNBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_ALIGNBIT); } static inline int xfs_sb_version_hasdalign(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_DALIGNBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_DALIGNBIT); } static inline int xfs_sb_version_hasshared(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_SHAREDBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_SHAREDBIT); } static inline int xfs_sb_version_hasdirv2(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_DIRV2BIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT); } static inline int xfs_sb_version_haslogv2(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_LOGV2BIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT); } static inline int xfs_sb_version_hasextflgbit(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT); } static inline int xfs_sb_version_hassector(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT); } static inline int xfs_sb_version_hasasciici(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT); } static inline int xfs_sb_version_hasmorebits(xfs_sb_t *sbp) { - return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \ - ((sbp)->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT); + return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4 && + (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT); } /* @@ -463,22 +471,20 @@ static inline int xfs_sb_version_hasmorebits(xfs_sb_t *sbp) static inline int xfs_sb_version_haslazysbcount(xfs_sb_t *sbp) { - return (xfs_sb_version_hasmorebits(sbp) && \ - ((sbp)->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT)); + return xfs_sb_version_hasmorebits(sbp) && + (sbp->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT); } static inline int xfs_sb_version_hasattr2(xfs_sb_t *sbp) { - return (xfs_sb_version_hasmorebits(sbp)) && \ - ((sbp)->sb_features2 & XFS_SB_VERSION2_ATTR2BIT); + return xfs_sb_version_hasmorebits(sbp) && + (sbp->sb_features2 & XFS_SB_VERSION2_ATTR2BIT); } static inline void xfs_sb_version_addattr2(xfs_sb_t *sbp) { - ((sbp)->sb_versionnum = \ - ((sbp)->sb_versionnum | 
XFS_SB_VERSION_MOREBITSBIT), \ - ((sbp)->sb_features2 = \ - ((sbp)->sb_features2 | XFS_SB_VERSION2_ATTR2BIT))); + sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; + sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT; } static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp) -- cgit v1.2.3-70-g09d2 From 6bd16ff27060819d16b3e7abe59b6644b349aea3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:32 +0100 Subject: kill dead inode flags There are a few inode flags around that aren't used anywhere, so remove them. Also update xfsidbg to display all used inode flags correctly. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_aops.c | 11 +++-------- fs/xfs/linux-2.6/xfs_file.c | 10 ++-------- fs/xfs/linux-2.6/xfs_iops.c | 5 ----- fs/xfs/linux-2.6/xfs_super.c | 1 - fs/xfs/xfs_iget.c | 1 - fs/xfs/xfs_inode.h | 17 ++++++----------- 6 files changed, 11 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 8fbc97df360..bb224d07e1e 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -317,14 +317,9 @@ xfs_map_blocks( xfs_iomap_t *mapp, int flags) { - xfs_inode_t *ip = XFS_I(inode); - int error, nmaps = 1; - - error = xfs_iomap(ip, offset, count, - flags, mapp, &nmaps); - if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE))) - xfs_iflags_set(ip, XFS_IMODIFIED); - return -error; + int nmaps = 1; + + return -xfs_iomap(XFS_I(inode), offset, count, flags, mapp, &nmaps); } STATIC_INLINE int diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index d377db05d80..f999d20a429 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -281,11 +281,8 @@ xfs_file_ioctl( unsigned int cmd, unsigned long p) { - int error; struct inode *inode = filp->f_path.dentry->d_inode; - error = xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p); - xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED); /* NOTE: some of the ioctl's return positive #'s as a * byte count indicating success, such as @@ -293,7 +290,7 @@ xfs_file_ioctl( * like most other routines. This means true * errors need to be returned as a negative value. */ - return error; + return xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p); } STATIC long @@ -302,11 +299,8 @@ xfs_file_ioctl_invis( unsigned int cmd, unsigned long p) { - int error; struct inode *inode = filp->f_path.dentry->d_inode; - error = xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p); - xfs_iflags_set(XFS_I(inode), XFS_IMODIFIED); /* NOTE: some of the ioctl's return positive #'s as a * byte count indicating success, such as @@ -314,7 +308,7 @@ xfs_file_ioctl_invis( * like most other routines. This means true * errors need to be returned as a negative value. 
*/ - return error; + return xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p); } /* diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 76b570dd1ab..7aa53fefc67 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -159,8 +159,6 @@ xfs_init_security( } error = xfs_attr_set(ip, name, value, length, ATTR_SECURE); - if (!error) - xfs_iflags_set(ip, XFS_IMODIFIED); kfree(name); kfree(value); @@ -261,7 +259,6 @@ xfs_vn_mknod( error = _ACL_INHERIT(inode, mode, default_acl); if (unlikely(error)) goto out_cleanup_inode; - xfs_iflags_set(ip, XFS_IMODIFIED); _ACL_FREE(default_acl); } @@ -377,7 +374,6 @@ xfs_vn_link( if (unlikely(error)) return -error; - xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED); atomic_inc(&inode->i_count); d_instantiate(dentry, inode); return 0; @@ -888,7 +884,6 @@ xfs_setup_inode( inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_diflags_to_iflags(inode, ip); - xfs_iflags_clear(ip, XFS_IMODIFIED); switch (inode->i_mode & S_IFMT) { case S_IFREG: diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 5389f077874..37f2d11be4a 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1025,7 +1025,6 @@ xfs_fs_clear_inode( XFS_STATS_DEC(vn_active); xfs_inactive(ip); - xfs_iflags_clear(ip, XFS_IMODIFIED); } STATIC void diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index f58e5e000fc..82eb8ed5633 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -362,7 +362,6 @@ again: } xfs_put_perag(mp, pag); - xfs_iflags_set(ip, XFS_IMODIFIED); *ipp = ip; ASSERT(ip->i_df.if_ext_max == diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 03ae96bdae7..ddac1b806c0 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -403,17 +403,12 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) /* * In-core inode flags. */ -#define XFS_IGRIO 0x0001 /* inode used for guaranteed rate i/o */ -#define XFS_IUIOSZ 0x0002 /* inode i/o sizes have been explicitly set */ -#define XFS_IQUIESCE 0x0004 /* we have started quiescing for this inode */ -#define XFS_IRECLAIM 0x0008 /* we have started reclaiming this inode */ -#define XFS_ISTALE 0x0010 /* inode has been staled */ -#define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */ -#define XFS_INEW 0x0040 -#define XFS_IFILESTREAM 0x0080 /* inode is in a filestream directory */ -#define XFS_IMODIFIED 0x0100 /* XFS inode state possibly differs */ - /* to the Linux inode state. */ -#define XFS_ITRUNCATED 0x0200 /* truncated down so flush-on-close */ +#define XFS_IRECLAIM 0x0001 /* we have started reclaiming this inode */ +#define XFS_ISTALE 0x0002 /* inode has been staled */ +#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */ +#define XFS_INEW 0x0008 /* inode has just been allocated */ +#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */ +#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */ /* * Flags for inode locking. 
-- cgit v1.2.3-70-g09d2 From e88f11abe09d14718b82a991db118c5e485aa897 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:33 +0100 Subject: remove unused m_inode_quiesce member from struct xfs_mount Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_mount.h | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 38252554b1c..cfdac80f4fc 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -302,7 +302,6 @@ typedef struct xfs_mount { int m_attr_magicpct;/* 37% of the blocksize */ int m_dir_magicpct; /* 37% of the dir blocksize */ __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ - __uint8_t m_inode_quiesce;/* call quiesce on new inodes. */ __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */ int m_dirblksize; /* directory block sz--bytes */ -- cgit v1.2.3-70-g09d2 From b56757becf8bc62292263a24a23cf55edb4be55f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:34 +0100 Subject: remove leftovers of shared read-only support We never supported shared read-only filesystems, so remove the dead code left over from IRIX for it. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_super.c | 29 ----------------------------- fs/xfs/xfs_mount.c | 26 -------------------------- fs/xfs/xfs_mount.h | 2 -- 3 files changed, 57 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 37f2d11be4a..c4f788e1119 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1388,35 +1388,6 @@ xfs_finish_flags( return XFS_ERROR(EROFS); } -#if 0 /* shared mounts were never supported on Linux */ - /* - * check for shared mount. - */ - if (ap->flags & XFSMNT_SHARED) { - if (!xfs_sb_version_hasshared(&mp->m_sb)) - return XFS_ERROR(EINVAL); - - /* - * For IRIX 6.5, shared mounts must have the shared - * version bit set, have the persistent readonly - * field set, must be version 0 and can only be mounted - * read-only. - */ - if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) || - (mp->m_sb.sb_shared_vn != 0)) - return XFS_ERROR(EINVAL); - - mp->m_flags |= XFS_MOUNT_SHARED; - - /* - * Shared XFS V0 can't deal with DMI. Return EINVAL. 
- */ - if (mp->m_sb.sb_shared_vn == 0 && - (mp->m_flags & XFS_MOUNT_DMAPI)) - return XFS_ERROR(EINVAL); - } -#endif - return 0; } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 3f999b1c038..1672ff56a00 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1352,24 +1352,6 @@ xfs_log_sbcount( return error; } -STATIC void -xfs_mark_shared_ro( - xfs_mount_t *mp, - xfs_buf_t *bp) -{ - xfs_dsb_t *sb = XFS_BUF_TO_SBP(bp); - __uint16_t version; - - if (!(sb->sb_flags & XFS_SBF_READONLY)) - sb->sb_flags |= XFS_SBF_READONLY; - - version = be16_to_cpu(sb->sb_versionnum); - if ((version & XFS_SB_VERSION_NUMBITS) != XFS_SB_VERSION_4 || - !(version & XFS_SB_VERSION_SHAREDBIT)) - version |= XFS_SB_VERSION_SHAREDBIT; - sb->sb_versionnum = cpu_to_be16(version); -} - int xfs_unmountfs_writesb(xfs_mount_t *mp) { @@ -1385,12 +1367,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) sbp = xfs_getsb(mp, 0); - /* - * mark shared-readonly if desired - */ - if (mp->m_mk_sharedro) - xfs_mark_shared_ro(mp, sbp); - XFS_BUF_UNDONE(sbp); XFS_BUF_UNREAD(sbp); XFS_BUF_UNDELAYWRITE(sbp); @@ -1402,8 +1378,6 @@ xfs_unmountfs_writesb(xfs_mount_t *mp) if (error) xfs_ioerror_alert("xfs_unmountfs_writesb", mp, sbp, XFS_BUF_ADDR(sbp)); - if (error && mp->m_mk_sharedro) - xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); xfs_buf_relse(sbp); } return error; diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index cfdac80f4fc..2ab2df57b69 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -301,7 +301,6 @@ typedef struct xfs_mount { int m_sinoalign; /* stripe unit inode alignment */ int m_attr_magicpct;/* 37% of the blocksize */ int m_dir_magicpct; /* 37% of the dir blocksize */ - __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ __uint8_t m_sectbb_log; /* sectlog - BBSHIFT */ const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */ int m_dirblksize; /* directory block sz--bytes */ @@ -349,7 +348,6 @@ typedef struct xfs_mount { #define XFS_MOUNT_ATTR2 (1ULL << 8) /* allow use of attr2 format */ #define XFS_MOUNT_GRPID (1ULL << 9) /* group-ID assigned from directory */ #define XFS_MOUNT_NORECOVERY (1ULL << 10) /* no recovery - dirty fs */ -#define XFS_MOUNT_SHARED (1ULL << 11) /* shared mount */ #define XFS_MOUNT_DFLT_IOSIZE (1ULL << 12) /* set default i/o size */ #define XFS_MOUNT_OSYNCISOSYNC (1ULL << 13) /* o_sync is REALLY o_sync */ /* osyncisdsync is now default*/ -- cgit v1.2.3-70-g09d2 From 070c4616ec62fc207e2aeef9d0f28af294c651d0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:35 +0100 Subject: use xfs_trans_ijoin in xfs_trans_iget Use xfs_trans_ijoin in xfs_trans_iget in case we need to join an inode into a transaction instead of opencoding it. Based on a discussion with and an incomplete patch from Niv Sardi. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/xfs_trans_inode.c | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 2a1c0f071f9..23d276af2e0 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -85,7 +85,6 @@ xfs_trans_iget( { int error; xfs_inode_t *ip; - xfs_inode_log_item_t *iip; /* * If the transaction pointer is NULL, just call the normal @@ -138,34 +137,7 @@ xfs_trans_iget( } ASSERT(ip != NULL); - /* - * Get a log_item_desc to point at the new item. 
- */ - if (ip->i_itemp == NULL) - xfs_inode_item_init(ip, mp); - iip = ip->i_itemp; - (void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip)); - - xfs_trans_inode_broot_debug(ip); - - /* - * If the IO lock has been acquired, mark that in - * the inode log item so we'll know to unlock it - * when the transaction commits. - */ - ASSERT(iip->ili_flags == 0); - if (lock_flags & XFS_IOLOCK_EXCL) { - iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL; - } else if (lock_flags & XFS_IOLOCK_SHARED) { - iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED; - } - - /* - * Initialize i_transp so we can find it with xfs_inode_incore() - * above. - */ - ip->i_transp = tp; - + xfs_trans_ijoin(tp, ip, lock_flags); *ipp = ip; return 0; } -- cgit v1.2.3-70-g09d2 From e57481dc269cd3773b22f53bfb869308780a7bf1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:36 +0100 Subject: no explicit xfs_iflush for special inodes during unmount Currently we explicitly call xfs_iflush on the quota, real-time and root inodes from xfs_unmount_flush. But we just called xfs_sync_inodes with SYNC_ATTR and do an XFS_bflush aka xfs_flush_buftarg to make sure all inodes are on disk already, so there is no need for these special cases. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_vnode.h | 5 ---- fs/xfs/quota/xfs_qm.c | 44 ++++++---------------------- fs/xfs/quota/xfs_qm.h | 2 +- fs/xfs/xfs_mount.h | 2 +- fs/xfs/xfs_vfsops.c | 68 ++++---------------------------------------- 5 files changed, 15 insertions(+), 106 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index fb49e0f42d3..314ca18bc60 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -65,11 +65,6 @@ extern void vn_iowait(struct xfs_inode *ip); extern void vn_iowake(struct xfs_inode *ip); extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); -static inline int vn_count(struct inode *vp) -{ - return atomic_read(&vp->i_count); -} - #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 5b198d15e76..6b13960cf31 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -395,13 +395,10 @@ xfs_qm_mount_quotas( /* * Called from the vfsops layer. */ -int +void xfs_qm_unmount_quotas( xfs_mount_t *mp) { - xfs_inode_t *uqp, *gqp; - int error = 0; - /* * Release the dquots that root inode, et al might be holding, * before we flush quotas and blow away the quotainfo structure. @@ -414,43 +411,18 @@ xfs_qm_unmount_quotas( xfs_qm_dqdetach(mp->m_rsumip); /* - * Flush out the quota inodes. + * Release the quota inodes. 
*/ - uqp = gqp = NULL; if (mp->m_quotainfo) { - if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) { - xfs_ilock(uqp, XFS_ILOCK_EXCL); - xfs_iflock(uqp); - error = xfs_iflush(uqp, XFS_IFLUSH_SYNC); - xfs_iunlock(uqp, XFS_ILOCK_EXCL); - if (unlikely(error == EFSCORRUPTED)) { - XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)", - XFS_ERRLEVEL_LOW, mp); - goto out; - } + if (mp->m_quotainfo->qi_uquotaip) { + IRELE(mp->m_quotainfo->qi_uquotaip); + mp->m_quotainfo->qi_uquotaip = NULL; } - if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) { - xfs_ilock(gqp, XFS_ILOCK_EXCL); - xfs_iflock(gqp); - error = xfs_iflush(gqp, XFS_IFLUSH_SYNC); - xfs_iunlock(gqp, XFS_ILOCK_EXCL); - if (unlikely(error == EFSCORRUPTED)) { - XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)", - XFS_ERRLEVEL_LOW, mp); - goto out; - } + if (mp->m_quotainfo->qi_gquotaip) { + IRELE(mp->m_quotainfo->qi_gquotaip); + mp->m_quotainfo->qi_gquotaip = NULL; } } - if (uqp) { - IRELE(uqp); - mp->m_quotainfo->qi_uquotaip = NULL; - } - if (gqp) { - IRELE(gqp); - mp->m_quotainfo->qi_gquotaip = NULL; - } -out: - return XFS_ERROR(error); } /* diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index 4f2de977172..ddf09166387 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h @@ -167,7 +167,7 @@ extern void xfs_qm_destroy_quotainfo(xfs_mount_t *); extern void xfs_qm_mount_quotas(xfs_mount_t *); extern int xfs_qm_quotacheck(xfs_mount_t *); extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *); -extern int xfs_qm_unmount_quotas(xfs_mount_t *); +extern void xfs_qm_unmount_quotas(xfs_mount_t *); extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t); extern int xfs_qm_sync(xfs_mount_t *, int); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 2ab2df57b69..4f64fd160cc 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -117,7 +117,7 @@ struct xfs_quotainfo; typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *); typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint); -typedef int (*xfs_qmunmount_t)(struct xfs_mount *); +typedef void (*xfs_qmunmount_t)(struct xfs_mount *); typedef void (*xfs_qmdone_t)(struct xfs_mount *); typedef void (*xfs_dqrele_t)(struct xfs_dquot *); typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index cad3da36fb2..559fb8d5108 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -68,74 +68,16 @@ xfs_unmount_flush( rid of. */ int relocation) /* Called from vfs relocation. */ { - xfs_inode_t *rip = mp->m_rootip; - xfs_inode_t *rbmip; - xfs_inode_t *rsumip = NULL; - int error; - - xfs_ilock(rip, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); - xfs_iflock(rip); - - /* - * Flush out the real time inodes. 
- */ - if ((rbmip = mp->m_rbmip) != NULL) { - xfs_ilock(rbmip, XFS_ILOCK_EXCL); - xfs_iflock(rbmip); - error = xfs_iflush(rbmip, XFS_IFLUSH_SYNC); - xfs_iunlock(rbmip, XFS_ILOCK_EXCL); - - if (error == EFSCORRUPTED) - goto fscorrupt_out; - - ASSERT(vn_count(VFS_I(rbmip)) == 1); - - rsumip = mp->m_rsumip; - xfs_ilock(rsumip, XFS_ILOCK_EXCL); - xfs_iflock(rsumip); - error = xfs_iflush(rsumip, XFS_IFLUSH_SYNC); - xfs_iunlock(rsumip, XFS_ILOCK_EXCL); - - if (error == EFSCORRUPTED) - goto fscorrupt_out; - - ASSERT(vn_count(VFS_I(rsumip)) == 1); - } - - /* - * Synchronously flush root inode to disk - */ - error = xfs_iflush(rip, XFS_IFLUSH_SYNC); - if (error == EFSCORRUPTED) - goto fscorrupt_out2; - - if (vn_count(VFS_I(rip)) != 1 && !relocation) { - xfs_iunlock(rip, XFS_ILOCK_EXCL); - return XFS_ERROR(EBUSY); - } - /* * Release dquot that rootinode, rbmino and rsumino might be holding, * flush and purge the quota inodes. */ - error = XFS_QM_UNMOUNT(mp); - if (error == EFSCORRUPTED) - goto fscorrupt_out2; + XFS_QM_UNMOUNT(mp); - if (rbmip) { - IRELE(rbmip); - IRELE(rsumip); - } + if (mp->m_rbmip) + IRELE(mp->m_rbmip); + if (mp->m_rsumip) + IRELE(mp->m_rsumip); - xfs_iunlock(rip, XFS_ILOCK_EXCL); return 0; - -fscorrupt_out: - xfs_ifunlock(rip); - -fscorrupt_out2: - xfs_iunlock(rip, XFS_ILOCK_EXCL); - - return XFS_ERROR(EFSCORRUPTED); } - -- cgit v1.2.3-70-g09d2 From f95099ba5ae06b96a9c17ef93cc655f686d79077 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:37 +0100 Subject: kill xfs_unmount_flush There's almost nothing left in this function, instead move the IRELE on the realtime inodes and the call to XFS_QM_UNMOUNT into xfs_unmountfs. For the regular unmount case that means it now also happens after dmapi notification, but otherwise there is no difference in behaviour.
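As a rough illustration of the consolidation described above, the unmount-time release order that xfs_unmountfs takes over looks like the sketch below. The helper name is invented for illustration only; the calls mirror the xfs_mount.c hunk in this patch, with locking and error handling omitted.

	/* Hypothetical helper; simplified from the xfs_mount.c hunk below. */
	static void example_release_special_inodes(struct xfs_mount *mp)
	{
		/* drop the dquot references and release the quota inodes */
		XFS_QM_UNMOUNT(mp);

		/* release the realtime bitmap and summary inodes, if present */
		if (mp->m_rbmip)
			IRELE(mp->m_rbmip);
		if (mp->m_rsumip)
			IRELE(mp->m_rsumip);

		/* finally drop the reference on the root inode */
		IRELE(mp->m_rootip);
	}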
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/Makefile | 1 - fs/xfs/linux-2.6/xfs_super.c | 5 --- fs/xfs/xfs_mount.c | 10 ++++++ fs/xfs/xfs_mount.h | 1 - fs/xfs/xfs_vfsops.c | 83 -------------------------------------------- 5 files changed, 10 insertions(+), 90 deletions(-) delete mode 100644 fs/xfs/xfs_vfsops.c (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 51b87de97f8..a5a9ef0ce84 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -85,7 +85,6 @@ xfs-y += xfs_alloc.o \ xfs_trans_inode.o \ xfs_trans_item.o \ xfs_utils.o \ - xfs_vfsops.o \ xfs_vnodeops.o \ xfs_rw.o \ xfs_dmops.o \ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c4f788e1119..4ebbd6820e7 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1043,7 +1043,6 @@ xfs_fs_put_super( struct xfs_mount *mp = XFS_M(sb); struct xfs_inode *rip = mp->m_rootip; int unmount_event_flags = 0; - int error; xfs_syncd_stop(mp); xfs_sync_inodes(mp, SYNC_ATTR|SYNC_DELWRI); @@ -1071,8 +1070,6 @@ xfs_fs_put_super( xfs_filestream_unmount(mp); XFS_bflush(mp->m_ddev_targp); - error = xfs_unmount_flush(mp, 0); - WARN_ON(error); if (mp->m_flags & XFS_MOUNT_DMAPI) { XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0, @@ -1535,8 +1532,6 @@ xfs_fs_fill_super( xfs_filestream_unmount(mp); XFS_bflush(mp->m_ddev_targp); - error = xfs_unmount_flush(mp, 0); - WARN_ON(error); xfs_unmountfs(mp); goto out_free_sb; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 1672ff56a00..3c97c6463a4 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1220,6 +1220,16 @@ xfs_unmountfs( __uint64_t resblks; int error; + /* + * Release dquot that rootinode, rbmino and rsumino might be holding, + * and release the quota inodes. + */ + XFS_QM_UNMOUNT(mp); + + if (mp->m_rbmip) + IRELE(mp->m_rbmip); + if (mp->m_rsumip) + IRELE(mp->m_rsumip); IRELE(mp->m_rootip); /* diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 4f64fd160cc..ae5da88ace2 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -509,7 +509,6 @@ extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); extern void xfs_unmountfs(xfs_mount_t *); extern int xfs_unmountfs_writesb(xfs_mount_t *); -extern int xfs_unmount_flush(xfs_mount_t *, int); extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t, int64_t, int); diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c deleted file mode 100644 index 559fb8d5108..00000000000 --- a/fs/xfs/xfs_vfsops.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright (c) 2000-2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_fs.h" -#include "xfs_types.h" -#include "xfs_bit.h" -#include "xfs_log.h" -#include "xfs_inum.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_ag.h" -#include "xfs_dir2.h" -#include "xfs_dmapi.h" -#include "xfs_mount.h" -#include "xfs_da_btree.h" -#include "xfs_bmap_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_alloc_btree.h" -#include "xfs_dir2_sf.h" -#include "xfs_attr_sf.h" -#include "xfs_dinode.h" -#include "xfs_inode.h" -#include "xfs_inode_item.h" -#include "xfs_btree.h" -#include "xfs_alloc.h" -#include "xfs_ialloc.h" -#include "xfs_quota.h" -#include "xfs_error.h" -#include "xfs_bmap.h" -#include "xfs_rw.h" -#include "xfs_buf_item.h" -#include "xfs_log_priv.h" -#include "xfs_dir2_trace.h" -#include "xfs_extfree_item.h" -#include "xfs_acl.h" -#include "xfs_attr.h" -#include "xfs_mru_cache.h" -#include "xfs_filestream.h" -#include "xfs_fsops.h" -#include "xfs_vnodeops.h" -#include "xfs_utils.h" -#include "xfs_sync.h" - - -/* - * xfs_unmount_flush implements a set of flush operation on special - * inodes, which are needed as a separate set of operations so that - * they can be called as part of relocation process. - */ -int -xfs_unmount_flush( - xfs_mount_t *mp, /* Mount structure we are getting - rid of. */ - int relocation) /* Called from vfs relocation. */ -{ - /* - * Release dquot that rootinode, rbmino and rsumino might be holding, - * flush and purge the quota inodes. - */ - XFS_QM_UNMOUNT(mp); - - if (mp->m_rbmip) - IRELE(mp->m_rbmip); - if (mp->m_rsumip) - IRELE(mp->m_rsumip); - - return 0; -} -- cgit v1.2.3-70-g09d2 From 583fa586f0e4a8222dd091ce971b85c1364f3d92 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:38 +0100 Subject: kill vn_ioerror There's just one caller of this helper, and it's much cleaner to just merge the xfs_do_force_shutdown call into it. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_aops.c | 17 +++++++++++++---- fs/xfs/linux-2.6/xfs_vnode.c | 16 ---------------- fs/xfs/linux-2.6/xfs_vnode.h | 1 - 3 files changed, 13 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index bb224d07e1e..f35dba9bf1d 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -146,16 +146,25 @@ xfs_destroy_ioend( xfs_ioend_t *ioend) { struct buffer_head *bh, *next; + struct xfs_inode *ip = XFS_I(ioend->io_inode); for (bh = ioend->io_buffer_head; bh; bh = next) { next = bh->b_private; bh->b_end_io(bh, !ioend->io_error); } - if (unlikely(ioend->io_error)) { - vn_ioerror(XFS_I(ioend->io_inode), ioend->io_error, - __FILE__,__LINE__); + + /* + * Volume managers supporting multiple paths can send back ENODEV + * when the final path disappears. In this case continuing to fill + * the page cache with dirty data which cannot be written out is + * evil, so prevent that. 
+ */ + if (unlikely(ioend->io_error == -ENODEV)) { + xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, + __FILE__, __LINE__); } - vn_iowake(XFS_I(ioend->io_inode)); + + vn_iowake(ip); mempool_free(ioend, xfs_ioend_pool); } diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index ad18262d651..a8cf97a4319 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -66,22 +66,6 @@ vn_iowake( wake_up(vptosync(ip)); } -/* - * Volume managers supporting multiple paths can send back ENODEV when the - * final path disappears. In this case continuing to fill the page cache - * with dirty data which cannot be written out is evil, so prevent that. - */ -void -vn_ioerror( - xfs_inode_t *ip, - int error, - char *f, - int l) -{ - if (unlikely(error == -ENODEV)) - xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); -} - #ifdef XFS_INODE_TRACE #define KTRACE_ENTER(ip, vk, s, line, ra) \ diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 314ca18bc60..07fed8837db 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -63,7 +63,6 @@ extern void vn_init(void); */ extern void vn_iowait(struct xfs_inode *ip); extern void vn_iowake(struct xfs_inode *ip); -extern void vn_ioerror(struct xfs_inode *ip, int error, char *f, int l); #define IHOLD(ip) \ do { \ -- cgit v1.2.3-70-g09d2 From 25e41b3d521f52771354a718042a753a3e77df0a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:39 +0100 Subject: move vn_iowait / vn_iowake into xfs_aops.c The whole machinery to wait on I/O completion is related to the I/O path and should be there instead of in xfs_vnode.c. Also give the functions more descriptive names. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/linux-2.6/xfs_aops.c | 38 ++++++++++++++++++++++++++++++++++++-- fs/xfs/linux-2.6/xfs_aops.h | 3 +++ fs/xfs/linux-2.6/xfs_super.c | 2 +- fs/xfs/linux-2.6/xfs_sync.c | 2 +- fs/xfs/linux-2.6/xfs_vnode.c | 34 ---------------------------------- fs/xfs/linux-2.6/xfs_vnode.h | 10 ---------- fs/xfs/xfs_inode.c | 6 +++--- fs/xfs/xfs_vnodeops.c | 7 ++++--- 8 files changed, 48 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index f35dba9bf1d..de3a198f771 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -42,6 +42,40 @@ #include #include + +/* + * Prime number of hash buckets since address is used as the key. 
+ */ +#define NVSYNC 37 +#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC]) +static wait_queue_head_t xfs_ioend_wq[NVSYNC]; + +void __init +xfs_ioend_init(void) +{ + int i; + + for (i = 0; i < NVSYNC; i++) + init_waitqueue_head(&xfs_ioend_wq[i]); +} + +void +xfs_ioend_wait( + xfs_inode_t *ip) +{ + wait_queue_head_t *wq = to_ioend_wq(ip); + + wait_event(*wq, (atomic_read(&ip->i_iocount) == 0)); +} + +STATIC void +xfs_ioend_wake( + xfs_inode_t *ip) +{ + if (atomic_dec_and_test(&ip->i_iocount)) + wake_up(to_ioend_wq(ip)); +} + STATIC void xfs_count_page_state( struct page *page, @@ -164,7 +198,7 @@ xfs_destroy_ioend( __FILE__, __LINE__); } - vn_iowake(ip); + xfs_ioend_wake(ip); mempool_free(ioend, xfs_ioend_pool); } @@ -516,7 +550,7 @@ xfs_cancel_ioend( unlock_buffer(bh); } while ((bh = next_bh) != NULL); - vn_iowake(XFS_I(ioend->io_inode)); + xfs_ioend_wake(XFS_I(ioend->io_inode)); mempool_free(ioend, xfs_ioend_pool); } while ((ioend = next) != NULL); } diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 3ba0631a381..7b26f5ff969 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -43,4 +43,7 @@ typedef struct xfs_ioend { extern const struct address_space_operations xfs_address_space_operations; extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int); +extern void xfs_ioend_init(void); +extern void xfs_ioend_wait(struct xfs_inode *); + #endif /* __XFS_AOPS_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 4ebbd6820e7..36f6cc703ef 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1822,7 +1822,7 @@ init_xfs_fs(void) XFS_BUILD_OPTIONS " enabled\n"); ktrace_init(64); - vn_init(); + xfs_ioend_init(); xfs_dir_startup(); error = xfs_init_zones(); diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index d12d31b86fa..ca5bd2951a8 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -133,7 +133,7 @@ xfs_sync_inodes_ag( lock_flags |= XFS_IOLOCK_SHARED; error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE); if (flags & SYNC_IOWAIT) - vn_iowait(ip); + xfs_ioend_wait(ip); } xfs_ilock(ip, XFS_ILOCK_SHARED); diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c index a8cf97a4319..f6d14112279 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ b/fs/xfs/linux-2.6/xfs_vnode.c @@ -32,40 +32,6 @@ #include "xfs_mount.h" -/* - * Dedicated vnode inactive/reclaim sync wait queues. - * Prime number of hash buckets since address is used as the key. - */ -#define NVSYNC 37 -#define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) -static wait_queue_head_t vsync[NVSYNC]; - -void __init -vn_init(void) -{ - int i; - - for (i = 0; i < NVSYNC; i++) - init_waitqueue_head(&vsync[i]); -} - -void -vn_iowait( - xfs_inode_t *ip) -{ - wait_queue_head_t *wq = vptosync(ip); - - wait_event(*wq, (atomic_read(&ip->i_iocount) == 0)); -} - -void -vn_iowake( - xfs_inode_t *ip) -{ - if (atomic_dec_and_test(&ip->i_iocount)) - wake_up(vptosync(ip)); -} - #ifdef XFS_INODE_TRACE #define KTRACE_ENTER(ip, vk, s, line, ra) \ diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 07fed8837db..bd3e05c4790 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -54,16 +54,6 @@ struct attrlist_cursor_kern; Prevent VM access to the pages until the operation completes. 
*/ - -extern void vn_init(void); - -/* - * Yeah, these don't take vnode anymore at all, all this should be - * cleaned up at some point. - */ -extern void vn_iowait(struct xfs_inode *ip); -extern void vn_iowake(struct xfs_inode *ip); - #define IHOLD(ip) \ do { \ ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 4e664f57860..063da344e18 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1322,8 +1322,8 @@ xfs_itrunc_trace( * direct I/O with the truncate operation. Also, because we hold * the IOLOCK in exclusive mode, we prevent new direct I/Os from being * started until the truncate completes and drops the lock. Essentially, - * the vn_iowait() call forms an I/O barrier that provides strict ordering - * between direct I/Os and the truncate operation. + * the xfs_ioend_wait() call forms an I/O barrier that provides strict + * ordering between direct I/Os and the truncate operation. * * The flags parameter can have either the value XFS_ITRUNC_DEFINITE * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used @@ -1354,7 +1354,7 @@ xfs_itruncate_start( /* wait for the completion of any pending DIOs */ if (new_size == 0 || new_size < ip->i_size) - vn_iowait(ip); + xfs_ioend_wait(ip); /* * Call toss_pages or flushinval_pages to get rid of pages diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index b29a0eb9c0f..2d57aae0e31 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -338,7 +338,7 @@ xfs_setattr( } /* wait for all I/O to complete */ - vn_iowait(ip); + xfs_ioend_wait(ip); if (!code) code = xfs_itruncate_data(ip, iattr->ia_size); @@ -2758,7 +2758,7 @@ xfs_reclaim( return 0; } - vn_iowait(ip); + xfs_ioend_wait(ip); ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0); @@ -3149,7 +3149,8 @@ xfs_free_file_space( need_iolock = 0; if (need_iolock) { xfs_ilock(ip, XFS_IOLOCK_EXCL); - vn_iowait(ip); /* wait for the completion of any pending DIOs */ + /* wait for the completion of any pending DIOs */ + xfs_ioend_wait(ip); } rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); -- cgit v1.2.3-70-g09d2 From 5a8d0f3c7af801c7263fbba39952504d6fc7ff60 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 Dec 2008 12:20:40 +0100 Subject: move inode tracing out of xfs_vnode. Move the inode tracing into xfs_iget.c / xfs_inode.h and kill xfs_vnode.c now that it's empty. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Niv Sardi --- fs/xfs/Makefile | 1 - fs/xfs/linux-2.6/xfs_vnode.c | 82 -------------------------------------------- fs/xfs/linux-2.6/xfs_vnode.h | 48 -------------------------- fs/xfs/xfs_iget.c | 48 ++++++++++++++++++++++++++ fs/xfs/xfs_inode.h | 45 ++++++++++++++++++++++++ 5 files changed, 93 insertions(+), 131 deletions(-) delete mode 100644 fs/xfs/linux-2.6/xfs_vnode.c (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index a5a9ef0ce84..c3dc491fff8 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -107,7 +107,6 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ xfs_lrw.o \ xfs_super.o \ xfs_sync.o \ - xfs_vnode.o \ xfs_xattr.o) # Objects in support/ diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c deleted file mode 100644 index f6d14112279..00000000000 --- a/fs/xfs/linux-2.6/xfs_vnode.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. - * All Rights Reserved. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_vnodeops.h" -#include "xfs_bmap_btree.h" -#include "xfs_inode.h" - -/* - * And this gunk is needed for xfs_mount.h" - */ -#include "xfs_log.h" -#include "xfs_trans.h" -#include "xfs_sb.h" -#include "xfs_dmapi.h" -#include "xfs_inum.h" -#include "xfs_ag.h" -#include "xfs_mount.h" - - -#ifdef XFS_INODE_TRACE - -#define KTRACE_ENTER(ip, vk, s, line, ra) \ - ktrace_enter( (ip)->i_trace, \ -/* 0 */ (void *)(__psint_t)(vk), \ -/* 1 */ (void *)(s), \ -/* 2 */ (void *)(__psint_t) line, \ -/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \ -/* 4 */ (void *)(ra), \ -/* 5 */ NULL, \ -/* 6 */ (void *)(__psint_t)current_cpu(), \ -/* 7 */ (void *)(__psint_t)current_pid(), \ -/* 8 */ (void *)__return_address, \ -/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL) - -/* - * Vnode tracing code. - */ -void -_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra); -} - -void -_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra); -} - -void -xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra); -} - -void -_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra); -} - -void -xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) -{ - KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra); -} -#endif /* XFS_INODE_TRACE */ diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index bd3e05c4790..f65983a230d 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -54,19 +54,6 @@ struct attrlist_cursor_kern; Prevent VM access to the pages until the operation completes. */ -#define IHOLD(ip) \ -do { \ - ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ - atomic_inc(&(VFS_I(ip)->i_count)); \ - xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ -} while (0) - -#define IRELE(ip) \ -do { \ - xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ - iput(VFS_I(ip)); \ -} while (0) - /* * Dealing with bad inodes */ @@ -103,39 +90,4 @@ static inline void vn_atime_to_time_t(struct inode *vp, time_t *tt) PAGECACHE_TAG_DIRTY) -/* - * Tracking vnode activity. 
- */ -#if defined(XFS_INODE_TRACE) - -#define INODE_TRACE_SIZE 16 /* number of trace entries */ -#define INODE_KTRACE_ENTRY 1 -#define INODE_KTRACE_EXIT 2 -#define INODE_KTRACE_HOLD 3 -#define INODE_KTRACE_REF 4 -#define INODE_KTRACE_RELE 5 - -extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *); -extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *); -extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); -extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); -extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); -#define xfs_itrace_entry(ip) \ - _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address) -#define xfs_itrace_exit(ip) \ - _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address) -#define xfs_itrace_exit_tag(ip, tag) \ - _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) -#define xfs_itrace_ref(ip) \ - _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address) - -#else -#define xfs_itrace_entry(a) -#define xfs_itrace_exit(a) -#define xfs_itrace_exit_tag(a, b) -#define xfs_itrace_hold(a, b, c, d) -#define xfs_itrace_ref(a) -#define xfs_itrace_rele(a, b, c, d) -#endif - #endif /* __XFS_VNODE_H__ */ diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 82eb8ed5633..e2fb6210d4c 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -805,3 +805,51 @@ xfs_isilocked( } #endif +#ifdef XFS_INODE_TRACE + +#define KTRACE_ENTER(ip, vk, s, line, ra) \ + ktrace_enter((ip)->i_trace, \ +/* 0 */ (void *)(__psint_t)(vk), \ +/* 1 */ (void *)(s), \ +/* 2 */ (void *)(__psint_t) line, \ +/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \ +/* 4 */ (void *)(ra), \ +/* 5 */ NULL, \ +/* 6 */ (void *)(__psint_t)current_cpu(), \ +/* 7 */ (void *)(__psint_t)current_pid(), \ +/* 8 */ (void *)__return_address, \ +/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL) + +/* + * Vnode tracing code. 
+ */ +void +_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) +{ + KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra); +} + +void +_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) +{ + KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra); +} + +void +xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra); +} + +void +_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra); +} + +void +xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) +{ + KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra); +} +#endif /* XFS_INODE_TRACE */ diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ddac1b806c0..558253e6fb4 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -536,6 +536,51 @@ void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint); void xfs_synchronize_atime(xfs_inode_t *); void xfs_mark_inode_dirty_sync(xfs_inode_t *); +#if defined(XFS_INODE_TRACE) + +#define INODE_TRACE_SIZE 16 /* number of trace entries */ +#define INODE_KTRACE_ENTRY 1 +#define INODE_KTRACE_EXIT 2 +#define INODE_KTRACE_HOLD 3 +#define INODE_KTRACE_REF 4 +#define INODE_KTRACE_RELE 5 + +extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *); +extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *); +extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); +extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); +extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); +#define xfs_itrace_entry(ip) \ + _xfs_itrace_entry(ip, __func__, (inst_t *)__return_address) +#define xfs_itrace_exit(ip) \ + _xfs_itrace_exit(ip, __func__, (inst_t *)__return_address) +#define xfs_itrace_exit_tag(ip, tag) \ + _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) +#define xfs_itrace_ref(ip) \ + _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address) + +#else +#define xfs_itrace_entry(a) +#define xfs_itrace_exit(a) +#define xfs_itrace_exit_tag(a, b) +#define xfs_itrace_hold(a, b, c, d) +#define xfs_itrace_ref(a) +#define xfs_itrace_rele(a, b, c, d) +#endif + +#define IHOLD(ip) \ +do { \ + ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ + atomic_inc(&(VFS_I(ip)->i_count)); \ + xfs_itrace_hold((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ +} while (0) + +#define IRELE(ip) \ +do { \ + xfs_itrace_rele((ip), __FILE__, __LINE__, (inst_t *)__return_address); \ + iput(VFS_I(ip)); \ +} while (0) + #endif /* __KERNEL__ */ int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, -- cgit v1.2.3-70-g09d2 From 995be04548f62c8e6b447410cd28b0666614b461 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 4 Dec 2008 17:04:18 +0300 Subject: UBIFS: fix section mismatch This patch fixes the following section mismatch: WARNING: fs/ubifs/ubifs.o(.init.text+0xec): Section mismatch in reference from the function init_module() to the function .exit.text:ubifs_compressors_exit() Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 2 +- fs/ubifs/ubifs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 4afb3ea24d4..4c90ee2aef4 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -244,7 +244,7 @@ out_lzo: /** * ubifs_compressors_exit - de-initialize UBIFS compressors. 
*/ -void __exit ubifs_compressors_exit(void) +void ubifs_compressors_exit(void) { compr_exit(&lzo_compr); compr_exit(&zlib_compr); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index e658b06fd45..055c6b52d2f 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1700,7 +1700,7 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* compressor.c */ int __init ubifs_compressors_init(void); -void __exit ubifs_compressors_exit(void); +void ubifs_compressors_exit(void); void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, int *compr_type); int ubifs_decompress(const void *buf, int len, void *out, int *out_len, -- cgit v1.2.3-70-g09d2 From 8bb57320f3f5dd8c2373c0b66e4950391e037109 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 4 Dec 2008 14:23:27 +0100 Subject: [XFS] Fix compile with CONFIG_COMPAT enabled Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_ioctl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index a3446aad070..d92131c827b 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -74,7 +74,7 @@ xfs_file_compat_ioctl( unsigned long arg); extern long -xfs_file_compat_ioctl_invis( +xfs_file_compat_invis_ioctl( struct file *file, unsigned int cmd, unsigned long arg); -- cgit v1.2.3-70-g09d2 From 6a0775a991d5597ce98f1e15373288ea133cc793 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 4 Dec 2008 09:09:34 +1100 Subject: [XFS] Fix hang after disallowed rename across directory quota domains When project quota is active and is being used for directory tree quota control, we disallow rename outside the current directory tree. This requires a check to be made after all the inodes involved in the rename are locked. We fail to unlock the inodes correctly if we disallow the rename when the target is outside the current directory tree. This results in a hang on the next access to the inodes involved in failed rename. Reported-by: Arkadiusz Miskiewicz Signed-off-by: Dave Chinner Tested-by: Arkadiusz Miskiewicz Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_rename.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 02f0e8f53a9..10642fcbb1f 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c @@ -212,7 +212,7 @@ xfs_rename( if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { error = XFS_ERROR(EXDEV); - xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + xfs_rename_unlock4(inodes, XFS_ILOCK_EXCL); xfs_trans_cancel(tp, cancel_flags); goto std_return; } -- cgit v1.2.3-70-g09d2 From c6422617a1c0d7787e515748b01f594fe43aea98 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Fri, 5 Dec 2008 13:16:15 +1100 Subject: [XFS] Check return value of xfs_buf_get_noaddr() We check the return value of all other calls to xfs_buf_get_noaddr(). Make sense to do it here too. Signed-off-by: Lachlan McIlroy Reviewed-by: Christoph Hellwig Reviewed-by: Eric Sandeen --- fs/xfs/xfs_vnodeops.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 2d57aae0e31..4547608b46c 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -3034,6 +3034,8 @@ xfs_zero_remaining_bytes( bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, XFS_IS_REALTIME_INODE(ip) ? 
mp->m_rtdev_targp : mp->m_ddev_targp); + if (!bp) + return XFS_ERROR(ENOMEM); for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { offset_fsb = XFS_B_TO_FSBT(mp, offset); -- cgit v1.2.3-70-g09d2 From a5b429d41fede3a90deb532f5c2318393ed3a17b Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Fri, 5 Dec 2008 13:31:51 +1100 Subject: [XFS] Remove unused variable in ktrace_free() entries_size is probably left over from when we used to pass the size to kmem_free(). Signed-off-by: Lachlan McIlroy Reviewed-by: Christoph Hellwig Reviewed-by: Eric Sandeen --- fs/xfs/support/ktrace.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c index a34ef05489b..2d494c26717 100644 --- a/fs/xfs/support/ktrace.c +++ b/fs/xfs/support/ktrace.c @@ -113,21 +113,16 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep) void ktrace_free(ktrace_t *ktp) { - int entries_size; - if (ktp == (ktrace_t *)NULL) return; /* * Special treatment for the Vnode trace buffer. */ - if (ktp->kt_nentries == ktrace_zentries) { + if (ktp->kt_nentries == ktrace_zentries) kmem_zone_free(ktrace_ent_zone, ktp->kt_entries); - } else { - entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t)); - + else kmem_free(ktp->kt_entries); - } kmem_zone_free(ktrace_hdr_zone, ktp); } -- cgit v1.2.3-70-g09d2 From 797eaed40e1df4a3b9ece6894a71ce2b568bca38 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Fri, 5 Dec 2008 14:15:49 +1100 Subject: [XFS] Remove unnecessary assertion Hit this assert because an inode was tagged with XFS_ICI_RECLAIM_TAG but not XFS_IRECLAIMABLE|XFS_IRECLAIM. This is because xfs_iget_cache_hit() first clears XFS_IRECLAIMABLE and then calls __xfs_inode_clear_reclaim_tag() while only holding the pag_ici_lock in read mode so we can race with xfs_reclaim_inodes_ag(). Looks like xfs_reclaim_inodes_ag() will do the right thing anyway so just remove the assert. Thanks to Christoph for pointing out where the problem was. Signed-off-by: Lachlan McIlroy Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index ca5bd2951a8..2ed035354c2 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -707,8 +707,6 @@ restart: break; } - ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM))); - /* ignore if already under reclaim */ if (xfs_iflags_test(ip, XFS_IRECLAIM)) { read_unlock(&pag->pag_ici_lock); -- cgit v1.2.3-70-g09d2 From 0b8f1efad30bd58f89961b82dfe68b9edf8fd2ac Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Fri, 5 Dec 2008 18:58:31 -0800 Subject: sparse irq_desc[] array: core kernel and x86 changes Impact: new feature Problem on distro kernels: irq_desc[NR_IRQS] takes megabytes of RAM with NR_CPUS set to large values. The goal is to be able to scale up to much larger NR_IRQS value without impacting the (important) common case. To solve this, we generalize irq_desc[NR_IRQS] to an (optional) array of irq_desc pointers. When CONFIG_SPARSE_IRQ=y is used, we use kzalloc_node to get irq_desc, this also makes the IRQ descriptors NUMA-local (to the site that calls request_irq()). This gets rid of the irq_cfg[] static array on x86 as well: irq_cfg now uses desc->chip_data for x86 to store irq_cfg. 
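To make the scheme concrete, a stripped-down sketch of the on-demand, NUMA-local descriptor allocation described above might look like the following. This is an illustration only, not the kernel implementation: the "example_" names are invented, and the real allocation path in this patch also has to deal with the statically pre-allocated legacy descriptors and the locking and bookkeeping around the table.

	/* illustration only: a sparse table of descriptor pointers, mostly NULL */
	static struct irq_desc *example_irq_desc_ptrs[NR_IRQS];

	static struct irq_desc *example_irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
	{
		struct irq_desc *desc = example_irq_desc_ptrs[irq];

		if (!desc) {
			int node = cpu_to_node(cpu);

			/* allocate the descriptor on the node of the requesting CPU */
			desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
			if (!desc)
				return NULL;

			/* x86 hangs its irq_cfg off chip_data, also NUMA-local */
			desc->chip_data = kzalloc_node(sizeof(struct irq_cfg),
						       GFP_ATOMIC, node);
			example_irq_desc_ptrs[irq] = desc;
		}
		return desc;
	}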
Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 10 ++ arch/x86/include/asm/irq_vectors.h | 9 ++ arch/x86/kernel/io_apic.c | 301 +++++++++++++++++++++++-------------- arch/x86/kernel/irq.c | 3 + arch/x86/kernel/irq_32.c | 2 + arch/x86/kernel/irq_64.c | 2 + arch/x86/kernel/irqinit_32.c | 1 - arch/x86/kernel/irqinit_64.c | 1 - drivers/char/random.c | 22 +-- drivers/pci/intr_remapping.c | 76 +++++++++- drivers/xen/events.c | 12 +- fs/proc/stat.c | 17 ++- include/linux/interrupt.h | 2 + include/linux/irq.h | 54 ++++++- include/linux/irqnr.h | 14 +- include/linux/kernel_stat.h | 14 +- include/linux/random.h | 51 +++++++ init/main.c | 11 ++ kernel/irq/autoprobe.c | 15 ++ kernel/irq/chip.c | 3 +- kernel/irq/handle.c | 181 +++++++++++++++++++++- kernel/irq/proc.c | 6 +- kernel/irq/spurious.c | 5 + 23 files changed, 649 insertions(+), 163 deletions(-) (limited to 'fs') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ac22bb7719f..48ac688de3c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -238,6 +238,16 @@ config X86_HAS_BOOT_CPU_ID def_bool y depends on X86_VOYAGER +config SPARSE_IRQ + bool "Support sparse irq numbering" + depends on PCI_MSI || HT_IRQ + default y + help + This enables support for sparse irq, esp for msi/msi-x. You may need + if you have lots of cards supports msi-x installed. + + If you don't know what to do here, say Y. + config X86_FIND_SMP_CONFIG def_bool y depends on X86_MPPARSE || X86_VOYAGER diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h index 0005adb0f94..bb6b69a6b12 100644 --- a/arch/x86/include/asm/irq_vectors.h +++ b/arch/x86/include/asm/irq_vectors.h @@ -102,11 +102,20 @@ #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15) #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER) + +#ifndef CONFIG_SPARSE_IRQ # if NR_CPUS < MAX_IO_APICS # define NR_IRQS (NR_VECTORS + (32 * NR_CPUS)) # else # define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) # endif +#else +# if (8 * NR_CPUS) > (32 * MAX_IO_APICS) +# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS)) +# else +# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS)) +# endif +#endif #elif defined(CONFIG_X86_VOYAGER) diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c index 9043251210f..9de17f5c112 100644 --- a/arch/x86/kernel/io_apic.c +++ b/arch/x86/kernel/io_apic.c @@ -108,8 +108,33 @@ static int __init parse_noapic(char *str) early_param("noapic", parse_noapic); struct irq_pin_list; + +/* + * This is performance-critical, we want to do it O(1) + * + * the indexing order of this array favors 1:1 mappings + * between pins and IRQs. + */ + +struct irq_pin_list { + int apic, pin; + struct irq_pin_list *next; +}; + +static struct irq_pin_list *get_one_free_irq_2_pin(int cpu) +{ + struct irq_pin_list *pin; + int node; + + node = cpu_to_node(cpu); + + pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node); + printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node); + + return pin; +} + struct irq_cfg { - unsigned int irq; struct irq_pin_list *irq_2_pin; cpumask_t domain; cpumask_t old_domain; @@ -119,83 +144,93 @@ struct irq_cfg { }; /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. 
*/ +#ifdef CONFIG_SPARSE_IRQ +static struct irq_cfg irq_cfgx[] = { +#else static struct irq_cfg irq_cfgx[NR_IRQS] = { - [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, - [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, - [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, - [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, - [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, - [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, - [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, - [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, - [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, - [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, - [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, - [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, - [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, - [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, - [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, - [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, +#endif + [0] = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, }, + [1] = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, }, + [2] = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, }, + [3] = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, }, + [4] = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, }, + [5] = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, }, + [6] = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, }, + [7] = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, }, + [8] = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, }, + [9] = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, }, + [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, }, + [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, }, + [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, }, + [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, }, + [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, }, + [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, }, }; -#define for_each_irq_cfg(irq, cfg) \ - for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++) - -static struct irq_cfg *irq_cfg(unsigned int irq) +void __init arch_early_irq_init(void) { - return irq < nr_irqs ? irq_cfgx + irq : NULL; -} + struct irq_cfg *cfg; + struct irq_desc *desc; + int count; + int i; -static struct irq_cfg *irq_cfg_alloc(unsigned int irq) -{ - return irq_cfg(irq); -} + cfg = irq_cfgx; + count = ARRAY_SIZE(irq_cfgx); -/* - * Rough estimation of how many shared IRQs there are, can be changed - * anytime. - */ -#define MAX_PLUS_SHARED_IRQS NR_IRQS -#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS) + for (i = 0; i < count; i++) { + desc = irq_to_desc(i); + desc->chip_data = &cfg[i]; + } +} -/* - * This is performance-critical, we want to do it O(1) - * - * the indexing order of this array favors 1:1 mappings - * between pins and IRQs. 
- */ +#ifdef CONFIG_SPARSE_IRQ +static struct irq_cfg *irq_cfg(unsigned int irq) +{ + struct irq_cfg *cfg = NULL; + struct irq_desc *desc; -struct irq_pin_list { - int apic, pin; - struct irq_pin_list *next; -}; + desc = irq_to_desc(irq); + if (desc) + cfg = desc->chip_data; -static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE]; -static struct irq_pin_list *irq_2_pin_ptr; + return cfg; +} -static void __init irq_2_pin_init(void) +static struct irq_cfg *get_one_free_irq_cfg(int cpu) { - struct irq_pin_list *pin = irq_2_pin_head; - int i; + struct irq_cfg *cfg; + int node; + + node = cpu_to_node(cpu); - for (i = 1; i < PIN_MAP_SIZE; i++) - pin[i-1].next = &pin[i]; + cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node); + printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node); - irq_2_pin_ptr = &pin[0]; + return cfg; } -static struct irq_pin_list *get_one_free_irq_2_pin(void) +void arch_init_chip_data(struct irq_desc *desc, int cpu) { - struct irq_pin_list *pin = irq_2_pin_ptr; + struct irq_cfg *cfg; - if (!pin) - panic("can not get more irq_2_pin\n"); + cfg = desc->chip_data; + if (!cfg) { + desc->chip_data = get_one_free_irq_cfg(cpu); + if (!desc->chip_data) { + printk(KERN_ERR "can not alloc irq_cfg\n"); + BUG_ON(1); + } + } +} - irq_2_pin_ptr = pin->next; - pin->next = NULL; - return pin; +#else +static struct irq_cfg *irq_cfg(unsigned int irq) +{ + return irq < nr_irqs ? irq_cfgx + irq : NULL; } +#endif + struct io_apic { unsigned int index; unsigned int unused[3]; @@ -397,16 +432,19 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask) * shared ISA-space IRQs, so we have to support them. We are super * fast in the common case, and fast for shared ISA-space IRQs. */ -static void add_pin_to_irq(unsigned int irq, int apic, int pin) +static void add_pin_to_irq_cpu(unsigned int irq, int cpu, int apic, int pin) { - struct irq_cfg *cfg; struct irq_pin_list *entry; + struct irq_cfg *cfg = irq_cfg(irq); - /* first time to refer irq_cfg, so with new */ - cfg = irq_cfg_alloc(irq); entry = cfg->irq_2_pin; if (!entry) { - entry = get_one_free_irq_2_pin(); + entry = get_one_free_irq_2_pin(cpu); + if (!entry) { + printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n", + apic, pin); + return; + } cfg->irq_2_pin = entry; entry->apic = apic; entry->pin = pin; @@ -421,7 +459,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) entry = entry->next; } - entry->next = get_one_free_irq_2_pin(); + entry->next = get_one_free_irq_2_pin(cpu); entry = entry->next; entry->apic = apic; entry->pin = pin; @@ -430,7 +468,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin) /* * Reroute an IRQ to a different pin. */ -static void __init replace_pin_at_irq(unsigned int irq, +static void __init replace_pin_at_irq(unsigned int irq, int cpu, int oldapic, int oldpin, int newapic, int newpin) { @@ -451,7 +489,7 @@ static void __init replace_pin_at_irq(unsigned int irq, /* why? call replace before add? 
*/ if (!replaced) - add_pin_to_irq(irq, newapic, newpin); + add_pin_to_irq_cpu(irq, cpu, newapic, newpin); } static inline void io_apic_modify_irq(unsigned int irq, @@ -1162,9 +1200,13 @@ void __setup_vector_irq(int cpu) /* This function must be called with vector_lock held */ int irq, vector; struct irq_cfg *cfg; + struct irq_desc *desc; /* Mark the inuse vectors */ - for_each_irq_cfg(irq, cfg) { + for_each_irq_desc(irq, desc) { + if (!desc) + continue; + cfg = desc->chip_data; if (!cpu_isset(cpu, cfg->domain)) continue; vector = cfg->vector; @@ -1356,6 +1398,8 @@ static void __init setup_IO_APIC_irqs(void) { int apic, pin, idx, irq; int notcon = 0; + struct irq_desc *desc; + int cpu = boot_cpu_id; apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); @@ -1387,7 +1431,12 @@ static void __init setup_IO_APIC_irqs(void) if (multi_timer_check(apic, irq)) continue; #endif - add_pin_to_irq(irq, apic, pin); + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc for %d\n", irq); + continue; + } + add_pin_to_irq_cpu(irq, cpu, apic, pin); setup_IO_APIC_irq(apic, pin, irq, irq_trigger(idx), irq_polarity(idx)); @@ -1448,6 +1497,7 @@ __apicdebuginit(void) print_IO_APIC(void) union IO_APIC_reg_03 reg_03; unsigned long flags; struct irq_cfg *cfg; + struct irq_desc *desc; unsigned int irq; if (apic_verbosity == APIC_QUIET) @@ -1537,8 +1587,13 @@ __apicdebuginit(void) print_IO_APIC(void) } } printk(KERN_DEBUG "IRQ to pin mappings:\n"); - for_each_irq_cfg(irq, cfg) { - struct irq_pin_list *entry = cfg->irq_2_pin; + for_each_irq_desc(irq, desc) { + struct irq_pin_list *entry; + + if (!desc) + continue; + cfg = desc->chip_data; + entry = cfg->irq_2_pin; if (!entry) continue; printk(KERN_DEBUG "IRQ%d ", irq); @@ -2022,6 +2077,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) { int was_pending = 0; unsigned long flags; + struct irq_cfg *cfg; spin_lock_irqsave(&ioapic_lock, flags); if (irq < 16) { @@ -2029,6 +2085,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq) if (i8259A_irq_pending(irq)) was_pending = 1; } + cfg = irq_cfg(irq); __unmask_IO_APIC_irq(irq); spin_unlock_irqrestore(&ioapic_lock, flags); @@ -2178,6 +2235,9 @@ static void ir_irq_migration(struct work_struct *work) struct irq_desc *desc; for_each_irq_desc(irq, desc) { + if (!desc) + continue; + if (desc->status & IRQ_MOVE_PENDING) { unsigned long flags; @@ -2229,6 +2289,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) struct irq_cfg *cfg; irq = __get_cpu_var(vector_irq)[vector]; + if (irq == -1) + continue; + desc = irq_to_desc(irq); if (!desc) continue; @@ -2430,8 +2493,12 @@ static inline void init_IO_APIC_traps(void) * Also, we've got to be careful not to trash gate * 0x80, because int 0x80 is hm, kind of importantish. ;) */ - for_each_irq_cfg(irq, cfg) { - if (IO_APIC_IRQ(irq) && !cfg->vector) { + for_each_irq_desc(irq, desc) { + if (!desc) + continue; + + cfg = desc->chip_data; + if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { /* * Hmm.. We don't have an entry for this, * so default to an old-fashioned 8259 @@ -2439,11 +2506,9 @@ static inline void init_IO_APIC_traps(void) */ if (irq < 16) make_8259A_irq(irq); - else { - desc = irq_to_desc(irq); + else /* Strange. Oh, well.. */ desc->chip = &no_irq_chip; - } } } } @@ -2654,7 +2719,7 @@ static inline void __init check_timer(void) * Ok, does IRQ0 through the IOAPIC work? 
*/ if (no_pin1) { - add_pin_to_irq(0, apic1, pin1); + add_pin_to_irq_cpu(0, boot_cpu_id, apic1, pin1); setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); } unmask_IO_APIC_irq(0); @@ -2683,7 +2748,7 @@ static inline void __init check_timer(void) /* * legacy devices should be connected to IO APIC #0 */ - replace_pin_at_irq(0, apic1, pin1, apic2, pin2); + replace_pin_at_irq(0, boot_cpu_id, apic1, pin1, apic2, pin2); setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); unmask_IO_APIC_irq(0); enable_8259A_irq(0); @@ -2902,21 +2967,25 @@ unsigned int create_irq_nr(unsigned int irq_want) unsigned int irq; unsigned int new; unsigned long flags; - struct irq_cfg *cfg_new; - - irq_want = nr_irqs - 1; + struct irq_cfg *cfg_new = NULL; + int cpu = boot_cpu_id; + struct irq_desc *desc_new = NULL; irq = 0; spin_lock_irqsave(&vector_lock, flags); for (new = irq_want; new > 0; new--) { if (platform_legacy_irq(new)) continue; - cfg_new = irq_cfg(new); - if (cfg_new && cfg_new->vector != 0) + + desc_new = irq_to_desc_alloc_cpu(new, cpu); + if (!desc_new) { + printk(KERN_INFO "can not get irq_desc for %d\n", new); + continue; + } + cfg_new = desc_new->chip_data; + + if (cfg_new->vector != 0) continue; - /* check if need to create one */ - if (!cfg_new) - cfg_new = irq_cfg_alloc(new); if (__assign_irq_vector(new, TARGET_CPUS) == 0) irq = new; break; @@ -2925,6 +2994,9 @@ unsigned int create_irq_nr(unsigned int irq_want) if (irq > 0) { dynamic_irq_init(irq); + /* restore it, in case dynamic_irq_init clear it */ + if (desc_new) + desc_new->chip_data = cfg_new; } return irq; } @@ -2944,8 +3016,16 @@ int create_irq(void) void destroy_irq(unsigned int irq) { unsigned long flags; + struct irq_cfg *cfg; + struct irq_desc *desc; + /* store it, in case dynamic_irq_cleanup clear it */ + desc = irq_to_desc(irq); + cfg = desc->chip_data; dynamic_irq_cleanup(irq); + /* connect back irq_cfg */ + if (desc) + desc->chip_data = cfg; #ifdef CONFIG_INTR_REMAP free_irte(irq); @@ -3195,26 +3275,13 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq) return 0; } -static unsigned int build_irq_for_pci_dev(struct pci_dev *dev) -{ - unsigned int irq; - - irq = dev->bus->number; - irq <<= 8; - irq |= dev->devfn; - irq <<= 12; - - return irq; -} - -int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) +int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc) { unsigned int irq; int ret; unsigned int irq_want; - irq_want = build_irq_for_pci_dev(dev) + 0x100; - + irq_want = nr_irqs - 1; irq = create_irq_nr(irq_want); if (irq == 0) return -1; @@ -3228,7 +3295,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) goto error; no_ir: #endif - ret = setup_msi_irq(dev, desc, irq); + ret = setup_msi_irq(dev, msidesc, irq); if (ret < 0) { destroy_irq(irq); return ret; @@ -3246,7 +3313,7 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { unsigned int irq; int ret, sub_handle; - struct msi_desc *desc; + struct msi_desc *msidesc; unsigned int irq_want; #ifdef CONFIG_INTR_REMAP @@ -3254,10 +3321,11 @@ int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) int index = 0; #endif - irq_want = build_irq_for_pci_dev(dev) + 0x100; + irq_want = nr_irqs - 1; sub_handle = 0; - list_for_each_entry(desc, &dev->msi_list, list) { - irq = create_irq_nr(irq_want--); + list_for_each_entry(msidesc, &dev->msi_list, list) { + irq = create_irq_nr(irq_want); + irq_want--; if (irq == 0) return -1; #ifdef CONFIG_INTR_REMAP @@ -3289,7 +3357,7 @@ int arch_setup_msi_irqs(struct 
pci_dev *dev, int nvec, int type) } no_ir: #endif - ret = setup_msi_irq(dev, desc, irq); + ret = setup_msi_irq(dev, msidesc, irq); if (ret < 0) goto error; sub_handle++; @@ -3707,17 +3775,29 @@ int __init io_apic_get_version(int ioapic) int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity) { + struct irq_desc *desc; + struct irq_cfg *cfg; + int cpu = boot_cpu_id; + if (!IO_APIC_IRQ(irq)) { apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", ioapic); return -EINVAL; } + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc %d\n", irq); + return 0; + } + /* * IRQs < 16 are already in the irq_2_pin[] map */ - if (irq >= 16) - add_pin_to_irq(irq, ioapic, pin); + if (irq >= 16) { + cfg = desc->chip_data; + add_pin_to_irq_cpu(irq, cpu, ioapic, pin); + } setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity); @@ -3773,7 +3853,8 @@ void __init setup_ioapic_dest(void) * when you have too many devices, because at that time only boot * cpu is online. */ - cfg = irq_cfg(irq); + desc = irq_to_desc(irq); + cfg = desc->chip_data; if (!cfg->vector) { setup_IO_APIC_irq(ioapic, pin, irq, irq_trigger(irq_entry), @@ -3785,7 +3866,6 @@ void __init setup_ioapic_dest(void) /* * Honour affinities which have been set in early boot */ - desc = irq_to_desc(irq); if (desc->status & (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) mask = desc->affinity; @@ -3846,7 +3926,6 @@ void __init ioapic_init_mappings(void) struct resource *ioapic_res; int i; - irq_2_pin_init(); ioapic_res = ioapic_setup_resources(); for (i = 0; i < nr_ioapics; i++) { if (smp_found_config) { diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index d1d4dc52f64..3f1d9d18df6 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -118,6 +118,9 @@ int show_interrupts(struct seq_file *p, void *v) } desc = irq_to_desc(i); + if (!desc) + return 0; + spin_lock_irqsave(&desc->lock, flags); #ifndef CONFIG_SMP any_count = kstat_irqs(i); diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a51382672de..119fc9c8ff7 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -242,6 +242,8 @@ void fixup_irqs(cpumask_t map) for_each_irq_desc(irq, desc) { cpumask_t mask; + if (!desc) + continue; if (irq == 2) continue; diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 60eb84eb77a..900009c7059 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -94,6 +94,8 @@ void fixup_irqs(cpumask_t map) int break_affinity = 0; int set_affinity = 1; + if (!desc) + continue; if (irq == 2) continue; diff --git a/arch/x86/kernel/irqinit_32.c b/arch/x86/kernel/irqinit_32.c index 845aa9803e8..5a5651b7f9e 100644 --- a/arch/x86/kernel/irqinit_32.c +++ b/arch/x86/kernel/irqinit_32.c @@ -69,7 +69,6 @@ void __init init_ISA_irqs (void) * 16 old-style INTA-cycle interrupts: */ for (i = 0; i < 16; i++) { - /* first time call this irq_desc */ struct irq_desc *desc = irq_to_desc(i); desc->status = IRQ_DISABLED; diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index ff023539128..cd9f42d028d 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c @@ -143,7 +143,6 @@ void __init init_ISA_irqs(void) init_8259A(0); for (i = 0; i < 16; i++) { - /* first time call this irq_desc */ struct irq_desc *desc = irq_to_desc(i); desc->status = IRQ_DISABLED; diff --git a/drivers/char/random.c b/drivers/char/random.c index 675076f5fca..d26891bfcd4 100644 --- a/drivers/char/random.c +++ 
b/drivers/char/random.c @@ -558,23 +558,9 @@ struct timer_rand_state { unsigned dont_count_entropy:1; }; -static struct timer_rand_state *irq_timer_state[NR_IRQS]; - -static struct timer_rand_state *get_timer_rand_state(unsigned int irq) -{ - if (irq >= nr_irqs) - return NULL; - - return irq_timer_state[irq]; -} - -static void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) -{ - if (irq >= nr_irqs) - return; - - irq_timer_state[irq] = state; -} +#ifndef CONFIG_SPARSE_IRQ +struct timer_rand_state *irq_timer_state[NR_IRQS]; +#endif static struct timer_rand_state input_timer_state; @@ -933,8 +919,10 @@ void rand_initialize_irq(int irq) { struct timer_rand_state *state; +#ifndef CONFIG_SPARSE_IRQ if (irq >= nr_irqs) return; +#endif state = get_timer_rand_state(irq); diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index 2de5a3238c9..c9958ec5e25 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -19,17 +19,75 @@ struct irq_2_iommu { u8 irte_mask; }; -static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; +#ifdef CONFIG_SPARSE_IRQ +static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu) +{ + struct irq_2_iommu *iommu; + int node; + + node = cpu_to_node(cpu); + + iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); + printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node); + + return iommu; +} static struct irq_2_iommu *irq_2_iommu(unsigned int irq) { - return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + if (WARN_ON_ONCE(!desc)) + return NULL; + + return desc->irq_2_iommu; +} + +static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc; + struct irq_2_iommu *irq_iommu; + + /* + * alloc irq desc if not allocated already. + */ + desc = irq_to_desc_alloc_cpu(irq, cpu); + if (!desc) { + printk(KERN_INFO "can not get irq_desc for %d\n", irq); + return NULL; + } + + irq_iommu = desc->irq_2_iommu; + + if (!irq_iommu) + desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu); + + return desc->irq_2_iommu; } +static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) +{ + return irq_2_iommu_alloc_cpu(irq, boot_cpu_id); +} + +#else /* !CONFIG_SPARSE_IRQ */ + +static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; + +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) +{ + if (irq < nr_irqs) + return &irq_2_iommuX[irq]; + + return NULL; +} static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) { return irq_2_iommu(irq); } +#endif static DEFINE_SPINLOCK(irq_2_ir_lock); @@ -86,9 +144,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) if (!count) return -1; +#ifndef CONFIG_SPARSE_IRQ /* protect irq_2_iommu_alloc later */ if (irq >= nr_irqs) return -1; +#endif /* * start the IRTE search from index 0. 
@@ -130,6 +190,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) table->base[i].present = 1; irq_iommu = irq_2_iommu_alloc(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate irq_2_iommu\n"); + return -1; + } + irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; @@ -177,6 +243,12 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) irq_iommu = irq_2_iommu_alloc(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate irq_2_iommu\n"); + return -1; + } + irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = subhandle; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 1e3b934a4cf..2924faa7f6c 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -141,8 +141,12 @@ static void init_evtchn_cpu_bindings(void) int i; /* By default all event channels notify CPU#0. */ - for_each_irq_desc(i, desc) + for_each_irq_desc(i, desc) { + if (!desc) + continue; + desc->affinity = cpumask_of_cpu(0); + } #endif memset(cpu_evtchn, 0, sizeof(cpu_evtchn)); @@ -231,7 +235,7 @@ static int find_unbound_irq(void) int irq; /* Only allocate from dynirq range */ - for_each_irq_nr(irq) + for (irq = 0; irq < nr_irqs; irq++) if (irq_bindcount[irq] == 0) break; @@ -792,7 +796,7 @@ void xen_irq_resume(void) mask_evtchn(evtchn); /* No IRQ <-> event-channel mappings. */ - for_each_irq_nr(irq) + for (irq = 0; irq < nr_irqs; irq++) irq_info[irq].evtchn = 0; /* zap event-channel binding */ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) @@ -824,7 +828,7 @@ void __init xen_init_IRQ(void) mask_evtchn(i); /* Dynamic IRQ space is currently unbound. Zero the refcnts. */ - for_each_irq_nr(i) + for (i = 0; i < nr_irqs; i++) irq_bindcount[i] = 0; irq_ctx_init(smp_processor_id()); diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 81904f07679..a13431ab7c6 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -27,6 +27,7 @@ static int show_stat(struct seq_file *p, void *v) u64 sum = 0; struct timespec boottime; unsigned int per_irq_sum; + struct irq_desc *desc; user = nice = system = idle = iowait = irq = softirq = steal = cputime64_zero; @@ -44,10 +45,11 @@ static int show_stat(struct seq_file *p, void *v) softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); - - for_each_irq_nr(j) + for_each_irq_desc(j, desc) { + if (!desc) + continue; sum += kstat_irqs_cpu(j, i); - + } sum += arch_irq_stat_cpu(i); } sum += arch_irq_stat(); @@ -90,11 +92,14 @@ static int show_stat(struct seq_file *p, void *v) seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated? */ - for_each_irq_nr(j) { + for (j = 0; j < NR_IRQS; j++) { + desc = irq_to_desc(j); per_irq_sum = 0; - for_each_possible_cpu(i) - per_irq_sum += kstat_irqs_cpu(j, i); + if (desc) { + for_each_possible_cpu(i) + per_irq_sum += kstat_irqs_cpu(j, i); + } seq_printf(p, " %u", per_irq_sum); } diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index f58a0cf8929..79e915e7e8a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -18,6 +18,8 @@ #include #include +extern int nr_irqs; + /* * These correspond to the IORESOURCE_IRQ_* defines in * linux/ioport.h to select the interrupt line behaviour. 
When diff --git a/include/linux/irq.h b/include/linux/irq.h index 3dddfa703eb..63b00439d4d 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -129,6 +129,8 @@ struct irq_chip { const char *typename; }; +struct timer_rand_state; +struct irq_2_iommu; /** * struct irq_desc - interrupt descriptor * @irq: interrupt number for this descriptor @@ -154,6 +156,13 @@ struct irq_chip { */ struct irq_desc { unsigned int irq; +#ifdef CONFIG_SPARSE_IRQ + struct timer_rand_state *timer_rand_state; + unsigned int *kstat_irqs; +# ifdef CONFIG_INTR_REMAP + struct irq_2_iommu *irq_2_iommu; +# endif +#endif irq_flow_handler_t handle_irq; struct irq_chip *chip; struct msi_desc *msi_desc; @@ -181,14 +190,52 @@ struct irq_desc { const char *name; } ____cacheline_internodealigned_in_smp; +extern void early_irq_init(void); +extern void arch_early_irq_init(void); +extern void arch_init_chip_data(struct irq_desc *desc, int cpu); +extern void arch_init_copy_chip_data(struct irq_desc *old_desc, + struct irq_desc *desc, int cpu); +extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); + +#ifndef CONFIG_SPARSE_IRQ extern struct irq_desc irq_desc[NR_IRQS]; static inline struct irq_desc *irq_to_desc(unsigned int irq) { - return (irq < nr_irqs) ? irq_desc + irq : NULL; + return (irq < NR_IRQS) ? irq_desc + irq : NULL; +} +static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) +{ + return irq_to_desc(irq); } +#ifdef CONFIG_GENERIC_HARDIRQS +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ + irq >= 0; irq--, desc--) +#endif + +#else + +extern struct irq_desc *irq_to_desc(unsigned int irq); +extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); + +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq)) +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq)) + +#define kstat_irqs_this_cpu(DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]) +#define kstat_incr_irqs_this_cpu(irqno, DESC) \ + ((DESC)->kstat_irqs[smp_processor_id()]++) + +#endif + /* * Migration helpers for obsolete names, they will go away: */ @@ -380,6 +427,11 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) +#define get_irq_desc_chip(desc) ((desc)->chip) +#define get_irq_desc_chip_data(desc) ((desc)->chip_data) +#define get_irq_desc_data(desc) ((desc)->handler_data) +#define get_irq_desc_msi(desc) ((desc)->msi_desc) + #endif /* CONFIG_GENERIC_HARDIRQS */ #endif /* !CONFIG_S390 */ diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 452c280c811..7a299e989f8 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -7,18 +7,10 @@ # define for_each_irq_desc(irq, desc) \ for (irq = 0; irq < nr_irqs; irq++) -#else -extern int nr_irqs; -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) - -# define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ - irq >= 0; irq--, desc--) +static inline early_sparse_irq_init(void) +{ +} #endif -#define 
for_each_irq_nr(irq) \ - for (irq = 0; irq < nr_irqs; irq++) - #endif diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 4a145caeee0..4ee4b3d2316 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -28,7 +28,9 @@ struct cpu_usage_stat { struct kernel_stat { struct cpu_usage_stat cpustat; - unsigned int irqs[NR_IRQS]; +#ifndef CONFIG_SPARSE_IRQ + unsigned int irqs[NR_IRQS]; +#endif }; DECLARE_PER_CPU(struct kernel_stat, kstat); @@ -39,6 +41,10 @@ DECLARE_PER_CPU(struct kernel_stat, kstat); extern unsigned long long nr_context_switches(void); +#ifndef CONFIG_SPARSE_IRQ +#define kstat_irqs_this_cpu(irq) \ + (kstat_this_cpu.irqs[irq]) + struct irq_desc; static inline void kstat_incr_irqs_this_cpu(unsigned int irq, @@ -46,11 +52,17 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, { kstat_this_cpu.irqs[irq]++; } +#endif + +#ifndef CONFIG_SPARSE_IRQ static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { return kstat_cpu(cpu).irqs[irq]; } +#else +extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); +#endif /* * Number of interrupts per specific IRQ source, since bootup diff --git a/include/linux/random.h b/include/linux/random.h index 36f125c0c60..ad9daa2374d 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -44,6 +44,57 @@ struct rand_pool_info { extern void rand_initialize_irq(int irq); +struct timer_rand_state; +#ifndef CONFIG_SPARSE_IRQ + +extern struct timer_rand_state *irq_timer_state[]; + +extern int nr_irqs; +static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) +{ + if (irq >= nr_irqs) + return NULL; + + return irq_timer_state[irq]; +} + +static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) +{ + if (irq >= nr_irqs) + return; + + irq_timer_state[irq] = state; +} + +#else + +#include +static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + if (!desc) + return NULL; + + return desc->timer_rand_state; +} + +static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state) +{ + struct irq_desc *desc; + + desc = irq_to_desc(irq); + + if (!desc) + return; + + desc->timer_rand_state = state; +} +#endif + + extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value); extern void add_interrupt_randomness(int irq); diff --git a/init/main.c b/init/main.c index 7e117a231af..c1f999a3cf3 100644 --- a/init/main.c +++ b/init/main.c @@ -539,6 +539,15 @@ void __init __weak thread_info_cache_init(void) { } +void __init __weak arch_early_irq_init(void) +{ +} + +void __init __weak early_irq_init(void) +{ + arch_early_irq_init(); +} + asmlinkage void __init start_kernel(void) { char * command_line; @@ -603,6 +612,8 @@ asmlinkage void __init start_kernel(void) sort_main_extable(); trap_init(); rcu_init(); + /* init some links before init_ISA_irqs() */ + early_irq_init(); init_IRQ(); pidhash_init(); init_timers(); diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index cc0f7321b8c..650ce4102a6 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -40,6 +40,9 @@ unsigned long probe_irq_on(void) * flush such a longstanding irq before considering it as spurious. 
*/ for_each_irq_desc_reverse(i, desc) { + if (!desc) + continue; + spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { /* @@ -68,6 +71,9 @@ unsigned long probe_irq_on(void) * happened in the previous stage, it may have masked itself) */ for_each_irq_desc_reverse(i, desc) { + if (!desc) + continue; + spin_lock_irq(&desc->lock); if (!desc->action && !(desc->status & IRQ_NOPROBE)) { desc->status |= IRQ_AUTODETECT | IRQ_WAITING; @@ -86,6 +92,9 @@ unsigned long probe_irq_on(void) * Now filter out any obviously spurious interrupts */ for_each_irq_desc(i, desc) { + if (!desc) + continue; + spin_lock_irq(&desc->lock); status = desc->status; @@ -124,6 +133,9 @@ unsigned int probe_irq_mask(unsigned long val) int i; for_each_irq_desc(i, desc) { + if (!desc) + continue; + spin_lock_irq(&desc->lock); status = desc->status; @@ -166,6 +178,9 @@ int probe_irq_off(unsigned long val) unsigned int status; for_each_irq_desc(i, desc) { + if (!desc) + continue; + spin_lock_irq(&desc->lock); status = desc->status; diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 10b5092e9bf..8e4fce4a1b1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -24,9 +24,10 @@ */ void dynamic_irq_init(unsigned int irq) { - struct irq_desc *desc = irq_to_desc(irq); + struct irq_desc *desc; unsigned long flags; + desc = irq_to_desc(irq); if (!desc) { WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); return; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index c815b42d0f5..96ca203eb51 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -15,9 +15,16 @@ #include #include #include +#include +#include #include "internals.h" +/* + * lockdep: we want to handle all irq_desc locks as a single lock-class: + */ +static struct lock_class_key irq_desc_lock_class; + /** * handle_bad_irq - handle spurious and unhandled irqs * @irq: the interrupt number @@ -49,6 +56,155 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc) int nr_irqs = NR_IRQS; EXPORT_SYMBOL_GPL(nr_irqs); +void __init __attribute__((weak)) arch_early_irq_init(void) +{ +} + +#ifdef CONFIG_SPARSE_IRQ +static struct irq_desc irq_desc_init = { + .irq = -1, + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), +#ifdef CONFIG_SMP + .affinity = CPU_MASK_ALL +#endif +}; + +static void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) +{ + unsigned long bytes; + char *ptr; + int node; + + /* Compute how many bytes we need per irq and allocate them */ + bytes = nr * sizeof(unsigned int); + + node = cpu_to_node(cpu); + ptr = kzalloc_node(bytes, GFP_ATOMIC, node); + printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node); + + if (ptr) + desc->kstat_irqs = (unsigned int *)ptr; +} + +void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu) +{ +} + +static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) +{ + memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); + desc->irq = irq; +#ifdef CONFIG_SMP + desc->cpu = cpu; +#endif + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + init_kstat_irqs(desc, cpu, nr_cpu_ids); + if (!desc->kstat_irqs) { + printk(KERN_ERR "can not alloc kstat_irqs\n"); + BUG_ON(1); + } + arch_init_chip_data(desc, cpu); +} + +/* + * Protect the sparse_irqs: + */ +static DEFINE_SPINLOCK(sparse_irq_lock); + +struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; + +static struct irq_desc irq_desc_legacy[16] __cacheline_aligned_in_smp = { + [0 
... 15] = { + .irq = -1, + .status = IRQ_DISABLED, + .chip = &no_irq_chip, + .handle_irq = handle_bad_irq, + .depth = 1, + .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), +#ifdef CONFIG_SMP + .affinity = CPU_MASK_ALL +#endif + } +}; + +/* FIXME: use bootmem alloc ...*/ +static unsigned int kstat_irqs_legacy[16][NR_CPUS]; + +void __init early_irq_init(void) +{ + struct irq_desc *desc; + int legacy_count; + int i; + + desc = irq_desc_legacy; + legacy_count = ARRAY_SIZE(irq_desc_legacy); + + for (i = 0; i < legacy_count; i++) { + desc[i].irq = i; + desc[i].kstat_irqs = kstat_irqs_legacy[i]; + + irq_desc_ptrs[i] = desc + i; + } + + for (i = legacy_count; i < NR_IRQS; i++) + irq_desc_ptrs[i] = NULL; + + arch_early_irq_init(); +} + +struct irq_desc *irq_to_desc(unsigned int irq) +{ + return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL; +} + +struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc; + unsigned long flags; + int node; + + if (irq >= NR_IRQS) { + printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", + irq, NR_IRQS); + WARN_ON(1); + return NULL; + } + + desc = irq_desc_ptrs[irq]; + if (desc) + return desc; + + spin_lock_irqsave(&sparse_irq_lock, flags); + + /* We have to check it to avoid races with another CPU */ + desc = irq_desc_ptrs[irq]; + if (desc) + goto out_unlock; + + node = cpu_to_node(cpu); + desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); + printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", + irq, cpu, node); + if (!desc) { + printk(KERN_ERR "can not alloc irq_desc\n"); + BUG_ON(1); + } + init_one_irq_desc(irq, desc, cpu); + + irq_desc_ptrs[irq] = desc; + +out_unlock: + spin_unlock_irqrestore(&sparse_irq_lock, flags); + + return desc; +} + +#else + struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { .status = IRQ_DISABLED, @@ -62,6 +218,8 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { } }; +#endif + /* * What should we do if we get a hw irq event on an illegal vector? * Each architecture has to answer this themself. @@ -261,17 +419,28 @@ out: #ifdef CONFIG_TRACE_IRQFLAGS -/* - * lockdep: we want to handle all irq_desc locks as a single lock-class: - */ -static struct lock_class_key irq_desc_lock_class; - void early_init_irq_lock_class(void) { +#ifndef CONFIG_SPARSE_IRQ struct irq_desc *desc; int i; - for_each_irq_desc(i, desc) + for_each_irq_desc(i, desc) { + if (!desc) + continue; + lockdep_set_class(&desc->lock, &irq_desc_lock_class); + } +#endif +} +#endif + +#ifdef CONFIG_SPARSE_IRQ +unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) +{ + struct irq_desc *desc = irq_to_desc(irq); + return desc->kstat_irqs[cpu]; } #endif +EXPORT_SYMBOL(kstat_irqs_cpu); + diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index d257e7d6a8a..f6b3440f05b 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -243,7 +243,11 @@ void init_irq_proc(void) /* * Create entries for all existing IRQs. 
*/ - for_each_irq_desc(irq, desc) + for_each_irq_desc(irq, desc) { + if (!desc) + continue; + register_irq_proc(irq, desc); + } } diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index dd364c11e56..3738107531f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -91,6 +91,9 @@ static int misrouted_irq(int irq) int i, ok = 0; for_each_irq_desc(i, desc) { + if (!desc) + continue; + if (!i) continue; @@ -112,6 +115,8 @@ static void poll_spurious_irqs(unsigned long dummy) for_each_irq_desc(i, desc) { unsigned int status; + if (!desc) + continue; if (!i) continue; -- cgit v1.2.3-70-g09d2 From 240d367b4e6c6e3c5075e034db14dba60a6f5fa7 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Mon, 8 Dec 2008 14:06:17 -0800 Subject: sparseirq: fix Alpha build failure Impact: build fix on Alpha -tip testing found this build failure on the Alpha defconfig: /home/mingo/tip/fs/proc/stat.c: In function 'show_stat': /home/mingo/tip/fs/proc/stat.c:48: error: implicit declaration of function 'for_each_irq_desc' /home/mingo/tip/fs/proc/stat.c:48: error: expected ';' before '{' token can not use irq_desc() in stat.c on older architectures. Signed-off-by: Yinghai Lu Signed-off-by: Ingo Molnar --- fs/proc/stat.c | 20 +++++++++++++------- include/linux/irq.h | 9 --------- include/linux/irqnr.h | 19 ++++++++++++++++--- 3 files changed, 29 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index a13431ab7c6..3cb9492801c 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -45,9 +45,12 @@ static int show_stat(struct seq_file *p, void *v) softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); - for_each_irq_desc(j, desc) { + for_each_irq_nr(j) { +#ifdef CONFIG_SPARSE_IRQ + desc = irq_to_desc(j); if (!desc) continue; +#endif sum += kstat_irqs_cpu(j, i); } sum += arch_irq_stat_cpu(i); @@ -92,14 +95,17 @@ static int show_stat(struct seq_file *p, void *v) seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated? 
*/ - for (j = 0; j < NR_IRQS; j++) { - desc = irq_to_desc(j); + for_each_irq_nr(j) { per_irq_sum = 0; - - if (desc) { - for_each_possible_cpu(i) - per_irq_sum += kstat_irqs_cpu(j, i); +#ifdef CONFIG_SPARSE_IRQ + desc = irq_to_desc(j); + if (!desc) { + seq_printf(p, " %u", per_irq_sum); + continue; } +#endif + for_each_possible_cpu(i) + per_irq_sum += kstat_irqs_cpu(j, i); seq_printf(p, " %u", per_irq_sum); } diff --git a/include/linux/irq.h b/include/linux/irq.h index 63b00439d4d..b5749db3e5a 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -198,7 +198,6 @@ extern void arch_init_copy_chip_data(struct irq_desc *old_desc, extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); #ifndef CONFIG_SPARSE_IRQ - extern struct irq_desc irq_desc[NR_IRQS]; static inline struct irq_desc *irq_to_desc(unsigned int irq) @@ -210,14 +209,6 @@ static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) return irq_to_desc(irq); } -#ifdef CONFIG_GENERIC_HARDIRQS -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) -# define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ - irq >= 0; irq--, desc--) -#endif - #else extern struct irq_desc *irq_to_desc(unsigned int irq); diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7a299e989f8..13754f81358 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -8,9 +8,22 @@ # define for_each_irq_desc(irq, desc) \ for (irq = 0; irq < nr_irqs; irq++) -static inline early_sparse_irq_init(void) -{ -} +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1; irq >= 0; irq--) +#else +#ifndef CONFIG_SPARSE_IRQ + +struct irq_desc; +extern int nr_irqs; +# define for_each_irq_desc(irq, desc) \ + for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++) +# define for_each_irq_desc_reverse(irq, desc) \ + for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \ + irq >= 0; irq--, desc--) #endif +#endif + +#define for_each_irq_nr(irq) \ + for (irq = 0; irq < nr_irqs; irq++) #endif -- cgit v1.2.3-70-g09d2 From e055f13a6d8448d4f23121b7b11340c3fb55cce6 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Wed, 10 Dec 2008 11:51:54 +1100 Subject: [XFS] Remove unused tracing code None of this code appears to be used anywhere so remove it. 
Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_inode.c | 2 -- fs/xfs/xfs_inode.h | 1 - 2 files changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 063da344e18..f6b4c0f5bfd 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -3232,8 +3232,6 @@ corrupt_out: #ifdef XFS_ILOCK_TRACE -ktrace_t *xfs_ilock_trace_buf; - void xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra) { diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 558253e6fb4..64222e266df 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -223,7 +223,6 @@ struct xfs_dquot; #if defined(XFS_ILOCK_TRACE) #define XFS_ILOCK_KTRACE_SIZE 32 -extern ktrace_t *xfs_ilock_trace_buf; extern void xfs_ilock_trace(struct xfs_inode *, int, unsigned int, inst_t *); #else #define xfs_ilock_trace(i,n,f,ra) -- cgit v1.2.3-70-g09d2 From 85f334666a771680472722eee43ae0fc8730a619 Mon Sep 17 00:00:00 2001 From: Roland McGrath Date: Tue, 9 Dec 2008 19:36:38 -0800 Subject: tracehook: exec double-reporting fix The patch 6341c39 "tracehook: exec" introduced a small regression in 2.6.27 regarding binfmt_misc exec event reporting. Since the reporting is now done in the common search_binary_handler() function, an exec of a misc binary will result in two (or possibly multiple) exec events being reported, instead of just a single one, because the misc handler contains a recursive call to search_binary_handler. To add to the confusion, if PTRACE_O_TRACEEXEC is not active, the multiple SIGTRAP signals will in fact cause only a single ptrace intercept, as the signals are not queued. However, if PTRACE_O_TRACEEXEC is on, the debugger will actually see multiple ptrace intercepts (PTRACE_EVENT_EXEC). The test program included below demonstrates the problem. This change fixes the bug by calling tracehook_report_exec() only in the outermost search_binary_handler() call (bprm->recursion_depth == 0). The additional change to restore bprm->recursion_depth after each binfmt load_binary call is actually superfluous for this bug, since we test the value saved on entry to search_binary_handler(). But it keeps the use of of the depth count to its most obvious expected meaning. Depending on what binfmt handlers do in certain cases, there could have been false-positive tests for recursion limits before this change. /* Test program using PTRACE_O_TRACEEXEC. This forks and exec's the first argument with the rest of the arguments, while ptrace'ing. It expects to see one PTRACE_EVENT_EXEC stop and then a successful exit, with no other signals or events in between. Test for kernel doing two PTRACE_EVENT_EXEC stops for a binfmt_misc exec: $ gcc -g traceexec.c -o traceexec $ sudo sh -c 'echo :test:M::foobar::/bin/cat: > /proc/sys/fs/binfmt_misc/register' $ echo 'foobar test' > ./foobar $ chmod +x ./foobar $ ./traceexec ./foobar; echo $? 
==> good <== foobar test 0 $ ==> bad <== foobar test unexpected status 0x4057f != 0 3 $ */ #include #include #include #include #include #include #include static void wait_for (pid_t child, int expect) { int status; pid_t p = wait (&status); if (p != child) { perror ("wait"); exit (2); } if (status != expect) { fprintf (stderr, "unexpected status %#x != %#x\n", status, expect); exit (3); } } int main (int argc, char **argv) { pid_t child = fork (); if (child < 0) { perror ("fork"); return 127; } else if (child == 0) { ptrace (PTRACE_TRACEME); raise (SIGUSR1); execv (argv[1], &argv[1]); perror ("execve"); _exit (127); } wait_for (child, W_STOPCODE (SIGUSR1)); if (ptrace (PTRACE_SETOPTIONS, child, 0L, (void *) (long) PTRACE_O_TRACEEXEC) != 0) { perror ("PTRACE_SETOPTIONS"); return 4; } if (ptrace (PTRACE_CONT, child, 0L, 0L) != 0) { perror ("PTRACE_CONT"); return 5; } wait_for (child, W_STOPCODE (SIGTRAP | (PTRACE_EVENT_EXEC << 8))); if (ptrace (PTRACE_CONT, child, 0L, 0L) != 0) { perror ("PTRACE_CONT"); return 6; } wait_for (child, W_EXITCODE (0, 0)); return 0; } Reported-by: Arnd Bergmann CC: Ulrich Weigand Signed-off-by: Roland McGrath --- fs/exec.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 4e834f16d9d..ec5df9a3831 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1159,6 +1159,7 @@ EXPORT_SYMBOL(remove_arg_zero); */ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) { + unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; #ifdef __alpha__ @@ -1219,8 +1220,15 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) continue; read_unlock(&binfmt_lock); retval = fn(bprm, regs); + /* + * Restore the depth counter to its starting value + * in this call, so we don't have to rely on every + * load_binary function to restore it on return. + */ + bprm->recursion_depth = depth; if (retval >= 0) { - tracehook_report_exec(fmt, bprm, regs); + if (depth == 0) + tracehook_report_exec(fmt, bprm, regs); put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) -- cgit v1.2.3-70-g09d2 From 71c5576fbd809f2015f4eddf72e501e298720cf3 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 9 Dec 2008 13:14:13 -0800 Subject: revert "percpu counter: clean up percpu_counter_sum_and_set()" Revert commit 1f7c14c62ce63805f9574664a6c6de3633d4a354 Author: Mingming Cao Date: Thu Oct 9 12:50:59 2008 -0400 percpu counter: clean up percpu_counter_sum_and_set() Before this patch we had the following: percpu_counter_sum(): return the percpu_counter's value percpu_counter_sum_and_set(): return the percpu_counter's value, copying that value into the central value and zeroing the per-cpu counters before returning. After this patch, percpu_counter_sum_and_set() has gone, and percpu_counter_sum() gets the old percpu_counter_sum_and_set() functionality. Problem is, as Eric points out, the old percpu_counter_sum_and_set() functionality was racy and wrong. It zeroes out counters on "other" cpus, without holding any locks which will prevent races agaist updates from those other CPUS. This patch reverts 1f7c14c62ce63805f9574664a6c6de3633d4a354. This means that percpu_counter_sum_and_set() still has the race, but percpu_counter_sum() does not. Note that this is not a simple revert - ext4 has since started using percpu_counter_sum() for its dirty_blocks counter as well. Note that this revert patch changes percpu_counter_sum() semantics. 
Before the patch, a call to percpu_counter_sum() will bring the counter's central counter mostly up-to-date, so a following percpu_counter_read() will return a close value. After this patch, a call to percpu_counter_sum() will leave the counter's central accumulator unaltered, so a subsequent call to percpu_counter_read() can now return a significantly inaccurate result. If there is any code in the tree which was introduced after e8ced39d5e8911c662d4d69a342b9d053eaaac4e was merged, and which depends upon the new percpu_counter_sum() semantics, that code will break. Reported-by: Eric Dumazet Cc: "David S. Miller" Cc: Peter Zijlstra Cc: Mingming Cao Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext4/balloc.c | 4 ++-- include/linux/percpu_counter.h | 12 +++++++++--- lib/percpu_counter.c | 8 +++++--- 3 files changed, 16 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index d2003cdc36a..c17f69bcd7d 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -609,8 +609,8 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) if (free_blocks - (nblocks + root_blocks + dirty_blocks) < EXT4_FREEBLOCKS_WATERMARK) { - free_blocks = percpu_counter_sum(fbc); - dirty_blocks = percpu_counter_sum(dbc); + free_blocks = percpu_counter_sum_and_set(fbc); + dirty_blocks = percpu_counter_sum_and_set(dbc); if (dirty_blocks < 0) { printk(KERN_CRIT "Dirty block accounting " "went wrong %lld\n", diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 9007ccdfc11..20838883535 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); -s64 __percpu_counter_sum(struct percpu_counter *fbc); +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { - s64 ret = __percpu_counter_sum(fbc); + s64 ret = __percpu_counter_sum(fbc, 0); return ret < 0 ? 0 : ret; } +static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) +{ + return __percpu_counter_sum(fbc, 1); +} + + static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { - return __percpu_counter_sum(fbc); + return __percpu_counter_sum(fbc, 0); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index 71b265c330c..dba1530a5b2 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add); * Add up all the per-cpu counts, return the result. 
This is a more accurate * but much slower version of percpu_counter_read_positive() */ -s64 __percpu_counter_sum(struct percpu_counter *fbc) +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) { s64 ret; int cpu; @@ -62,9 +62,11 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc) for_each_online_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; - *pcount = 0; + if (set) + *pcount = 0; } - fbc->count = ret; + if (set) + fbc->count = ret; spin_unlock(&fbc->lock); return ret; -- cgit v1.2.3-70-g09d2 From 02d211688727ad02bb4555b1aa8ae2de16b21b39 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 9 Dec 2008 13:14:14 -0800 Subject: revert "percpu_counter: new function percpu_counter_sum_and_set" Revert commit e8ced39d5e8911c662d4d69a342b9d053eaaac4e Author: Mingming Cao Date: Fri Jul 11 19:27:31 2008 -0400 percpu_counter: new function percpu_counter_sum_and_set As described in revert "percpu counter: clean up percpu_counter_sum_and_set()" the new percpu_counter_sum_and_set() is racy against updates to the cpu-local accumulators on other CPUs. Revert that change. This means that ext4 will be slow again. But correct. Reported-by: Eric Dumazet Cc: "David S. Miller" Cc: Peter Zijlstra Cc: Mingming Cao Cc: Cc: [2.6.27.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext4/balloc.c | 4 ++-- include/linux/percpu_counter.h | 12 +++--------- lib/percpu_counter.c | 7 +------ 3 files changed, 6 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index c17f69bcd7d..db35cfdb3c8 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -609,8 +609,8 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) if (free_blocks - (nblocks + root_blocks + dirty_blocks) < EXT4_FREEBLOCKS_WATERMARK) { - free_blocks = percpu_counter_sum_and_set(fbc); - dirty_blocks = percpu_counter_sum_and_set(dbc); + free_blocks = percpu_counter_sum_positive(fbc); + dirty_blocks = percpu_counter_sum_positive(dbc); if (dirty_blocks < 0) { printk(KERN_CRIT "Dirty block accounting " "went wrong %lld\n", diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 20838883535..9007ccdfc11 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount); void percpu_counter_destroy(struct percpu_counter *fbc); void percpu_counter_set(struct percpu_counter *fbc, s64 amount); void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set); +s64 __percpu_counter_sum(struct percpu_counter *fbc); static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) { @@ -44,19 +44,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) { - s64 ret = __percpu_counter_sum(fbc, 0); + s64 ret = __percpu_counter_sum(fbc); return ret < 0 ? 
0 : ret; } -static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc) -{ - return __percpu_counter_sum(fbc, 1); -} - - static inline s64 percpu_counter_sum(struct percpu_counter *fbc) { - return __percpu_counter_sum(fbc, 0); + return __percpu_counter_sum(fbc); } static inline s64 percpu_counter_read(struct percpu_counter *fbc) diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index dba1530a5b2..b255b939bc1 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add); * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive() */ -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) +s64 __percpu_counter_sum(struct percpu_counter *fbc) { s64 ret; int cpu; @@ -62,12 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set) for_each_online_cpu(cpu) { s32 *pcount = per_cpu_ptr(fbc->counters, cpu); ret += *pcount; - if (set) - *pcount = 0; } - if (set) - fbc->count = ret; - spin_unlock(&fbc->lock); return ret; } -- cgit v1.2.3-70-g09d2 From 49c50342c728344b79c8f9e8293637fe80ef5ad5 Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Tue, 9 Dec 2008 13:14:21 -0800 Subject: pagemap: fix 32-bit pagemap regression The large pages fix from bcf8039ed45 broke 32-bit pagemap by pulling the pagemap entry code out into a function with the wrong return type. Pagemap entries are 64 bits on all systems and unsigned long is only 32 bits on 32-bit systems. Signed-off-by: Matt Mackall Reported-by: Doug Graham Cc: Alexey Dobriyan Cc: Dave Hansen Cc: [2.6.26.x, 2.6.27.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b770c095e45..3a8bdd7f575 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -557,9 +557,9 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte) return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); } -static unsigned long pte_to_pagemap_entry(pte_t pte) +static u64 pte_to_pagemap_entry(pte_t pte) { - unsigned long pme = 0; + u64 pme = 0; if (is_swap_pte(pte)) pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte)) | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP; -- cgit v1.2.3-70-g09d2 From 6ee5a399d6a92a52646836a6e10faf255c16393e Mon Sep 17 00:00:00 2001 From: Dmitri Monakhov Date: Tue, 9 Dec 2008 13:14:26 -0800 Subject: inotify: fix IN_ONESHOT unmount event watcher On umount two event will be dispatched to watcher: 1: inotify_dev_queue_event(.., IN_UNMOUNT,..) 2: remove_watch(watch, dev) ->inotify_dev_queue_event(.., IN_IGNORED, ..) But if watcher has IN_ONESHOT bit set then the watcher will be released inside first event. Which result in accessing invalid object later. IMHO it is not pure regression. This bug wasn't triggered while initial inotify interface testing phase because of another bug in IN_ONESHOT handling logic :) commit ac74c00e499ed276a965e5b5600667d5dc04a84a Author: Ulisses Furquim Date: Fri Feb 8 04:18:16 2008 -0800 inotify: fix check for one-shot watches before destroying them As the IN_ONESHOT bit is never set when an event is sent we must check it in the watch's mask and not in the event's mask. 
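In outline, the fix pins the watch with an extra reference across the unmount notification, so an IN_ONESHOT auto-removal inside the event handler cannot free it while it is still in use. A condensed sketch of the sequence applied by the fs/inotify.c hunk below:

	/* condensed from the diff below: hold a reference on the watch
	 * while delivering IN_UNMOUNT, since a one-shot watch may be
	 * released from inside handle_event() */
	get_inotify_watch(watch);
	mutex_lock(&ih->mutex);
	ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, NULL, NULL);
	inotify_remove_watch_locked(ih, watch);
	mutex_unlock(&ih->mutex);
	put_inotify_watch(watch);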
TESTCASE: mkdir mnt mount -ttmpfs none mnt mkdir mnt/d ./inotify mnt/d& umount mnt ## << lockup or crash here TESTSOURCE: /* gcc -oinotify inotify.c */ #include #include #include int main(int argc, char **argv) { char buf[1024]; struct inotify_event *ie; char *p; int i; ssize_t l; p = argv[1]; i = inotify_init(); inotify_add_watch(i, p, ~0); l = read(i, buf, sizeof(buf)); printf("read %d bytes\n", l); ie = (struct inotify_event *) buf; printf("event mask: %d\n", ie->mask); return 0; } Signed-off-by: Dmitri Monakhov Cc: John McCutchan Cc: Al Viro Cc: Robert Love Cc: Ulisses Furquim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/inotify.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/inotify.c b/fs/inotify.c index 7bbed1b8982..dae3f28f30d 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -428,11 +428,13 @@ void inotify_unmount_inodes(struct list_head *list) watches = &inode->inotify_watches; list_for_each_entry_safe(watch, next_w, watches, i_list) { struct inotify_handle *ih= watch->ih; + get_inotify_watch(watch); mutex_lock(&ih->mutex); ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, NULL, NULL); inotify_remove_watch_locked(ih, watch); mutex_unlock(&ih->mutex); + put_inotify_watch(watch); } mutex_unlock(&inode->inotify_mutex); iput(inode); -- cgit v1.2.3-70-g09d2 From 9c24624727f6d6c460e45762a408ca5f5b9b8ef2 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 9 Dec 2008 13:14:27 -0800 Subject: KSYM_SYMBOL_LEN fixes Miles Lane tailing /sys files hit a BUG which Pekka Enberg has tracked to my 966c8c12dc9e77f931e2281ba25d2f0244b06949 sprint_symbol(): use less stack exposing a bug in slub's list_locations() - kallsyms_lookup() writes a 0 to namebuf[KSYM_NAME_LEN-1], but that was beyond the end of page provided. The 100 slop which list_locations() allows at end of page looks roughly enough for all the other stuff it might print after the symbol before it checks again: break out KSYM_SYMBOL_LEN earlier than before. Latencytop and ftrace and are using KSYM_NAME_LEN buffers where they need KSYM_SYMBOL_LEN buffers, and vmallocinfo a 2*KSYM_NAME_LEN buffer where it wants a KSYM_SYMBOL_LEN buffer: fix those before anyone copies them. 
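To make the sizing rule concrete (mirroring the mm/vmalloc.c hunk below): sprint_symbol() formats the symbol name plus offset, size and module name, so its destination buffer must be KSYM_SYMBOL_LEN bytes; KSYM_NAME_LEN only covers the bare name that kallsyms_lookup() writes.

	/* sketch of the corrected usage, as in the hunks below */
	char buff[KSYM_SYMBOL_LEN];	/* room for "name+0xoff/0xsize [module]" */
	sprint_symbol(buff, (unsigned long)v->caller);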
[akpm@linux-foundation.org: ftrace.h needs module.h] Signed-off-by: Hugh Dickins Cc: Christoph Lameter Cc Miles Lane Acked-by: Pekka Enberg Acked-by: Steven Rostedt Acked-by: Frederic Weisbecker Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 2 +- include/linux/ftrace.h | 3 ++- kernel/latencytop.c | 2 +- mm/slub.c | 2 +- mm/vmalloc.c | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 486cf3fe713..d4677603c88 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -371,7 +371,7 @@ static int lstats_show_proc(struct seq_file *m, void *v) task->latency_record[i].time, task->latency_record[i].max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_NAME_LEN]; + char sym[KSYM_SYMBOL_LEN]; char *c; if (!task->latency_record[i].backtrace[q]) break; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 703eb53cfa2..9c5bc6be2b0 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #ifdef CONFIG_FUNCTION_TRACER @@ -231,7 +232,7 @@ ftrace_init_module(unsigned long *start, unsigned long *end) { } struct boot_trace { pid_t caller; - char func[KSYM_NAME_LEN]; + char func[KSYM_SYMBOL_LEN]; int result; unsigned long long duration; /* usecs */ ktime_t calltime; diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 5e7b45c5692..449db466bdb 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v) latency_record[i].time, latency_record[i].max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_NAME_LEN]; + char sym[KSYM_SYMBOL_LEN]; char *c; if (!latency_record[i].backtrace[q]) break; diff --git a/mm/slub.c b/mm/slub.c index 749588a50a5..a2cd47d89e0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3597,7 +3597,7 @@ static int list_locations(struct kmem_cache *s, char *buf, for (i = 0; i < t.count; i++) { struct location *l = &t.loc[i]; - if (len > PAGE_SIZE - 100) + if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) break; len += sprintf(buf + len, "%7ld ", l->count); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f3f6e075856..1ddb77ba399 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1717,7 +1717,7 @@ static int s_show(struct seq_file *m, void *p) v->addr, v->addr + v->size, v->size); if (v->caller) { - char buff[2 * KSYM_NAME_LEN]; + char buff[KSYM_SYMBOL_LEN]; seq_putc(m, ' '); sprint_symbol(buff, (unsigned long)v->caller); -- cgit v1.2.3-70-g09d2 From 15ac08a8b2c129abccf1be47b6ab09491e013db2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Dec 2008 04:47:30 -0500 Subject: [XFS] replace b_fspriv with b_mount Replace the b_fspriv pointer and it's ugly accessors with a properly types xfs_mount pointer. Also switch log reocvery over to it instead of using b_fspriv for the mount pointer. 
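In short, the buffer's mount pointer moves from an untyped slot accessed through cast macros to a properly typed field; a two-line sketch of the before/after forms seen in the diff:

	/* before: cast macro around a void * slot */
	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);

	/* after: typed member, no cast needed */
	mp = bp->b_mount;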
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_buf.c | 4 ++-- fs/xfs/linux-2.6/xfs_buf.h | 4 +--- fs/xfs/linux-2.6/xfs_lrw.c | 11 ++++------- fs/xfs/xfs_buf_item.c | 4 ++-- fs/xfs/xfs_log_recover.c | 26 +++++++++----------------- fs/xfs/xfs_rw.c | 2 +- 6 files changed, 19 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index cd89c56715a..b9c304a629e 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1085,7 +1085,7 @@ xfs_bawrite( bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD); bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES); - bp->b_fspriv3 = mp; + bp->b_mount = mp; bp->b_strat = xfs_bdstrat_cb; return xfs_bdstrat_cb(bp); } @@ -1098,7 +1098,7 @@ xfs_bdwrite( XB_TRACE(bp, "bdwrite", 0); bp->b_strat = xfs_bdstrat_cb; - bp->b_fspriv3 = mp; + bp->b_mount = mp; bp->b_flags &= ~XBF_READ; bp->b_flags |= (XBF_DELWRI | XBF_ASYNC); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 0e2aa16f5e4..af8764e02d7 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -168,7 +168,7 @@ typedef struct xfs_buf { struct completion b_iowait; /* queue for I/O waiters */ void *b_fspriv; void *b_fspriv2; - void *b_fspriv3; + struct xfs_mount *b_mount; unsigned short b_error; /* error code on I/O */ unsigned int b_page_count; /* size of page array */ unsigned int b_offset; /* page offset in first page */ @@ -335,8 +335,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) #define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2) #define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val)) -#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3) -#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func)) diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 59b7d5f9e64..92ce34b3787 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -847,13 +847,7 @@ retry: int xfs_bdstrat_cb(struct xfs_buf *bp) { - xfs_mount_t *mp; - - mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); - if (!XFS_FORCED_SHUTDOWN(mp)) { - xfs_buf_iorequest(bp); - return 0; - } else { + if (XFS_FORCED_SHUTDOWN(bp->b_mount)) { xfs_buftrace("XFS__BDSTRAT IOERROR", bp); /* * Metadata write that didn't get logged but @@ -866,6 +860,9 @@ xfs_bdstrat_cb(struct xfs_buf *bp) else return (xfs_bioerror(bp)); } + + xfs_buf_iorequest(bp); + return 0; } /* diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index d245d04e10c..d74ce113426 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -707,8 +707,8 @@ xfs_buf_item_init( * the first. If we do already have one, there is * nothing to do here so return. 
*/ - if (XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *) != mp) - XFS_BUF_SET_FSPRIVATE3(bp, mp); + if (bp->b_mount != mp) + bp->b_mount = mp; XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 51412cced01..35cca98bd94 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -267,21 +267,16 @@ STATIC void xlog_recover_iodone( struct xfs_buf *bp) { - xfs_mount_t *mp; - - ASSERT(XFS_BUF_FSPRIVATE(bp, void *)); - if (XFS_BUF_GETERROR(bp)) { /* * We're not going to bother about retrying * this during recovery. One strike! */ - mp = XFS_BUF_FSPRIVATE(bp, xfs_mount_t *); xfs_ioerror_alert("xlog_recover_iodone", - mp, bp, XFS_BUF_ADDR(bp)); - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + bp->b_mount, bp, XFS_BUF_ADDR(bp)); + xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); } - XFS_BUF_SET_FSPRIVATE(bp, NULL); + bp->b_mount = NULL; XFS_BUF_CLR_IODONE_FUNC(bp); xfs_biodone(bp); } @@ -2225,9 +2220,8 @@ xlog_recover_do_buffer_trans( XFS_BUF_STALE(bp); error = xfs_bwrite(mp, bp); } else { - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || - XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); - XFS_BUF_SET_FSPRIVATE(bp, mp); + ASSERT(bp->b_mount == NULL || bp->b_mount == mp); + bp->b_mount = mp; XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); xfs_bdwrite(mp, bp); } @@ -2490,9 +2484,8 @@ xlog_recover_do_inode_trans( write_inode_buffer: if (ITEM_TYPE(item) == XFS_LI_INODE) { - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || - XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); - XFS_BUF_SET_FSPRIVATE(bp, mp); + ASSERT(bp->b_mount == NULL || bp->b_mount == mp); + bp->b_mount = mp; XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); xfs_bdwrite(mp, bp); } else { @@ -2623,9 +2616,8 @@ xlog_recover_do_dquot_trans( memcpy(ddq, recddq, item->ri_buf[1].i_len); ASSERT(dq_f->qlf_size == 2); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL || - XFS_BUF_FSPRIVATE(bp, xfs_mount_t *) == mp); - XFS_BUF_SET_FSPRIVATE(bp, mp); + ASSERT(bp->b_mount == NULL || bp->b_mount == mp); + bp->b_mount = mp; XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); xfs_bdwrite(mp, bp); diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c index 3a82576dde9..36f3a21c54d 100644 --- a/fs/xfs/xfs_rw.c +++ b/fs/xfs/xfs_rw.c @@ -406,7 +406,7 @@ xfs_bwrite( * XXXsup how does this work for quotas. */ XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb); - XFS_BUF_SET_FSPRIVATE3(bp, mp); + bp->b_mount = mp; XFS_BUF_WRITE(bp); if ((error = XFS_bwrite(bp))) { -- cgit v1.2.3-70-g09d2 From 2175dd95741bda5f438e4efe388a8c1bb5abf1cc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Dec 2008 04:47:31 -0500 Subject: [XFS] simplify projid check in xfs_rename Check for the project ID after attaching all inodes to the transaction. That way the unlock in the error case is done by the transaction subsystem, which guaratees that is uses the right flags (which was wrong from day one of this check), and avoids having special code unlocking an array of inodes with potential duplicates. Attaching the inode first is the method used by xfs_rename and the other namespace methods all other error that require multiple locked inodes. 
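The ordering that makes the simplification safe can be sketched as follows (a condensed, illustrative fragment, not the final function; the predicate name is made up): once the inodes are joined to the transaction, any later failure only needs to jump to the error path, and xfs_trans_cancel() unlocks them with the correct flags.

/* condensed sketch; locals, labels and the predicate are illustrative */
xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);

xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
if (new_parent)
        xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);

/* checks placed after the joins can simply bail out */
if (project_ids_differ) {
        error = XFS_ERROR(EXDEV);
        goto error_return;      /* xfs_trans_cancel() drops the locks */
}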
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_rename.c | 49 +++++++++++-------------------------------------- 1 file changed, 11 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index 10642fcbb1f..86471bb40fd 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c @@ -41,31 +41,6 @@ #include "xfs_vnodeops.h" -/* - * Given an array of up to 4 inode pointers, unlock the pointed to inodes. - * If there are fewer than 4 entries in the array, the empty entries will - * be at the end and will have NULL pointers in them. - */ -STATIC void -xfs_rename_unlock4( - xfs_inode_t **i_tab, - uint lock_mode) -{ - int i; - - xfs_iunlock(i_tab[0], lock_mode); - for (i = 1; i < 4; i++) { - if (i_tab[i] == NULL) - break; - - /* - * Watch out for duplicate entries in the table. - */ - if (i_tab[i] != i_tab[i-1]) - xfs_iunlock(i_tab[i], lock_mode); - } -} - /* * Enter all inodes for a rename transaction into a sorted array. */ @@ -204,19 +179,6 @@ xfs_rename( */ xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); - /* - * If we are using project inheritance, we only allow renames - * into our tree when the project IDs are the same; else the - * tree quota mechanism would be circumvented. - */ - if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && - (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { - error = XFS_ERROR(EXDEV); - xfs_rename_unlock4(inodes, XFS_ILOCK_EXCL); - xfs_trans_cancel(tp, cancel_flags); - goto std_return; - } - /* * Join all the inodes to the transaction. From this point on, * we can rely on either trans_commit or trans_cancel to unlock @@ -241,6 +203,17 @@ xfs_rename( xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); } + /* + * If we are using project inheritance, we only allow renames + * into our tree when the project IDs are the same; else the + * tree quota mechanism would be circumvented. + */ + if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && + (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { + error = XFS_ERROR(EXDEV); + goto error_return; + } + /* * Set up the target. */ -- cgit v1.2.3-70-g09d2 From 6d73cf133c5477f7038577bfeda603ce9946f8cb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Dec 2008 04:47:32 -0500 Subject: [XFS] resync headers with libxfs - xfs_sb.h add the XFS_SB_VERSION2_PARENTBIT features2 that has been around in userspace for some time - xfs_inode.h: move a few things out of __KERNEL__ that are needed by userspace - xfs_mount.h: only include xfs_sync.h under __KERNEL__ - xfs_inode.c: minor whitespace fixup. I accidentaly changes this when importing this file for use by userspace. Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_inode.c | 2 +- fs/xfs/xfs_inode.h | 16 ++++++++-------- fs/xfs/xfs_mount.h | 4 ++-- fs/xfs/xfs_sb.h | 1 + 4 files changed, 12 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index f6b4c0f5bfd..3adb868df18 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -870,7 +870,7 @@ xfs_iread( * around for a while. This helps to keep recently accessed * meta-data in-core longer. 
*/ - XFS_BUF_SET_REF(bp, XFS_INO_REF); + XFS_BUF_SET_REF(bp, XFS_INO_REF); /* * Use xfs_trans_brelse() to release the buffer containing the diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 64222e266df..f0e4d79833e 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -481,12 +481,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) (((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \ ((pip)->i_d.di_mode & S_ISGID)) -/* - * Flags for xfs_iget() - */ -#define XFS_IGET_CREATE 0x1 -#define XFS_IGET_BULKSTAT 0x2 - /* * xfs_iget.c prototypes. */ @@ -508,8 +502,6 @@ void xfs_ireclaim(xfs_inode_t *); /* * xfs_inode.c prototypes. */ -int xfs_iread(struct xfs_mount *, struct xfs_trans *, - struct xfs_inode *, xfs_daddr_t, uint); int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, mode_t, xfs_nlink_t, xfs_dev_t, struct cred *, xfs_prid_t, int, struct xfs_buf **, boolean_t *, xfs_inode_t **); @@ -582,12 +574,20 @@ do { \ #endif /* __KERNEL__ */ +/* + * Flags for xfs_iget() + */ +#define XFS_IGET_CREATE 0x1 +#define XFS_IGET_BULKSTAT 0x2 + int xfs_inotobp(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, struct xfs_dinode **, struct xfs_buf **, int *, uint); int xfs_itobp(struct xfs_mount *, struct xfs_trans *, struct xfs_inode *, struct xfs_dinode **, struct xfs_buf **, uint); +int xfs_iread(struct xfs_mount *, struct xfs_trans *, + struct xfs_inode *, xfs_daddr_t, uint); void xfs_dinode_from_disk(struct xfs_icdinode *, struct xfs_dinode *); void xfs_dinode_to_disk(struct xfs_dinode *, diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index ae5da88ace2..c1e02846732 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -18,8 +18,6 @@ #ifndef __XFS_MOUNT_H__ #define __XFS_MOUNT_H__ -#include "xfs_sync.h" - typedef struct xfs_trans_reservations { uint tr_write; /* extent alloc trans */ uint tr_itruncate; /* truncate trans */ @@ -53,6 +51,8 @@ typedef struct xfs_trans_reservations { #else /* __KERNEL__ */ +#include "xfs_sync.h" + struct cred; struct log; struct xfs_mount_args; diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h index e123c1499b4..1ed71916e4c 100644 --- a/fs/xfs/xfs_sb.h +++ b/fs/xfs/xfs_sb.h @@ -79,6 +79,7 @@ struct xfs_mount; #define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */ #define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ +#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */ #define XFS_SB_VERSION2_OKREALFBITS \ (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ -- cgit v1.2.3-70-g09d2 From 4d4be482a4d78ca906f45e99fd9fdb91e907f5ad Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Dec 2008 04:47:33 -0500 Subject: [XFS] add a FMODE flag to make XFS invisible I/O less hacky XFS has a mode called invisble I/O that doesn't update any of the timestamps. It's used for HSM-style applications and exposed through the nasty open by handle ioctl. Instead of doing directly assignment of file operations that set an internal flag for it add a new FMODE_NOCMTIME flag that we can check in the normal file operations. 
(addition of the generic VFS flag has been ACKed by Al as an interims solution) Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_file.c | 145 ++++++----------------------------------- fs/xfs/linux-2.6/xfs_ioctl.c | 29 ++++++--- fs/xfs/linux-2.6/xfs_ioctl.h | 8 +-- fs/xfs/linux-2.6/xfs_ioctl32.c | 56 ++++++---------- fs/xfs/linux-2.6/xfs_iops.h | 1 - fs/xfs/xfs_vnodeops.h | 2 - include/linux/fs.h | 8 +++ 7 files changed, 71 insertions(+), 178 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index f999d20a429..a0c45cc8a6b 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -45,80 +45,44 @@ static struct vm_operations_struct xfs_file_vm_ops; -STATIC_INLINE ssize_t -__xfs_file_read( +STATIC ssize_t +xfs_file_aio_read( struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, - int ioflags, loff_t pos) { struct file *file = iocb->ki_filp; + int ioflags = IO_ISAIO; BUG_ON(iocb->ki_pos != pos); if (unlikely(file->f_flags & O_DIRECT)) ioflags |= IO_ISDIRECT; + if (file->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; return xfs_read(XFS_I(file->f_path.dentry->d_inode), iocb, iov, nr_segs, &iocb->ki_pos, ioflags); } STATIC ssize_t -xfs_file_aio_read( - struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, - loff_t pos) -{ - return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO, pos); -} - -STATIC ssize_t -xfs_file_aio_read_invis( - struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, - loff_t pos) -{ - return __xfs_file_read(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos); -} - -STATIC_INLINE ssize_t -__xfs_file_write( +xfs_file_aio_write( struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, - int ioflags, loff_t pos) { - struct file *file = iocb->ki_filp; + struct file *file = iocb->ki_filp; + int ioflags = IO_ISAIO; BUG_ON(iocb->ki_pos != pos); if (unlikely(file->f_flags & O_DIRECT)) ioflags |= IO_ISDIRECT; + if (file->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; return xfs_write(XFS_I(file->f_mapping->host), iocb, iov, nr_segs, &iocb->ki_pos, ioflags); } -STATIC ssize_t -xfs_file_aio_write( - struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, - loff_t pos) -{ - return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO, pos); -} - -STATIC ssize_t -xfs_file_aio_write_invis( - struct kiocb *iocb, - const struct iovec *iov, - unsigned long nr_segs, - loff_t pos) -{ - return __xfs_file_write(iocb, iov, nr_segs, IO_ISAIO|IO_INVIS, pos); -} - STATIC ssize_t xfs_file_splice_read( struct file *infilp, @@ -127,20 +91,13 @@ xfs_file_splice_read( size_t len, unsigned int flags) { - return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode), - infilp, ppos, pipe, len, flags, 0); -} + int ioflags = 0; + + if (infilp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; -STATIC ssize_t -xfs_file_splice_read_invis( - struct file *infilp, - loff_t *ppos, - struct pipe_inode_info *pipe, - size_t len, - unsigned int flags) -{ return xfs_splice_read(XFS_I(infilp->f_path.dentry->d_inode), - infilp, ppos, pipe, len, flags, IO_INVIS); + infilp, ppos, pipe, len, flags, ioflags); } STATIC ssize_t @@ -151,20 +108,13 @@ xfs_file_splice_write( size_t len, unsigned int flags) { - return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode), - pipe, outfilp, ppos, len, flags, 0); -} + int ioflags = 0; + + if (outfilp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; -STATIC ssize_t -xfs_file_splice_write_invis( - struct pipe_inode_info 
*pipe, - struct file *outfilp, - loff_t *ppos, - size_t len, - unsigned int flags) -{ return xfs_splice_write(XFS_I(outfilp->f_path.dentry->d_inode), - pipe, outfilp, ppos, len, flags, IO_INVIS); + pipe, outfilp, ppos, len, flags, ioflags); } STATIC int @@ -275,42 +225,6 @@ xfs_file_mmap( return 0; } -STATIC long -xfs_file_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - - - /* NOTE: some of the ioctl's return positive #'s as a - * byte count indicating success, such as - * readlink_by_handle. So we don't "sign flip" - * like most other routines. This means true - * errors need to be returned as a negative value. - */ - return xfs_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p); -} - -STATIC long -xfs_file_ioctl_invis( - struct file *filp, - unsigned int cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - - - /* NOTE: some of the ioctl's return positive #'s as a - * byte count indicating success, such as - * readlink_by_handle. So we don't "sign flip" - * like most other routines. This means true - * errors need to be returned as a negative value. - */ - return xfs_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, (void __user *)p); -} - /* * mmap()d file has taken write protection fault and is being made * writable. We can set the page state up correctly for a writable @@ -346,25 +260,6 @@ const struct file_operations xfs_file_operations = { #endif }; -const struct file_operations xfs_invis_file_operations = { - .llseek = generic_file_llseek, - .read = do_sync_read, - .write = do_sync_write, - .aio_read = xfs_file_aio_read_invis, - .aio_write = xfs_file_aio_write_invis, - .splice_read = xfs_file_splice_read_invis, - .splice_write = xfs_file_splice_write_invis, - .unlocked_ioctl = xfs_file_ioctl_invis, -#ifdef CONFIG_COMPAT - .compat_ioctl = xfs_file_compat_invis_ioctl, -#endif - .mmap = xfs_file_mmap, - .open = xfs_file_open, - .release = xfs_file_release, - .fsync = xfs_file_fsync, -}; - - const struct file_operations xfs_dir_file_operations = { .open = xfs_dir_open, .read = generic_read_dir, diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index c8f1e632ba9..0264c8719ff 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -319,10 +319,11 @@ xfs_open_by_handle( put_unused_fd(new_fd); return -XFS_ERROR(-PTR_ERR(filp)); } + if (inode->i_mode & S_IFREG) { /* invisible operation should not change atime */ filp->f_flags |= O_NOATIME; - filp->f_op = &xfs_invis_file_operations; + filp->f_mode |= FMODE_NOCMTIME; } fd_install(new_fd, filp); @@ -1328,21 +1329,31 @@ xfs_ioc_getbmapx( return 0; } -int -xfs_ioctl( - xfs_inode_t *ip, +/* + * Note: some of the ioctl's return positive numbers as a + * byte count indicating success, such as readlink_by_handle. + * So we don't "sign flip" like most other routines. This means + * true errors need to be returned as a negative value. 
+ */ +long +xfs_file_ioctl( struct file *filp, - int ioflags, unsigned int cmd, - void __user *arg) + unsigned long p) { struct inode *inode = filp->f_path.dentry->d_inode; - xfs_mount_t *mp = ip->i_mount; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + void __user *arg = (void __user *)p; + int ioflags = 0; int error; - xfs_itrace_entry(XFS_I(inode)); - switch (cmd) { + if (filp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; + xfs_itrace_entry(ip); + + switch (cmd) { case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: case XFS_IOC_RESVSP: diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h index d92131c827b..8c16bf2d7e0 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.h +++ b/fs/xfs/linux-2.6/xfs_ioctl.h @@ -68,13 +68,13 @@ xfs_attrmulti_attr_remove( __uint32_t flags); extern long -xfs_file_compat_ioctl( - struct file *file, +xfs_file_ioctl( + struct file *filp, unsigned int cmd, - unsigned long arg); + unsigned long p); extern long -xfs_file_compat_invis_ioctl( +xfs_file_compat_ioctl( struct file *file, unsigned int cmd, unsigned long arg); diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index b34b3d8892a..0504cece9f6 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c @@ -599,19 +599,24 @@ out: return error; } -STATIC long -xfs_compat_ioctl( - xfs_inode_t *ip, - struct file *filp, - int ioflags, - unsigned cmd, - void __user *arg) +long +xfs_file_compat_ioctl( + struct file *filp, + unsigned cmd, + unsigned long p) { - struct inode *inode = filp->f_path.dentry->d_inode; - xfs_mount_t *mp = ip->i_mount; - int error; + struct inode *inode = filp->f_path.dentry->d_inode; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + void __user *arg = (void __user *)p; + int ioflags = 0; + int error; + + if (filp->f_mode & FMODE_NOCMTIME) + ioflags |= IO_INVIS; + + xfs_itrace_entry(ip); - xfs_itrace_entry(XFS_I(inode)); switch (cmd) { /* No size or alignment issues on any arch */ case XFS_IOC_DIOINFO: @@ -632,7 +637,7 @@ xfs_compat_ioctl( case XFS_IOC_GOINGDOWN: case XFS_IOC_ERROR_INJECTION: case XFS_IOC_ERROR_CLEARALL: - return xfs_ioctl(ip, filp, ioflags, cmd, arg); + return xfs_file_ioctl(filp, cmd, p); #ifndef BROKEN_X86_ALIGNMENT /* These are handled fine if no alignment issues */ case XFS_IOC_ALLOCSP: @@ -646,7 +651,7 @@ xfs_compat_ioctl( case XFS_IOC_FSGEOMETRY_V1: case XFS_IOC_FSGROWFSDATA: case XFS_IOC_FSGROWFSRT: - return xfs_ioctl(ip, filp, ioflags, cmd, arg); + return xfs_file_ioctl(filp, cmd, p); #else case XFS_IOC_ALLOCSP_32: case XFS_IOC_FREESP_32: @@ -687,7 +692,7 @@ xfs_compat_ioctl( case XFS_IOC_SETXFLAGS_32: case XFS_IOC_GETVERSION_32: cmd = _NATIVE_IOC(cmd, long); - return xfs_ioctl(ip, filp, ioflags, cmd, arg); + return xfs_file_ioctl(filp, cmd, p); case XFS_IOC_SWAPEXT: { struct xfs_swapext sxp; struct compat_xfs_swapext __user *sxu = arg; @@ -738,26 +743,3 @@ xfs_compat_ioctl( return -XFS_ERROR(ENOIOCTLCMD); } } - -long -xfs_file_compat_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - - return xfs_compat_ioctl(XFS_I(inode), filp, 0, cmd, (void __user *)p); -} - -long -xfs_file_compat_invis_ioctl( - struct file *filp, - unsigned int cmd, - unsigned long p) -{ - struct inode *inode = filp->f_path.dentry->d_inode; - - return xfs_compat_ioctl(XFS_I(inode), filp, IO_INVIS, cmd, - (void __user *)p); -} diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h index 
8b1a1e31dc2..ef41c92ce66 100644 --- a/fs/xfs/linux-2.6/xfs_iops.h +++ b/fs/xfs/linux-2.6/xfs_iops.h @@ -22,7 +22,6 @@ struct xfs_inode; extern const struct file_operations xfs_file_operations; extern const struct file_operations xfs_dir_file_operations; -extern const struct file_operations xfs_invis_file_operations; extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 2a45b00ad32..55d955ec4ec 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -53,8 +53,6 @@ int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value, int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags); int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize, int flags, struct attrlist_cursor_kern *cursor); -int xfs_ioctl(struct xfs_inode *ip, struct file *filp, - int ioflags, unsigned int cmd, void __user *arg); ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb, const struct iovec *iovp, unsigned int segs, loff_t *offset, int ioflags); diff --git a/include/linux/fs.h b/include/linux/fs.h index 51bd9370d43..965b9ba3865 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -81,6 +81,14 @@ extern int dir_notify_enable; #define FMODE_WRITE_IOCTL ((__force fmode_t)128) #define FMODE_NDELAY_NOW ((__force fmode_t)256) +/* + * Don't update ctime and mtime. + * + * Currently a special hack for the XFS open_by_handle ioctl, but we'll + * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. + */ +#define FMODE_NOCMTIME ((__force fmode_t)2048) + #define RW_MASK 1 #define RWA_MASK 2 #define READ 0 -- cgit v1.2.3-70-g09d2 From c4cd747ee6c3ba1e7727878e3fce482d0d8c0136 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Dec 2008 04:47:34 -0500 Subject: [XFS] use inode_change_ok for setattr permission checking Instead of implementing our own checks use inode_change_ok to check for necessary permission in setattr. There is a slight change in behaviour as inode_change_ok doesn't allow i_mode updates to add the suid or sgid without superuser privilegues while the old XFS code just stripped away those bits from the file mode. (First sent on Semptember 29th) Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_vnodeops.c | 149 ++++++++++++-------------------------------------- 1 file changed, 36 insertions(+), 113 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 4547608b46c..f07bf8768c3 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -70,7 +70,6 @@ xfs_setattr( gid_t gid=0, igid=0; int timeflags = 0; struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; - int file_owner; int need_iolock = 1; xfs_itrace_entry(ip); @@ -81,6 +80,10 @@ xfs_setattr( if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); + code = -inode_change_ok(inode, iattr); + if (code) + return code; + olddquot1 = olddquot2 = NULL; udqp = gdqp = NULL; @@ -158,56 +161,6 @@ xfs_setattr( xfs_ilock(ip, lock_flags); - /* boolean: are we the file owner? */ - file_owner = (current_fsuid() == ip->i_d.di_uid); - - /* - * Change various properties of a file. - * Only the owner or users with CAP_FOWNER - * capability may do these things. - */ - if (mask & (ATTR_MODE|ATTR_UID|ATTR_GID)) { - /* - * CAP_FOWNER overrides the following restrictions: - * - * The user ID of the calling process must be equal - * to the file owner ID, except in cases where the - * CAP_FSETID capability is applicable. 
- */ - if (!file_owner && !capable(CAP_FOWNER)) { - code = XFS_ERROR(EPERM); - goto error_return; - } - - /* - * CAP_FSETID overrides the following restrictions: - * - * The effective user ID of the calling process shall match - * the file owner when setting the set-user-ID and - * set-group-ID bits on that file. - * - * The effective group ID or one of the supplementary group - * IDs of the calling process shall match the group owner of - * the file when setting the set-group-ID bit on that file - */ - if (mask & ATTR_MODE) { - mode_t m = 0; - - if ((iattr->ia_mode & S_ISUID) && !file_owner) - m |= S_ISUID; - if ((iattr->ia_mode & S_ISGID) && - !in_group_p((gid_t)ip->i_d.di_gid)) - m |= S_ISGID; -#if 0 - /* Linux allows this, Irix doesn't. */ - if ((iattr->ia_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode)) - m |= S_ISVTX; -#endif - if (m && !capable(CAP_FSETID)) - iattr->ia_mode &= ~m; - } - } - /* * Change file ownership. Must be the owner or privileged. */ @@ -223,22 +176,6 @@ xfs_setattr( gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; - /* - * CAP_CHOWN overrides the following restrictions: - * - * If _POSIX_CHOWN_RESTRICTED is defined, this capability - * shall override the restriction that a process cannot - * change the user ID of a file it owns and the restriction - * that the group ID supplied to the chown() function - * shall be equal to either the group ID or one of the - * supplementary group IDs of the calling process. - */ - if ((iuid != uid || - (igid != gid && !in_group_p((gid_t)gid))) && - !capable(CAP_CHOWN)) { - code = XFS_ERROR(EPERM); - goto error_return; - } /* * Do a quota reservation only if uid/gid is actually * going to change. @@ -276,36 +213,22 @@ xfs_setattr( code = XFS_ERROR(EINVAL); goto error_return; } + /* * Make sure that the dquots are attached to the inode. */ - if ((code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED))) + code = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED); + if (code) goto error_return; - } - /* - * Change file access or modified times. - */ - if (mask & (ATTR_ATIME|ATTR_MTIME)) { - if (!file_owner) { - if ((mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)) && - !capable(CAP_FOWNER)) { - code = XFS_ERROR(EPERM); - goto error_return; - } - } - } - - /* - * Now we can make the changes. Before we join the inode - * to the transaction, if ATTR_SIZE is set then take care of - * the part of the truncation that must be done without the - * inode lock. This needs to be done before joining the inode - * to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. - */ - if (mask & ATTR_SIZE) { - code = 0; + /* + * Now we can make the changes. Before we join the inode + * to the transaction, if ATTR_SIZE is set then take care of + * the part of the truncation that must be done without the + * inode lock. This needs to be done before joining the inode + * to the transaction, because the inode cannot be unlocked + * once it is a part of the transaction. + */ if (iattr->ia_size > ip->i_size) { /* * Do the first part of growing a file: zero any data @@ -360,17 +283,10 @@ xfs_setattr( } commit_flags = XFS_TRANS_RELEASE_LOG_RES; xfs_ilock(ip, XFS_ILOCK_EXCL); - } - if (tp) { xfs_trans_ijoin(tp, ip, lock_flags); xfs_trans_ihold(tp, ip); - } - /* - * Truncate file. Must have write permission and not be a directory. - */ - if (mask & ATTR_SIZE) { /* * Only change the c/mtime if we are changing the size * or we are explicitly asked to change it. 
This handles @@ -410,20 +326,9 @@ xfs_setattr( */ xfs_iflags_set(ip, XFS_ITRUNCATED); } - } - - /* - * Change file access modes. - */ - if (mask & ATTR_MODE) { - ip->i_d.di_mode &= S_IFMT; - ip->i_d.di_mode |= iattr->ia_mode & ~S_IFMT; - - inode->i_mode &= S_IFMT; - inode->i_mode |= iattr->ia_mode & ~S_IFMT; - - xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE); - timeflags |= XFS_ICHGTIME_CHG; + } else if (tp) { + xfs_trans_ijoin(tp, ip, lock_flags); + xfs_trans_ihold(tp, ip); } /* @@ -471,6 +376,24 @@ xfs_setattr( timeflags |= XFS_ICHGTIME_CHG; } + /* + * Change file access modes. + */ + if (mask & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + mode &= ~S_ISGID; + + ip->i_d.di_mode &= S_IFMT; + ip->i_d.di_mode |= mode & ~S_IFMT; + + inode->i_mode &= S_IFMT; + inode->i_mode |= mode & ~S_IFMT; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + timeflags |= XFS_ICHGTIME_CHG; + } /* * Change file access or modified times. -- cgit v1.2.3-70-g09d2 From cfbe52672fbc6f333892e8dde82c35e0a76aa5f5 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Fri, 12 Dec 2008 15:27:25 +1100 Subject: [XFS] set b_error from bio error in xfs_buf_bio_end_io Preserve any error returned by the bio layer. Reviewed-by: Eric Sandeen Reviewed-by: Tim Shimmin Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_buf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index cd89c56715a..aae8d0768ec 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1126,8 +1126,7 @@ xfs_buf_bio_end_io( unsigned int blocksize = bp->b_target->bt_bsize; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - bp->b_error = EIO; + xfs_buf_ioerror(bp, -error); do { struct page *page = bvec->bv_page; -- cgit v1.2.3-70-g09d2 From 8187926bdae98648db24db3391c4efd21ec669b1 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 1 Dec 2008 14:18:16 -0800 Subject: posix-timers: simplify de_thread()->exit_itimers() path Impact: simplify code de_thread() postpones release_task(leader) until after exit_itimers(). This was needed because !SIGEV_THREAD_ID timers could use ->group_leader without get_task_struct(). With the recent changes we can release the leader earlier and simplify the code. 
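A condensed view of de_thread() after this change (error paths and most locking elided) shows why the function-scope leader variable and the deferred release are no longer needed:

/* condensed, illustrative sketch of the resulting flow */
if (!thread_group_leader(tsk)) {
        struct task_struct *leader = tsk->group_leader;

        /* ... take over leadership and the PID under tasklist_lock ... */
        leader->exit_state = EXIT_DEAD;
        write_unlock_irq(&tasklist_lock);
        release_task(leader);   /* no longer postponed past exit_itimers() */
}

no_thread_group:
        exit_itimers(sig);
        flush_itimer_signals();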
Signed-off-by: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner --- fs/exec.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index ec5df9a3831..b4e5b8a9216 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -772,7 +772,6 @@ static int de_thread(struct task_struct *tsk) struct signal_struct *sig = tsk->signal; struct sighand_struct *oldsighand = tsk->sighand; spinlock_t *lock = &oldsighand->siglock; - struct task_struct *leader = NULL; int count; if (thread_group_empty(tsk)) @@ -810,7 +809,7 @@ static int de_thread(struct task_struct *tsk) * and to assume its PID: */ if (!thread_group_leader(tsk)) { - leader = tsk->group_leader; + struct task_struct *leader = tsk->group_leader; sig->notify_count = -1; /* for exit_notify() */ for (;;) { @@ -862,8 +861,9 @@ static int de_thread(struct task_struct *tsk) BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; - write_unlock_irq(&tasklist_lock); + + release_task(leader); } sig->group_exit_task = NULL; @@ -872,8 +872,6 @@ static int de_thread(struct task_struct *tsk) no_thread_group: exit_itimers(sig); flush_itimer_signals(); - if (leader) - release_task(leader); if (atomic_read(&oldsighand->count) != 1) { struct sighand_struct *newsighand; -- cgit v1.2.3-70-g09d2 From 6b82b3e4b54b2fce2ca11976c535012b836b2016 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Tue, 9 Dec 2008 09:47:29 +0000 Subject: powerpc: Remove `have_of' global variable The `have_of' variable is a relic from the arch/ppc time, it isn't useful nowadays. Signed-off-by: Anton Vorontsov Acked-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/processor.h | 2 -- arch/powerpc/kernel/pci-common.c | 2 -- arch/powerpc/kernel/pci_32.c | 7 +------ arch/powerpc/kernel/setup_32.c | 2 -- arch/powerpc/kernel/setup_64.c | 1 - fs/proc/proc_devtree.c | 3 +-- 6 files changed, 2 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index cd7a47860e5..d3466490104 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -69,8 +69,6 @@ extern int _prep_type; #ifdef __KERNEL__ -extern int have_of; - struct task_struct; void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); void release_thread(struct task_struct *); diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 91c3f52e33a..1a32db331a5 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c @@ -160,8 +160,6 @@ EXPORT_SYMBOL(pci_domain_nr); */ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) { - if (!have_of) - return NULL; while(node) { struct pci_controller *hose, *tmp; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 7ad11e592f2..132cd80afa2 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -266,9 +266,6 @@ pci_busdev_to_OF_node(struct pci_bus *bus, int devfn) { struct device_node *parent, *np; - if (!have_of) - return NULL; - pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn); parent = scan_OF_for_pci_bus(bus); if (parent == NULL) @@ -309,8 +306,6 @@ pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn) struct pci_controller* hose; struct pci_dev* dev = NULL; - if (!have_of) - return -ENODEV; /* Make sure it's really a PCI 
device */ hose = pci_find_hose_for_OF_device(node); if (!hose || !hose->dn) @@ -431,7 +426,7 @@ static int __init pcibios_init(void) * numbers vs. kernel bus numbers since we may have to * remap them. */ - if (pci_assign_all_buses && have_of) + if (pci_assign_all_buses) pcibios_make_OF_bus_map(); /* Call common code to handle resource allocation */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index c1a27626a94..cc4679ed952 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -53,8 +53,6 @@ unsigned long ISA_DMA_THRESHOLD; unsigned int DMA_MODE_READ; unsigned int DMA_MODE_WRITE; -int have_of = 1; - #ifdef CONFIG_VGA_CONSOLE unsigned long vgacon_remap_base; EXPORT_SYMBOL(vgacon_remap_base); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 93c875ae985..ce48f5c5c54 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -70,7 +70,6 @@ #define DBG(fmt...) #endif -int have_of = 1; int boot_cpuid = 0; u64 ppc64_pft_size; diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index d777789b7a8..de2bba5a344 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -218,8 +218,7 @@ void proc_device_tree_add_node(struct device_node *np, void __init proc_device_tree_init(void) { struct device_node *root; - if ( !have_of ) - return; + proc_device_tree = proc_mkdir("device-tree", NULL); if (proc_device_tree == 0) return; -- cgit v1.2.3-70-g09d2 From 13bd41bc227a48d6cf8992a3286bf6eba3c71a0c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Tue, 16 Dec 2008 00:23:34 -0800 Subject: proc: enclose desc variable of show_stat() in CONFIG_SPARSE_IRQ Impact: restructure code to fix compiler warning commit 240d367b4e6c6e3c5075e034db14dba60a6f5fa7 moved desc usage point into #ifdef CONFIG_SPARSE_IRQ. Eliminate the desc variable, otherwise following warning happens: fs/proc/stat.c: In function 'show_stat': fs/proc/stat.c:31: warning: unused variable 'desc' [ akpm: cleaned up the patch to remove #ifdef ] Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar --- fs/proc/stat.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 3cb9492801c..3bb1cf1e742 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -27,7 +27,6 @@ static int show_stat(struct seq_file *p, void *v) u64 sum = 0; struct timespec boottime; unsigned int per_irq_sum; - struct irq_desc *desc; user = nice = system = idle = iowait = irq = softirq = steal = cputime64_zero; @@ -47,8 +46,7 @@ static int show_stat(struct seq_file *p, void *v) guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); for_each_irq_nr(j) { #ifdef CONFIG_SPARSE_IRQ - desc = irq_to_desc(j); - if (!desc) + if (!irq_to_desc(j)) continue; #endif sum += kstat_irqs_cpu(j, i); @@ -98,8 +96,7 @@ static int show_stat(struct seq_file *p, void *v) for_each_irq_nr(j) { per_irq_sum = 0; #ifdef CONFIG_SPARSE_IRQ - desc = irq_to_desc(j); - if (!desc) { + if (!irq_to_desc(j)) { seq_printf(p, " %u", per_irq_sum); continue; } -- cgit v1.2.3-70-g09d2 From d69e83d99cf87e3328c47bb54684360e32aef17d Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Tue, 16 Dec 2008 10:21:34 -0600 Subject: jfs: ensure symlinks are NUL-terminated This is an alternate fix for a bug reported and fixed by Duane Griffin. 
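The defensive pattern involved is worth spelling out: data read from disk that will later be treated as a C string must be length-clamped and explicitly terminated, since a corrupted on-disk size would otherwise let string routines run past the buffer. A generic, hedged sketch (not the jfs code itself):

/* generic sketch: never trust on-disk lengths for string data */
if (len >= sizeof(buf))
        len = sizeof(buf) - 1;
memcpy(buf, disk_data, len);
buf[len] = '\0';        /* terminate before anyone calls strlen() on it */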
Signed-off-by: Dave Kleikamp Reported-by: Duane Griffin --- fs/jfs/inode.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 210339784b5..b00ee9f05a0 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -59,8 +59,14 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino) if (inode->i_size >= IDATASIZE) { inode->i_op = &page_symlink_inode_operations; inode->i_mapping->a_ops = &jfs_aops; - } else + } else { inode->i_op = &jfs_symlink_inode_operations; + /* + * The inline data should be null-terminated, but + * don't let on-disk corruption crash the kernel + */ + JFS_IP(inode)->i_inline[inode->i_size] = '\0'; + } } else { inode->i_op = &jfs_file_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); -- cgit v1.2.3-70-g09d2 From 83099bc647688d816c2f7fac8e51921bdfe8db73 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Fri, 5 Dec 2008 09:14:10 +0800 Subject: ocfs2: Always update xattr search when creating bucket. When we create xattr bucket during the process of xattr set, we always need to update the ocfs2_xattr_search since even if the bucket size is the same as block size, the offset will change because of the removal of the ocfs2_xattr_block header. Signed-off-by: Tao Ma Signed-off-by: Mark Fasheh --- fs/ocfs2/xattr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 054e2efb0b7..74d7367ade1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -2645,9 +2645,9 @@ static int ocfs2_xattr_update_xattr_search(struct inode *inode, return ret; } - i = xs->here - old_xh->xh_entries; - xs->here = &xs->header->xh_entries[i]; } + i = xs->here - old_xh->xh_entries; + xs->here = &xs->header->xh_entries[i]; } return ret; -- cgit v1.2.3-70-g09d2 From a97721894a6dc0f9ebfe1dbaa4bb112eaf399273 Mon Sep 17 00:00:00 2001 From: Joel Becker Date: Tue, 16 Dec 2008 18:10:18 -0800 Subject: ocfs2: Add JBD2 compat feature bit. Define the OCFS2_FEATURE_COMPAT_JBD2 bit in the filesystem header. Signed-off-by: Joel Becker Signed-off-by: Mark Fasheh --- fs/ocfs2/ocfs2_fs.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 5f180cf7abb..5e0c0d0aef7 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -86,7 +86,8 @@ #define OCFS2_CLEAR_INCOMPAT_FEATURE(sb,mask) \ OCFS2_SB(sb)->s_feature_incompat &= ~(mask) -#define OCFS2_FEATURE_COMPAT_SUPP OCFS2_FEATURE_COMPAT_BACKUP_SB +#define OCFS2_FEATURE_COMPAT_SUPP (OCFS2_FEATURE_COMPAT_BACKUP_SB \ + | OCFS2_FEATURE_COMPAT_JBD2_SB) #define OCFS2_FEATURE_INCOMPAT_SUPP (OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT \ | OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC \ | OCFS2_FEATURE_INCOMPAT_INLINE_DATA \ @@ -152,6 +153,11 @@ */ #define OCFS2_FEATURE_COMPAT_BACKUP_SB 0x0001 +/* + * The filesystem will correctly handle journal feature bits. + */ +#define OCFS2_FEATURE_COMPAT_JBD2_SB 0x0002 + /* * Unwritten extents support. */ -- cgit v1.2.3-70-g09d2 From 331c31351044888916805c9cb32d8bb9e40c12e9 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 17 Dec 2008 06:31:53 -0500 Subject: cifs: fix buffer overrun in parse_DFS_referrals While testing a kernel with memory poisoning enabled, I saw some warnings about the redzone getting clobbered when chasing DFS referrals. The buffer allocation for the unicode converted version of the searchName is too small and needs to take null termination into account. 
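The sizing rule behind the fix: cifsConvertToUCS() emits one __le16 per source character plus a terminating NUL wide character, so the temporary buffer must hold strlen(searchName) * 2 + 2 bytes. A hedged way to express the same allocation (the hunk below spells out the byte count instead):

/* one __le16 per character plus one __le16 terminator */
__le16 *tmp = kmalloc((strlen(searchName) + 1) * sizeof(__le16),
                      GFP_KERNEL);
if (!tmp)
        return -ENOMEM; /* illustrative; the caller's real error path may differ */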
Signed-off-by: Jeff Layton Acked-by: Steve French Signed-off-by: Linus Torvalds --- fs/cifs/cifssmb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 2af8626ced4..6d51696dc76 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -3983,7 +3983,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, node->flags = le16_to_cpu(pSMBr->DFSFlags); if (is_unicode) { - __le16 *tmp = kmalloc(strlen(searchName)*2, GFP_KERNEL); + __le16 *tmp = kmalloc(strlen(searchName)*2 + 2, + GFP_KERNEL); cifsConvertToUCS((__le16 *) tmp, searchName, PATH_MAX, nls_codepage, remap); node->path_consumed = hostlen_fromUCS(tmp, -- cgit v1.2.3-70-g09d2 From 9c2c48020ec0dd6ecd27e5a1298f73b40d85a595 Mon Sep 17 00:00:00 2001 From: Ken Chen Date: Tue, 16 Dec 2008 23:41:22 -0800 Subject: schedstat: consolidate per-task cpu runtime stats Impact: simplify code When we turn on CONFIG_SCHEDSTATS, per-task cpu runtime is accumulated twice. Once in task->se.sum_exec_runtime and once in sched_info.cpu_time. These two stats are exactly the same. Given that task->se.sum_exec_runtime is always accumulated by the core scheduler, sched_info can reuse that data instead of duplicate the accounting. Signed-off-by: Ken Chen Acked-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- fs/proc/base.c | 2 +- include/linux/sched.h | 3 +-- kernel/delayacct.c | 2 +- kernel/sched.c | 2 ++ kernel/sched_stats.h | 5 ++--- 5 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index d4677603c88..4d745bac768 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -347,7 +347,7 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer) static int proc_pid_schedstat(struct task_struct *task, char *buffer) { return sprintf(buffer, "%llu %llu %lu\n", - task->sched_info.cpu_time, + task->se.sum_exec_runtime, task->sched_info.run_delay, task->sched_info.pcount); } diff --git a/include/linux/sched.h b/include/linux/sched.h index 8cccd6dc5d6..2d1e840ddd3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -670,8 +670,7 @@ struct reclaim_state; struct sched_info { /* cumulative counters */ unsigned long pcount; /* # of times run on this cpu */ - unsigned long long cpu_time, /* time spent on the cpu */ - run_delay; /* time spent waiting on a runqueue */ + unsigned long long run_delay; /* time spent waiting on a runqueue */ /* timestamps */ unsigned long long last_arrival,/* when we last ran on a cpu */ diff --git a/kernel/delayacct.c b/kernel/delayacct.c index b3179dad71b..abb6e17505e 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) */ t1 = tsk->sched_info.pcount; t2 = tsk->sched_info.run_delay; - t3 = tsk->sched_info.cpu_time; + t3 = tsk->se.sum_exec_runtime; d->cpu_count += t1; diff --git a/kernel/sched.c b/kernel/sched.c index f53e2b8ef52..fd835fc320b 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -596,6 +596,8 @@ struct rq { #ifdef CONFIG_SCHEDSTATS /* latency stats */ struct sched_info rq_sched_info; + unsigned long long rq_cpu_time; + /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? 
*/ /* sys_sched_yield() stats */ unsigned int yld_exp_empty; diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 7dbf72a2b02..3b01098164c 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v) rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, rq->sched_switch, rq->sched_count, rq->sched_goidle, rq->ttwu_count, rq->ttwu_local, - rq->rq_sched_info.cpu_time, + rq->rq_cpu_time, rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); seq_printf(seq, "\n"); @@ -123,7 +123,7 @@ static inline void rq_sched_info_depart(struct rq *rq, unsigned long long delta) { if (rq) - rq->rq_sched_info.cpu_time += delta; + rq->rq_cpu_time += delta; } static inline void @@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t) unsigned long long delta = task_rq(t)->clock - t->sched_info.last_arrival; - t->sched_info.cpu_time += delta; rq_sched_info_depart(task_rq(t), delta); if (t->state == TASK_RUNNING) -- cgit v1.2.3-70-g09d2 From 12204e24b1330428c3062faee10a0d80b8a5cb61 Mon Sep 17 00:00:00 2001 From: James Morris Date: Fri, 19 Dec 2008 10:44:42 +1100 Subject: security: pass mount flags to security_sb_kern_mount() Pass mount flags to security_sb_kern_mount(), so security modules can determine if a mount operation is being performed by the kernel. Signed-off-by: James Morris Acked-by: Stephen Smalley --- fs/super.c | 2 +- include/linux/security.h | 6 +++--- security/capability.c | 2 +- security/security.c | 4 ++-- security/selinux/hooks.c | 2 +- security/smack/smack_lsm.c | 3 ++- 6 files changed, 10 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 400a7608f15..ddba069d7a9 100644 --- a/fs/super.c +++ b/fs/super.c @@ -914,7 +914,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void goto out_free_secdata; BUG_ON(!mnt->mnt_sb); - error = security_sb_kern_mount(mnt->mnt_sb, secdata); + error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); if (error) goto out_sb; diff --git a/include/linux/security.h b/include/linux/security.h index 6423abf1ac0..3416cb85e77 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1308,7 +1308,7 @@ struct security_operations { int (*sb_alloc_security) (struct super_block *sb); void (*sb_free_security) (struct super_block *sb); int (*sb_copy_data) (char *orig, char *copy); - int (*sb_kern_mount) (struct super_block *sb, void *data); + int (*sb_kern_mount) (struct super_block *sb, int flags, void *data); int (*sb_show_options) (struct seq_file *m, struct super_block *sb); int (*sb_statfs) (struct dentry *dentry); int (*sb_mount) (char *dev_name, struct path *path, @@ -1575,7 +1575,7 @@ int security_bprm_secureexec(struct linux_binprm *bprm); int security_sb_alloc(struct super_block *sb); void security_sb_free(struct super_block *sb); int security_sb_copy_data(char *orig, char *copy); -int security_sb_kern_mount(struct super_block *sb, void *data); +int security_sb_kern_mount(struct super_block *sb, int flags, void *data); int security_sb_show_options(struct seq_file *m, struct super_block *sb); int security_sb_statfs(struct dentry *dentry); int security_sb_mount(char *dev_name, struct path *path, @@ -1850,7 +1850,7 @@ static inline int security_sb_copy_data(char *orig, char *copy) return 0; } -static inline int security_sb_kern_mount(struct super_block *sb, void *data) +static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { return 0; } diff 
--git a/security/capability.c b/security/capability.c index b9e391425e6..2dce66fcb99 100644 --- a/security/capability.c +++ b/security/capability.c @@ -59,7 +59,7 @@ static int cap_sb_copy_data(char *orig, char *copy) return 0; } -static int cap_sb_kern_mount(struct super_block *sb, void *data) +static int cap_sb_kern_mount(struct super_block *sb, int flags, void *data) { return 0; } diff --git a/security/security.c b/security/security.c index f0d96a6cc4e..d85dbb37c97 100644 --- a/security/security.c +++ b/security/security.c @@ -254,9 +254,9 @@ int security_sb_copy_data(char *orig, char *copy) } EXPORT_SYMBOL(security_sb_copy_data); -int security_sb_kern_mount(struct super_block *sb, void *data) +int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { - return security_ops->sb_kern_mount(sb, data); + return security_ops->sb_kern_mount(sb, flags, data); } int security_sb_show_options(struct seq_file *m, struct super_block *sb) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 8dbc54cde59..7465d713b53 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2474,7 +2474,7 @@ out: return rc; } -static int selinux_sb_kern_mount(struct super_block *sb, void *data) +static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data) { const struct cred *cred = current_cred(); struct avc_audit_data ad; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 8ad48161cef..1b5551dfc1f 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -250,11 +250,12 @@ static int smack_sb_copy_data(char *orig, char *smackopts) /** * smack_sb_kern_mount - Smack specific mount processing * @sb: the file system superblock + * @flags: the mount flags * @data: the smack mount options * * Returns 0 on success, an error code on failure */ -static int smack_sb_kern_mount(struct super_block *sb, void *data) +static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) { struct dentry *root = sb->s_root; struct inode *inode = root->d_inode; -- cgit v1.2.3-70-g09d2 From 6ff232070a1088807e24f657b5d6f299f1ec32f6 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 16:45:21 -0600 Subject: 9p: Remove potentially bad parameter from function entry debug print. Signed-off-by: Duane Griffin Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 8314d3f43b7..7c735283809 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1022,7 +1022,8 @@ v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); - P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, s); + P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, + IS_ERR(s) ? "" : s); if (!IS_ERR(s)) __putname(s); } -- cgit v1.2.3-70-g09d2 From 7dd0cdc51c126915e243ee3291f7b6b5ea5e5833 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Fri, 19 Dec 2008 16:47:40 -0600 Subject: 9p: convert d_iname references to d_name.name d_iname is rubbish for long file names. Use d_name.name in printks instead. 
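The distinction being fixed: d_name.name always points at the dentry's current name, which for long names lives in an external allocation, while d_iname is merely the small inline backing array used when the name happens to fit. A hedged sketch of the safe form:

/* illustrative only: always print via the qstr, never the inline array */
static void show_dentry(struct dentry *dentry)
{
        /* d_name.name may point at d_iname or at a separately allocated name */
        pr_debug("dentry: %s (%p)\n", dentry->d_name.name, dentry);
}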
Signed-off-by: Wu Fengguang Acked-by: Eric Van Hensbergen --- fs/9p/fid.c | 4 ++-- fs/9p/vfs_dentry.c | 9 ++++++--- fs/9p/vfs_inode.c | 3 ++- 3 files changed, 10 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 3031e3233dd..2a983d49d19 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -45,7 +45,7 @@ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) struct v9fs_dentry *dent; P9_DPRINTK(P9_DEBUG_VFS, "fid %d dentry %s\n", - fid->fid, dentry->d_iname); + fid->fid, dentry->d_name.name); dent = dentry->d_fsdata; if (!dent) { @@ -79,7 +79,7 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any) struct p9_fid *fid, *ret; P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n", - dentry->d_iname, dentry, uid, any); + dentry->d_name.name, dentry, uid, any); dent = (struct v9fs_dentry *) dentry->d_fsdata; ret = NULL; if (dent) { diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index f9534f18df0..06dcc7c4f23 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -52,7 +52,8 @@ static int v9fs_dentry_delete(struct dentry *dentry) { - P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); + P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, + dentry); return 1; } @@ -69,7 +70,8 @@ static int v9fs_dentry_delete(struct dentry *dentry) static int v9fs_cached_dentry_delete(struct dentry *dentry) { struct inode *inode = dentry->d_inode; - P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); + P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, + dentry); if(!inode) return 1; @@ -88,7 +90,8 @@ void v9fs_dentry_release(struct dentry *dentry) struct v9fs_dentry *dent; struct p9_fid *temp, *current_fid; - P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); + P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, + dentry); dent = dentry->d_fsdata; if (dent) { list_for_each_entry_safe(current_fid, temp, &dent->fidlist, diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 7c735283809..2dfcf5487ef 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -963,7 +963,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, if (buflen > PATH_MAX) buflen = PATH_MAX; - P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); + P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name, + dentry); retval = v9fs_readlink(dentry, link, buflen); -- cgit v1.2.3-70-g09d2 From f1d9e4586e79e1fc7b26d1c567599a904013f199 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 19 Dec 2008 16:50:22 -0600 Subject: fs/9p: change simple_strtol to simple_strtoul Since v9ses->uid is unsigned, it would seem better to use simple_strtoul that simple_strtol. A simplified version of the semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) // @r2@ long e; position p; @@ e = simple_strtol@p(...) @@ position p != r2.p; type T; T e; @@ e = - simple_strtol@p + simple_strtoul (...) 
// Signed-off-by: Julia Lawall Acked-by: Eric Van Hensbergen --- fs/9p/v9fs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 24eb01087b6..332b5ff02fe 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -160,7 +160,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses) v9ses->flags |= V9FS_ACCESS_ANY; else { v9ses->flags |= V9FS_ACCESS_SINGLE; - v9ses->uid = simple_strtol(s, &e, 10); + v9ses->uid = simple_strtoul(s, &e, 10); if (*e != '\0') v9ses->uid = ~0; } -- cgit v1.2.3-70-g09d2 From 826e08b0157c0ce8a80dfe3c0a6c5a1540dd0b1d Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 22 Dec 2008 07:37:41 +0100 Subject: sched: fix warning in fs/proc/base.c Stephen Rothwell reported this new (harmless) build warning on platforms that define u64 to long: fs/proc/base.c: In function 'proc_pid_schedstat': fs/proc/base.c:352: warning: format '%llu' expects type 'long long unsigned int', but argument 3 has type 'u64' asm-generic/int-l64.h platforms strike again: that file should be eliminated. Fix it by casting the parameters to long long. Reported-by: Stephen Rothwell Signed-off-by: Ingo Molnar --- fs/proc/base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 4d745bac768..a48200c7112 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -347,8 +347,8 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer) static int proc_pid_schedstat(struct task_struct *task, char *buffer) { return sprintf(buffer, "%llu %llu %lu\n", - task->se.sum_exec_runtime, - task->sched_info.run_delay, + (unsigned long long)task->se.sum_exec_runtime, + (unsigned long long)task->sched_info.run_delay, task->sched_info.pcount); } #endif -- cgit v1.2.3-70-g09d2 From d415867e0abc35e3b2f0d4196e98c339d6fe29a2 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Mon, 22 Dec 2008 17:50:56 +1100 Subject: [XFS] Use the incore inode size in xfs_file_readdir() We should be using the incore inode size here not the linux inode size. The incore inode size is always up to date for directories whereas the linux inode size is not updated for directories. We've hit assertions in xfs_bmap() and traced it back to the linux inode size being zero but the incore size being correct. Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index a0c45cc8a6b..e14c4e3aea0 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -204,7 +204,7 @@ xfs_file_readdir( * point we can change the ->readdir prototype to include the * buffer size. */ - bufsize = (size_t)min_t(loff_t, PAGE_SIZE, inode->i_size); + bufsize = (size_t)min_t(loff_t, PAGE_SIZE, ip->i_d.di_size); error = xfs_readdir(ip, dirent, bufsize, (xfs_off_t *)&filp->f_pos, filldir); -- cgit v1.2.3-70-g09d2 From 4fdc7781799926dca6c3a3bb6e9533a9718c4dea Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Mon, 22 Dec 2008 17:52:58 +1100 Subject: [XFS] Remove XFS_BUF_SHUT() and friends Code does nothing so remove it. 
Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_buf.h | 4 ---- fs/xfs/xfs_buf_item.c | 16 +--------------- fs/xfs/xfs_inode.c | 1 - 3 files changed, 1 insertion(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index af8764e02d7..288ae7c4c80 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -312,10 +312,6 @@ extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *); #define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED) #define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED) -#define XFS_BUF_SHUT(bp) do { } while (0) -#define XFS_BUF_UNSHUT(bp) do { } while (0) -#define XFS_BUF_ISSHUT(bp) (0) - #define XFS_BUF_HOLD(bp) xfs_buf_hold(bp) #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index d74ce113426..92af4098c7e 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -998,21 +998,7 @@ xfs_buf_iodone_callbacks( xfs_buf_do_callbacks(bp, lip); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); - - /* - * XFS_SHUT flag gets set when we go thru the - * entire buffer cache and deliberately start - * throwing away delayed write buffers. - * Since there's no biowait done on those, - * we should just brelse them. - */ - if (XFS_BUF_ISSHUT(bp)) { - XFS_BUF_UNSHUT(bp); - xfs_buf_relse(bp); - } else { - xfs_biodone(bp); - } - + xfs_biodone(bp); return; } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 3adb868df18..5a5e035e5d3 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2804,7 +2804,6 @@ cluster_corrupt_out: XFS_BUF_CLR_BDSTRAT_FUNC(bp); XFS_BUF_UNDONE(bp); XFS_BUF_STALE(bp); - XFS_BUF_SHUT(bp); XFS_BUF_ERROR(bp,EIO); xfs_biodone(bp); } else { -- cgit v1.2.3-70-g09d2 From 9f6c92b9cc2fd41d6c7b493be5637cc5b5659880 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Mon, 22 Dec 2008 17:56:49 +1100 Subject: [XFS] Fix speculative allocation beyond eof Speculative allocation beyond eof doesn't work properly. It was broken some time ago after a code cleanup that moved what is now xfs_iomap_eof_align_last_fsb() and xfs_iomap_eof_want_preallocate() out of xfs_iomap_write_delay() into separate functions. The code used to use the current file size in various checks but got changed to be max(file_size, i_new_size). Since i_new_size is the result of 'offset + count' then in xfs_iomap_eof_want_preallocate() the check for '(offset + count) <= isize' will always be true. ie if 'offset + count' is > ip->i_size then isize will be i_new_size and equal to 'offset + count'. This change fixes all the places that used to use the current file size. Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_iomap.c | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 67f22b2b44b..911062cf73a 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -290,7 +290,6 @@ STATIC int xfs_iomap_eof_align_last_fsb( xfs_mount_t *mp, xfs_inode_t *ip, - xfs_fsize_t isize, xfs_extlen_t extsize, xfs_fileoff_t *last_fsb) { @@ -306,14 +305,14 @@ xfs_iomap_eof_align_last_fsb( * stripe width and we are allocating past the allocation eof. 
*/ else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) && - (isize >= XFS_FSB_TO_B(mp, mp->m_swidth))) + (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth))) new_last_fsb = roundup_64(*last_fsb, mp->m_swidth); /* * Roundup the allocation request to a stripe unit (m_dalign) boundary * if the file size is >= stripe unit size, and we are allocating past * the allocation eof. */ - else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign))) + else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign))) new_last_fsb = roundup_64(*last_fsb, mp->m_dalign); /* @@ -403,7 +402,6 @@ xfs_iomap_write_direct( xfs_filblks_t count_fsb, resaligned; xfs_fsblock_t firstfsb; xfs_extlen_t extsz, temp; - xfs_fsize_t isize; int nimaps; int bmapi_flag; int quota_flag; @@ -426,15 +424,10 @@ xfs_iomap_write_direct( rt = XFS_IS_REALTIME_INODE(ip); extsz = xfs_get_extsz_hint(ip); - isize = ip->i_size; - if (ip->i_new_size > isize) - isize = ip->i_new_size; - offset_fsb = XFS_B_TO_FSBT(mp, offset); last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); - if ((offset + count) > isize) { - error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, - &last_fsb); + if ((offset + count) > ip->i_size) { + error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); if (error) goto error_out; } else { @@ -559,7 +552,6 @@ STATIC int xfs_iomap_eof_want_preallocate( xfs_mount_t *mp, xfs_inode_t *ip, - xfs_fsize_t isize, xfs_off_t offset, size_t count, int ioflag, @@ -573,7 +565,7 @@ xfs_iomap_eof_want_preallocate( int n, error, imaps; *prealloc = 0; - if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize) + if ((ioflag & BMAPI_SYNC) || (offset + count) <= ip->i_size) return 0; /* @@ -617,7 +609,6 @@ xfs_iomap_write_delay( xfs_fileoff_t ioalign; xfs_fsblock_t firstblock; xfs_extlen_t extsz; - xfs_fsize_t isize; int nimaps; xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; int prealloc, fsynced = 0; @@ -637,11 +628,7 @@ xfs_iomap_write_delay( offset_fsb = XFS_B_TO_FSBT(mp, offset); retry: - isize = ip->i_size; - if (ip->i_new_size > isize) - isize = ip->i_new_size; - - error = xfs_iomap_eof_want_preallocate(mp, ip, isize, offset, count, + error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, ioflag, imap, XFS_WRITE_IMAPS, &prealloc); if (error) return error; @@ -655,8 +642,7 @@ retry: } if (prealloc || extsz) { - error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, - &last_fsb); + error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); if (error) return error; } -- cgit v1.2.3-70-g09d2 From efc557570dc99b46e46a7be51c3c7402b485e829 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 17 Dec 2008 12:27:36 -0500 Subject: [XFS] avoid memory allocations in xfs_fs_vcmn_err xfs_fs_vcmn_err can be called under a spinlock, but does a sleeping memory allocation to create buffer for it's internal sprintf. Fortunately it's the only caller of icmn_err, so we can merge the two and have one single static buffer and spinlock protecting it. While we're at it make sure we proper __attribute__ format annotations so that the compiler can detect mismatched format strings. 
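For reference, a generic userspace sketch (not the XFS code) of the __attribute__((format)) annotation added here: the format string is argument 2 and the variadic arguments start at argument 3; a va_list variant such as xfs_fs_vcmn_err() instead uses 0 for the last index so only the format string itself is checked.

#include <stdarg.h>
#include <stdio.h>

static void log_msg(int level, const char *fmt, ...)
	__attribute__ ((format (printf, 2, 3)));

static void log_msg(int level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	fprintf(stderr, "<%d> ", level);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* a call like log_msg(1, "%s", 42) now draws a -Wformat warning instead of printing garbage */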
Reported-by: Alexander Beregalov Signed-off-by: Christoph Hellwig Reviewed-by: Eric Sandeen Signed-off-by: Lachlan McIlroy --- fs/xfs/support/debug.c | 37 ++++++++++++++++++++++++++++++++----- fs/xfs/support/debug.h | 2 -- fs/xfs/xfs_error.c | 15 --------------- fs/xfs/xfs_error.h | 12 ++++++++---- 4 files changed, 40 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index 636104254cf..ae548296542 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -18,6 +18,13 @@ #include #include "debug.h" +/* xfs_mount.h drags a lot of crap in, sorry.. */ +#include "xfs_sb.h" +#include "xfs_inum.h" +#include "xfs_ag.h" +#include "xfs_dmapi.h" +#include "xfs_mount.h" + static char message[1024]; /* keep it off the stack */ static DEFINE_SPINLOCK(xfs_err_lock); @@ -55,22 +62,42 @@ cmn_err(register int level, char *fmt, ...) } void -icmn_err(register int level, char *fmt, va_list ap) +xfs_fs_vcmn_err( + int level, + struct xfs_mount *mp, + char *fmt, + va_list ap) { - ulong flags; - int len; + unsigned long flags; + int len = 0; level &= XFS_ERR_MASK; - if(level > XFS_MAX_ERR_LEVEL) + if (level > XFS_MAX_ERR_LEVEL) level = XFS_MAX_ERR_LEVEL; + spin_lock_irqsave(&xfs_err_lock,flags); - len = vsnprintf(message, sizeof(message), fmt, ap); + + if (mp) { + len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname); + + /* + * Skip the printk if we can't print anything useful + * due to an over-long device name. + */ + if (len >= sizeof(message)) + goto out; + } + + len = vsnprintf(message + len, sizeof(message) - len, fmt, ap); if (len >= sizeof(message)) len = sizeof(message) - 1; if (message[len-1] == '\n') message[len-1] = 0; + printk("%s%s\n", err_level[level], message); + out: spin_unlock_irqrestore(&xfs_err_lock,flags); + BUG_ON(level == CE_PANIC); } diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h index 75845f95081..6f4fd37c67a 100644 --- a/fs/xfs/support/debug.h +++ b/fs/xfs/support/debug.h @@ -27,8 +27,6 @@ #define CE_ALERT 1 /* alert */ #define CE_PANIC 0 /* panic */ -extern void icmn_err(int, char *, va_list) - __attribute__ ((format (printf, 2, 0))); extern void cmn_err(int, char *, ...) __attribute__ ((format (printf, 2, 3))); extern void assfail(char *expr, char *f, int l); diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index f227ecd1a29..92d5cd5bf4f 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -153,21 +153,6 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud) } #endif /* DEBUG */ -static void -xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap) -{ - if (mp != NULL) { - char *newfmt; - int len = 16 + mp->m_fsname_len + strlen(fmt); - - newfmt = kmem_alloc(len, KM_SLEEP); - sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt); - icmn_err(level, newfmt, ap); - kmem_free(newfmt); - } else { - icmn_err(level, fmt, ap); - } -} void xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...) 
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index 11543f10b0c..0c93051c465 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h @@ -159,11 +159,15 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); #define XFS_PTAG_FSBLOCK_ZERO 0x00000080 struct xfs_mount; -/* PRINTFLIKE4 */ + +extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp, + char *fmt, va_list ap) + __attribute__ ((format (printf, 3, 0))); extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, - char *fmt, ...); -/* PRINTFLIKE3 */ -extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...); + char *fmt, ...) + __attribute__ ((format (printf, 4, 5))); +extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...) + __attribute__ ((format (printf, 3, 4))); extern void xfs_hex_dump(void *p, int length); -- cgit v1.2.3-70-g09d2 From ad1ad968f4e7b06c75741575ea077e25a87da49a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 22 Dec 2008 09:59:06 -0500 Subject: [XFS] handle unaligned data in xfs_bmbt_disk_get_all In libxfs xfs_bmbt_disk_get_all needs to handle unaligned data and thus has been updated to use get_unaligned_be64. In kernelspace we don't strictly need it as the routine is only used for tracing and xfsidbg, but let's keep the two implementations in sync. Signed-off-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/xfs_bmap_btree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index e46e02b8e27..8f1ec73725d 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -208,7 +208,8 @@ xfs_bmbt_disk_get_all( xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s) { - __xfs_bmbt_get_all(be64_to_cpu(r->l0), be64_to_cpu(r->l1), s); + __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), + get_unaligned_be64(&r->l1), s); } /* -- cgit v1.2.3-70-g09d2 From 7bbe5b5aa6d1e38af6f1fc866efc0aa461d73f19 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Dec 2008 11:02:51 -0500 Subject: UBIFS: use PAGE_CACHE_MASK correctly It has high bits set, not low bits set as the UBIFS code assumed. Signed-off-by: Artem Bityutskiy --- fs/ubifs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 2624411d975..7f1de98e609 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -254,7 +254,7 @@ static int write_begin_slow(struct address_space *mapping, } if (!PageUptodate(page)) { - if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) SetPageChecked(page); else { err = do_readpage(page); @@ -444,7 +444,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* The page is not loaded from the flash */ - if (!(pos & PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) /* * We change whole page so no need to load it. But we * have to set the @PG_checked flag to make the further -- cgit v1.2.3-70-g09d2 From 24fa9e9438b263600737c839b36543981d87d65b Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 17 Dec 2008 17:45:14 +0200 Subject: UBIFS: fix tnc dumping debugfs tnc dumping was broken because of an obvious typo. 
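A worked illustration of the PAGE_CACHE_MASK fix above, assuming 4 KiB pages and 32-bit values for brevity: the mask selects the page-aligned part of an offset, so the offset within a page is obtained by masking with its complement.

/*
 *   PAGE_CACHE_SIZE        == 0x00001000
 *   PAGE_CACHE_MASK        == ~(PAGE_CACHE_SIZE - 1) == 0xfffff000   (high bits set)
 *
 *   pos & PAGE_CACHE_MASK  -> page-aligned part of pos (non-zero for any pos past page 0)
 *   pos & ~PAGE_CACHE_MASK -> offset of pos within its page
 *
 * so "pos starts on a page boundary" must be tested as !(pos & ~PAGE_CACHE_MASK),
 * which is what the two checks are changed to.
 */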
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 934db1855f0..367d97520d9 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2443,7 +2443,7 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf, spin_lock(&c->space_lock); dbg_dump_budg(c); spin_unlock(&c->space_lock); - } else if (file->f_path.dentry == d->dump_budg) { + } else if (file->f_path.dentry == d->dump_tnc) { mutex_lock(&c->tnc_mutex); dbg_dump_tnc(c); mutex_unlock(&c->tnc_mutex); -- cgit v1.2.3-70-g09d2 From 21a60258976227daaf7a4c35e96c3d77d4988b15 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 12 Dec 2008 11:13:17 -0500 Subject: UBIFS: improve budgeting dump Dump available space calculated by budgeting subsystem. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 4 ++-- fs/ubifs/debug.c | 13 +++++++++++++ fs/ubifs/ubifs.h | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 1a4973e1066..d5a65037e17 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -713,8 +713,8 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * (e.g., via the 'statfs()' call) reports that it has N bytes available, they * are able to write a file of size N. UBIFS attaches node headers to each data * node and it has to write indexind nodes as well. This introduces additional - * overhead, and UBIFS it has to report sligtly less free space to meet the - * above expectetion. + * overhead, and UBIFS has to report sligtly less free space to meet the above + * expectetions. * * This function assumes free space is made up of uncompressed data nodes and * full index nodes (one per data node, tripled because we always allow enough diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 367d97520d9..6ecb01a99d1 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -597,7 +597,9 @@ void dbg_dump_budg(struct ubifs_info *c) struct rb_node *rb; struct ubifs_bud *bud; struct ubifs_gced_idx_leb *idx_gc; + long long available, outstanding, free; + ubifs_assert(spin_is_locked(&c->space_lock)); spin_lock(&dbg_lock); printk(KERN_DEBUG "(pid %d) Budgeting info: budg_data_growth %lld, " "budg_dd_growth %lld, budg_idx_growth %lld\n", current->pid, @@ -630,6 +632,17 @@ void dbg_dump_budg(struct ubifs_info *c) printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n", idx_gc->lnum, idx_gc->unmap); printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state); + + /* Print budgeting predictions */ + available = ubifs_calc_available(c, c->min_idx_lebs); + outstanding = c->budg_data_growth + c->budg_dd_growth; + if (available > outstanding) + free = ubifs_reported_space(c, available - outstanding); + else + free = 0; + printk(KERN_DEBUG "Budgeting predictions:\n"); + printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n", + available, outstanding, free); spin_unlock(&dbg_lock); } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 055c6b52d2f..e61c08106b4 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -419,7 +419,7 @@ struct ubifs_unclean_leb { * * LPROPS_UNCAT: not categorized * LPROPS_DIRTY: dirty > 0, not index - * LPROPS_DIRTY_IDX: dirty + free > UBIFS_CH_SZ and index + * LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sze and index * LPROPS_FREE: free > 0, not empty, not index * LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs * LPROPS_EMPTY: LEB is empty, not taken -- cgit 
v1.2.3-70-g09d2 From d3cf502b6ccee1c52890d42cd18cbc98b7526126 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 16 Dec 2008 17:52:35 +0200 Subject: UBIFS: various comment improvements and fixes Signed-off-by: Artem Bityutskiy --- fs/ubifs/lprops.c | 12 ++++++------ fs/ubifs/ubifs.h | 34 ++++++++++++++++++---------------- 2 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 10ba663eb32..dfd2bcece27 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -520,13 +520,13 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) * @flags: new flags * @idx_gc_cnt: change to the count of idx_gc list * - * This function changes LEB properties. This function does not change a LEB - * property (@free, @dirty or @flag) if the value passed is %LPROPS_NC. + * This function changes LEB properties (@free, @dirty or @flag). However, the + * property which has the %LPROPS_NC value is not changed. Returns a pointer to + * the updated LEB properties on success and a negative error code on failure. * - * This function returns a pointer to the updated LEB properties on success - * and a negative error code on failure. N.B. the LEB properties may have had to - * be copied (due to COW) and consequently the pointer returned may not be the - * same as the pointer passed. + * Note, the LEB properties may have had to be copied (due to COW) and + * consequently the pointer returned may not be the same as the pointer + * passed. */ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c, const struct ubifs_lprops *lp, diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index e61c08106b4..f8ef7c1def1 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -482,24 +482,26 @@ struct ubifs_lpt_lprops { * @empty_lebs: number of empty LEBs * @taken_empty_lebs: number of taken LEBs * @idx_lebs: number of indexing LEBs - * @total_free: total free space in bytes - * @total_dirty: total dirty space in bytes - * @total_used: total used space in bytes (includes only data LEBs) - * @total_dead: total dead space in bytes (includes only data LEBs) - * @total_dark: total dark space in bytes (includes only data LEBs) + * @total_free: total free space in bytes (includes all LEBs) + * @total_dirty: total dirty space in bytes (includes all LEBs) + * @total_used: total used space in bytes (does not include index LEBs) + * @total_dead: total dead space in bytes (does not include index LEBs) + * @total_dark: total dark space in bytes (does not include index LEBs) * - * N.B. total_dirty and total_used are different to other total_* fields, - * because they account _all_ LEBs, not just data LEBs. + * The @taken_empty_lebs field counts the LEBs that are in the transient state + * of having been "taken" for use but not yet written to. @taken_empty_lebs is + * needed to account correctly for @gc_lnum, otherwise @empty_lebs could be + * used by itself (in which case 'unused_lebs' would be a better name). In the + * case of @gc_lnum, it is "taken" at mount time or whenever a LEB is retained + * by GC, but unlike other empty LEBs that are "taken", it may not be written + * straight away (i.e. before the next commit start or unmount), so either + * @gc_lnum must be specially accounted for, or the current approach followed + * i.e. count it under @taken_empty_lebs. * - * 'taken_empty_lebs' counts the LEBs that are in the transient state of having - * been 'taken' for use but not yet written to. 
'taken_empty_lebs' is needed - * to account correctly for gc_lnum, otherwise 'empty_lebs' could be used - * by itself (in which case 'unused_lebs' would be a better name). In the case - * of gc_lnum, it is 'taken' at mount time or whenever a LEB is retained by GC, - * but unlike other empty LEBs that are 'taken', it may not be written straight - * away (i.e. before the next commit start or unmount), so either gc_lnum must - * be specially accounted for, or the current approach followed i.e. count it - * under 'taken_empty_lebs'. + * @empty_lebs includes @taken_empty_lebs. + * + * @total_used, @total_dead and @total_dark fields do not account indexing + * LEBs. */ struct ubifs_lp_stats { int empty_lebs; -- cgit v1.2.3-70-g09d2 From af14a1ad792621942a03e4bd0e5f17b6e177e2e0 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 19 Dec 2008 19:26:29 +0200 Subject: UBIFS: fix available blocks count Take into account that 2 eraseblocks are never available because they are reserved for the index. This gives more realistic count of FS blocks. To avoid future confusions like this, introduce a constant. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 9 ++------- fs/ubifs/super.c | 5 +++-- fs/ubifs/ubifs.h | 8 ++++++++ 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index d5a65037e17..e4234254700 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -280,13 +280,8 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) * extra LEB to compensate. */ ret += 1; - /* - * At present the index needs at least 2 LEBs: one for the index head - * and one for in-the-gaps method (which currently does not cater for - * the index head and so excludes it from consideration). - */ - if (ret < 2) - ret = 2; + if (ret < MIN_INDEX_LEBS) + ret = MIN_INDEX_LEBS; return ret; } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2dbaa4fc2cb..a6a7798d020 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -695,9 +695,10 @@ static int init_constants_late(struct ubifs_info *c) * necessary to report something for the 'statfs()' call. * * Subtract the LEB reserved for GC, the LEB which is reserved for - * deletions, and assume only one journal head is available. + * deletions, minimum LEBs for the index, and assume only one journal + * head is available. */ - tmp64 = c->main_lebs - 2 - c->jhead_cnt + 1; + tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f8ef7c1def1..543e850022e 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -63,6 +63,14 @@ #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL #define SQNUM_WATERMARK 0xFFFFFFFFFF000000ULL +/* + * Minimum amount of LEBs reserved for the index. At present the index needs at + * least 2 LEBs: one for the index head and one for in-the-gaps method (which + * currently does not cater for the index head and so excludes it from + * consideration). + */ +#define MIN_INDEX_LEBS 2 + /* Minimum amount of data UBIFS writes to the flash */ #define MIN_WRITE_SZ (UBIFS_DATA_NODE_SZ + 8) -- cgit v1.2.3-70-g09d2 From 4d61db4f87b527734ac0cc830dda8fcc4e2add2f Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 18 Dec 2008 14:06:51 +0200 Subject: UBIFS: use nicer 64-bit math Instead of using do_div(), use better primitives from linux/math64.h. 
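A rough sketch of the difference between the two idioms (made-up helper and parameter names, not the UBIFS code): do_div() divides its first argument in place and returns the remainder, while div_u64() simply returns the quotient, which reads more naturally and avoids the temporary juggling removed below.

#include <linux/math64.h>
#include <asm/div64.h>

static int lebs_needed_old(long long bytes, int leb_size)
{
	u64 sz = bytes + leb_size - 1;

	do_div(sz, leb_size);		/* sz now holds the quotient */
	return sz;
}

static int lebs_needed_new(long long bytes, int leb_size)
{
	return div_u64(bytes + leb_size - 1, leb_size);
}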
Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 25 +++++++++++-------------- fs/ubifs/debug.c | 1 + fs/ubifs/lpt.c | 15 ++++++--------- fs/ubifs/sb.c | 10 +++++----- fs/ubifs/super.c | 12 ++++++------ fs/ubifs/ubifs.h | 2 +- 6 files changed, 30 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index e4234254700..0bcb8031ca1 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -32,7 +32,7 @@ #include "ubifs.h" #include -#include +#include /* * When pessimistic budget calculations say that there is no enough space, @@ -258,8 +258,8 @@ static int make_free_space(struct ubifs_info *c, struct retries_info *ri) */ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) { - int ret; - uint64_t idx_size; + int idx_lebs, eff_leb_size = c->leb_size - c->max_idx_node_sz; + long long idx_size; idx_size = c->old_idx_sz + c->budg_idx_growth + c->budg_uncommitted_idx; @@ -271,18 +271,16 @@ int ubifs_calc_min_idx_lebs(struct ubifs_info *c) * pair, nor similarly the two variables for the new index size, so we * have to do this costly 64-bit division on fast-path. */ - if (do_div(idx_size, c->leb_size - c->max_idx_node_sz)) - ret = idx_size + 1; - else - ret = idx_size; + idx_size += eff_leb_size - 1; + idx_lebs = div_u64(idx_size, eff_leb_size); /* * The index head is not available for the in-the-gaps method, so add an * extra LEB to compensate. */ - ret += 1; - if (ret < MIN_INDEX_LEBS) - ret = MIN_INDEX_LEBS; - return ret; + idx_lebs += 1; + if (idx_lebs < MIN_INDEX_LEBS) + idx_lebs = MIN_INDEX_LEBS; + return idx_lebs; } /** @@ -718,7 +716,7 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * Note, the calculation is pessimistic, which means that most of the time * UBIFS reports less space than it actually has. */ -long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) +long long ubifs_reported_space(const struct ubifs_info *c, long long free) { int divisor, factor, f; @@ -740,8 +738,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free) divisor = UBIFS_MAX_DATA_NODE_SZ; divisor += (c->max_idx_node_sz * 3) / (f - 1); free *= factor; - do_div(free, divisor); - return free; + return div_u64(free, divisor); } /** diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 6ecb01a99d1..a2be11584ad 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -33,6 +33,7 @@ #include #include #include +#include #ifdef CONFIG_UBIFS_FS_DEBUG diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 6d914160ec5..b2792e84d24 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -43,8 +43,9 @@ * mounted. */ -#include #include "ubifs.h" +#include +#include /** * do_calc_lpt_geom - calculate sizes for the LPT area. 
@@ -135,15 +136,13 @@ static void do_calc_lpt_geom(struct ubifs_info *c) int ubifs_calc_lpt_geom(struct ubifs_info *c) { int lebs_needed; - uint64_t sz; + long long sz; do_calc_lpt_geom(c); /* Verify that lpt_lebs is big enough */ sz = c->lpt_sz * 2; /* Must have at least 2 times the size */ - sz += c->leb_size - 1; - do_div(sz, c->leb_size); - lebs_needed = sz; + lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { ubifs_err("too few LPT LEBs"); return -EINVAL; @@ -175,7 +174,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, int *big_lpt) { int i, lebs_needed; - uint64_t sz; + long long sz; /* Start by assuming the minimum number of LPT LEBs */ c->lpt_lebs = UBIFS_MIN_LPT_LEBS; @@ -202,9 +201,7 @@ static int calc_dflt_lpt_geom(struct ubifs_info *c, int *main_lebs, /* Now check there are enough LPT LEBs */ for (i = 0; i < 64 ; i++) { sz = c->lpt_sz * 4; /* Allow 4 times the size */ - sz += c->leb_size - 1; - do_div(sz, c->leb_size); - lebs_needed = sz; + lebs_needed = div_u64(sz + c->leb_size - 1, c->leb_size); if (lebs_needed > c->lpt_lebs) { /* Not enough LPT LEBs so try again with more */ c->lpt_lebs = lebs_needed; diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index c5da201ab54..e070c643d1b 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -28,6 +28,7 @@ #include "ubifs.h" #include +#include /* * Default journal size in logical eraseblocks as a percent of total @@ -80,7 +81,7 @@ static int create_default_filesystem(struct ubifs_info *c) int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first; int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; int min_leb_cnt = UBIFS_MIN_LEB_CNT; - uint64_t tmp64, main_bytes; + long long tmp64, main_bytes; __le64 tmp_le64; /* Some functions called from here depend on the @c->key_len filed */ @@ -160,7 +161,7 @@ static int create_default_filesystem(struct ubifs_info *c) if (!sup) return -ENOMEM; - tmp64 = (uint64_t)max_buds * c->leb_size; + tmp64 = (long long)max_buds * c->leb_size; if (big_lpt) sup_flags |= UBIFS_FLG_BIGLPT; @@ -187,9 +188,8 @@ static int create_default_filesystem(struct ubifs_info *c) generate_random_uuid(sup->uuid); - main_bytes = (uint64_t)main_lebs * c->leb_size; - tmp64 = main_bytes * DEFAULT_RP_PERCENT; - do_div(tmp64, 100); + main_bytes = (long long)main_lebs * c->leb_size; + tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100); if (tmp64 > DEFAULT_MAX_RP_SIZE) tmp64 = DEFAULT_MAX_RP_SIZE; sup->rp_size = cpu_to_le64(tmp64); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a6a7798d020..c3cefc84137 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "ubifs.h" /* @@ -612,7 +613,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) static int init_constants_late(struct ubifs_info *c) { int tmp, err; - uint64_t tmp64; + long long tmp64; c->main_bytes = (long long)c->main_lebs * c->leb_size; c->max_znode_sz = sizeof(struct ubifs_znode) + @@ -639,9 +640,8 @@ static int init_constants_late(struct ubifs_info *c) * Make sure that the log is large enough to fit reference nodes for * all buds plus one reserved LEB. 
*/ - tmp64 = c->max_bud_bytes; - tmp = do_div(tmp64, c->leb_size); - c->max_bud_cnt = tmp64 + !!tmp; + tmp64 = c->max_bud_bytes + c->leb_size - 1; + c->max_bud_cnt = div_u64(tmp64, c->leb_size); tmp = (c->ref_node_alsz * c->max_bud_cnt + c->leb_size - 1); tmp /= c->leb_size; tmp += 1; @@ -677,7 +677,7 @@ static int init_constants_late(struct ubifs_info *c) * Consequently, if the journal is too small, UBIFS will treat it as * always full. */ - tmp64 = (uint64_t)(c->jhead_cnt + 1) * c->leb_size + 1; + tmp64 = (long long)(c->jhead_cnt + 1) * c->leb_size + 1; if (c->bg_bud_bytes < tmp64) c->bg_bud_bytes = tmp64; if (c->max_bud_bytes < tmp64 + c->leb_size) @@ -699,7 +699,7 @@ static int init_constants_late(struct ubifs_info *c) * head is available. */ tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; - tmp64 *= (uint64_t)c->leb_size - c->leb_overhead; + tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 543e850022e..a17dd794ae9 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1498,7 +1498,7 @@ void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode, long long ubifs_get_free_space(struct ubifs_info *c); int ubifs_calc_min_idx_lebs(struct ubifs_info *c); void ubifs_convert_page_budget(struct ubifs_info *c); -long long ubifs_reported_space(const struct ubifs_info *c, uint64_t free); +long long ubifs_reported_space(const struct ubifs_info *c, long long free); long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs); /* find.c */ -- cgit v1.2.3-70-g09d2 From 650ed50f4298e76007070b7ab9d640dfe7228ab3 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 22 Dec 2008 11:09:04 +0200 Subject: UBIFS: re-calculate min_idx_size after the commit When we commit, but before we try to write anything to the flash media, @c->min_idx_size is inaccurate, because we do not re-calculate it after the commit. Do not forget to do this. Signed-off-by: Artem Bityutskiy --- fs/ubifs/tnc_commit.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 3c0af452887..fde8d127c76 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -802,8 +802,10 @@ int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot) * budgeting subsystem to assume the index is already committed, * even though it is not. 
*/ + ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c)); c->old_idx_sz = c->calc_idx_sz; c->budg_uncommitted_idx = 0; + c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); spin_unlock(&c->space_lock); mutex_unlock(&c->tnc_mutex); -- cgit v1.2.3-70-g09d2 From c8f915913afdfe1a796e312e21658b8edcf20868 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 19 Dec 2008 16:11:13 +0200 Subject: UBIFS: avoid unnecessary calculations Do not calculate min_idx_lebs, because it is available in c->min_idx_lebs Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 0bcb8031ca1..44cff803171 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -763,7 +763,8 @@ long long ubifs_get_free_space(struct ubifs_info *c) long long available, outstanding, free; spin_lock(&c->space_lock); - min_idx_lebs = ubifs_calc_min_idx_lebs(c); + min_idx_lebs = c->min_idx_lebs; + ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c)); outstanding = c->budg_data_growth + c->budg_dd_growth; /* -- cgit v1.2.3-70-g09d2 From 136221fc3219b3805c48db5da065e8e3467175d4 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Tue, 23 Dec 2008 15:21:30 -0500 Subject: nfs: remove redundant tests on reading new pages aops->readpages() and its NFS helper readpage_async_filler() will only be called to do readahead I/O for newly allocated pages. So it's not necessary to test for the always 0 dirty/uptodate page flags. The removal of nfs_wb_page() call also fixes a readahead bug: the NFS readahead has been synchronous since 2.6.23, because that call will clear PG_readahead, which is the reminder for asynchronous readahead. More background: the PG_readahead page flag is shared with PG_reclaim, one for read path and the other for write path. clear_page_dirty_for_io() unconditionally clears PG_readahead to prevent possible readahead residuals, assuming itself to be always called in the write path. However, NFS is one and the only exception in that it _always_ calls clear_page_dirty_for_io() in the read path, i.e. for readpages()/readpage(). Cc: Trond Myklebust Signed-off-by: Wu Fengguang Signed-off-by: Trond Myklebust --- fs/nfs/read.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 40d17987d0e..f856004bb7f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -533,12 +533,6 @@ readpage_async_filler(void *data, struct page *page) unsigned int len; int error; - error = nfs_wb_page(inode, page); - if (error) - goto out_unlock; - if (PageUptodate(page)) - goto out_unlock; - len = nfs_page_length(page); if (len == 0) return nfs_return_empty_page(page); -- cgit v1.2.3-70-g09d2 From d716f0b8a57f8577bcd869e7dcb5a0add9f6fc5e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:32 -0500 Subject: SUNRPC: nfsacl_encode/nfsacl_decode should be exported as GPL-only Again, this has never been intended as a public abi for out-of-tree modules. 
Signed-off-by: Trond Myklebust --- fs/nfs_common/nfsacl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index c11f5375d7c..04133aacb1e 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -29,8 +29,8 @@ MODULE_LICENSE("GPL"); -EXPORT_SYMBOL(nfsacl_encode); -EXPORT_SYMBOL(nfsacl_decode); +EXPORT_SYMBOL_GPL(nfsacl_encode); +EXPORT_SYMBOL_GPL(nfsacl_decode); struct nfsacl_encode_desc { struct xdr_array2_desc desc; -- cgit v1.2.3-70-g09d2 From 2de59872a7842143f4507832e7c1f5123c47feb7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:33 -0500 Subject: LOCKD: Make lockd_up() and lockd_down() exported GPL-only Signed-off-by: Trond Myklebust --- fs/lockd/svc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 56b076736b5..252d80163d0 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -45,7 +45,7 @@ static struct svc_program nlmsvc_program; struct nlmsvc_binding * nlmsvc_ops; -EXPORT_SYMBOL(nlmsvc_ops); +EXPORT_SYMBOL_GPL(nlmsvc_ops); static DEFINE_MUTEX(nlmsvc_mutex); static unsigned int nlmsvc_users; @@ -300,7 +300,7 @@ out: mutex_unlock(&nlmsvc_mutex); return error; } -EXPORT_SYMBOL(lockd_up); +EXPORT_SYMBOL_GPL(lockd_up); /* * Decrement the user count and bring down lockd if we're the last. @@ -329,7 +329,7 @@ lockd_down(void) out: mutex_unlock(&nlmsvc_mutex); } -EXPORT_SYMBOL(lockd_down); +EXPORT_SYMBOL_GPL(lockd_down); #ifdef CONFIG_SYSCTL -- cgit v1.2.3-70-g09d2 From df94f000c46c055cf439f5b92807cd827557ffbc Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 23 Dec 2008 15:21:33 -0500 Subject: lockd: convert reclaimer thread to kthread interface My understanding is that there is a push to turn the kernel_thread interface into a non-exported symbol and move all kernel threads to use the kthread API. This patch changes lockd to use kthread_run to spawn the reclaimer thread. I've made the assumption here that the extra module references taken when we spawn this thread are unnecessary and removed them. I've also added a KERN_ERR printk that pops if the thread can't be spawned to warn the admin that the locks won't be reclaimed. In the future, it would be nice to be able to notify userspace that locks have been lost (probably by implementing SIGLOST), and adding some good policies about how long we should reattempt to reclaim the locks. Finally, I removed a comment about memory leaks that I believe is obsolete and added a new one to clarify the result of sending a SIGKILL to the reclaimer thread. As best I can tell, doing so doesn't actually cause a memory leak. I consider this patch 2.6.29 material. 
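A condensed sketch of the kthread_run() pattern the patch moves to (illustrative names, not the lockd code): the helper creates, names and wakes the thread in a single call, returns ERR_PTR() on failure, and makes the in-thread daemonize() call unnecessary.

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/kernel.h>

static int my_reclaimer(void *data)
{
	/* ... reclaim locks for the host passed in via data ... */
	return 0;
}

static void start_reclaimer(void *data, const char *name)
{
	struct task_struct *task;

	task = kthread_run(my_reclaimer, data, "%s-reclaim", name);
	if (IS_ERR(task))
		printk(KERN_ERR "unable to spawn reclaimer thread (%ld)\n",
		       PTR_ERR(task));
}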
Signed-off-by: Jeff Layton Signed-off-by: Trond Myklebust --- fs/lockd/clntlock.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 8307dd64bf4..94d42cc4e39 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -14,6 +14,7 @@ #include #include #include +#include #define NLMDBG_FACILITY NLMDBG_CLIENT @@ -191,11 +192,15 @@ __be32 nlmclnt_grant(const struct sockaddr *addr, const struct nlm_lock *lock) void nlmclnt_recovery(struct nlm_host *host) { + struct task_struct *task; + if (!host->h_reclaiming++) { nlm_get_host(host); - __module_get(THIS_MODULE); - if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0) - module_put(THIS_MODULE); + task = kthread_run(reclaimer, host, "%s-reclaim", host->h_name); + if (IS_ERR(task)) + printk(KERN_ERR "lockd: unable to spawn reclaimer " + "thread. Locks for %s won't be reclaimed! " + "(%ld)\n", host->h_name, PTR_ERR(task)); } } @@ -207,7 +212,6 @@ reclaimer(void *ptr) struct file_lock *fl, *next; u32 nsmstate; - daemonize("%s-reclaim", host->h_name); allow_signal(SIGKILL); down_write(&host->h_rwsem); @@ -233,7 +237,12 @@ restart: list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) { list_del_init(&fl->fl_u.nfs_fl.list); - /* Why are we leaking memory here? --okir */ + /* + * sending this thread a SIGKILL will result in any unreclaimed + * locks being removed from the h_granted list. This means that + * the kernel will not attempt to reclaim them again if a new + * reclaimer thread is spawned for this host. + */ if (signalled()) continue; if (nlmclnt_reclaim(host, fl) != 0) @@ -261,5 +270,5 @@ restart: nlm_release_host(host); lockd_down(); unlock_kernel(); - module_put_and_exit(0); + return 0; } -- cgit v1.2.3-70-g09d2 From 7b5d2b98e118716dd1ccc2808fae88f6c4b16d54 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:34 -0500 Subject: NFS: rename nfs_path variable Clean up: I'm about to move the declaration of nfs_mount into fs/nfs/internal.h and include it in fs/nfs/nfsroot.c. There's a conflicting definition of nfs_path in fs/nfs/internal.h and fs/nfs/nfsroot.c, so rename the private one. 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfsroot.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 8478fc25dae..f1c0a066439 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -100,7 +100,7 @@ static char nfs_root_name[256] __initdata = ""; static __be32 servaddr __initdata = 0; /* Name of directory to mount */ -static char nfs_path[NFS_MAXPATHLEN] __initdata = { 0, }; +static char nfs_export_path[NFS_MAXPATHLEN] __initdata = { 0, }; /* NFS-related data */ static struct nfs_mount_data nfs_data __initdata = { 0, };/* NFS mount info */ @@ -312,7 +312,7 @@ static int __init root_nfs_name(char *name) printk(KERN_ERR "Root-NFS: Pathname for remote directory too long.\n"); return -1; } - sprintf(nfs_path, buf, cp); + sprintf(nfs_export_path, buf, cp); return 1; } @@ -340,7 +340,7 @@ static int __init root_nfs_addr(void) static void __init root_nfs_print(void) { printk(KERN_NOTICE "Root-NFS: Mounting %s on server %s as root\n", - nfs_path, nfs_data.hostname); + nfs_export_path, nfs_data.hostname); printk(KERN_NOTICE "Root-NFS: rsize = %d, wsize = %d, timeo = %d, retrans = %d\n", nfs_data.rsize, nfs_data.wsize, nfs_data.timeo, nfs_data.retrans); printk(KERN_NOTICE "Root-NFS: acreg (min,max) = (%d,%d), acdir (min,max) = (%d,%d)\n", @@ -493,10 +493,10 @@ static int __init root_nfs_get_handle(void) set_sockaddr(&sin, servaddr, htons(mount_port)); status = nfs_mount((struct sockaddr *) &sin, sizeof(sin), NULL, - nfs_path, version, protocol, &fh); + nfs_export_path, version, protocol, &fh); if (status < 0) printk(KERN_ERR "Root-NFS: Server returned error %d " - "while mounting %s\n", status, nfs_path); + "while mounting %s\n", status, nfs_export_path); else { nfs_data.root.size = fh.size; memcpy(nfs_data.root.data, fh.data, fh.size); -- cgit v1.2.3-70-g09d2 From 146ec944bbd31d241a44a00518b054fb01921d22 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:34 -0500 Subject: NFS: Move declaration of nfs_mount() to fs/nfs/internal.h Clean up: The nfs_mount() function is not to be used outside of the NFS client. Move its public declaration to fs/nfs/internal.h. 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 4 ++++ fs/nfs/nfsroot.c | 2 ++ include/linux/nfs_fs.h | 6 ------ 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d212ee41caf..7a38cc7b413 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -63,6 +63,10 @@ struct nfs_parsed_mount_data { struct security_mnt_opts lsm_opts; }; +/* mount_clnt.c */ +extern int nfs_mount(struct sockaddr *, size_t, char *, char *, + int, int, struct nfs_fh *); + /* client.c */ extern struct rpc_program nfs_program; diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index f1c0a066439..a96e5fdb38a 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -86,6 +86,8 @@ #include #include +#include "internal.h" + /* Define this to allow debugging output */ #undef NFSROOT_DEBUG #define NFSDBG_FACILITY NFSDBG_ROOT diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 4eaa8347a0d..f11077285a6 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -532,12 +532,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode) } #endif /* CONFIG_NFS_V3_ACL */ -/* - * linux/fs/mount_clnt.c - */ -extern int nfs_mount(struct sockaddr *, size_t, char *, char *, - int, int, struct nfs_fh *); - /* * inline functions */ -- cgit v1.2.3-70-g09d2 From c5d120f8e8b464368a7dcb038dc5c077d234d10a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:35 -0500 Subject: NFS: introduce nfs_mount_info struct for calling nfs_mount() Clean up: convert nfs_mount() to take a single data structure argument to make it simpler to add more arguments. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 13 +++++++++++-- fs/nfs/mount_clnt.c | 30 ++++++++++++------------------ fs/nfs/nfsroot.c | 17 +++++++++++------ fs/nfs/super.c | 29 +++++++++++++++-------------- 4 files changed, 49 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7a38cc7b413..4e983961346 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -64,8 +64,17 @@ struct nfs_parsed_mount_data { }; /* mount_clnt.c */ -extern int nfs_mount(struct sockaddr *, size_t, char *, char *, - int, int, struct nfs_fh *); +struct nfs_mount_request { + struct sockaddr *sap; + size_t salen; + char *hostname; + char *dirpath; + u32 version; + unsigned short protocol; + struct nfs_fh *fh; +}; + +extern int nfs_mount(struct nfs_mount_request *info); /* client.c */ extern struct rpc_program nfs_program; diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 086a6830d78..7e37113d37e 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -29,33 +29,26 @@ struct mnt_fhstatus { /** * nfs_mount - Obtain an NFS file handle for the given host and path - * @addr: pointer to server's address - * @len: size of server's address - * @hostname: name of server host, or NULL - * @path: pointer to string containing export path to mount - * @version: mount version to use for this request - * @protocol: transport protocol to use for thie request - * @fh: pointer to location to place returned file handle + * @info: pointer to mount request arguments * * Uses default timeout parameters specified by underlying transport. 
*/ -int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path, - int version, int protocol, struct nfs_fh *fh) +int nfs_mount(struct nfs_mount_request *info) { struct mnt_fhstatus result = { - .fh = fh + .fh = info->fh }; struct rpc_message msg = { - .rpc_argp = path, + .rpc_argp = info->dirpath, .rpc_resp = &result, }; struct rpc_create_args args = { - .protocol = protocol, - .address = addr, - .addrsize = len, - .servername = hostname, + .protocol = info->protocol, + .address = info->sap, + .addrsize = info->salen, + .servername = info->hostname, .program = &mnt_program, - .version = version, + .version = info->version, .authflavor = RPC_AUTH_UNIX, .flags = 0, }; @@ -63,13 +56,14 @@ int nfs_mount(struct sockaddr *addr, size_t len, char *hostname, char *path, int status; dprintk("NFS: sending MNT request for %s:%s\n", - (hostname ? hostname : "server"), path); + (info->hostname ? info->hostname : "server"), + info->dirpath); mnt_clnt = rpc_create(&args); if (IS_ERR(mnt_clnt)) goto out_clnt_err; - if (version == NFS_MNT3_VERSION) + if (info->version == NFS_MNT3_VERSION) msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT]; else msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT]; diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index a96e5fdb38a..f015e0d62ad 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -487,15 +487,20 @@ static int __init root_nfs_get_handle(void) { struct nfs_fh fh; struct sockaddr_in sin; + struct nfs_mount_request request = { + .sap = (struct sockaddr *)&sin, + .salen = sizeof(sin), + .dirpath = nfs_export_path, + .version = (nfs_data.flags & NFS_MOUNT_VER3) ? + NFS_MNT3_VERSION : NFS_MNT_VERSION, + .protocol = (nfs_data.flags & NFS_MOUNT_TCP) ? + XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP, + .fh = &fh, + }; int status; - int protocol = (nfs_data.flags & NFS_MOUNT_TCP) ? - XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP; - int version = (nfs_data.flags & NFS_MOUNT_VER3) ? - NFS_MNT3_VERSION : NFS_MNT_VERSION; set_sockaddr(&sin, servaddr, htons(mount_port)); - status = nfs_mount((struct sockaddr *) &sin, sizeof(sin), NULL, - nfs_export_path, version, protocol, &fh); + status = nfs_mount(&request); if (status < 0) printk(KERN_ERR "Root-NFS: Server returned error %d " "while mounting %s\n", status, nfs_export_path); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f48db679a1c..2b0c8e132b5 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1329,8 +1329,13 @@ out_security_failure: static int nfs_try_mount(struct nfs_parsed_mount_data *args, struct nfs_fh *root_fh) { - struct sockaddr *sap = (struct sockaddr *)&args->mount_server.address; - char *hostname; + struct nfs_mount_request request = { + .sap = (struct sockaddr *) + &args->mount_server.address, + .dirpath = args->nfs_server.export_path, + .protocol = args->mount_server.protocol, + .fh = root_fh, + }; int status; if (args->mount_server.version == 0) { @@ -1339,42 +1344,38 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args, else args->mount_server.version = NFS_MNT_VERSION; } + request.version = args->mount_server.version; if (args->mount_server.hostname) - hostname = args->mount_server.hostname; + request.hostname = args->mount_server.hostname; else - hostname = args->nfs_server.hostname; + request.hostname = args->nfs_server.hostname; /* * Construct the mount server's address. 
*/ if (args->mount_server.address.ss_family == AF_UNSPEC) { - memcpy(sap, &args->nfs_server.address, + memcpy(request.sap, &args->nfs_server.address, args->nfs_server.addrlen); args->mount_server.addrlen = args->nfs_server.addrlen; } + request.salen = args->mount_server.addrlen; /* * autobind will be used if mount_server.port == 0 */ - nfs_set_port(sap, args->mount_server.port); + nfs_set_port(request.sap, args->mount_server.port); /* * Now ask the mount server to map our export path * to a file handle. */ - status = nfs_mount(sap, - args->mount_server.addrlen, - hostname, - args->nfs_server.export_path, - args->mount_server.version, - args->mount_server.protocol, - root_fh); + status = nfs_mount(&request); if (status == 0) return 0; dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n", - hostname, status); + request.hostname, status); return status; } -- cgit v1.2.3-70-g09d2 From 4a01b8a4ee7b12becd26a49bae57f019605658cd Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:35 -0500 Subject: NFS: expand flags passed to nfs_create_rpc_client() The nfs_create_rpc_client() function sets up an RPC client for an NFS mount point. Add an option that allows it to set up an RPC transport from an unprivileged port. Instead of having nfs_create_rpc_client()'s callers retain local knowledge about how to set up an RPC client, create a couple of flag arguments to control the use of RPC_CLNT_CREATE flags. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 7547600b617..d11cdaafb39 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -470,7 +470,7 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, static int nfs_create_rpc_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, rpc_authflavor_t flavor, - int flags) + int discrtry, int noresvport) { struct rpc_clnt *clnt = NULL; struct rpc_create_args args = { @@ -482,9 +482,13 @@ static int nfs_create_rpc_client(struct nfs_client *clp, .program = &nfs_program, .version = clp->rpc_ops->version, .authflavor = flavor, - .flags = flags, }; + if (discrtry) + args.flags |= RPC_CLNT_CREATE_DISCRTRY; + if (noresvport) + args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; + if (!IS_ERR(clp->cl_rpcclient)) return 0; @@ -623,7 +627,7 @@ static int nfs_init_client(struct nfs_client *clp, * Create a client RPC handle for doing FSSTAT with UNIX auth only * - RFC 2623, sec 2.3.2 */ - error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0); + error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0, 0); if (error < 0) goto error; nfs_mark_client_ready(clp, NFS_CS_READY); @@ -979,7 +983,7 @@ static int nfs4_init_client(struct nfs_client *clp, clp->rpc_ops = &nfs_v4_clientops; error = nfs_create_rpc_client(clp, timeparms, authflavour, - RPC_CLNT_CREATE_DISCRTRY); + 1, 0); if (error < 0) goto error; memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); -- cgit v1.2.3-70-g09d2 From 542fcc334adfea36d407cbf698d549fcb2bf6b91 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:36 -0500 Subject: NFS: move nfs_server flag initialization Make it possible for the NFSv4 mount set up logic to pass mount option flags down the stack to nfs_create_rpc_client(). 
This is immediately useful if we want NFS mount options to modulate settings of the underlying RPC transport, but it may be useful at some later point if other parts of the NFSv4 mount initialization logic want to know what the mount options are. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d11cdaafb39..27190337fc1 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1063,6 +1063,10 @@ static int nfs4_init_server(struct nfs_server *server, nfs_init_timeout_values(&timeparms, data->nfs_server.protocol, data->timeo, data->retrans); + /* Initialise the client representation from the mount data */ + server->flags = data->flags; + server->caps |= NFS_CAP_ATOMIC_OPEN; + /* Get a client record */ error = nfs4_set_client(server, data->nfs_server.hostname, @@ -1075,10 +1079,6 @@ static int nfs4_init_server(struct nfs_server *server, if (error < 0) goto error; - /* Initialise the client representation from the mount data */ - server->flags = data->flags; - server->caps |= NFS_CAP_ATOMIC_OPEN; - if (data->rsize) server->rsize = nfs_block_size(data->rsize, NULL); if (data->wsize) @@ -1181,6 +1181,10 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, parent_server = NFS_SB(data->sb); parent_client = parent_server->nfs_client; + /* Initialise the client representation from the parent server */ + nfs_server_copy_userdata(server, parent_server); + server->caps |= NFS_CAP_ATOMIC_OPEN; + /* Get a client representation. * Note: NFSv4 always uses TCP, */ error = nfs4_set_client(server, data->hostname, @@ -1193,10 +1197,6 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data, if (error < 0) goto error; - /* Initialise the client representation from the parent server */ - nfs_server_copy_userdata(server, parent_server); - server->caps |= NFS_CAP_ATOMIC_OPEN; - error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor); if (error < 0) goto error; -- cgit v1.2.3-70-g09d2 From d740351bf0960e89ce1aef45cfe00167cb0f9e5b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:37 -0500 Subject: NFS: add "[no]resvport" mount option The standard default security setting for NFS is AUTH_SYS. An NFS client connects to NFS servers via a privileged source port and a fixed standard destination port (2049). The client sends raw uid and gid numbers to identify users making NFS requests, and the server assumes an appropriate authority on the client has vetted these values because the source port is privileged. On Linux, by default in-kernel RPC services use a privileged port in the range between 650 and 1023 to avoid using source ports of well- known IP services. Using such a small range limits the number of NFS mount points and the number of unique NFS servers to which a client can connect concurrently. An NFS client can use unprivileged source ports to expand the range of source port numbers, allowing more concurrent server connections and more NFS mount points. Servers must explicitly allow NFS connections from unprivileged ports for this to work. In the past, bumping the value of the sunrpc.max_resvport sysctl on the client would permit the NFS client to use unprivileged ports. Bumping this setting also changes the maximum port number used by other in-kernel RPC services, some of which still required a port number less than 1023. 
This is exacerbated by the way source port numbers are chosen by the Linux RPC client, which starts at the top of the range and works downwards. It means that bumping the maximum means all RPC services requesting a source port will likely get an unprivileged port instead of a privileged one. Changing this setting effects all NFS mount points on a client. A sysadmin could not selectively choose which mount points would use non-privileged ports and which could not. Lastly, this mechanism of expanding the limit on the number of NFS mount points was entirely undocumented. To address the need for the NFS client to use a large range of source ports without interfering with the activity of other in-kernel RPC services, we introduce a new NFS mount option. This option explicitly tells only the NFS client to use a non-privileged source port when communicating with the NFS server for one specific mount point. This new mount option is called "resvport," like the similar NFS mount option on FreeBSD and Mac OS X. A sister patch for nfs-utils will be submitted that documents this new option in nfs(5). The default setting for this new mount option requires the NFS client to use a privileged port, as before. Explicitly specifying the "noresvport" mount option allows the NFS client to use an unprivileged source port for this mount point when connecting to the NFS server port. This mount option is supported only for text-based NFS mounts. [ Sidebar: it is widely known that security mechanisms based on the use of privileged source ports are ineffective. However, the NFS client can combine the use of unprivileged ports with the use of secure authentication mechanisms, such as Kerberos. This allows a large number of connections and mount points while ensuring a useful level of security. Eventually we may change the default setting for this option depending on the security flavor used for the mount. For example, if the mount is using only AUTH_SYS, then the default setting will be "resvport;" if the mount is using a strong security flavor such as krb5, the default setting will be "noresvport." ] Signed-off-by: Chuck Lever [Trond.Myklebust@netapp.com: Fixed a bug whereby nfs4_init_client() was being called with incorrect arguments.] 
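Once the matching nfs-utils change is available, a text-based mount using the new option would look something like the following (hypothetical server and export path):

	mount -t nfs -o noresvport server:/export /mnt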
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 11 +++++++---- fs/nfs/super.c | 12 +++++++++++- include/linux/nfs_mount.h | 3 ++- 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 27190337fc1..3a69cacc4fa 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -627,7 +627,8 @@ static int nfs_init_client(struct nfs_client *clp, * Create a client RPC handle for doing FSSTAT with UNIX auth only * - RFC 2623, sec 2.3.2 */ - error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 0, 0); + error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, + 0, data->flags & NFS_MOUNT_NORESVPORT); if (error < 0) goto error; nfs_mark_client_ready(clp, NFS_CS_READY); @@ -969,7 +970,8 @@ error: static int nfs4_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, const char *ip_addr, - rpc_authflavor_t authflavour) + rpc_authflavor_t authflavour, + int flags) { int error; @@ -983,7 +985,7 @@ static int nfs4_init_client(struct nfs_client *clp, clp->rpc_ops = &nfs_v4_clientops; error = nfs_create_rpc_client(clp, timeparms, authflavour, - 1, 0); + 1, flags & NFS_MOUNT_NORESVPORT); if (error < 0) goto error; memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr)); @@ -1034,7 +1036,8 @@ static int nfs4_set_client(struct nfs_server *server, error = PTR_ERR(clp); goto error; } - error = nfs4_init_client(clp, timeparms, ip_addr, authflavour); + error = nfs4_init_client(clp, timeparms, ip_addr, authflavour, + server->flags); if (error < 0) goto error_put; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 2b0c8e132b5..e05a77be306 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -75,6 +75,7 @@ enum { Opt_acl, Opt_noacl, Opt_rdirplus, Opt_nordirplus, Opt_sharecache, Opt_nosharecache, + Opt_resvport, Opt_noresvport, /* Mount options that take integer arguments */ Opt_port, @@ -129,6 +130,8 @@ static const match_table_t nfs_mount_option_tokens = { { Opt_nordirplus, "nordirplus" }, { Opt_sharecache, "sharecache" }, { Opt_nosharecache, "nosharecache" }, + { Opt_resvport, "resvport" }, + { Opt_noresvport, "noresvport" }, { Opt_port, "port=%u" }, { Opt_rsize, "rsize=%u" }, @@ -514,7 +517,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, { NFS_MOUNT_NONLM, ",nolock", "" }, { NFS_MOUNT_NOACL, ",noacl", "" }, { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" }, - { NFS_MOUNT_UNSHARED, ",nosharecache", ""}, + { NFS_MOUNT_UNSHARED, ",nosharecache", "" }, + { NFS_MOUNT_NORESVPORT, ",noresvport", "" }, { 0, NULL, NULL } }; const struct proc_nfs_info *nfs_infop; @@ -1035,6 +1039,12 @@ static int nfs_parse_mount_options(char *raw, case Opt_nosharecache: mnt->flags |= NFS_MOUNT_UNSHARED; break; + case Opt_resvport: + mnt->flags &= ~NFS_MOUNT_NORESVPORT; + break; + case Opt_noresvport: + mnt->flags |= NFS_MOUNT_NORESVPORT; + break; /* * options that take numeric values diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 6549a06ac16..4499016e6d0 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h @@ -45,7 +45,7 @@ struct nfs_mount_data { char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ }; -/* bits in the flags field */ +/* bits in the flags field visible to user space */ #define NFS_MOUNT_SOFT 0x0001 /* 1 */ #define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ @@ -68,5 +68,6 @@ struct nfs_mount_data { /* The following are for internal use only */ #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 +#define 
NFS_MOUNT_NORESVPORT 0x40000 #endif -- cgit v1.2.3-70-g09d2 From 50a737f86dbf99daf3a8dcbdf778a3be36bb2a39 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:37 -0500 Subject: NFS: "[no]resvport" mount option changes mountd client too If the admin has specified the "noresvport" option for an NFS mount point, the kernel's NFS client uses an unprivileged source port for the main NFS transport. The kernel's mountd client should use an unprivileged port in this case as well. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 1 + fs/nfs/mount_clnt.c | 4 +++- fs/nfs/super.c | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 4e983961346..340ede8f608 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -72,6 +72,7 @@ struct nfs_mount_request { u32 version; unsigned short protocol; struct nfs_fh *fh; + int noresvport; }; extern int nfs_mount(struct nfs_mount_request *info); diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 7e37113d37e..ca905a5bb1b 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -50,7 +50,6 @@ int nfs_mount(struct nfs_mount_request *info) .program = &mnt_program, .version = info->version, .authflavor = RPC_AUTH_UNIX, - .flags = 0, }; struct rpc_clnt *mnt_clnt; int status; @@ -59,6 +58,9 @@ int nfs_mount(struct nfs_mount_request *info) (info->hostname ? info->hostname : "server"), info->dirpath); + if (info->noresvport) + args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; + mnt_clnt = rpc_create(&args); if (IS_ERR(mnt_clnt)) goto out_clnt_err; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index e05a77be306..d8e062fe76b 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1345,6 +1345,7 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args, .dirpath = args->nfs_server.export_path, .protocol = args->mount_server.protocol, .fh = root_fh, + .noresvport = args->flags & NFS_MOUNT_NORESVPORT, }; int status; -- cgit v1.2.3-70-g09d2 From 0cb2659b818eca99235e17c04291cfa9985c14f7 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 23 Dec 2008 15:21:38 -0500 Subject: NLM: allow lockd requests from an unprivileged port If the admin has specified the "noresvport" option for an NFS mount point, the kernel's NFS client uses an unprivileged source port for the main NFS transport. The kernel's lockd client should use an unprivileged port in this case as well. 
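[ Sketch: the patch below and the two preceding it all plumb the same bit. The condensed, illustrative C fragment that follows is not the kernel code: the structure and helper names are invented, only NFS_MOUNT_NORESVPORT and RPC_CLNT_CREATE_NONPRIVPORT are names taken from the patches, and the flag bit value shown is an assumption. It shows how one mount flag fans out to each RPC transport the mount creates: the main NFS client, the MNT protocol client, and the NLM (lockd) client. ]

#define NFS_MOUNT_NORESVPORT            0x40000
#define RPC_CLNT_CREATE_NONPRIVPORT     (1UL << 7)      /* bit value assumed */

struct rpc_create_args_sketch {
        unsigned long flags;
};

static void apply_resvport_policy(struct rpc_create_args_sketch *args,
                                  unsigned int mount_flags)
{
        /* "resvport" (the default) leaves the flag clear; "noresvport"
         * lets this one transport bind an unprivileged source port. */
        if (mount_flags & NFS_MOUNT_NORESVPORT)
                args->flags |= RPC_CLNT_CREATE_NONPRIVPORT;
}

static void setup_transports(unsigned int mount_flags,
                             struct rpc_create_args_sketch *nfs,
                             struct rpc_create_args_sketch *mountd,
                             struct rpc_create_args_sketch *lockd)
{
        apply_resvport_policy(nfs, mount_flags);        /* main NFS transport */
        apply_resvport_policy(mountd, mount_flags);     /* MNT protocol client */
        apply_resvport_policy(lockd, mount_flags);      /* NLM client */
}

int main(void)
{
        struct rpc_create_args_sketch nfs = {0}, mountd = {0}, lockd = {0};

        setup_transports(NFS_MOUNT_NORESVPORT, &nfs, &mountd, &lockd);
        return 0;
}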
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/clntlock.c | 2 +- fs/lockd/host.c | 10 +++++++++- fs/nfs/client.c | 2 ++ include/linux/lockd/bind.h | 1 + include/linux/lockd/lockd.h | 4 +++- 5 files changed, 16 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 94d42cc4e39..1f3b0fc0d35 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -61,7 +61,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) host = nlmclnt_lookup_host(nlm_init->address, nlm_init->addrlen, nlm_init->protocol, nlm_version, - nlm_init->hostname); + nlm_init->hostname, nlm_init->noresvport); if (host == NULL) { lockd_down(); return ERR_PTR(-ENOLCK); diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 70fc63a1727..acc2aa5021d 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -48,6 +48,7 @@ struct nlm_lookup_host_info { const size_t hostname_len; /* it's length */ const struct sockaddr *src_sap; /* our address (optional) */ const size_t src_len; /* it's length */ + const int noresvport; /* use non-priv port */ }; /* @@ -222,6 +223,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) host->h_nsmstate = 0; /* real NSM state */ host->h_nsmhandle = nsm; host->h_server = ni->server; + host->h_noresvport = ni->noresvport; hlist_add_head(&host->h_hash, chain); INIT_LIST_HEAD(&host->h_lockowners); spin_lock_init(&host->h_lock); @@ -272,6 +274,7 @@ nlm_destroy_host(struct nlm_host *host) * @protocol: transport protocol to use * @version: NLM protocol version * @hostname: '\0'-terminated hostname of server + * @noresvport: 1 if non-privileged port should be used * * Returns an nlm_host structure that matches the passed-in * [server address, transport protocol, NLM version, server hostname]. @@ -281,7 +284,9 @@ nlm_destroy_host(struct nlm_host *host) struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, - const u32 version, const char *hostname) + const u32 version, + const char *hostname, + int noresvport) { const struct sockaddr source = { .sa_family = AF_UNSPEC, @@ -296,6 +301,7 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, .hostname_len = strlen(hostname), .src_sap = &source, .src_len = sizeof(source), + .noresvport = noresvport, }; dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, @@ -417,6 +423,8 @@ nlm_bind_host(struct nlm_host *host) */ if (!host->h_server) args.flags |= RPC_CLNT_CREATE_HARDRTRY; + if (host->h_noresvport) + args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; clnt = rpc_create(&args); if (!IS_ERR(clnt)) diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 3a69cacc4fa..70b6d9e8517 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -526,6 +526,8 @@ static int nfs_start_lockd(struct nfs_server *server) .protocol = server->flags & NFS_MOUNT_TCP ? IPPROTO_TCP : IPPROTO_UDP, .nfs_version = clp->rpc_ops->version, + .noresvport = server->flags & NFS_MOUNT_NORESVPORT ? 
+ 1 : 0, }; if (nlm_init.nfs_version > 3) diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index e5872dc994c..fbc48f89852 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -41,6 +41,7 @@ struct nlmclnt_initdata { size_t addrlen; unsigned short protocol; u32 nfs_version; + int noresvport; }; /* diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index b56d5aa9b19..23da3fa69ef 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -49,6 +49,7 @@ struct nlm_host { unsigned short h_proto; /* transport proto */ unsigned short h_reclaiming : 1, h_server : 1, /* server side, not client side */ + h_noresvport : 1, h_inuse : 1; wait_queue_head_t h_gracewait; /* wait while reclaiming */ struct rw_semaphore h_rwsem; /* Reboot recovery lock */ @@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, const size_t salen, const unsigned short protocol, const u32 version, - const char *hostname); + const char *hostname, + int noresvport); struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len); -- cgit v1.2.3-70-g09d2 From 343104308a33c4f1e23c8e841ede95e97b870842 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:38 -0500 Subject: NFSv4: Fix up another delegation related race When we can update_open_stateid(), we need to be certain that we don't race with a delegation return. While we could do this by grabbing the nfs_client->cl_lock, a dedicated spin lock in the delegation structure will scale better. Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 7 ++++- fs/nfs/delegation.h | 1 + fs/nfs/nfs4proc.c | 80 +++++++++++++++++++++++++++++++++++------------------ 3 files changed, 60 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index cc563cfa694..e0cb4ee3b23 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -140,13 +140,17 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs if (delegation == NULL) goto nomatch; + spin_lock(&delegation->lock); if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data, sizeof(delegation->stateid.data)) != 0) - goto nomatch; + goto nomatch_unlock; list_del_rcu(&delegation->super_list); nfsi->delegation_state = 0; rcu_assign_pointer(nfsi->delegation, NULL); + spin_unlock(&delegation->lock); return delegation; +nomatch_unlock: + spin_unlock(&delegation->lock); nomatch: return NULL; } @@ -172,6 +176,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation->change_attr = nfsi->change_attr; delegation->cred = get_rpccred(cred); delegation->inode = inode; + spin_lock_init(&delegation->lock); spin_lock(&clp->cl_lock); if (rcu_dereference(nfsi->delegation) != NULL) { diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index f1c5e2a5d88..8299c6220e9 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -22,6 +22,7 @@ struct nfs_delegation { long flags; loff_t maxsize; __u64 change_attr; + spinlock_t lock; struct rcu_head rcu; }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 83e700a2b0c..254cbff103f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -388,9 +388,8 @@ static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid write_sequnlock(&state->seqlock); } -static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *deleg_stateid, int 
open_flags) +static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, int open_flags) { - open_flags &= (FMODE_READ|FMODE_WRITE); /* * Protect the call to nfs4_state_set_mode_locked and * serialise the stateid update @@ -408,6 +407,45 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_sta spin_unlock(&state->owner->so_lock); } +static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, int open_flags) +{ + struct nfs_inode *nfsi = NFS_I(state->inode); + struct nfs_delegation *deleg_cur; + int ret = 0; + + open_flags &= (FMODE_READ|FMODE_WRITE); + + rcu_read_lock(); + deleg_cur = rcu_dereference(nfsi->delegation); + if (deleg_cur == NULL) + goto no_delegation; + + spin_lock(&deleg_cur->lock); + if (nfsi->delegation != deleg_cur || + (deleg_cur->type & open_flags) != open_flags) + goto no_delegation_unlock; + + if (delegation == NULL) + delegation = &deleg_cur->stateid; + else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0) + goto no_delegation_unlock; + + __update_open_stateid(state, open_stateid, &deleg_cur->stateid, open_flags); + ret = 1; +no_delegation_unlock: + spin_unlock(&deleg_cur->lock); +no_delegation: + rcu_read_unlock(); + + if (!ret && open_stateid != NULL) { + __update_open_stateid(state, open_stateid, NULL, open_flags); + ret = 1; + } + + return ret; +} + + static void nfs4_return_incompatible_delegation(struct inode *inode, mode_t open_flags) { struct nfs_delegation *delegation; @@ -431,23 +469,23 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) nfs4_stateid stateid; int ret = -EAGAIN; - rcu_read_lock(); - delegation = rcu_dereference(nfsi->delegation); for (;;) { if (can_open_cached(state, open_mode)) { spin_lock(&state->owner->so_lock); if (can_open_cached(state, open_mode)) { update_open_stateflags(state, open_mode); spin_unlock(&state->owner->so_lock); - rcu_read_unlock(); goto out_return_state; } spin_unlock(&state->owner->so_lock); } - if (delegation == NULL) - break; - if (!can_open_delegated(delegation, open_mode)) + rcu_read_lock(); + delegation = rcu_dereference(nfsi->delegation); + if (delegation == NULL || + !can_open_delegated(delegation, open_mode)) { + rcu_read_unlock(); break; + } /* Save the delegation */ memcpy(stateid.data, delegation->stateid.data, sizeof(stateid.data)); rcu_read_unlock(); @@ -455,19 +493,11 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) if (ret != 0) goto out; ret = -EAGAIN; - rcu_read_lock(); - delegation = rcu_dereference(nfsi->delegation); - /* If no delegation, try a cached open */ - if (delegation == NULL) - continue; - /* Is the delegation still valid? 
*/ - if (memcmp(stateid.data, delegation->stateid.data, sizeof(stateid.data)) != 0) - continue; - rcu_read_unlock(); - update_open_stateid(state, NULL, &stateid, open_mode); - goto out_return_state; + + /* Try to update the stateid using the delegation */ + if (update_open_stateid(state, NULL, &stateid, open_mode)) + goto out_return_state; } - rcu_read_unlock(); out: return ERR_PTR(ret); out_return_state: @@ -480,7 +510,6 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data struct inode *inode; struct nfs4_state *state = NULL; struct nfs_delegation *delegation; - nfs4_stateid *deleg_stateid = NULL; int ret; if (!data->rpc_done) { @@ -516,12 +545,9 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data data->owner->so_cred, &data->o_res); } - rcu_read_lock(); - delegation = rcu_dereference(NFS_I(inode)->delegation); - if (delegation != NULL) - deleg_stateid = &delegation->stateid; - update_open_stateid(state, &data->o_res.stateid, deleg_stateid, data->o_arg.open_flags); - rcu_read_unlock(); + + update_open_stateid(state, &data->o_res.stateid, NULL, + data->o_arg.open_flags); iput(inode); out: return state; -- cgit v1.2.3-70-g09d2 From 86e894899820f2b3094d5557124fc22743ae0fc7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:39 -0500 Subject: NFSv4: Fix up the dereferencing of delegation->inode Without an extra lock, we cannot just assume that the delegation->inode is valid when we're traversing the rcu-protected nfs_client lists. Use the delegation->lock to ensure that it is truly valid. Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index e0cb4ee3b23..13f2044a30b 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -134,6 +134,17 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation * return res; } +static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation) +{ + struct inode *inode = NULL; + + spin_lock(&delegation->lock); + if (delegation->inode != NULL) + inode = igrab(delegation->inode); + spin_unlock(&delegation->lock); + return inode; +} + static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid) { struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation); @@ -145,6 +156,7 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs sizeof(delegation->stateid.data)) != 0) goto nomatch_unlock; list_del_rcu(&delegation->super_list); + delegation->inode = NULL; nfsi->delegation_state = 0; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); @@ -298,9 +310,11 @@ void nfs_return_all_delegations(struct super_block *sb) restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - if (delegation->inode->i_sb != sb) - continue; - inode = igrab(delegation->inode); + inode = NULL; + spin_lock(&delegation->lock); + if (delegation->inode != NULL && delegation->inode->i_sb == sb) + inode = igrab(delegation->inode); + spin_unlock(&delegation->lock); if (inode == NULL) continue; spin_lock(&clp->cl_lock); @@ -329,7 +343,7 @@ restart: goto out; rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - inode = igrab(delegation->inode); + inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) 
continue; spin_lock(&clp->cl_lock); @@ -376,7 +390,7 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp) restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - inode = igrab(delegation->inode); + inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) continue; spin_lock(&clp->cl_lock); @@ -464,10 +478,14 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs struct inode *res = NULL; rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { + spin_lock(&delegation->lock); + if (delegation->inode != NULL && + nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { res = igrab(delegation->inode); - break; } + spin_unlock(&delegation->lock); + if (res != NULL) + break; } rcu_read_unlock(); return res; @@ -491,17 +509,22 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp) void nfs_delegation_reap_unclaimed(struct nfs_client *clp) { struct nfs_delegation *delegation; + struct inode *inode; restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) + continue; spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(delegation->inode), NULL); + delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); spin_unlock(&clp->cl_lock); rcu_read_unlock(); if (delegation != NULL) nfs_free_delegation(delegation); + iput(inode); goto restart; } rcu_read_unlock(); -- cgit v1.2.3-70-g09d2 From 15c831bf1a3f8cab9812a96228145200726fea33 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:39 -0500 Subject: NFS: Use atomic bitops when changing struct nfs_delegation->flags Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 6 +++--- fs/nfs/delegation.h | 4 ++-- fs/nfs/nfs4proc.c | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 13f2044a30b..646ba3e75a1 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -119,7 +119,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, st delegation->maxsize = res->maxsize; oldcred = delegation->cred; delegation->cred = get_rpccred(cred); - delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM; + clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); NFS_I(inode)->delegation_state = delegation->type; smp_wmb(); put_rpccred(oldcred); @@ -499,7 +499,7 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp) struct nfs_delegation *delegation; rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) - delegation->flags |= NFS_DELEGATION_NEED_RECLAIM; + set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); rcu_read_unlock(); } @@ -513,7 +513,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp) restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) continue; inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 8299c6220e9..5e9f40e0a7d 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -18,10 +18,10 @@ struct nfs_delegation { 
struct inode *inode; nfs4_stateid stateid; int type; -#define NFS_DELEGATION_NEED_RECLAIM 1 - long flags; loff_t maxsize; __u64 change_attr; +#define NFS_DELEGATION_NEED_RECLAIM 0 + unsigned long flags; spinlock_t lock; struct rcu_head rcu; }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 254cbff103f..d53aa2dace8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -344,7 +344,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_fla { if ((delegation->type & open_flags) != open_flags) return 0; - if (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) return 0; return 1; } @@ -536,7 +536,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); - if (!(delegation_flags & NFS_DELEGATION_NEED_RECLAIM)) + if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) nfs_inode_set_delegation(state->inode, data->owner->so_cred, &data->o_res); @@ -667,7 +667,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state opendata->o_arg.fh = NFS_FH(state->inode); rcu_read_lock(); delegation = rcu_dereference(NFS_I(state->inode)->delegation); - if (delegation != NULL && (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) != 0) + if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0) delegation_type = delegation->type; rcu_read_unlock(); opendata->o_arg.u.delegation_type = delegation_type; @@ -839,7 +839,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) rcu_read_lock(); delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); if (delegation != NULL && - (delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0) { + test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) { rcu_read_unlock(); goto out_no_action; } -- cgit v1.2.3-70-g09d2 From 028600143079c8e8f8366bbc2eb29977743baf3a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:40 -0500 Subject: NFSv4: Clean up for the state loss reclaimer Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 130 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 81 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 401ef8b28f9..99182b3229e 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -811,7 +811,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) nfs4_recover_state(clp); } -static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state) +static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; struct file_lock *fl; @@ -844,7 +844,7 @@ out_err: return status; } -static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp) +static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state *state; struct nfs4_lock_state *lock; @@ -863,15 +863,15 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n continue; status = ops->recover_open(sp, state); if (status >= 0) { - status = nfs4_reclaim_locks(ops, state); - if (status < 0) - goto out_err; - list_for_each_entry(lock, &state->lock_states, ls_locks) { - if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) - printk("%s: Lock reclaim failed!\n", + status = nfs4_reclaim_locks(state, ops); + if (status >= 0) { +
list_for_each_entry(lock, &state->lock_states, ls_locks) { + if (!(lock->ls_flags & NFS_LOCK_INITIALIZED)) + printk("%s: Lock reclaim failed!\n", __func__); + } + continue; } - continue; } switch (status) { default: @@ -928,45 +928,45 @@ static void nfs4_state_mark_reclaim(struct nfs_client *clp) } } -static int reclaimer(void *ptr) +static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { - struct nfs_client *clp = ptr; - struct nfs4_state_owner *sp; struct rb_node *pos; - struct nfs4_state_recovery_ops *ops; - struct rpc_cred *cred; int status = 0; - allow_signal(SIGKILL); + /* Note: list is protected by exclusive lock on cl->cl_sem */ + for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { + struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + status = nfs4_reclaim_open_state(sp, ops); + if (status < 0) + break; + } + return status; +} + +static int nfs4_check_lease(struct nfs_client *clp) +{ + struct rpc_cred *cred; + int status = -NFS4ERR_EXPIRED; - /* Ensure exclusive access to NFSv4 state */ - down_write(&clp->cl_sem); - /* Are there any NFS mounts out there? */ - if (list_empty(&clp->cl_superblocks)) - goto out; -restart_loop: - ops = &nfs4_network_partition_recovery_ops; /* Are there any open files on this volume? */ cred = nfs4_get_renew_cred(clp); if (cred != NULL) { /* Yes there are: try to renew the old lease */ status = nfs4_proc_renew(clp, cred); put_rpccred(cred); - switch (status) { - case 0: - case -NFS4ERR_CB_PATH_DOWN: - goto out; - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_LEASE_MOVED: - ops = &nfs4_reboot_recovery_ops; - } - } else { - /* "reboot" to ensure we clear all state on the server */ - clp->cl_boot_time = CURRENT_TIME; + return status; } - /* We're going to have to re-establish a clientid */ - nfs4_state_mark_reclaim(clp); - status = -ENOENT; + + /* "reboot" to ensure we clear all state on the server */ + clp->cl_boot_time = CURRENT_TIME; + return status; +} + +static int nfs4_reclaim_lease(struct nfs_client *clp) +{ + struct rpc_cred *cred; + int status = -ENOENT; + cred = nfs4_get_setclientid_cred(clp); if (cred != NULL) { status = nfs4_init_client(clp, cred); @@ -974,29 +974,61 @@ restart_loop: /* Handle case where the user hasn't set up machine creds */ if (status == -EACCES && cred == clp->cl_machine_cred) { nfs4_clear_machine_cred(clp); - goto restart_loop; + status = -EAGAIN; } } - if (status) - goto out_error; - /* Mark all delegations for reclaim */ - nfs_delegation_mark_reclaim(clp); - /* Note: list is protected by exclusive lock on cl->cl_sem */ - for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { - sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); - status = nfs4_reclaim_open_state(ops, sp); + return status; +} + +static int reclaimer(void *ptr) +{ + struct nfs_client *clp = ptr; + const struct nfs4_state_recovery_ops *ops; + int status = 0; + + allow_signal(SIGKILL); + + /* Ensure exclusive access to NFSv4 state */ + down_write(&clp->cl_sem); + while (!list_empty(&clp->cl_superblocks)) { + ops = &nfs4_network_partition_recovery_ops; + status = nfs4_check_lease(clp); + switch (status) { + case 0: + case -NFS4ERR_CB_PATH_DOWN: + goto out; + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_LEASE_MOVED: + ops = &nfs4_reboot_recovery_ops; + } + + /* We're going to have to re-establish a clientid */ + nfs4_state_mark_reclaim(clp); + + status = nfs4_reclaim_lease(clp); + if (status) { + if (status == -EAGAIN) + 
continue; + goto out_error; + } + + /* Mark all delegations for reclaim */ + nfs_delegation_mark_reclaim(clp); + /* Note: list is protected by exclusive lock on cl->cl_sem */ + status = nfs4_do_reclaim(clp, ops); if (status < 0) { if (status == -NFS4ERR_NO_GRACE) { ops = &nfs4_network_partition_recovery_ops; - status = nfs4_reclaim_open_state(ops, sp); + status = nfs4_do_reclaim(clp, ops); } if (status == -NFS4ERR_STALE_CLIENTID) - goto restart_loop; + continue; if (status == -NFS4ERR_EXPIRED) - goto restart_loop; + continue; } + nfs_delegation_reap_unclaimed(clp); + break; } - nfs_delegation_reap_unclaimed(clp); out: up_write(&clp->cl_sem); if (status == -NFS4ERR_CB_PATH_DOWN) -- cgit v1.2.3-70-g09d2 From 6dc9d57af9917f5c7faa13c17b770dce17c3972b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:41 -0500 Subject: NFSv4: Callers to nfs4_get_renew_cred() need to hold nfs_client->cl_lock Ditto for nfs4_get_setclientid_cred(). Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4renewd.c | 2 +- fs/nfs/nfs4state.c | 20 +++++++++++++++----- 3 files changed, 17 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index ea790645fda..0ac6bbb8eaa 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -202,7 +202,7 @@ extern void nfs4_kill_renewd(struct nfs_client *); extern void nfs4_renew_state(struct work_struct *); /* nfs4state.c */ -struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp); extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); extern void nfs4_put_state_owner(struct nfs4_state_owner *); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 3305acbbe2a..9fe8640a88e 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -77,7 +77,7 @@ nfs4_renew_state(struct work_struct *work) timeout = (2 * lease) / 3 + (long)last - (long)now; /* Are we close to a lease timeout? 
*/ if (time_after(now, last + lease/3)) { - cred = nfs4_get_renew_cred(clp); + cred = nfs4_get_renew_cred_locked(clp); if (cred == NULL) { set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); spin_unlock(&clp->cl_lock); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 99182b3229e..300faba9a18 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -71,14 +71,12 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred) return status; } -static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp) +static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp) { struct rpc_cred *cred = NULL; - spin_lock(&clp->cl_lock); if (clp->cl_machine_cred != NULL) cred = get_rpccred(clp->cl_machine_cred); - spin_unlock(&clp->cl_lock); return cred; } @@ -94,7 +92,7 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp) put_rpccred(cred); } -struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) { struct nfs4_state_owner *sp; struct rb_node *pos; @@ -110,13 +108,24 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) return cred; } +static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp) +{ + struct rpc_cred *cred; + + spin_lock(&clp->cl_lock); + cred = nfs4_get_renew_cred_locked(clp); + spin_unlock(&clp->cl_lock); + return cred; +} + static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) { struct nfs4_state_owner *sp; struct rb_node *pos; struct rpc_cred *cred; - cred = nfs4_get_machine_cred(clp); + spin_lock(&clp->cl_lock); + cred = nfs4_get_machine_cred_locked(clp); if (cred != NULL) goto out; pos = rb_first(&clp->cl_state_owners); @@ -125,6 +134,7 @@ static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) cred = get_rpccred(sp->so_cred); } out: + spin_unlock(&clp->cl_lock); return cred; } -- cgit v1.2.3-70-g09d2 From b79a4a1b45b2543e38026303a1956bdc0aababa0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:41 -0500 Subject: NFSv4: Fix state recovery when the client runs over the grace period If the client for some reason is not able to recover all its state within the time allotted for the grace period, and the server reboots again, the client is not allowed to recover the state that was 'lost' using reboot recovery. 
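[ Sketch: a standalone illustration of the per-state marking logic the patch below adds (see nfs4_state_mark_reclaim_reboot/_nograce in the diff). The struct and bit helpers are simplified stand-ins for the kernel's set_bit/test_bit/clear_bit; only the rule itself comes from the patch: a state already marked for no-grace recovery was lost before the reboot, so it must not be reclaimed under the server's grace period. ]

#include <stdbool.h>
#include <stdio.h>

enum { STATE_RECLAIM_REBOOT, STATE_RECLAIM_NOGRACE };

struct state_sketch { unsigned long flags; };

static bool test_flag(struct state_sketch *s, int nr)  { return s->flags & (1UL << nr); }
static void set_flag(struct state_sketch *s, int nr)   { s->flags |= 1UL << nr; }
static void clear_flag(struct state_sketch *s, int nr) { s->flags &= ~(1UL << nr); }

/* Returns true if this state may take part in reboot (grace period)
 * recovery.  State that already needs no-grace recovery expired before
 * the reboot, so reclaiming it under the grace period is not allowed. */
static bool mark_reclaim_reboot(struct state_sketch *s)
{
        set_flag(s, STATE_RECLAIM_REBOOT);
        if (test_flag(s, STATE_RECLAIM_NOGRACE)) {
                clear_flag(s, STATE_RECLAIM_REBOOT);
                return false;
        }
        return true;
}

/* Marking for no-grace recovery always supersedes reboot recovery. */
static void mark_reclaim_nograce(struct state_sketch *s)
{
        set_flag(s, STATE_RECLAIM_NOGRACE);
        clear_flag(s, STATE_RECLAIM_REBOOT);
}

int main(void)
{
        struct state_sketch st = { 0 };

        mark_reclaim_nograce(&st);
        printf("reboot reclaim allowed: %d\n", mark_reclaim_reboot(&st));
        return 0;
}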
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 8 ++- fs/nfs/nfs4proc.c | 7 +- fs/nfs/nfs4state.c | 189 ++++++++++++++++++++++++++++++++++++++++------------- 3 files changed, 155 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 0ac6bbb8eaa..1c6fbd1cda9 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -40,6 +40,9 @@ struct idmap; enum nfs4_client_state { NFS4CLNT_STATE_RECOVER = 0, NFS4CLNT_LEASE_EXPIRED, + NFS4CLNT_RECLAIM_REBOOT, + NFS4CLNT_RECLAIM_NOGRACE, + NFS4CLNT_CB_PATH_DOWN, }; /* @@ -128,6 +131,8 @@ enum { NFS_O_RDONLY_STATE, /* OPEN stateid has read-only state */ NFS_O_WRONLY_STATE, /* OPEN stateid has write-only state */ NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */ + NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */ + NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */ }; struct nfs4_state { @@ -160,6 +165,7 @@ struct nfs4_exception { }; struct nfs4_state_recovery_ops { + int state_flag_bit; int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); int (*recover_lock)(struct nfs4_state *, struct file_lock *); }; @@ -187,7 +193,7 @@ extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page); extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; -extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; +extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops; extern const u32 nfs4_fattr_bitmap[2]; extern const u32 nfs4_statfs_bitmap[2]; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d53aa2dace8..279ab36b5a6 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -984,7 +984,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) ret = nfs4_wait_clnt_recover(server->client, clp); if (ret != 0) return ret; - if (!test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) + if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) break; nfs4_schedule_state_recovery(clp); } @@ -2942,7 +2942,6 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre spin_lock(&clp->cl_lock); clp->cl_lease_time = fsinfo.lease_time * HZ; clp->cl_last_renewal = now; - clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); spin_unlock(&clp->cl_lock); } return status; @@ -3690,11 +3689,13 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, } struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = { + .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, }; -struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops = { +struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = { + .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, .recover_lock = nfs4_lock_expired, }; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 300faba9a18..e5cd8cacdce 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -821,6 +821,27 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) nfs4_recover_state(clp); } +static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) +{ + + set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); + /* Don't recover state that expired before the reboot */ + if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) { + clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); + return 0; + } + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); + return 1; +} + 
+static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) +{ + set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); + clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); + set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); + return 1; +} + static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; @@ -869,6 +890,8 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * server that doesn't support a grace period. */ list_for_each_entry(state, &sp->so_states, open_states) { + if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) + continue; if (state->state == 0) continue; status = ops->recover_open(sp, state); @@ -888,8 +911,7 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n", __func__, status); case -ENOENT: - case -NFS4ERR_RECLAIM_BAD: - case -NFS4ERR_RECLAIM_CONFLICT: + case -ESTALE: /* * Open state on this file cannot be recovered * All we can do is revert to using the zero stateid. @@ -899,8 +921,13 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs /* Mark the file as being 'closed' */ state->state = 0; break; + case -NFS4ERR_RECLAIM_BAD: + case -NFS4ERR_RECLAIM_CONFLICT: + nfs4_state_mark_reclaim_nograce(sp->so_client, state); + break; case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: + nfs4_state_mark_reclaim_nograce(sp->so_client, state); case -NFS4ERR_STALE_CLIENTID: goto out_err; } @@ -910,12 +937,26 @@ out_err: return status; } -static void nfs4_state_mark_reclaim(struct nfs_client *clp) +static void nfs4_clear_open_state(struct nfs4_state *state) +{ + struct nfs4_lock_state *lock; + + clear_bit(NFS_DELEGATED_STATE, &state->flags); + clear_bit(NFS_O_RDONLY_STATE, &state->flags); + clear_bit(NFS_O_WRONLY_STATE, &state->flags); + clear_bit(NFS_O_RDWR_STATE, &state->flags); + list_for_each_entry(lock, &state->lock_states, ls_locks) { + lock->ls_seqid.counter = 0; + lock->ls_seqid.flags = 0; + lock->ls_flags &= ~NFS_LOCK_INITIALIZED; + } +} + +static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; - struct nfs4_lock_state *lock; /* Reset all sequence ids to zero */ for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { @@ -924,20 +965,60 @@ static void nfs4_state_mark_reclaim(struct nfs_client *clp) sp->so_seqid.flags = 0; spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { - clear_bit(NFS_DELEGATED_STATE, &state->flags); - clear_bit(NFS_O_RDONLY_STATE, &state->flags); - clear_bit(NFS_O_WRONLY_STATE, &state->flags); - clear_bit(NFS_O_RDWR_STATE, &state->flags); - list_for_each_entry(lock, &state->lock_states, ls_locks) { - lock->ls_seqid.counter = 0; - lock->ls_seqid.flags = 0; - lock->ls_flags &= ~NFS_LOCK_INITIALIZED; - } + if (mark_reclaim(clp, state)) + nfs4_clear_open_state(state); } spin_unlock(&sp->so_lock); } } +static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) +{ + /* Mark all delegations for reclaim */ + nfs_delegation_mark_reclaim(clp); + nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); +} + +static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) +{ + struct nfs4_state_owner *sp; + struct rb_node *pos; + struct nfs4_state *state; + + if 
(!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) + return; + + for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { + sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + spin_lock(&sp->so_lock); + list_for_each_entry(state, &sp->so_states, open_states) { + if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags)) + continue; + nfs4_state_mark_reclaim_nograce(clp, state); + } + spin_unlock(&sp->so_lock); + } + + nfs_delegation_reap_unclaimed(clp); +} + +static void nfs_delegation_clear_all(struct nfs_client *clp) +{ + nfs_delegation_mark_reclaim(clp); + nfs_delegation_reap_unclaimed(clp); +} + +static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) +{ + nfs_delegation_clear_all(clp); + nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); +} + +static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp) +{ + clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); +} + static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { struct rb_node *pos; @@ -964,11 +1045,25 @@ static int nfs4_check_lease(struct nfs_client *clp) /* Yes there are: try to renew the old lease */ status = nfs4_proc_renew(clp, cred); put_rpccred(cred); + switch (status) { + case -NFS4ERR_CB_PATH_DOWN: + set_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state); + break; + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_LEASE_MOVED: + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_reboot(clp); + break; + case -NFS4ERR_EXPIRED: + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_nograce(clp); + } return status; } /* "reboot" to ensure we clear all state on the server */ clp->cl_boot_time = CURRENT_TIME; + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); return status; } @@ -993,7 +1088,6 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) static int reclaimer(void *ptr) { struct nfs_client *clp = ptr; - const struct nfs4_state_recovery_ops *ops; int status = 0; allow_signal(SIGKILL); @@ -1001,47 +1095,51 @@ static int reclaimer(void *ptr) /* Ensure exclusive access to NFSv4 state */ down_write(&clp->cl_sem); while (!list_empty(&clp->cl_superblocks)) { - ops = &nfs4_network_partition_recovery_ops; status = nfs4_check_lease(clp); - switch (status) { - case 0: - case -NFS4ERR_CB_PATH_DOWN: - goto out; - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_LEASE_MOVED: - ops = &nfs4_reboot_recovery_ops; - } - /* We're going to have to re-establish a clientid */ - nfs4_state_mark_reclaim(clp); + if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { + /* We're going to have to re-establish a clientid */ + status = nfs4_reclaim_lease(clp); + if (status) { + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + if (status == -EAGAIN) + continue; + goto out_error; + } + } - status = nfs4_reclaim_lease(clp); - if (status) { - if (status == -EAGAIN) + /* First recover reboot state... 
*/ + if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { + /* Note: list is protected by exclusive lock on cl->cl_sem */ + status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops); + if (status == -NFS4ERR_STALE_CLIENTID) { + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); continue; - goto out_error; + } + nfs4_state_end_reclaim_reboot(clp); + continue; } - /* Mark all delegations for reclaim */ - nfs_delegation_mark_reclaim(clp); - /* Note: list is protected by exclusive lock on cl->cl_sem */ - status = nfs4_do_reclaim(clp, ops); - if (status < 0) { - if (status == -NFS4ERR_NO_GRACE) { - ops = &nfs4_network_partition_recovery_ops; - status = nfs4_do_reclaim(clp, ops); - } - if (status == -NFS4ERR_STALE_CLIENTID) - continue; - if (status == -NFS4ERR_EXPIRED) - continue; + /* Now recover expired state... */ + if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { + /* Note: list is protected by exclusive lock on cl->cl_sem */ + status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops); + if (status < 0) { + set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); + if (status == -NFS4ERR_STALE_CLIENTID) + continue; + if (status == -NFS4ERR_EXPIRED) + continue; + goto out_error; + } else + nfs4_state_end_reclaim_nograce(clp); + continue; } - nfs_delegation_reap_unclaimed(clp); break; } out: up_write(&clp->cl_sem); - if (status == -NFS4ERR_CB_PATH_DOWN) + if (test_and_clear_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state)) nfs_handle_cb_pathdown(clp); nfs4_clear_recover_bit(clp); nfs_put_client(clp); @@ -1050,7 +1148,8 @@ out: out_error: printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s" " with error %d\n", clp->cl_hostname, -status); - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) + nfs4_state_end_reclaim_reboot(clp); goto out; } -- cgit v1.2.3-70-g09d2 From e598d843c08a7ab6bdfa8098de49afb017fc6c6a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:42 -0500 Subject: NFSv4: Remove redundant RENEW calls if we know the lease has expired Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4proc.c | 3 ++- fs/nfs/nfs4state.c | 47 +++++++++++++++++++++++++++++------------------ 3 files changed, 32 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 1c6fbd1cda9..76eda46b5a1 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -39,6 +39,7 @@ struct idmap; enum nfs4_client_state { NFS4CLNT_STATE_RECOVER = 0, + NFS4CLNT_CHECK_LEASE, NFS4CLNT_LEASE_EXPIRED, NFS4CLNT_RECLAIM_REBOOT, NFS4CLNT_RECLAIM_NOGRACE, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 279ab36b5a6..780ba004b3d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -984,7 +984,8 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) ret = nfs4_wait_clnt_recover(server->client, clp); if (ret != 0) return ret; - if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) + if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && + !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) break; nfs4_schedule_state_recovery(clp); } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e5cd8cacdce..3cc88a5d988 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -817,6 +817,8 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) { if (!clp) return; + if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) + set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0) 
nfs4_recover_state(clp); } @@ -1019,6 +1021,23 @@ static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp) clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } +static void nfs4_recovery_handle_error(struct nfs_client *clp, int error) +{ + switch (error) { + case -NFS4ERR_CB_PATH_DOWN: + set_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state); + break; + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_LEASE_MOVED: + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_reboot(clp); + break; + case -NFS4ERR_EXPIRED: + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + nfs4_state_start_reclaim_nograce(clp); + } +} + static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { struct rb_node *pos; @@ -1031,6 +1050,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov if (status < 0) break; } + nfs4_recovery_handle_error(clp, status); return status; } @@ -1045,19 +1065,7 @@ static int nfs4_check_lease(struct nfs_client *clp) /* Yes there are: try to renew the old lease */ status = nfs4_proc_renew(clp, cred); put_rpccred(cred); - switch (status) { - case -NFS4ERR_CB_PATH_DOWN: - set_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state); - break; - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_LEASE_MOVED: - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_start_reclaim_reboot(clp); - break; - case -NFS4ERR_EXPIRED: - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_start_reclaim_nograce(clp); - } + nfs4_recovery_handle_error(clp, status); return status; } @@ -1095,8 +1103,6 @@ static int reclaimer(void *ptr) /* Ensure exclusive access to NFSv4 state */ down_write(&clp->cl_sem); while (!list_empty(&clp->cl_superblocks)) { - status = nfs4_check_lease(clp); - if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); @@ -1106,16 +1112,21 @@ static int reclaimer(void *ptr) continue; goto out_error; } + clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); + } + + if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) { + status = nfs4_check_lease(clp); + if (status != 0) + continue; } /* First recover reboot state... */ if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { /* Note: list is protected by exclusive lock on cl->cl_sem */ status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops); - if (status == -NFS4ERR_STALE_CLIENTID) { - set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); + if (status == -NFS4ERR_STALE_CLIENTID) continue; - } nfs4_state_end_reclaim_reboot(clp); continue; } -- cgit v1.2.3-70-g09d2 From 0f605b56008c4b6b075217480c36ba395ca4eaa4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:42 -0500 Subject: NFSv4: Don't tell server we rebooted when not necessary Instead of doing a full setclientid, try doing a RENEW call first. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 3cc88a5d988..a780518c5c3 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1059,19 +1059,19 @@ static int nfs4_check_lease(struct nfs_client *clp) struct rpc_cred *cred; int status = -NFS4ERR_EXPIRED; - /* Are there any open files on this volume? */ + /* Is the client already known to have an expired lease? 
*/ + if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) + return 0; cred = nfs4_get_renew_cred(clp); - if (cred != NULL) { - /* Yes there are: try to renew the old lease */ - status = nfs4_proc_renew(clp, cred); - put_rpccred(cred); - nfs4_recovery_handle_error(clp, status); - return status; + if (cred == NULL) { + cred = nfs4_get_setclientid_cred(clp); + if (cred == NULL) + goto out; } - - /* "reboot" to ensure we clear all state on the server */ - clp->cl_boot_time = CURRENT_TIME; - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + status = nfs4_proc_renew(clp, cred); + put_rpccred(cred); +out: + nfs4_recovery_handle_error(clp, status); return status; } -- cgit v1.2.3-70-g09d2 From 7eff03aec917e17f733471d7e12c262c0c96409f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:43 -0500 Subject: NFSv4: Add a recovery marking scheme for state owners Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 7 +++++++ fs/nfs/nfs4proc.c | 2 ++ fs/nfs/nfs4state.c | 21 +++++++++++++++++---- 3 files changed, 26 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 76eda46b5a1..a4e7b3feef8 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -94,12 +94,18 @@ struct nfs4_state_owner { spinlock_t so_lock; atomic_t so_count; + unsigned long so_flags; struct list_head so_states; struct list_head so_delegations; struct nfs_seqid_counter so_seqid; struct rpc_sequence so_sequence; }; +enum { + NFS_OWNER_RECLAIM_REBOOT, + NFS_OWNER_RECLAIM_NOGRACE +}; + /* * struct nfs4_state maintains the client-side state for a given * (state_owner,inode) tuple (OPEN) or state_owner (LOCK). @@ -166,6 +172,7 @@ struct nfs4_exception { }; struct nfs4_state_recovery_ops { + int owner_flag_bit; int state_flag_bit; int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *); int (*recover_lock)(struct nfs4_state *, struct file_lock *); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 780ba004b3d..019c8d67e14 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3690,12 +3690,14 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, } struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = { + .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT, .state_flag_bit = NFS_STATE_RECLAIM_REBOOT, .recover_open = nfs4_open_reclaim, .recover_lock = nfs4_lock_reclaim, }; struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = { + .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, .recover_open = nfs4_open_expired, .recover_lock = nfs4_lock_expired, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a780518c5c3..7dcca23167d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -832,6 +832,7 @@ static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_st clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); return 0; } + set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); return 1; } @@ -840,6 +841,7 @@ static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_s { set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); + set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags); set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); return 1; } @@ -1043,14 +1045,25 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov struct rb_node *pos; int status = 0; - /* Note: list is protected by exclusive lock on 
cl->cl_sem */ +restart: + spin_lock(&clp->cl_lock); for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) + continue; + atomic_inc(&sp->so_count); + spin_unlock(&clp->cl_lock); status = nfs4_reclaim_open_state(sp, ops); - if (status < 0) - break; + if (status < 0) { + set_bit(ops->owner_flag_bit, &sp->so_flags); + nfs4_put_state_owner(sp); + nfs4_recovery_handle_error(clp, status); + return status; + } + nfs4_put_state_owner(sp); + goto restart; } - nfs4_recovery_handle_error(clp, status); + spin_unlock(&clp->cl_lock); return status; } -- cgit v1.2.3-70-g09d2 From fe1d81952e7f62b9da7dc438caaa07e35ec2b908 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:43 -0500 Subject: NFSv4: Ensure that nfs4_reclaim_open_state() doesn't depend on cl_sem Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 7dcca23167d..beade5570b5 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -893,11 +893,15 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * recovering after a network partition or a reboot from a * server that doesn't support a grace period. */ +restart: + spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) continue; if (state->state == 0) continue; + atomic_inc(&state->count); + spin_unlock(&sp->so_lock); status = ops->recover_open(sp, state); if (status >= 0) { status = nfs4_reclaim_locks(state, ops); @@ -907,7 +911,8 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs printk("%s: Lock reclaim failed!\n", __func__); } - continue; + nfs4_put_open_state(state); + goto restart; } } switch (status) { @@ -935,9 +940,13 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs case -NFS4ERR_STALE_CLIENTID: goto out_err; } + nfs4_put_open_state(state); + goto restart; } + spin_unlock(&sp->so_lock); return 0; out_err: + nfs4_put_open_state(state); return status; } -- cgit v1.2.3-70-g09d2 From 65de872ed6501a68e918a49a5c2fa7fca9c6ce21 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:44 -0500 Subject: NFS: Remove the unnecessary argument to nfs4_wait_clnt_recover() ...and move some code around in order to clear out an unnecessary forward declaration. 
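[ Sketch: one of the helpers relocated by the diff below, nfs4_delay(), uses a small bounded exponential backoff: clamp the timeout, sleep for it, then double it for the next retry. The standalone version here shows the same pattern; the poll bounds are illustrative constants, not the real NFS4_POLL_RETRY_MIN/MAX values. ]

#include <stdio.h>

#define POLL_RETRY_MIN  1       /* stand-ins for NFS4_POLL_RETRY_MIN/MAX */
#define POLL_RETRY_MAX  16

/* Clamp the stored timeout, report how long to sleep this time, and
 * double the stored value so the next retry waits longer. */
static long next_delay(long *timeout)
{
        long sleep;

        if (*timeout <= 0)
                *timeout = POLL_RETRY_MIN;
        if (*timeout > POLL_RETRY_MAX)
                *timeout = POLL_RETRY_MAX;
        sleep = *timeout;
        *timeout <<= 1;
        return sleep;
}

int main(void)
{
        long timeout = 0;
        int i;

        /* Prints 1, 2, 4, 8, 16, 16, 16: bounded exponential backoff. */
        for (i = 0; i < 7; i++)
                printf("retry %d: sleep %ld\n", i, next_delay(&timeout));
        return 0;
}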
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 151 +++++++++++++++++++++++++++--------------------------- 1 file changed, 75 insertions(+), 76 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 019c8d67e14..660c5dcfb0a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -63,8 +63,6 @@ struct nfs4_opendata; static int _nfs4_proc_open(struct nfs4_opendata *data); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *); -static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); -static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp); static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr); static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); @@ -195,6 +193,80 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent kunmap_atomic(start, KM_USER0); } +static int nfs4_wait_bit_killable(void *word) +{ + if (fatal_signal_pending(current)) + return -ERESTARTSYS; + schedule(); + return 0; +} + +static int nfs4_wait_clnt_recover(struct nfs_client *clp) +{ + int res; + + might_sleep(); + + rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_); + + res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, + nfs4_wait_bit_killable, TASK_KILLABLE); + + rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_); + return res; +} + +static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) +{ + int res = 0; + + might_sleep(); + + if (*timeout <= 0) + *timeout = NFS4_POLL_RETRY_MIN; + if (*timeout > NFS4_POLL_RETRY_MAX) + *timeout = NFS4_POLL_RETRY_MAX; + schedule_timeout_killable(*timeout); + if (fatal_signal_pending(current)) + res = -ERESTARTSYS; + *timeout <<= 1; + return res; +} + +/* This is the error handling routine for processes that are allowed + * to sleep. 
+ */ +static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) +{ + struct nfs_client *clp = server->nfs_client; + int ret = errorcode; + + exception->retry = 0; + switch(errorcode) { + case 0: + return 0; + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + nfs4_schedule_state_recovery(clp); + ret = nfs4_wait_clnt_recover(clp); + if (ret == 0) + exception->retry = 1; + break; + case -NFS4ERR_FILE_OPEN: + case -NFS4ERR_GRACE: + case -NFS4ERR_DELAY: + ret = nfs4_delay(server->client, &exception->timeout); + if (ret != 0) + break; + case -NFS4ERR_OLD_STATEID: + exception->retry = 1; + } + /* We failed to handle the error */ + return nfs4_map_errors(ret); +} + + static void renew_lease(const struct nfs_server *server, unsigned long timestamp) { struct nfs_client *clp = server->nfs_client; @@ -981,7 +1053,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) int ret; for (;;) { - ret = nfs4_wait_clnt_recover(server->client, clp); + ret = nfs4_wait_clnt_recover(clp); if (ret != 0) return ret; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && @@ -2799,79 +2871,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) return 0; } -static int nfs4_wait_bit_killable(void *word) -{ - if (fatal_signal_pending(current)) - return -ERESTARTSYS; - schedule(); - return 0; -} - -static int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs_client *clp) -{ - int res; - - might_sleep(); - - rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_); - - res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, - nfs4_wait_bit_killable, TASK_KILLABLE); - - rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_); - return res; -} - -static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) -{ - int res = 0; - - might_sleep(); - - if (*timeout <= 0) - *timeout = NFS4_POLL_RETRY_MIN; - if (*timeout > NFS4_POLL_RETRY_MAX) - *timeout = NFS4_POLL_RETRY_MAX; - schedule_timeout_killable(*timeout); - if (fatal_signal_pending(current)) - res = -ERESTARTSYS; - *timeout <<= 1; - return res; -} - -/* This is the error handling routine for processes that are allowed - * to sleep. - */ -static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) -{ - struct nfs_client *clp = server->nfs_client; - int ret = errorcode; - - exception->retry = 0; - switch(errorcode) { - case 0: - return 0; - case -NFS4ERR_STALE_CLIENTID: - case -NFS4ERR_STALE_STATEID: - case -NFS4ERR_EXPIRED: - nfs4_schedule_state_recovery(clp); - ret = nfs4_wait_clnt_recover(server->client, clp); - if (ret == 0) - exception->retry = 1; - break; - case -NFS4ERR_FILE_OPEN: - case -NFS4ERR_GRACE: - case -NFS4ERR_DELAY: - ret = nfs4_delay(server->client, &exception->timeout); - if (ret != 0) - break; - case -NFS4ERR_OLD_STATEID: - exception->retry = 1; - } - /* We failed to handle the error */ - return nfs4_map_errors(ret); -} - int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred) { nfs4_verifier sc_verifier; -- cgit v1.2.3-70-g09d2 From 19e03c570e6099ffaf24e5628d4fe1a8acbe820d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:44 -0500 Subject: NFSv4: Ensure that file unlock requests don't conflict with state recovery The unlock path is currently failing to take the nfs_client->cl_sem read lock, and hence the recovery path may see locks disappear from underneath it. 
Also ensure that it takes the nfs_inode->rwsem read lock so that there is no conflict with delegation recalls. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 26 ++++++++++++++++---------- fs/nfs/nfs4state.c | 4 ++++ 2 files changed, 20 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 660c5dcfb0a..aec4e47c462 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3273,6 +3273,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { + struct nfs_client *clp = state->owner->so_client; + struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_seqid *seqid; struct nfs4_lock_state *lsp; struct rpc_task *task; @@ -3282,8 +3284,15 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * status = nfs4_set_lock_state(state, request); /* Unlock _before_ we do the RPC call */ request->fl_flags |= FL_EXISTS; - if (do_vfs_lock(request->fl_file, request) == -ENOENT) + down_read(&clp->cl_sem); + down_read(&nfsi->rwsem); + if (do_vfs_lock(request->fl_file, request) == -ENOENT) { + up_read(&nfsi->rwsem); + up_read(&clp->cl_sem); goto out; + } + up_read(&nfsi->rwsem); + up_read(&clp->cl_sem); if (status != 0) goto out; /* Is this a delegated lock? */ @@ -3510,6 +3519,7 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { struct nfs_client *clp = state->owner->so_client; + struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; int status; @@ -3522,18 +3532,13 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock if (status < 0) goto out; down_read(&clp->cl_sem); + down_read(&nfsi->rwsem); if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { - struct nfs_inode *nfsi = NFS_I(state->inode); /* Yes: cache locks! */ - down_read(&nfsi->rwsem); /* ...but avoid races with delegation recall... 
*/ - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { - request->fl_flags = fl_flags & ~FL_SLEEP; - status = do_vfs_lock(request->fl_file, request); - up_read(&nfsi->rwsem); - goto out_unlock; - } - up_read(&nfsi->rwsem); + request->fl_flags = fl_flags & ~FL_SLEEP; + status = do_vfs_lock(request->fl_file, request); + goto out_unlock; } status = _nfs4_do_setlk(state, cmd, request, 0); if (status != 0) @@ -3543,6 +3548,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock if (do_vfs_lock(request->fl_file, request) < 0) printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); out_unlock: + up_read(&nfsi->rwsem); up_read(&clp->cl_sem); out: request->fl_flags = fl_flags; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index beade5570b5..16c9fbdf97b 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -849,9 +849,11 @@ static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_s static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) { struct inode *inode = state->inode; + struct nfs_inode *nfsi = NFS_I(inode); struct file_lock *fl; int status = 0; + down_write(&nfsi->rwsem); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK))) continue; @@ -874,8 +876,10 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_ goto out_err; } } + up_write(&nfsi->rwsem); return 0; out_err: + up_write(&nfsi->rwsem); return status; } -- cgit v1.2.3-70-g09d2 From 95d35cb4c473c754824967c0b069bbeb7efa4847 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:45 -0500 Subject: NFSv4: Remove nfs_client->cl_sem Now that we're using the flags to indicate state that needs to be recovered, as well as having implemented proper refcounting and spinlocking on the state and open_owners, we can get rid of nfs_client->cl_sem. The only remaining case that was dubious was the file locking, and that case is now covered by the nfsi->rwsem. 
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 1 - fs/nfs/delegation.c | 5 ----- fs/nfs/nfs4proc.c | 20 +------------------- fs/nfs/nfs4renewd.c | 3 --- fs/nfs/nfs4state.c | 17 ----------------- include/linux/nfs_fs_sb.h | 6 ------ 6 files changed, 1 insertion(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 70b6d9e8517..b643f0ec5c2 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -143,7 +143,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ clp->cl_proto = cl_init->proto; #ifdef CONFIG_NFS_V4 - init_rwsem(&clp->cl_sem); INIT_LIST_HEAD(&clp->cl_delegations); spin_lock_init(&clp->cl_lock); INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 646ba3e75a1..ebc06f2da31 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -243,16 +243,13 @@ static void nfs_msync_inode(struct inode *inode) */ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; struct nfs_inode *nfsi = NFS_I(inode); nfs_msync_inode(inode); - down_read(&clp->cl_sem); /* Guard against new delegated open calls */ down_write(&nfsi->rwsem); nfs_delegation_claim_opens(inode, &delegation->stateid); up_write(&nfsi->rwsem); - up_read(&clp->cl_sem); nfs_msync_inode(inode); return nfs_do_return_delegation(inode, delegation, 1); @@ -425,7 +422,6 @@ static int recall_thread(void *data) daemonize("nfsv4-delegreturn"); nfs_msync_inode(inode); - down_read(&clp->cl_sem); down_write(&nfsi->rwsem); spin_lock(&clp->cl_lock); delegation = nfs_detach_delegation_locked(nfsi, args->stateid); @@ -437,7 +433,6 @@ static int recall_thread(void *data) complete(&args->started); nfs_delegation_claim_opens(inode, args->stateid); up_write(&nfsi->rwsem); - up_read(&clp->cl_sem); nfs_msync_inode(inode); if (delegation != NULL) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index aec4e47c462..26dcd77abb0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -207,12 +207,8 @@ static int nfs4_wait_clnt_recover(struct nfs_client *clp) might_sleep(); - rwsem_acquire(&clp->cl_sem.dep_map, 0, 0, _RET_IP_); - res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, nfs4_wait_bit_killable, TASK_KILLABLE); - - rwsem_release(&clp->cl_sem.dep_map, 1, _RET_IP_); return res; } @@ -1135,7 +1131,6 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; struct nfs_server *server = NFS_SERVER(dir); - struct nfs_client *clp = server->nfs_client; struct nfs4_opendata *opendata; int status; @@ -1150,11 +1145,10 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct goto err_put_state_owner; if (path->dentry->d_inode != NULL) nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE)); - down_read(&clp->cl_sem); status = -ENOMEM; opendata = nfs4_opendata_alloc(path, sp, flags, sattr); if (opendata == NULL) - goto err_release_rwsem; + goto err_put_state_owner; if (path->dentry->d_inode != NULL) opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp); @@ -1172,13 +1166,10 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct goto err_opendata_put; nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); - up_read(&clp->cl_sem); *res = state; return 0; err_opendata_put: nfs4_opendata_put(opendata); -err_release_rwsem: - 
up_read(&clp->cl_sem); err_put_state_owner: nfs4_put_state_owner(sp); out_err: @@ -3099,7 +3090,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock struct nfs4_lock_state *lsp; int status; - down_read(&clp->cl_sem); arg.lock_owner.clientid = clp->cl_clientid; status = nfs4_set_lock_state(state, request); if (status != 0) @@ -3116,7 +3106,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock } request->fl_ops->fl_release_private(request); out: - up_read(&clp->cl_sem); return status; } @@ -3273,7 +3262,6 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs_client *clp = state->owner->so_client; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_seqid *seqid; struct nfs4_lock_state *lsp; @@ -3284,15 +3272,12 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * status = nfs4_set_lock_state(state, request); /* Unlock _before_ we do the RPC call */ request->fl_flags |= FL_EXISTS; - down_read(&clp->cl_sem); down_read(&nfsi->rwsem); if (do_vfs_lock(request->fl_file, request) == -ENOENT) { up_read(&nfsi->rwsem); - up_read(&clp->cl_sem); goto out; } up_read(&nfsi->rwsem); - up_read(&clp->cl_sem); if (status != 0) goto out; /* Is this a delegated lock? */ @@ -3518,7 +3503,6 @@ static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs_client *clp = state->owner->so_client; struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; int status; @@ -3531,7 +3515,6 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock status = do_vfs_lock(request->fl_file, request); if (status < 0) goto out; - down_read(&clp->cl_sem); down_read(&nfsi->rwsem); if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { /* Yes: cache locks! */ @@ -3549,7 +3532,6 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); out_unlock: up_read(&nfsi->rwsem); - up_read(&clp->cl_sem); out: request->fl_flags = fl_flags; return status; diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 9fe8640a88e..6101f955f23 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -65,7 +65,6 @@ nfs4_renew_state(struct work_struct *work) long lease, timeout; unsigned long last, now; - down_read(&clp->cl_sem); dprintk("%s: start\n", __func__); /* Are there any active superblocks? */ if (list_empty(&clp->cl_superblocks)) @@ -101,11 +100,9 @@ nfs4_renew_state(struct work_struct *work) schedule_delayed_work(&clp->cl_renewd, timeout); spin_unlock(&clp->cl_lock); out: - up_read(&clp->cl_sem); dprintk("%s: done\n", __func__); } -/* Must be called with clp->cl_sem locked for writes */ void nfs4_schedule_state_renewal(struct nfs_client *clp) { diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 16c9fbdf97b..ec0cbe00a80 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -305,10 +305,6 @@ nfs4_drop_state_owner(struct nfs4_state_owner *sp) } } -/* - * Note: must be called with clp->cl_sem held in order to prevent races - * with reboot recovery! 
- */ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) { struct nfs_client *clp = server->nfs_client; @@ -337,10 +333,6 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct return sp; } -/* - * Must be called with clp->cl_sem held in order to avoid races - * with state recovery... - */ void nfs4_put_state_owner(struct nfs4_state_owner *sp) { struct nfs_client *clp = sp->so_client; @@ -442,10 +434,6 @@ out: return state; } -/* - * Beware! Caller must be holding exactly one - * reference to clp->cl_sem! - */ void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; @@ -578,7 +566,6 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * - * The caller must be holding clp->cl_sem */ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) { @@ -1127,7 +1114,6 @@ static int reclaimer(void *ptr) allow_signal(SIGKILL); /* Ensure exclusive access to NFSv4 state */ - down_write(&clp->cl_sem); while (!list_empty(&clp->cl_superblocks)) { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ @@ -1149,7 +1135,6 @@ static int reclaimer(void *ptr) /* First recover reboot state... */ if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { - /* Note: list is protected by exclusive lock on cl->cl_sem */ status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops); if (status == -NFS4ERR_STALE_CLIENTID) continue; @@ -1159,7 +1144,6 @@ static int reclaimer(void *ptr) /* Now recover expired state... */ if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { - /* Note: list is protected by exclusive lock on cl->cl_sem */ status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops); if (status < 0) { set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); @@ -1175,7 +1159,6 @@ static int reclaimer(void *ptr) break; } out: - up_write(&clp->cl_sem); if (test_and_clear_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state)) nfs_handle_cb_pathdown(clp); nfs4_clear_recover_bit(clp); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 4e477ae5869..9bb81aec91c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -42,12 +42,6 @@ struct nfs_client { struct rb_root cl_openowner_id; struct rb_root cl_lockowner_id; - /* - * The following rwsem ensures exclusive access to the server - * while we recover the state following a lease expiration. - */ - struct rw_semaphore cl_sem; - struct list_head cl_delegations; struct rb_root cl_state_owners; spinlock_t cl_lock; -- cgit v1.2.3-70-g09d2 From 9e33bed55239bdcee1746c31a11177d239bac1b5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:46 -0500 Subject: NFSv4: Add recovery for individual stateids NFSv4 defines a number of state errors which the client does not currently handle. Among those we should worry about are: NFS4ERR_ADMIN_REVOKED - the server's administrator revoked our locks and/or delegations. NFS4ERR_BAD_STATEID - the client and server are out of sync, possibly due to a delegation return racing with an OPEN request. NFS4ERR_OPENMODE - the client attempted to do something not sanctioned by the open mode of the stateid. Should normally just occur as a result of a delegation return race. 
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 ++ fs/nfs/nfs4proc.c | 37 ++++++++++++++++++++++++++++--------- fs/nfs/nfs4state.c | 2 +- 3 files changed, 31 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index a4e7b3feef8..46cbc0cdf88 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -169,6 +169,7 @@ struct nfs4_state { struct nfs4_exception { long timeout; int retry; + struct nfs4_state *state; }; struct nfs4_state_recovery_ops { @@ -226,6 +227,7 @@ extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t); extern void nfs4_close_sync(struct path *, struct nfs4_state *, mode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); +extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 26dcd77abb0..8a2dee9caa6 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -62,7 +62,7 @@ struct nfs4_opendata; static int _nfs4_proc_open(struct nfs4_opendata *data); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); -static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *); +static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr); static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); @@ -235,12 +235,19 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs_client *clp = server->nfs_client; + struct nfs4_state *state = exception->state; int ret = errorcode; exception->retry = 0; switch(errorcode) { case 0: return 0; + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_OPENMODE: + if (state == NULL) + break; + nfs4_state_mark_reclaim_nograce(clp, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: @@ -1320,10 +1327,13 @@ static void nfs4_close_done(struct rpc_task *task, void *data) renew_lease(server, calldata->timestamp); break; case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_OLD_STATEID: + case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: - break; + if (calldata->arg.open_flags == 0) + break; default: - if (nfs4_async_handle_error(task, server) == -EAGAIN) { + if (nfs4_async_handle_error(task, server, state) == -EAGAIN) { rpc_restart_call(task); return; } @@ -1418,6 +1428,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); if (calldata->arg.seqid == NULL) goto out_free_calldata; + calldata->arg.open_flags = 0; calldata->arg.bitmask = server->attr_bitmask; calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; @@ -2064,7 +2075,7 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) { struct nfs_removeres *res = task->tk_msg.rpc_resp; - if (nfs4_async_handle_error(task, res->server) == -EAGAIN) + if 
(nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) return 0; update_changeattr(dir, &res->cinfo); nfs_post_op_update_inode(dir, &res->dir_attr); @@ -2492,7 +2503,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data) { struct nfs_server *server = NFS_SERVER(data->inode); - if (nfs4_async_handle_error(task, server) == -EAGAIN) { + if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { rpc_restart_call(task); return -EAGAIN; } @@ -2513,7 +2524,7 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; - if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) { + if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { rpc_restart_call(task); return -EAGAIN; } @@ -2539,7 +2550,7 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data) { struct inode *inode = data->inode; - if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) { + if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) { rpc_restart_call(task); return -EAGAIN; } @@ -2832,13 +2843,19 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen } static int -nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) +nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state) { struct nfs_client *clp = server->nfs_client; if (!clp || task->tk_status >= 0) return 0; switch(task->tk_status) { + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_OPENMODE: + if (state == NULL) + break; + nfs4_state_mark_reclaim_nograce(clp, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: @@ -3195,11 +3212,13 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) sizeof(calldata->lsp->ls_stateid.data)); renew_lease(calldata->server, calldata->timestamp); break; + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: break; default: - if (nfs4_async_handle_error(task, calldata->server) == -EAGAIN) + if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN) rpc_restart_call(task); } } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index ec0cbe00a80..3d78706b3ef 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -824,7 +824,7 @@ static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_st return 1; } -static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) +int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) { set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); -- cgit v1.2.3-70-g09d2 From 515d86117724abe39d7d57d7ccc7cc5c44480529 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:46 -0500 Subject: NFSv4: Clean up the support for returning multiple delegations Add a flag to mark delegations as requiring return, then run a garbage collector. In the future, this will allow for more flexible delegation management, where delegations may be marked for return if it turns out that they are not being referenced. 
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 97 +++++++++++++++++++++++++---------------------------- fs/nfs/delegation.h | 8 +++-- fs/nfs/super.c | 2 +- 3 files changed, 52 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index ebc06f2da31..bc9d4f9a788 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -255,6 +255,34 @@ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegat return nfs_do_return_delegation(inode, delegation, 1); } +/* + * Return all delegations that have been marked for return + */ +static void nfs_client_return_marked_delegations(struct nfs_client *clp) +{ + struct nfs_delegation *delegation; + struct inode *inode; + +restart: + rcu_read_lock(); + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) + continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) + continue; + spin_lock(&clp->cl_lock); + delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); + spin_unlock(&clp->cl_lock); + rcu_read_unlock(); + if (delegation != NULL) + __nfs_inode_return_delegation(inode, delegation); + iput(inode); + goto restart; + } + rcu_read_unlock(); +} + /* * This function returns the delegation without reclaiming opens * or protecting against delegation reclaims. @@ -296,63 +324,46 @@ int nfs_inode_return_delegation(struct inode *inode) /* * Return all delegations associated to a super block */ -void nfs_return_all_delegations(struct super_block *sb) +void nfs_super_return_all_delegations(struct super_block *sb) { struct nfs_client *clp = NFS_SB(sb)->nfs_client; struct nfs_delegation *delegation; - struct inode *inode; if (clp == NULL) return; -restart: rcu_read_lock(); list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - inode = NULL; spin_lock(&delegation->lock); if (delegation->inode != NULL && delegation->inode->i_sb == sb) - inode = igrab(delegation->inode); + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); spin_unlock(&delegation->lock); - if (inode == NULL) - continue; - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); - spin_unlock(&clp->cl_lock); - rcu_read_unlock(); - if (delegation != NULL) - __nfs_inode_return_delegation(inode, delegation); - iput(inode); - goto restart; } rcu_read_unlock(); + nfs_client_return_marked_delegations(clp); +} + +static void nfs_client_return_all_delegations(struct nfs_client *clp) +{ + struct nfs_delegation *delegation; + + rcu_read_lock(); + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + rcu_read_unlock(); + nfs_client_return_marked_delegations(clp); } static int nfs_do_expire_all_delegations(void *ptr) { struct nfs_client *clp = ptr; - struct nfs_delegation *delegation; - struct inode *inode; allow_signal(SIGKILL); -restart: + if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0) goto out; if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) goto out; - rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) - continue; - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); - spin_unlock(&clp->cl_lock); - rcu_read_unlock(); - if (delegation) - __nfs_inode_return_delegation(inode, delegation); - iput(inode); - goto restart; - } - 
rcu_read_unlock(); + nfs_client_return_all_delegations(clp); out: nfs_put_client(clp); module_put_and_exit(0); @@ -379,27 +390,9 @@ void nfs_expire_all_delegations(struct nfs_client *clp) */ void nfs_handle_cb_pathdown(struct nfs_client *clp) { - struct nfs_delegation *delegation; - struct inode *inode; - if (clp == NULL) return; -restart: - rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) - continue; - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL); - spin_unlock(&clp->cl_lock); - rcu_read_unlock(); - if (delegation != NULL) - __nfs_inode_return_delegation(inode, delegation); - iput(inode); - goto restart; - } - rcu_read_unlock(); + nfs_client_return_all_delegations(clp); } struct recall_threadargs { diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 5e9f40e0a7d..c772bab12ee 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -20,12 +20,16 @@ struct nfs_delegation { int type; loff_t maxsize; __u64 change_attr; -#define NFS_DELEGATION_NEED_RECLAIM 0 unsigned long flags; spinlock_t lock; struct rcu_head rcu; }; +enum { + NFS_DELEGATION_NEED_RECLAIM = 0, + NFS_DELEGATION_RETURN, +}; + int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); int nfs_inode_return_delegation(struct inode *inode); @@ -33,7 +37,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s void nfs_inode_return_delegation_noreclaim(struct inode *inode); struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle); -void nfs_return_all_delegations(struct super_block *sb); +void nfs_super_return_all_delegations(struct super_block *sb); void nfs_expire_all_delegations(struct nfs_client *clp); void nfs_handle_cb_pathdown(struct nfs_client *clp); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index d8e062fe76b..bd4c3dd550d 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2433,7 +2433,7 @@ static void nfs4_kill_super(struct super_block *sb) { struct nfs_server *server = NFS_SB(sb); - nfs_return_all_delegations(sb); + nfs_super_return_all_delegations(sb); kill_anon_super(sb); nfs4_renewd_prepare_shutdown(server); -- cgit v1.2.3-70-g09d2 From 707fb4b324371f1b4bea5eb29e39d265c66086ae Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:47 -0500 Subject: NFSv4: Clean up NFS4ERR_CB_PATH_DOWN error management... Add a delegation cleanup phase to the state management loop, and do the NFS4ERR_CB_PATH_DOWN recovery there. 
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 14 ++++++++------ fs/nfs/delegation.h | 1 + fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4state.c | 9 ++++++--- 4 files changed, 16 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index bc9d4f9a788..ff2c1585d23 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -258,7 +258,7 @@ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegat /* * Return all delegations that have been marked for return */ -static void nfs_client_return_marked_delegations(struct nfs_client *clp) +void nfs_client_return_marked_delegations(struct nfs_client *clp) { struct nfs_delegation *delegation; struct inode *inode; @@ -342,15 +342,16 @@ void nfs_super_return_all_delegations(struct super_block *sb) nfs_client_return_marked_delegations(clp); } -static void nfs_client_return_all_delegations(struct nfs_client *clp) +static void nfs_client_mark_return_all_delegations(struct nfs_client *clp) { struct nfs_delegation *delegation; rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); + } rcu_read_unlock(); - nfs_client_return_marked_delegations(clp); } static int nfs_do_expire_all_delegations(void *ptr) @@ -363,7 +364,8 @@ static int nfs_do_expire_all_delegations(void *ptr) goto out; if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) goto out; - nfs_client_return_all_delegations(clp); + nfs_client_mark_return_all_delegations(clp); + nfs_client_return_marked_delegations(clp); out: nfs_put_client(clp); module_put_and_exit(0); @@ -392,7 +394,7 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp) { if (clp == NULL) return; - nfs_client_return_all_delegations(clp); + nfs_client_mark_return_all_delegations(clp); } struct recall_threadargs { diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index c772bab12ee..56f3eb558ef 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -40,6 +40,7 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs void nfs_super_return_all_delegations(struct super_block *sb); void nfs_expire_all_delegations(struct nfs_client *clp); void nfs_handle_cb_pathdown(struct nfs_client *clp); +void nfs_client_return_marked_delegations(struct nfs_client *clp); void nfs_delegation_mark_reclaim(struct nfs_client *clp); void nfs_delegation_reap_unclaimed(struct nfs_client *clp); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 46cbc0cdf88..84a4f5223f6 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -43,7 +43,7 @@ enum nfs4_client_state { NFS4CLNT_LEASE_EXPIRED, NFS4CLNT_RECLAIM_REBOOT, NFS4CLNT_RECLAIM_NOGRACE, - NFS4CLNT_CB_PATH_DOWN, + NFS4CLNT_DELEGRETURN, }; /* diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 3d78706b3ef..2894502b94d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1027,7 +1027,7 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error) { switch (error) { case -NFS4ERR_CB_PATH_DOWN: - set_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state); + nfs_handle_cb_pathdown(clp); break; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_LEASE_MOVED: @@ -1156,11 +1156,14 @@ static int reclaimer(void *ptr) nfs4_state_end_reclaim_nograce(clp); continue; } + + if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) { + nfs_client_return_marked_delegations(clp); + continue; 
+ } break; } out: - if (test_and_clear_bit(NFS4CLNT_CB_PATH_DOWN, &clp->cl_state)) - nfs_handle_cb_pathdown(clp); nfs4_clear_recover_bit(clp); nfs_put_client(clp); module_put_and_exit(0); -- cgit v1.2.3-70-g09d2 From e005e8041c132af9f70862e1387a222198f95e7f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:48 -0500 Subject: NFSv4: Rename the state reclaimer thread It is really a more general purpose state management thread at this point. Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 2 +- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 4 ++-- fs/nfs/nfs4state.c | 49 ++++++++++++++++++++++++++++--------------------- 4 files changed, 32 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index ff2c1585d23..4692fdc8abf 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -360,7 +360,7 @@ static int nfs_do_expire_all_delegations(void *ptr) allow_signal(SIGKILL); - if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0) + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) goto out; if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) goto out; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 84a4f5223f6..b7a12e7c660 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -38,7 +38,7 @@ struct idmap; ((err) != NFSERR_NOFILEHANDLE)) enum nfs4_client_state { - NFS4CLNT_STATE_RECOVER = 0, + NFS4CLNT_MANAGER_RUNNING = 0, NFS4CLNT_CHECK_LEASE, NFS4CLNT_LEASE_EXPIRED, NFS4CLNT_RECLAIM_REBOOT, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8a2dee9caa6..2a347d47e38 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -207,7 +207,7 @@ static int nfs4_wait_clnt_recover(struct nfs_client *clp) might_sleep(); - res = wait_on_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER, + res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING, nfs4_wait_bit_killable, TASK_KILLABLE); return res; } @@ -2861,7 +2861,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, case -NFS4ERR_EXPIRED: rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); nfs4_schedule_state_recovery(clp); - if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0) + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); task->tk_status = 0; return -EAGAIN; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 2894502b94d..83f3dd39ac5 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -767,32 +767,34 @@ unlock: return status; } -static int reclaimer(void *); +static int nfs4_run_state_manager(void *); -static inline void nfs4_clear_recover_bit(struct nfs_client *clp) +static void nfs4_clear_state_manager_bit(struct nfs_client *clp) { smp_mb__before_clear_bit(); - clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state); + clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state); smp_mb__after_clear_bit(); - wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER); + wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING); rpc_wake_up(&clp->cl_rpcwaitq); } /* - * State recovery routine + * Schedule the nfs_client asynchronous state management routine */ -static void nfs4_recover_state(struct nfs_client *clp) +static void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; + if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) + return; __module_get(THIS_MODULE); atomic_inc(&clp->cl_count); - task = kthread_run(reclaimer, clp, "%s-reclaim", + task = kthread_run(nfs4_run_state_manager, clp, "%s-manager", 
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); if (!IS_ERR(task)) return; - nfs4_clear_recover_bit(clp); + nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(THIS_MODULE); } @@ -806,8 +808,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) return; if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); - if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0) - nfs4_recover_state(clp); + nfs4_schedule_state_manager(clp); } static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) @@ -1106,13 +1107,10 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) return status; } -static int reclaimer(void *ptr) +static void nfs4_state_manager(struct nfs_client *clp) { - struct nfs_client *clp = ptr; int status = 0; - allow_signal(SIGKILL); - /* Ensure exclusive access to NFSv4 state */ while (!list_empty(&clp->cl_superblocks)) { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { @@ -1161,19 +1159,28 @@ static int reclaimer(void *ptr) nfs_client_return_marked_delegations(clp); continue; } + + nfs4_clear_state_manager_bit(clp); break; } -out: - nfs4_clear_recover_bit(clp); - nfs_put_client(clp); - module_put_and_exit(0); - return 0; + return; out_error: - printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s" + printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" " with error %d\n", clp->cl_hostname, -status); if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) nfs4_state_end_reclaim_reboot(clp); - goto out; + nfs4_clear_state_manager_bit(clp); +} + +static int nfs4_run_state_manager(void *ptr) +{ + struct nfs_client *clp = ptr; + + allow_signal(SIGKILL); + nfs4_state_manager(clp); + nfs_put_client(clp); + module_put_and_exit(0); + return 0; } /* -- cgit v1.2.3-70-g09d2 From f3c76491e7ecacbb7942633f3b2a3514b7476ef9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:48 -0500 Subject: NFSv4: Don't exit the state management if there are still tasks to do Fix up a potential race... Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 83f3dd39ac5..9e76712dcae 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1112,7 +1112,7 @@ static void nfs4_state_manager(struct nfs_client *clp) int status = 0; /* Ensure exclusive access to NFSv4 state */ - while (!list_empty(&clp->cl_superblocks)) { + for(;;) { if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { /* We're going to have to re-establish a clientid */ status = nfs4_reclaim_lease(clp); @@ -1161,7 +1161,11 @@ static void nfs4_state_manager(struct nfs_client *clp) } nfs4_clear_state_manager_bit(clp); - break; + /* Did we race with an attempt to give us more work? */ + if (clp->cl_state == 0) + break; + if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) + break; } return; out_error: -- cgit v1.2.3-70-g09d2 From 0d62f85a81216f30a0ba1479b93e84103a5d535b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:49 -0500 Subject: NFSv4: Fix a BAD_SEQUENCEID condition. We really shouldn't be resetting the sequence ids when doing state expiration recovery, since we don't know if the server still remembers our previous state owners. There are servers out there that do attempt to preserve client state even if the lease has expired. 
Such a server would only release that state if a conflicting OPEN request occurs. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9e76712dcae..6dc36a0d9a8 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -951,7 +951,6 @@ static void nfs4_clear_open_state(struct nfs4_state *state) clear_bit(NFS_O_WRONLY_STATE, &state->flags); clear_bit(NFS_O_RDWR_STATE, &state->flags); list_for_each_entry(lock, &state->lock_states, ls_locks) { - lock->ls_seqid.counter = 0; lock->ls_seqid.flags = 0; lock->ls_flags &= ~NFS_LOCK_INITIALIZED; } @@ -966,7 +965,6 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_re /* Reset all sequence ids to zero */ for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); - sp->so_seqid.counter = 0; sp->so_seqid.flags = 0; spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { -- cgit v1.2.3-70-g09d2 From b0d3ded1a21dc3057daff5a488469d9e6aa1b567 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:50 -0500 Subject: NFSv4: Clean up nfs_expire_all_delegations() Let the actual delegreturn stuff be run in the state manager thread rather than allocating a separate kthread. Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 31 +++++-------------------------- fs/nfs/nfs4_fs.h | 1 + fs/nfs/nfs4renewd.c | 16 +++++++++------- fs/nfs/nfs4state.c | 2 +- 4 files changed, 16 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 4692fdc8abf..21eda6c083d 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -354,37 +354,16 @@ static void nfs_client_mark_return_all_delegations(struct nfs_client *clp) rcu_read_unlock(); } -static int nfs_do_expire_all_delegations(void *ptr) +static void nfs_delegation_run_state_manager(struct nfs_client *clp) { - struct nfs_client *clp = ptr; - - allow_signal(SIGKILL); - - if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0) - goto out; - if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) - goto out; - nfs_client_mark_return_all_delegations(clp); - nfs_client_return_marked_delegations(clp); -out: - nfs_put_client(clp); - module_put_and_exit(0); + if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) + nfs4_schedule_state_manager(clp); } void nfs_expire_all_delegations(struct nfs_client *clp) { - struct task_struct *task; - - __module_get(THIS_MODULE); - atomic_inc(&clp->cl_count); - task = kthread_run(nfs_do_expire_all_delegations, clp, - "%s-delegreturn", - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR)); - if (!IS_ERR(task)) - return; - nfs_put_client(clp); - module_put(THIS_MODULE); + nfs_client_mark_return_all_delegations(clp); + nfs_delegation_run_state_manager(clp); } /* diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index b7a12e7c660..ee5f51ef696 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -227,6 +227,7 @@ extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t); extern void nfs4_close_sync(struct path *, struct nfs4_state *, mode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); +extern void nfs4_schedule_state_manager(struct nfs_client *); extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); extern void 
nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 6101f955f23..ca557e677d9 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -77,16 +77,18 @@ nfs4_renew_state(struct work_struct *work) /* Are we close to a lease timeout? */ if (time_after(now, last + lease/3)) { cred = nfs4_get_renew_cred_locked(clp); + spin_unlock(&clp->cl_lock); if (cred == NULL) { - set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - spin_unlock(&clp->cl_lock); + if (list_empty(&clp->cl_delegations)) { + set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); + goto out; + } nfs_expire_all_delegations(clp); - goto out; + } else { + /* Queue an asynchronous RENEW. */ + nfs4_proc_async_renew(clp, cred); + put_rpccred(cred); } - spin_unlock(&clp->cl_lock); - /* Queue an asynchronous RENEW. */ - nfs4_proc_async_renew(clp, cred); - put_rpccred(cred); timeout = (2 * lease) / 3; spin_lock(&clp->cl_lock); } else diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6dc36a0d9a8..cd16b301f13 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -781,7 +781,7 @@ static void nfs4_clear_state_manager_bit(struct nfs_client *clp) /* * Schedule the nfs_client asynchronous state management routine */ -static void nfs4_schedule_state_manager(struct nfs_client *clp) +void nfs4_schedule_state_manager(struct nfs_client *clp) { struct task_struct *task; -- cgit v1.2.3-70-g09d2 From 6411bd4a471893ab2af103d96253ba97c92d4777 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:51 -0500 Subject: NFSv4: Clean up the asynchronous delegation return Reuse the state management thread in order to return delegations when we get a callback. 
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 73 ++++++++++++++--------------------------------------- 1 file changed, 19 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 21eda6c083d..00c350c031b 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -321,6 +321,12 @@ int nfs_inode_return_delegation(struct inode *inode) return err; } +static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation) +{ + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); +} + /* * Return all delegations associated to a super block */ @@ -376,66 +382,25 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp) nfs_client_mark_return_all_delegations(clp); } -struct recall_threadargs { - struct inode *inode; - struct nfs_client *clp; - const nfs4_stateid *stateid; - - struct completion started; - int result; -}; - -static int recall_thread(void *data) -{ - struct recall_threadargs *args = (struct recall_threadargs *)data; - struct inode *inode = igrab(args->inode); - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; - struct nfs_inode *nfsi = NFS_I(inode); - struct nfs_delegation *delegation; - - daemonize("nfsv4-delegreturn"); - - nfs_msync_inode(inode); - down_write(&nfsi->rwsem); - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(nfsi, args->stateid); - if (delegation != NULL) - args->result = 0; - else - args->result = -ENOENT; - spin_unlock(&clp->cl_lock); - complete(&args->started); - nfs_delegation_claim_opens(inode, args->stateid); - up_write(&nfsi->rwsem); - nfs_msync_inode(inode); - - if (delegation != NULL) - nfs_do_return_delegation(inode, delegation, 1); - iput(inode); - module_put_and_exit(0); -} - /* * Asynchronous delegation recall! */ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid) { - struct recall_threadargs data = { - .inode = inode, - .stateid = stateid, - }; - int status; + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_delegation *delegation; - init_completion(&data.started); - __module_get(THIS_MODULE); - status = kernel_thread(recall_thread, &data, CLONE_KERNEL); - if (status < 0) - goto out_module_put; - wait_for_completion(&data.started); - return data.result; -out_module_put: - module_put(THIS_MODULE); - return status; + rcu_read_lock(); + delegation = rcu_dereference(NFS_I(inode)->delegation); + if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data, + sizeof(delegation->stateid.data)) != 0) { + rcu_read_unlock(); + return -ENOENT; + } + nfs_mark_return_delegation(clp, delegation); + rcu_read_unlock(); + nfs_delegation_run_state_manager(clp); + return 0; } /* -- cgit v1.2.3-70-g09d2 From b7391f44f26b17ad25c7183a3d6ad50f0a9305ff Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:52 -0500 Subject: NFSv4: Return unreferenced delegations more promptly If the client is not using a delegation, the right thing to do is to return it as soon as possible. This helps reduce the amount of state the server has to track, as well as reducing the potential for conflicts with other clients. 
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 42 ++++++++++++++++++++++++++++++++++++++++++ fs/nfs/delegation.h | 17 ++++------------- fs/nfs/nfs4proc.c | 2 ++ fs/nfs/nfs4renewd.c | 1 + 4 files changed, 49 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 00c350c031b..e75f2f8c524 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -43,6 +43,27 @@ static void nfs_free_delegation(struct nfs_delegation *delegation) put_rpccred(cred); } +void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) +{ + set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); +} + +int nfs_have_delegation(struct inode *inode, int flags) +{ + struct nfs_delegation *delegation; + int ret = 0; + + flags &= FMODE_READ|FMODE_WRITE; + rcu_read_lock(); + delegation = rcu_dereference(NFS_I(inode)->delegation); + if (delegation != NULL && (delegation->type & flags) == flags) { + nfs_mark_delegation_referenced(delegation); + ret = 1; + } + rcu_read_unlock(); + return ret; +} + static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state) { struct inode *inode = state->inode; @@ -188,6 +209,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation->change_attr = nfsi->change_attr; delegation->cred = get_rpccred(cred); delegation->inode = inode; + delegation->flags = 1<<NFS_DELEGATION_REFERENCED; spin_lock_init(&delegation->lock); spin_lock(&clp->cl_lock); @@ -382,6 +404,26 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp) nfs_client_mark_return_all_delegations(clp); } +static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp) +{ + struct nfs_delegation *delegation; + + rcu_read_lock(); + list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags)) + continue; + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); + } + rcu_read_unlock(); +} + +void nfs_expire_unreferenced_delegations(struct nfs_client *clp) +{ + nfs_client_mark_return_unreferenced_delegations(clp); + nfs_delegation_run_state_manager(clp); +} + /* * Asynchronous delegation recall! 
*/ diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 56f3eb558ef..2a74882e5d5 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -28,6 +28,7 @@ struct nfs_delegation { enum { NFS_DELEGATION_NEED_RECLAIM = 0, NFS_DELEGATION_RETURN, + NFS_DELEGATION_REFERENCED, }; int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); @@ -39,6 +40,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode); struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle); void nfs_super_return_all_delegations(struct super_block *sb); void nfs_expire_all_delegations(struct nfs_client *clp); +void nfs_expire_unreferenced_delegations(struct nfs_client *clp); void nfs_handle_cb_pathdown(struct nfs_client *clp); void nfs_client_return_marked_delegations(struct nfs_client *clp); @@ -51,19 +53,8 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl); int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode); -static inline int nfs_have_delegation(struct inode *inode, int flags) -{ - struct nfs_delegation *delegation; - int ret = 0; - - flags &= FMODE_READ|FMODE_WRITE; - rcu_read_lock(); - delegation = rcu_dereference(NFS_I(inode)->delegation); - if (delegation != NULL && (delegation->type & flags) == flags) - ret = 1; - rcu_read_unlock(); - return ret; -} +void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); +int nfs_have_delegation(struct inode *inode, int flags); #else static inline int nfs_have_delegation(struct inode *inode, int flags) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2a347d47e38..fc0c9d255cf 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -421,6 +421,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_fla return 0; if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) return 0; + nfs_mark_delegation_referenced(delegation); return 1; } @@ -505,6 +506,7 @@ static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stat else if (memcmp(deleg_cur->stateid.data, delegation->data, NFS4_STATEID_SIZE) != 0) goto no_delegation_unlock; + nfs_mark_delegation_referenced(deleg_cur); __update_open_stateid(state, open_stateid, &deleg_cur->stateid, open_flags); ret = 1; no_delegation_unlock: diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index ca557e677d9..f524e932ff7 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -101,6 +101,7 @@ nfs4_renew_state(struct work_struct *work) cancel_delayed_work(&clp->cl_renewd); schedule_delayed_work(&clp->cl_renewd, timeout); spin_unlock(&clp->cl_lock); + nfs_expire_unreferenced_delegations(clp); out: dprintk("%s: done\n", __func__); } -- cgit v1.2.3-70-g09d2 From 9082a5cc1e33d081f091f54e6ed69a0628a4bdcc Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:53 -0500 Subject: NFSv4: Fix up delegation callbacks Currently, the callback server is listening on IPv6 if it is enabled. This means that IPv4 addresses will always be mapped. 
Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 57 ++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index b643f0ec5c2..9b728f3565a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -223,31 +223,54 @@ void nfs_put_client(struct nfs_client *clp) } } -static int nfs_sockaddr_match_ipaddr4(const struct sockaddr_in *sa1, - const struct sockaddr_in *sa2) +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) +static const struct in6_addr *nfs_map_ipv4_addr(const struct sockaddr *sa, struct in6_addr *addr_mapped) { - return sa1->sin_addr.s_addr == sa2->sin_addr.s_addr; + switch (sa->sa_family) { + default: + return NULL; + case AF_INET6: + return &((const struct sockaddr_in6 *)sa)->sin6_addr; + break; + case AF_INET: + ipv6_addr_set_v4mapped(((const struct sockaddr_in *)sa)->sin_addr.s_addr, + addr_mapped); + return addr_mapped; + } } -static int nfs_sockaddr_match_ipaddr6(const struct sockaddr_in6 *sa1, - const struct sockaddr_in6 *sa2) +static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, + const struct sockaddr *sa2) +{ + const struct in6_addr *addr1; + const struct in6_addr *addr2; + struct in6_addr addr1_mapped; + struct in6_addr addr2_mapped; + + addr1 = nfs_map_ipv4_addr(sa1, &addr1_mapped); + if (likely(addr1 != NULL)) { + addr2 = nfs_map_ipv4_addr(sa2, &addr2_mapped); + if (likely(addr2 != NULL)) + return ipv6_addr_equal(addr1, addr2); + } + return 0; +} +#else +static int nfs_sockaddr_match_ipaddr4(const struct sockaddr_in *sa1, + const struct sockaddr_in *sa2) { - return ipv6_addr_equal(&sa1->sin6_addr, &sa2->sin6_addr); + return sa1->sin_addr.s_addr == sa2->sin_addr.s_addr; } static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, const struct sockaddr *sa2) { - switch (sa1->sa_family) { - case AF_INET: - return nfs_sockaddr_match_ipaddr4((const struct sockaddr_in *)sa1, - (const struct sockaddr_in *)sa2); - case AF_INET6: - return nfs_sockaddr_match_ipaddr6((const struct sockaddr_in6 *)sa1, - (const struct sockaddr_in6 *)sa2); - } - BUG(); + if (unlikely(sa1->sa_family != AF_INET || sa2->sa_family != AF_INET)) + return 0; + return nfs_sockaddr_match_ipaddr4((const struct sockaddr_in *)sa1, + (const struct sockaddr_in *)sa2); } +#endif /* * Find a client by IP address and protocol version @@ -269,8 +292,6 @@ struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion) if (clp->rpc_ops->version != nfsversion) continue; - if (addr->sa_family != clap->sa_family) - continue; /* Match only the IP address, not the port number */ if (!nfs_sockaddr_match_ipaddr(addr, clap)) continue; @@ -304,8 +325,6 @@ struct nfs_client *nfs_find_client_next(struct nfs_client *clp) if (clp->rpc_ops->version != nfsvers) continue; - if (sap->sa_family != clap->sa_family) - continue; /* Match only the IP address, not the port number */ if (!nfs_sockaddr_match_ipaddr(sap, clap)) continue; -- cgit v1.2.3-70-g09d2 From bd7bf9d540c001055fba796ebf146d90e4dd2eb2 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:53 -0500 Subject: NFSv4: Convert delegation->type field to fmode_t Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 2 +- fs/nfs/delegation.h | 6 +++--- fs/nfs/nfs4xdr.c | 4 ++-- include/linux/nfs_fs.h | 2 +- include/linux/nfs_xdr.h | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index e75f2f8c524..968225a8801 100644 --- 
a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -48,7 +48,7 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); } -int nfs_have_delegation(struct inode *inode, int flags) +int nfs_have_delegation(struct inode *inode, fmode_t flags) { struct nfs_delegation *delegation; int ret = 0; diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 2a74882e5d5..09f38379517 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -17,7 +17,7 @@ struct nfs_delegation { struct rpc_cred *cred; struct inode *inode; nfs4_stateid stateid; - int type; + fmode_t type; loff_t maxsize; __u64 change_attr; unsigned long flags; @@ -54,10 +54,10 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl); int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode); void nfs_mark_delegation_referenced(struct nfs_delegation *delegation); -int nfs_have_delegation(struct inode *inode, int flags); +int nfs_have_delegation(struct inode *inode, fmode_t flags); #else -static inline int nfs_have_delegation(struct inode *inode, int flags) +static inline int nfs_have_delegation(struct inode *inode, fmode_t flags) { return 0; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index b916297d233..879911c9903 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1024,7 +1024,7 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a } } -static inline void encode_delegation_type(struct xdr_stream *xdr, int delegation_type) +static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type) { __be32 *p; @@ -1053,7 +1053,7 @@ static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr * encode_string(xdr, name->len, name->name); } -static inline void encode_claim_previous(struct xdr_stream *xdr, int type) +static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type) { __be32 *p; diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f11077285a6..8d71d7b7c78 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -180,7 +180,7 @@ struct nfs_inode { /* NFSv4 state */ struct list_head open_states; struct nfs_delegation *delegation; - int delegation_state; + fmode_t delegation_state; struct rw_semaphore rwsem; #endif /* CONFIG_NFS_V4*/ struct inode vfs_inode; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index c1c31acb8a2..32c1a0ecdbf 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -126,7 +126,7 @@ struct nfs_openargs { struct iattr * attrs; /* UNCHECKED, GUARDED */ nfs4_verifier verifier; /* EXCLUSIVE */ nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ - int delegation_type; /* CLAIM_PREVIOUS */ + fmode_t delegation_type; /* CLAIM_PREVIOUS */ } u; const struct qstr * name; const struct nfs_server *server; /* Needed for ID mapping */ @@ -143,7 +143,7 @@ struct nfs_openres { struct nfs_fattr * dir_attr; struct nfs_seqid * seqid; const struct nfs_server *server; - int delegation_type; + fmode_t delegation_type; nfs4_stateid delegation; __u32 do_recall; __u64 maxsize; -- cgit v1.2.3-70-g09d2 From 5584c30630f8a4aac557093b1603e166fe7385be Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:54 -0500 Subject: NFSv4: Clean up is_atomic_open() Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 3e64b98f3a9..121b533f1f2 
100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -973,7 +973,7 @@ struct dentry_operations nfs4_dentry_operations = { * Use intent information to determine whether we need to substitute * the NFSv4-style stateful OPEN for the LOOKUP call */ -static int is_atomic_open(struct inode *dir, struct nameidata *nd) +static int is_atomic_open(struct nameidata *nd) { if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0) return 0; @@ -996,7 +996,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); /* Check that we are indeed trying to open this file */ - if (!is_atomic_open(dir, nd)) + if (!is_atomic_open(nd)) goto no_open; if (dentry->d_name.len > NFS_SERVER(dir)->namelen) { @@ -1047,10 +1047,10 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) struct inode *dir; int openflags, ret = 0; + if (!is_atomic_open(nd)) + goto no_open; parent = dget_parent(dentry); dir = parent->d_inode; - if (!is_atomic_open(dir, nd)) - goto no_open; /* We can't create new files in nfs_open_revalidate(), so we * optimize away revalidation of negative dentries. */ @@ -1062,11 +1062,11 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) /* NFS only supports OPEN on regular files */ if (!S_ISREG(inode->i_mode)) - goto no_open; + goto no_open_dput; openflags = nd->intent.open.flags; /* We cannot do exclusive creation on a positive dentry */ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) - goto no_open; + goto no_open_dput; /* We can't create new files, or truncate existing ones here */ openflags &= ~(O_CREAT|O_TRUNC); @@ -1081,8 +1081,9 @@ out: if (!ret) d_drop(dentry); return ret; -no_open: +no_open_dput: dput(parent); +no_open: if (inode != NULL && nfs_have_delegation(inode, FMODE_READ)) return 1; return nfs_lookup_revalidate(dentry, nd); -- cgit v1.2.3-70-g09d2 From 15860ab1d7700249ebe3b0b8ca86ce43dfd0d66f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:54 -0500 Subject: NFSv4: Ensure that we set the verifier when revalidating delegated dentries This ensures that we don't have to look up the dentry again after we return the delegation if we know that the directory didn't change. 
Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 121b533f1f2..ff167aa6243 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -799,6 +799,9 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd) goto out_bad; } + if (nfs_have_delegation(inode, FMODE_READ)) + goto out_set_verifier; + /* Force a full look up iff the parent directory has changed */ if (!nfs_is_exclusive_create(dir, nd) && nfs_check_verifier(dir, dentry)) { if (nfs_lookup_verify_inode(inode, nd)) @@ -817,6 +820,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd) if ((error = nfs_refresh_inode(inode, &fattr)) != 0) goto out_bad; +out_set_verifier: nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); out_valid: dput(parent); @@ -1084,8 +1088,6 @@ out: no_open_dput: dput(parent); no_open: - if (inode != NULL && nfs_have_delegation(inode, FMODE_READ)) - return 1; return nfs_lookup_revalidate(dentry, nd); } #endif /* CONFIG_NFSV4 */ -- cgit v1.2.3-70-g09d2 From 7a50c60e461f6ff97428da9448c3dad5b7bef491 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:55 -0500 Subject: NFS: Use delegations to optimise ACCESS calls Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ff167aa6243..ed7024c3488 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1797,7 +1797,8 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str cache = nfs_access_search_rbtree(inode, cred); if (cache == NULL) goto out; - if (!time_in_range(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) + if (!nfs_have_delegation(inode, FMODE_READ) && + !time_in_range(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) goto out_stale; res->jiffies = cache->jiffies; res->cred = cache->cred; -- cgit v1.2.3-70-g09d2 From dc0b027dfadfcb8a5504f7d8052754bf8d501ab9 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 15:21:56 -0500 Subject: NFSv4: Convert the open and close ops to use fmode Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 +- fs/nfs/nfs4_fs.h | 8 +-- fs/nfs/nfs4proc.c | 126 ++++++++++++++++++++++++++---------------------- fs/nfs/nfs4state.c | 24 ++++----- fs/nfs/nfs4xdr.c | 10 ++-- include/linux/nfs_fs.h | 4 +- include/linux/nfs_xdr.h | 3 +- 7 files changed, 94 insertions(+), 83 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d22eb383e1c..1cf39202c3e 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -592,7 +592,7 @@ static void nfs_file_set_open_context(struct file *filp, struct nfs_open_context /* * Given an inode, search for an open context with the desired characteristics */ -struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode) +struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_open_context *pos, *ctx = NULL; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index ee5f51ef696..d5f5efaf2b2 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -161,7 +161,7 @@ struct nfs4_state { unsigned int n_rdonly; /* Number of read-only references */ unsigned int n_wronly; /* Number of write-only references */ unsigned int n_rdwr; /* Number of read/write references */ - int state; /* State on the server (R,W, or 
RW) */ + fmode_t state; /* State on the server (R,W, or RW) */ atomic_t count; }; @@ -223,9 +223,9 @@ extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struc extern void nfs4_put_state_owner(struct nfs4_state_owner *); extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); extern void nfs4_put_open_state(struct nfs4_state *); -extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t); -extern void nfs4_close_sync(struct path *, struct nfs4_state *, mode_t); -extern void nfs4_state_set_mode_locked(struct nfs4_state *, mode_t); +extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); +extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); +extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_state_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fc0c9d255cf..e8df52dc2bc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -323,7 +323,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p) } static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, - struct nfs4_state_owner *sp, int flags, + struct nfs4_state_owner *sp, fmode_t fmode, int flags, const struct iattr *attrs) { struct dentry *parent = dget_parent(path->dentry); @@ -343,7 +343,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, p->owner = sp; atomic_inc(&sp->so_count); p->o_arg.fh = NFS_FH(dir); - p->o_arg.open_flags = flags, + p->o_arg.open_flags = flags; + p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; p->o_arg.id = sp->so_owner_id.id; p->o_arg.name = &p->path.dentry->d_name; @@ -399,10 +400,13 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task) return ret; } -static int can_open_cached(struct nfs4_state *state, int mode) +static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode) { int ret = 0; - switch (mode & (FMODE_READ|FMODE_WRITE|O_EXCL)) { + + if (open_mode & O_EXCL) + goto out; + switch (mode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0; break; @@ -412,12 +416,13 @@ static int can_open_cached(struct nfs4_state *state, int mode) case FMODE_READ|FMODE_WRITE: ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0; } +out: return ret; } -static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_flags) +static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) { - if ((delegation->type & open_flags) != open_flags) + if ((delegation->type & fmode) != fmode) return 0; if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) return 0; @@ -425,9 +430,9 @@ static int can_open_delegated(struct nfs_delegation *delegation, mode_t open_fla return 1; } -static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) +static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode) { - switch (open_flags) { + switch (fmode) { case FMODE_WRITE: state->n_wronly++; break; @@ -437,15 +442,15 @@ static void update_open_stateflags(struct nfs4_state *state, mode_t open_flags) case FMODE_READ|FMODE_WRITE: state->n_rdwr++; } - nfs4_state_set_mode_locked(state, state->state | open_flags); + nfs4_state_set_mode_locked(state, 
state->state | fmode); } -static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) +static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) memcpy(state->stateid.data, stateid->data, sizeof(state->stateid.data)); memcpy(state->open_stateid.data, stateid->data, sizeof(state->open_stateid.data)); - switch (open_flags) { + switch (fmode) { case FMODE_READ: set_bit(NFS_O_RDONLY_STATE, &state->flags); break; @@ -457,14 +462,14 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid * } } -static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) +static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); - nfs_set_open_stateid_locked(state, stateid, open_flags); + nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); } -static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, int open_flags) +static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode) { /* * Protect the call to nfs4_state_set_mode_locked and @@ -476,20 +481,20 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s set_bit(NFS_DELEGATED_STATE, &state->flags); } if (open_stateid != NULL) - nfs_set_open_stateid_locked(state, open_stateid, open_flags); + nfs_set_open_stateid_locked(state, open_stateid, fmode); write_sequnlock(&state->seqlock); spin_lock(&state->owner->so_lock); - update_open_stateflags(state, open_flags); + update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); } -static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, int open_flags) +static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode) { struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *deleg_cur; int ret = 0; - open_flags &= (FMODE_READ|FMODE_WRITE); + fmode &= (FMODE_READ|FMODE_WRITE); rcu_read_lock(); deleg_cur = rcu_dereference(nfsi->delegation); @@ -498,7 +503,7 @@ static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stat spin_lock(&deleg_cur->lock); if (nfsi->delegation != deleg_cur || - (deleg_cur->type & open_flags) != open_flags) + (deleg_cur->type & fmode) != fmode) goto no_delegation_unlock; if (delegation == NULL) @@ -507,7 +512,7 @@ static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stat goto no_delegation_unlock; nfs_mark_delegation_referenced(deleg_cur); - __update_open_stateid(state, open_stateid, &deleg_cur->stateid, open_flags); + __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode); ret = 1; no_delegation_unlock: spin_unlock(&deleg_cur->lock); @@ -515,7 +520,7 @@ no_delegation: rcu_read_unlock(); if (!ret && open_stateid != NULL) { - __update_open_stateid(state, open_stateid, NULL, open_flags); + __update_open_stateid(state, open_stateid, NULL, fmode); ret = 1; } @@ -523,13 +528,13 @@ no_delegation: } -static void nfs4_return_incompatible_delegation(struct inode *inode, mode_t open_flags) +static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) { struct nfs_delegation *delegation; rcu_read_lock(); 
delegation = rcu_dereference(NFS_I(inode)->delegation); - if (delegation == NULL || (delegation->type & open_flags) == open_flags) { + if (delegation == NULL || (delegation->type & fmode) == fmode) { rcu_read_unlock(); return; } @@ -542,15 +547,16 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) struct nfs4_state *state = opendata->state; struct nfs_inode *nfsi = NFS_I(state->inode); struct nfs_delegation *delegation; - int open_mode = opendata->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL); + int open_mode = opendata->o_arg.open_flags & O_EXCL; + fmode_t fmode = opendata->o_arg.fmode; nfs4_stateid stateid; int ret = -EAGAIN; for (;;) { - if (can_open_cached(state, open_mode)) { + if (can_open_cached(state, fmode, open_mode)) { spin_lock(&state->owner->so_lock); - if (can_open_cached(state, open_mode)) { - update_open_stateflags(state, open_mode); + if (can_open_cached(state, fmode, open_mode)) { + update_open_stateflags(state, fmode); spin_unlock(&state->owner->so_lock); goto out_return_state; } @@ -559,7 +565,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); if (delegation == NULL || - !can_open_delegated(delegation, open_mode)) { + !can_open_delegated(delegation, fmode)) { rcu_read_unlock(); break; } @@ -572,7 +578,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) ret = -EAGAIN; /* Try to update the stateid using the delegation */ - if (update_open_stateid(state, NULL, &stateid, open_mode)) + if (update_open_stateid(state, NULL, &stateid, fmode)) goto out_return_state; } out: @@ -624,7 +630,7 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data } update_open_stateid(state, &data->o_res.stateid, NULL, - data->o_arg.open_flags); + data->o_arg.fmode); iput(inode); out: return state; @@ -655,7 +661,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context { struct nfs4_opendata *opendata; - opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, NULL); + opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL); if (opendata == NULL) return ERR_PTR(-ENOMEM); opendata->state = state; @@ -663,12 +669,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context return opendata; } -static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openflags, struct nfs4_state **res) +static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res) { struct nfs4_state *newstate; int ret; - opendata->o_arg.open_flags = openflags; + opendata->o_arg.open_flags = 0; + opendata->o_arg.fmode = fmode; memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); @@ -678,7 +685,7 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, mode_t openf newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); - nfs4_close_state(&opendata->path, newstate, openflags); + nfs4_close_state(&opendata->path, newstate, fmode); *res = newstate; return 0; } @@ -734,7 +741,7 @@ static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state { struct nfs_delegation *delegation; struct nfs4_opendata *opendata; - int delegation_type = 0; + fmode_t delegation_type = 0; int status; opendata = nfs4_open_recoverdata_alloc(ctx, state); @@ -847,7 +854,7 @@ static 
void nfs4_open_confirm_release(void *calldata) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) - nfs4_close_state(&data->path, state, data->o_arg.open_flags); + nfs4_close_state(&data->path, state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } @@ -911,7 +918,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) if (data->state != NULL) { struct nfs_delegation *delegation; - if (can_open_cached(data->state, data->o_arg.open_flags & (FMODE_READ|FMODE_WRITE|O_EXCL))) + if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags)) goto out_no_action; rcu_read_lock(); delegation = rcu_dereference(NFS_I(data->state->inode)->delegation); @@ -980,7 +987,7 @@ static void nfs4_open_release(void *calldata) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) - nfs4_close_state(&data->path, state, data->o_arg.open_flags); + nfs4_close_state(&data->path, state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } @@ -1135,7 +1142,7 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct /* * Returns a referenced nfs4_state */ -static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) +static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) { struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; @@ -1153,9 +1160,9 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, int flags, struct if (status != 0) goto err_put_state_owner; if (path->dentry->d_inode != NULL) - nfs4_return_incompatible_delegation(path->dentry->d_inode, flags & (FMODE_READ|FMODE_WRITE)); + nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode); status = -ENOMEM; - opendata = nfs4_opendata_alloc(path, sp, flags, sattr); + opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr); if (opendata == NULL) goto err_put_state_owner; @@ -1187,14 +1194,14 @@ out_err: } -static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, int flags, struct iattr *sattr, struct rpc_cred *cred) +static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) { struct nfs4_exception exception = { }; struct nfs4_state *res; int status; do { - status = _nfs4_do_open(dir, path, flags, sattr, cred, &res); + status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res); if (status == 0) break; /* NOTE: BAD_SEQID means the server and client disagree about the @@ -1332,7 +1339,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: - if (calldata->arg.open_flags == 0) + if (calldata->arg.fmode == 0) break; default: if (nfs4_async_handle_error(task, server, state) == -EAGAIN) { @@ -1374,10 +1381,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) nfs_fattr_init(calldata->res.fattr); if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; - calldata->arg.open_flags = FMODE_READ; + calldata->arg.fmode = FMODE_READ; } else if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; - calldata->arg.open_flags = FMODE_WRITE; + calldata->arg.fmode = FMODE_WRITE; } 
calldata->timestamp = jiffies; rpc_call_start(task); @@ -1430,7 +1437,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait) calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); if (calldata->arg.seqid == NULL) goto out_free_calldata; - calldata->arg.open_flags = 0; + calldata->arg.fmode = 0; calldata->arg.bitmask = server->attr_bitmask; calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; @@ -1457,13 +1464,13 @@ out: return status; } -static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state) +static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct nfs4_state *state, fmode_t fmode) { struct file *filp; int ret; /* If the open_intent is for execute, we have an extra check to make */ - if (nd->intent.open.flags & FMODE_EXEC) { + if (fmode & FMODE_EXEC) { ret = nfs_may_open(state->inode, state->owner->so_cred, nd->intent.open.flags); @@ -1479,7 +1486,7 @@ static int nfs4_intent_set_file(struct nameidata *nd, struct path *path, struct } ret = PTR_ERR(filp); out_close: - nfs4_close_sync(path, state, nd->intent.open.flags); + nfs4_close_sync(path, state, fmode & (FMODE_READ|FMODE_WRITE)); return ret; } @@ -1495,6 +1502,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) struct rpc_cred *cred; struct nfs4_state *state; struct dentry *res; + fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; @@ -1512,7 +1520,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) parent = dentry->d_parent; /* Protect against concurrent sillydeletes */ nfs_block_sillyrename(parent); - state = nfs4_do_open(dir, &path, nd->intent.open.flags, &attr, cred); + state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred); put_rpccred(cred); if (IS_ERR(state)) { if (PTR_ERR(state) == -ENOENT) { @@ -1527,7 +1535,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) path.dentry = res; nfs_set_verifier(path.dentry, nfs_save_change_attribute(dir)); nfs_unblock_sillyrename(parent); - nfs4_intent_set_file(nd, &path, state); + nfs4_intent_set_file(nd, &path, state, fmode); return res; } @@ -1540,11 +1548,12 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st }; struct rpc_cred *cred; struct nfs4_state *state; + fmode_t fmode = openflags & (FMODE_READ | FMODE_WRITE); cred = rpc_lookup_cred(); if (IS_ERR(cred)) return PTR_ERR(cred); - state = nfs4_do_open(dir, &path, openflags, NULL, cred); + state = nfs4_do_open(dir, &path, fmode, openflags, NULL, cred); put_rpccred(cred); if (IS_ERR(state)) { switch (PTR_ERR(state)) { @@ -1561,10 +1570,10 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st } if (state->inode == dentry->d_inode) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - nfs4_intent_set_file(nd, &path, state); + nfs4_intent_set_file(nd, &path, state, fmode); return 1; } - nfs4_close_sync(&path, state, openflags); + nfs4_close_sync(&path, state, fmode); out_drop: d_drop(dentry); return 0; @@ -1990,6 +1999,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, }; struct nfs4_state *state; struct rpc_cred *cred; + fmode_t fmode = flags & (FMODE_READ | FMODE_WRITE); int status = 0; cred = rpc_lookup_cred(); @@ -1997,7 +2007,7 @@ nfs4_proc_create(struct inode *dir, struct dentry 
*dentry, struct iattr *sattr, status = PTR_ERR(cred); goto out; } - state = nfs4_do_open(dir, &path, flags, sattr, cred); + state = nfs4_do_open(dir, &path, fmode, flags, sattr, cred); d_drop(dentry); if (IS_ERR(state)) { status = PTR_ERR(state); @@ -2013,9 +2023,9 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, nfs_post_op_update_inode(state->inode, &fattr); } if (status == 0 && (nd->flags & LOOKUP_OPEN) != 0) - status = nfs4_intent_set_file(nd, &path, state); + status = nfs4_intent_set_file(nd, &path, state, fmode); else - nfs4_close_sync(&path, state, flags); + nfs4_close_sync(&path, state, fmode); out_putcred: put_rpccred(cred); out: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index cd16b301f13..2022fe47966 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -363,18 +363,18 @@ nfs4_alloc_open_state(void) } void -nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode) +nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode) { - if (state->state == mode) + if (state->state == fmode) return; /* NB! List reordering - see the reclaim code for why. */ - if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) { - if (mode & FMODE_WRITE) + if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) { + if (fmode & FMODE_WRITE) list_move(&state->open_states, &state->owner->so_states); else list_move_tail(&state->open_states, &state->owner->so_states); } - state->state = mode; + state->state = fmode; } static struct nfs4_state * @@ -454,16 +454,16 @@ void nfs4_put_open_state(struct nfs4_state *state) /* * Close the current file. */ -static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait) +static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait) { struct nfs4_state_owner *owner = state->owner; int call_close = 0; - int newstate; + fmode_t newstate; atomic_inc(&owner->so_count); /* Protect against nfs4_find_state() */ spin_lock(&owner->so_lock); - switch (mode & (FMODE_READ | FMODE_WRITE)) { + switch (fmode & (FMODE_READ | FMODE_WRITE)) { case FMODE_READ: state->n_rdonly--; break; @@ -498,14 +498,14 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mod nfs4_do_close(path, state, wait); } -void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode) +void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) { - __nfs4_close(path, state, mode, 0); + __nfs4_close(path, state, fmode, 0); } -void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode) +void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode) { - __nfs4_close(path, state, mode, 1); + __nfs4_close(path, state, fmode, 1); } /* diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 879911c9903..d8ddfc5467d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -953,12 +953,12 @@ static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name) return 0; } -static void encode_share_access(struct xdr_stream *xdr, int open_flags) +static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) { __be32 *p; RESERVE_SPACE(8); - switch (open_flags & (FMODE_READ|FMODE_WRITE)) { + switch (fmode & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ: WRITE32(NFS4_SHARE_ACCESS_READ); break; @@ -969,7 +969,7 @@ static void encode_share_access(struct xdr_stream *xdr, int open_flags) WRITE32(NFS4_SHARE_ACCESS_BOTH); break; default: - BUG(); + WRITE32(0); } WRITE32(0); /* for linux, 
share_deny = 0 always */ } @@ -984,7 +984,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena RESERVE_SPACE(8); WRITE32(OP_OPEN); WRITE32(arg->seqid->sequence->counter); - encode_share_access(xdr, arg->open_flags); + encode_share_access(xdr, arg->fmode); RESERVE_SPACE(28); WRITE64(arg->clientid); WRITE32(16); @@ -1112,7 +1112,7 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea WRITE32(OP_OPEN_DOWNGRADE); WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); WRITE32(arg->seqid->sequence->counter); - encode_share_access(xdr, arg->open_flags); + encode_share_access(xdr, arg->fmode); return 0; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 8d71d7b7c78..b8d9c6dd4f6 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -83,7 +83,7 @@ struct nfs_open_context { struct rpc_cred *cred; struct nfs4_state *state; fl_owner_t lockowner; - int mode; + fmode_t mode; unsigned long flags; #define NFS_CONTEXT_ERROR_WRITE (0) @@ -342,7 +342,7 @@ extern int nfs_setattr(struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); -extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode); +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 32c1a0ecdbf..a550b528319 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -120,6 +120,7 @@ struct nfs_openargs { const struct nfs_fh * fh; struct nfs_seqid * seqid; int open_flags; + fmode_t fmode; __u64 clientid; __u64 id; union { @@ -171,7 +172,7 @@ struct nfs_closeargs { struct nfs_fh * fh; nfs4_stateid * stateid; struct nfs_seqid * seqid; - int open_flags; + fmode_t fmode; const u32 * bitmask; }; -- cgit v1.2.3-70-g09d2 From 64672d55d93c26fb4035fd1a84a803cbc09cb058 Mon Sep 17 00:00:00 2001 From: Peter Staubach Date: Tue, 23 Dec 2008 15:21:56 -0500 Subject: optimize attribute timeouts for "noac" and "actimeo=0" Hi. I've been looking at a bugzilla which describes a problem where a customer was advised to use either the "noac" or "actimeo=0" mount options to solve a consistency problem that they were seeing in the file attributes. It turned out that this solution did not work reliably for them because sometimes, the local attribute cache was believed to be valid and not timed out. (With an attribute cache timeout of 0, the cache should always appear to be timed out.) In looking at this situation, it appears to me that the problem is that the attribute cache timeout code has an off-by-one error in it. It is assuming that the cache is valid in the region, [read_cache_jiffies, read_cache_jiffies + attrtimeo]. The cache should be considered valid only in the region, [read_cache_jiffies, read_cache_jiffies + attrtimeo). With this change, the options, "noac" and "actimeo=0", work as originally expected. This problem was previously addressed by special casing the attrtimeo == 0 case. However, since the problem is only an off- by-one error, the cleaner solution is address the off-by-one error and thus, not require the special case. Thanx... 
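To make the interval point concrete (a stand-alone user-space sketch, not part of the patch: the macro bodies mirror the jiffies.h hunk below and the numeric values are invented), with attrtimeo equal to 0 the closed interval [read_cache_jiffies, read_cache_jiffies + attrtimeo] still contains one jiffy, so a check made within that same jiffy wrongly sees a valid cache, whereas the half-open interval is empty and the cache is always treated as expired:

	#include <stdio.h>

	/* Stand-alone copies of the comparisons used by the two macros. */
	#define time_after_eq(a,b)	((long)(a) - (long)(b) >= 0)
	#define time_before(a,b)	((long)(b) - (long)(a) > 0)
	#define time_before_eq(a,b)	time_after_eq(b,a)

	#define time_in_range(a,b,c)		(time_after_eq(a,b) && time_before_eq(a,c))	/* [b, c] */
	#define time_in_range_open(a,b,c)	(time_after_eq(a,b) && time_before(a,c))	/* [b, c) */

	int main(void)
	{
		unsigned long now = 1000, read_cache_jiffies = 1000, attrtimeo = 0;

		/* attrtimeo == 0 ("actimeo=0", "noac"): the closed form still
		 * reports the cache as valid within the stamping jiffy, the
		 * half-open form never does. */
		printf("closed    [b, c]: %d\n",
		       time_in_range(now, read_cache_jiffies,
				     read_cache_jiffies + attrtimeo));
		printf("half-open [b, c): %d\n",
		       time_in_range_open(now, read_cache_jiffies,
					  read_cache_jiffies + attrtimeo));
		return 0;
	}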
ps Signed-off-by: Peter Staubach Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- fs/nfs/inode.c | 11 ++--------- include/linux/jiffies.h | 10 ++++++++++ include/linux/nfs_fs.h | 5 ++++- net/sunrpc/auth.c | 2 +- 5 files changed, 18 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ed7024c3488..e35c8199f82 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1798,7 +1798,7 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str if (cache == NULL) goto out; if (!nfs_have_delegation(inode, FMODE_READ) && - !time_in_range(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) + !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo)) goto out_stale; res->jiffies = cache->jiffies; res->cred = cache->cred; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1cf39202c3e..0c381686171 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -712,14 +712,7 @@ int nfs_attribute_timeout(struct inode *inode) if (nfs_have_delegation(inode, FMODE_READ)) return 0; - /* - * Special case: if the attribute timeout is set to 0, then always - * treat the cache as having expired (unless holding - * a delegation). - */ - if (nfsi->attrtimeo == 0) - return 1; - return !time_in_range(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); + return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo); } /** @@ -1182,7 +1175,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->attrtimeo_timestamp = now; nfsi->attr_gencount = nfs_inc_attr_generation_counter(); } else { - if (!time_in_range(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { + if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode)) nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); nfsi->attrtimeo_timestamp = now; diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index abb6ac639e8..1a9cf78bfce 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void) ((long)(a) - (long)(b) >= 0)) #define time_before_eq(a,b) time_after_eq(b,a) +/* + * Calculate whether a is in the range of [b, c]. + */ #define time_in_range(a,b,c) \ (time_after_eq(a,b) && \ time_before_eq(a,c)) +/* + * Calculate whether a is in the range of [b, c). + */ +#define time_in_range_open(a,b,c) \ + (time_after_eq(a,b) && \ + time_before(a,c)) + /* Same as above, but does so with platform independent 64bit types. * These must be used when utilizing jiffies_64 (i.e. return value of * get_jiffies_64() */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b8d9c6dd4f6..db867b04ac3 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -130,7 +130,10 @@ struct nfs_inode { * * We need to revalidate the cached attrs for this inode if * - * jiffies - read_cache_jiffies > attrtimeo + * jiffies - read_cache_jiffies >= attrtimeo + * + * Please note the comparison is greater than or equal + * so that zero timeout values can be specified. 
*/ unsigned long read_cache_jiffies; unsigned long attrtimeo; diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 6e28744b170..050e4e84d9e 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -234,7 +234,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan) list_for_each_entry_safe(cred, next, &cred_unused, cr_lru) { /* Enforce a 60 second garbage collection moratorium */ - if (time_in_range(cred->cr_expire, expired, jiffies) && + if (time_in_range_open(cred->cr_expire, expired, jiffies) && test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) continue; -- cgit v1.2.3-70-g09d2 From 027b6ca02192f381a5a91237ba8a8cf625dc6f6a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 23 Dec 2008 16:04:13 -0500 Subject: NFSv4: Fix an infinite loop in the NFS state recovery code Marten Gajda states: I tracked the problem down to the function nfs4_do_open_expired. Within this function _nfs4_open_expired is called and may return -NFS4ERR_DELAY. When a further call to _nfs4_open_expired is executed and does not return -NFS4ERR_DELAY the "exception.retry" variable is not reset to 0, causing the loop to iterate again (and as long as err != -NFS4ERR_DELAY, probably forever) Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e8df52dc2bc..49eebe25bba 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1104,8 +1104,9 @@ static inline int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4 do { err = _nfs4_open_expired(ctx, state); - if (err == -NFS4ERR_DELAY) - nfs4_handle_exception(server, err, &exception); + if (err != -NFS4ERR_DELAY) + break; + nfs4_handle_exception(server, err, &exception); } while (exception.retry); return err; } -- cgit v1.2.3-70-g09d2 From aadf61521199e5c0b2976002213819cafa41b897 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Tue, 23 Dec 2008 16:06:13 -0500 Subject: nfs: return compound hdr.status when there are no op replies When there are no op replies encoded in the compound reply hdr.status still contains the overall status of the compound rpc. This can happen, e.g., when the server returns a NFS4ERR_MINOR_VERS_MISMATCH error. Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d8ddfc5467d..3f18a266a49 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2222,6 +2222,8 @@ static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) hdr->tag = (char *)p; p += XDR_QUADLEN(hdr->taglen); READ32(hdr->nops); + if (unlikely(hdr->nops < 1)) + return nfs4_stat_to_errno(hdr->status); return 0; } -- cgit v1.2.3-70-g09d2 From 374130770efc80418b155b2966ff958495e03948 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Tue, 23 Dec 2008 16:06:14 -0500 Subject: nfs: remove incorrect usage of nfs4 compound response hdr.status 3 call sites look at hdr.status before returning success. hdr.status must be zero in this case so there's no point in this. Currently, hdr.status is correctly processed at decode_op_hdr time if the op status cannot be decoded. 
Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3f18a266a49..7eeed0ed589 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4423,8 +4423,6 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinf status = decode_putfh(&xdr); if (!status) status = decode_fsinfo(&xdr, fsinfo); - if (!status) - status = nfs4_stat_to_errno(hdr.status); return status; } @@ -4513,8 +4511,6 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p, status = decode_compound_hdr(&xdr, &hdr); if (!status) status = decode_setclientid(&xdr, clp); - if (!status) - status = nfs4_stat_to_errno(hdr.status); return status; } @@ -4535,8 +4531,6 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str status = decode_putrootfh(&xdr); if (!status) status = decode_fsinfo(&xdr, fsinfo); - if (!status) - status = nfs4_stat_to_errno(hdr.status); return status; } -- cgit v1.2.3-70-g09d2 From 6c0195a4681c08335a33a483be801aaf0ed7f192 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 23 Dec 2008 16:06:15 -0500 Subject: NFS: remove white space from nfs4xdr.c Clean-up Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 62 ++++++++++++++++++++++++++------------------------------ 1 file changed, 29 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7eeed0ed589..7dde309ce1a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -8,7 +8,7 @@ * * Kendrick Smith * Andy Adamson - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -67,7 +67,7 @@ static int nfs4_stat_to_errno(int); #define NFS4_MAXTAGLEN 0 #endif -/* lock,open owner id: +/* lock,open owner id: * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ #define open_owner_id_maxsz (1 + 4) @@ -709,7 +709,7 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET; WRITE32(NFS4_SET_TO_SERVER_TIME); } - + /* * Now we backfill the bitmap and the attribute buffer length. 
*/ @@ -735,7 +735,7 @@ static int encode_access(struct xdr_stream *xdr, u32 access) RESERVE_SPACE(8); WRITE32(OP_ACCESS); WRITE32(access); - + return 0; } @@ -747,14 +747,14 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) WRITE32(OP_CLOSE); WRITE32(arg->seqid->sequence->counter); WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); - + return 0; } static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args) { __be32 *p; - + RESERVE_SPACE(16); WRITE32(OP_COMMIT); WRITE64(args->offset); @@ -766,7 +766,7 @@ static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *arg static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create) { __be32 *p; - + RESERVE_SPACE(8); WRITE32(OP_CREATE); WRITE32(create->ftype); @@ -856,7 +856,7 @@ static int encode_link(struct xdr_stream *xdr, const struct qstr *name) WRITE32(OP_LINK); WRITE32(name->len); WRITEMEM(name->name, name->len); - + return 0; } @@ -1133,7 +1133,7 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh) static int encode_putrootfh(struct xdr_stream *xdr) { __be32 *p; - + RESERVE_SPACE(4); WRITE32(OP_PUTROOTFH); @@ -1232,7 +1232,7 @@ static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, con WRITE32(OP_RENAME); WRITE32(oldname->len); WRITEMEM(oldname->name, oldname->len); - + RESERVE_SPACE(4 + newname->len); WRITE32(newname->len); WRITEMEM(newname->name, newname->len); @@ -1296,7 +1296,7 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs * { int status; __be32 *p; - + RESERVE_SPACE(4+NFS4_STATEID_SIZE); WRITE32(OP_SETATTR); WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); @@ -2217,7 +2217,7 @@ static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) READ_BUF(8); READ32(hdr->status); READ32(hdr->taglen); - + READ_BUF(hdr->taglen + 4); hdr->tag = (char *)p; p += XDR_QUADLEN(hdr->taglen); @@ -3049,8 +3049,7 @@ static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res) { __be32 *savep; - uint32_t attrlen, - bitmap[2] = {0}; + uint32_t attrlen, bitmap[2] = {0}; int status; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) @@ -3072,14 +3071,13 @@ xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); return status; } - + static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat) { __be32 *savep; - uint32_t attrlen, - bitmap[2] = {0}; + uint32_t attrlen, bitmap[2] = {0}; int status; - + if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) @@ -3109,10 +3107,9 @@ xdr_error: static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf) { __be32 *savep; - uint32_t attrlen, - bitmap[2] = {0}; + uint32_t attrlen, bitmap[2] = {0}; int status; - + if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto xdr_error; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) @@ -3258,7 +3255,7 @@ static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh) static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo) { int status; - + status = decode_op_hdr(xdr, OP_LINK); if (status) return status; @@ -3564,7 +3561,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n dprintk("NFS: readdir reply truncated!\n"); entry[1] = 1; } -out: +out: kunmap_atomic(kaddr, KM_USER0); return 0; short_pkt: @@ -3720,7 +3717,6 
@@ static int decode_setattr(struct xdr_stream *xdr, struct nfs_setattrres *res) uint32_t bmlen; int status; - status = decode_op_hdr(xdr, OP_SETATTR); if (status) return status; @@ -3740,7 +3736,7 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp) READ32(opnum); if (opnum != OP_SETCLIENTID) { dprintk("nfs: decode_setclientid: Server returned operation" - " %d\n", opnum); + " %d\n", opnum); return -EIO; } READ32(nfserr); @@ -3829,7 +3825,7 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_ac struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3852,7 +3848,7 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lo struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3875,7 +3871,7 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nf struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3895,7 +3891,7 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3916,7 +3912,7 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3946,7 +3942,7 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -3979,7 +3975,7 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_cr struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; @@ -4016,7 +4012,7 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g struct xdr_stream xdr; struct compound_hdr hdr; int status; - + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); status = decode_compound_hdr(&xdr, &hdr); if (status) -- cgit v1.2.3-70-g09d2 From 05d564fe00c05bf8ff93948057ca1acb5bc68e10 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 23 Dec 2008 16:06:15 -0500 Subject: NFS: fix tabs in nfs4xdr.c Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 490 +++++++++++++++++++++++++++---------------------------- 1 file changed, 245 insertions(+), 245 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7dde309ce1a..29656c5090c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -755,12 +755,12 @@ static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *arg { __be32 *p; - RESERVE_SPACE(16); - WRITE32(OP_COMMIT); - WRITE64(args->offset); - WRITE32(args->count); + RESERVE_SPACE(16); + WRITE32(OP_COMMIT); + WRITE64(args->offset); + WRITE32(args->count); - return 0; + return 0; } static int 
encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create) @@ -797,25 +797,25 @@ static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *c static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap) { - __be32 *p; + __be32 *p; - RESERVE_SPACE(12); - WRITE32(OP_GETATTR); - WRITE32(1); - WRITE32(bitmap); - return 0; + RESERVE_SPACE(12); + WRITE32(OP_GETATTR); + WRITE32(1); + WRITE32(bitmap); + return 0; } static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1) { - __be32 *p; + __be32 *p; - RESERVE_SPACE(16); - WRITE32(OP_GETATTR); - WRITE32(2); - WRITE32(bm0); - WRITE32(bm1); - return 0; + RESERVE_SPACE(16); + WRITE32(OP_GETATTR); + WRITE32(2); + WRITE32(bm0); + WRITE32(bm1); + return 0; } static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask) @@ -959,17 +959,17 @@ static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) RESERVE_SPACE(8); switch (fmode & (FMODE_READ|FMODE_WRITE)) { - case FMODE_READ: - WRITE32(NFS4_SHARE_ACCESS_READ); - break; - case FMODE_WRITE: - WRITE32(NFS4_SHARE_ACCESS_WRITE); - break; - case FMODE_READ|FMODE_WRITE: - WRITE32(NFS4_SHARE_ACCESS_BOTH); - break; - default: - WRITE32(0); + case FMODE_READ: + WRITE32(NFS4_SHARE_ACCESS_READ); + break; + case FMODE_WRITE: + WRITE32(NFS4_SHARE_ACCESS_WRITE); + break; + case FMODE_READ|FMODE_WRITE: + WRITE32(NFS4_SHARE_ACCESS_BOTH); + break; + default: + WRITE32(0); } WRITE32(0); /* for linux, share_deny = 0 always */ } @@ -998,13 +998,13 @@ static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_op RESERVE_SPACE(4); switch(arg->open_flags & O_EXCL) { - case 0: - WRITE32(NFS4_CREATE_UNCHECKED); - encode_attrs(xdr, arg->u.attrs, arg->server); - break; - default: - WRITE32(NFS4_CREATE_EXCLUSIVE); - encode_nfs4_verifier(xdr, &arg->u.verifier); + case 0: + WRITE32(NFS4_CREATE_UNCHECKED); + encode_attrs(xdr, arg->u.attrs, arg->server); + break; + default: + WRITE32(NFS4_CREATE_EXCLUSIVE); + encode_nfs4_verifier(xdr, &arg->u.verifier); } } @@ -1014,13 +1014,13 @@ static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *a RESERVE_SPACE(4); switch (arg->open_flags & O_CREAT) { - case 0: - WRITE32(NFS4_OPEN_NOCREATE); - break; - default: - BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); - WRITE32(NFS4_OPEN_CREATE); - encode_createmode(xdr, arg); + case 0: + WRITE32(NFS4_OPEN_NOCREATE); + break; + default: + BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL); + WRITE32(NFS4_OPEN_CREATE); + encode_createmode(xdr, arg); } } @@ -1030,17 +1030,17 @@ static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delega RESERVE_SPACE(4); switch (delegation_type) { - case 0: - WRITE32(NFS4_OPEN_DELEGATE_NONE); - break; - case FMODE_READ: - WRITE32(NFS4_OPEN_DELEGATE_READ); - break; - case FMODE_WRITE|FMODE_READ: - WRITE32(NFS4_OPEN_DELEGATE_WRITE); - break; - default: - BUG(); + case 0: + WRITE32(NFS4_OPEN_DELEGATE_NONE); + break; + case FMODE_READ: + WRITE32(NFS4_OPEN_DELEGATE_READ); + break; + case FMODE_WRITE|FMODE_READ: + WRITE32(NFS4_OPEN_DELEGATE_WRITE); + break; + default: + BUG(); } } @@ -1077,17 +1077,17 @@ static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg) encode_openhdr(xdr, arg); encode_opentype(xdr, arg); switch (arg->claim) { - case NFS4_OPEN_CLAIM_NULL: - encode_claim_null(xdr, arg->name); - break; - case NFS4_OPEN_CLAIM_PREVIOUS: - encode_claim_previous(xdr, arg->u.delegation_type); - break; - case NFS4_OPEN_CLAIM_DELEGATE_CUR: - 
encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation); - break; - default: - BUG(); + case NFS4_OPEN_CLAIM_NULL: + encode_claim_null(xdr, arg->name); + break; + case NFS4_OPEN_CLAIM_PREVIOUS: + encode_claim_previous(xdr, arg->u.delegation_type); + break; + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation); + break; + default: + BUG(); } return 0; } @@ -1132,12 +1132,12 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh) static int encode_putrootfh(struct xdr_stream *xdr) { - __be32 *p; + __be32 *p; - RESERVE_SPACE(4); - WRITE32(OP_PUTROOTFH); + RESERVE_SPACE(4); + WRITE32(OP_PUTROOTFH); - return 0; + return 0; } static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) @@ -1297,14 +1297,14 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs * int status; __be32 *p; - RESERVE_SPACE(4+NFS4_STATEID_SIZE); - WRITE32(OP_SETATTR); + RESERVE_SPACE(4+NFS4_STATEID_SIZE); + WRITE32(OP_SETATTR); WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); - if ((status = encode_attrs(xdr, arg->iap, server))) + if ((status = encode_attrs(xdr, arg->iap, server))) return status; - return 0; + return 0; } static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid) @@ -1328,14 +1328,14 @@ static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclien static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state) { - __be32 *p; + __be32 *p; - RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE); - WRITE32(OP_SETCLIENTID_CONFIRM); - WRITE64(client_state->cl_clientid); - WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); + RESERVE_SPACE(12 + NFS4_VERIFIER_SIZE); + WRITE32(OP_SETCLIENTID_CONFIRM); + WRITE64(client_state->cl_clientid); + WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); - return 0; + return 0; } static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args) @@ -1584,23 +1584,23 @@ static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nf */ static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args) { - struct xdr_stream xdr; - struct compound_hdr hdr = { - .nops = 3, - }; - int status; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); - if(status) - goto out; - status = encode_close(&xdr, args); + struct xdr_stream xdr; + struct compound_hdr hdr = { + .nops = 3, + }; + int status; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_compound_hdr(&xdr, &hdr); + status = encode_putfh(&xdr, args->fh); + if(status) + goto out; + status = encode_close(&xdr, args); if (status != 0) goto out; status = encode_getfattr(&xdr, args->bitmask); out: - return status; + return status; } /* @@ -1875,25 +1875,24 @@ out: * Encode an SETATTR request */ static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args) - { - struct xdr_stream xdr; - struct compound_hdr hdr = { - .nops = 3, - }; - int status; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); - if(status) - goto out; - status = encode_setattr(&xdr, args, args->server); - if(status) - goto out; + struct xdr_stream xdr; + struct compound_hdr hdr = { + .nops = 3, + }; + int status; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_compound_hdr(&xdr, &hdr); + status = 
encode_putfh(&xdr, args->fh); + if(status) + goto out; + status = encode_setattr(&xdr, args, args->server); + if(status) + goto out; status = encode_getfattr(&xdr, args->bitmask); out: - return status; + return status; } /* @@ -3343,27 +3342,27 @@ static int decode_lookup(struct xdr_stream *xdr) /* This is too sick! */ static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize) { - __be32 *p; + __be32 *p; uint32_t limit_type, nblocks, blocksize; READ_BUF(12); READ32(limit_type); switch (limit_type) { - case 1: - READ64(*maxsize); - break; - case 2: - READ32(nblocks); - READ32(blocksize); - *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; + case 1: + READ64(*maxsize); + break; + case 2: + READ32(nblocks); + READ32(blocksize); + *maxsize = (uint64_t)nblocks * (uint64_t)blocksize; } return 0; } static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) { - __be32 *p; - uint32_t delegation_type; + __be32 *p; + uint32_t delegation_type; READ_BUF(4); READ32(delegation_type); @@ -3374,13 +3373,14 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) READ_BUF(NFS4_STATEID_SIZE+4); COPYMEM(res->delegation.data, NFS4_STATEID_SIZE); READ32(res->do_recall); + switch (delegation_type) { - case NFS4_OPEN_DELEGATE_READ: - res->delegation_type = FMODE_READ; - break; - case NFS4_OPEN_DELEGATE_WRITE: - res->delegation_type = FMODE_WRITE|FMODE_READ; - if (decode_space_limit(xdr, &res->maxsize) < 0) + case NFS4_OPEN_DELEGATE_READ: + res->delegation_type = FMODE_READ; + break; + case NFS4_OPEN_DELEGATE_WRITE: + res->delegation_type = FMODE_WRITE|FMODE_READ; + if (decode_space_limit(xdr, &res->maxsize) < 0) return -EIO; } return decode_ace(xdr, NULL, res->server->nfs_client); @@ -3388,27 +3388,27 @@ static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) { - __be32 *p; + __be32 *p; uint32_t savewords, bmlen, i; - int status; + int status; - status = decode_op_hdr(xdr, OP_OPEN); + status = decode_op_hdr(xdr, OP_OPEN); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); - if (status) - return status; - READ_BUF(NFS4_STATEID_SIZE); - COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); + if (status) + return status; + READ_BUF(NFS4_STATEID_SIZE); + COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); - decode_change_info(xdr, &res->cinfo); + decode_change_info(xdr, &res->cinfo); - READ_BUF(8); - READ32(res->rflags); - READ32(bmlen); - if (bmlen > 10) - goto xdr_error; + READ_BUF(8); + READ32(res->rflags); + READ32(bmlen); + if (bmlen > 10) + goto xdr_error; - READ_BUF(bmlen << 2); + READ_BUF(bmlen << 2); savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE); for (i = 0; i < savewords; ++i) READ32(res->attrset[i]); @@ -3423,17 +3423,17 @@ xdr_error: static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res) { - __be32 *p; + __be32 *p; int status; - status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); + status = decode_op_hdr(xdr, OP_OPEN_CONFIRM); if (status != -EIO) nfs_increment_open_seqid(status, res->seqid); - if (status) - return status; - READ_BUF(NFS4_STATEID_SIZE); - COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); - return 0; + if (status) + return status; + READ_BUF(NFS4_STATEID_SIZE); + COPYMEM(res->stateid.data, NFS4_STATEID_SIZE); + return 0; } static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res) @@ -3794,23 +3794,23 @@ static int decode_delegreturn(struct xdr_stream *xdr) */ static int 
nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_open_downgrade(&xdr, res); + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_open_downgrade(&xdr, res); if (status != 0) goto out; decode_getfattr(&xdr, res->fattr, res->server); out: - return status; + return status; } /* @@ -4023,7 +4023,6 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g status = decode_getfattr(&xdr, res->fattr, res->server); out: return status; - } /* @@ -4032,21 +4031,22 @@ out: static int nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args) { - struct xdr_stream xdr; - struct compound_hdr hdr = { - .nops = 2, - }; - int status; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); - if (status) - goto out; - status = encode_setacl(&xdr, args); + struct xdr_stream xdr; + struct compound_hdr hdr = { + .nops = 2, + }; + int status; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_compound_hdr(&xdr, &hdr); + status = encode_putfh(&xdr, args->fh); + if (status) + goto out; + status = encode_setacl(&xdr, args); out: - return status; + return status; } + /* * Decode SETACL response */ @@ -4097,18 +4097,18 @@ out: */ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_close(&xdr, res); + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_close(&xdr, res); if (status != 0) goto out; /* @@ -4119,7 +4119,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos */ decode_getfattr(&xdr, res->fattr, res->server); out: - return status; + return status; } /* @@ -4127,23 +4127,23 @@ out: */ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_savefh(&xdr); + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_savefh(&xdr); + if (status) + goto out; + status = decode_open(&xdr, res); if (status) goto out; - status = decode_open(&xdr, res); - if (status) - goto out; if (decode_getfh(&xdr, &res->fh) != 0) goto out; if (decode_getfattr(&xdr, res->f_attr, res->server) != 
0) @@ -4152,7 +4152,7 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr goto out; decode_getfattr(&xdr, res->dir_attr, res->server); out: - return status; + return status; } /* @@ -4160,20 +4160,20 @@ out: */ static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_open_confirm(&xdr, res); + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_open_confirm(&xdr, res); out: - return status; + return status; } /* @@ -4181,23 +4181,23 @@ out: */ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_open(&xdr, res); - if (status) - goto out; + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_open(&xdr, res); + if (status) + goto out; decode_getfattr(&xdr, res->f_attr, res->server); out: - return status; + return status; } /* @@ -4205,25 +4205,25 @@ out: */ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res) { - struct xdr_stream xdr; - struct compound_hdr hdr; - int status; - - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); - if (status) - goto out; - status = decode_putfh(&xdr); - if (status) - goto out; - status = decode_setattr(&xdr, res); - if (status) - goto out; + struct xdr_stream xdr; + struct compound_hdr hdr; + int status; + + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); + status = decode_compound_hdr(&xdr, &hdr); + if (status) + goto out; + status = decode_putfh(&xdr); + if (status) + goto out; + status = decode_setattr(&xdr, res); + if (status) + goto out; status = decode_getfattr(&xdr, res->fattr, res->server); if (status == NFS4ERR_DELAY) status = 0; out: - return status; + return status; } /* @@ -4707,7 +4707,7 @@ nfs4_stat_to_errno(int stat) .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ .p_name = #proc, \ - } +} struct rpc_procinfo nfs4_procedures[] = { PROC(READ, enc_read, dec_read), -- cgit v1.2.3-70-g09d2 From 49c2559e29884fbb13fd273a108b213decd1d8a5 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Tue, 23 Dec 2008 16:06:16 -0500 Subject: NFS: fix comment placement in nfs4xdr.c Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 29656c5090c..273d0010060 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3789,6 +3789,10 @@ static int decode_delegreturn(struct xdr_stream *xdr) return decode_op_hdr(xdr, OP_DELEGRETURN); } +/* + * END OF "GENERIC" DECODE 
ROUTINES. + */ + /* * Decode OPEN_DOWNGRADE response */ @@ -3813,10 +3817,6 @@ out: return status; } -/* - * END OF "GENERIC" DECODE ROUTINES. - */ - /* * Decode ACCESS response */ -- cgit v1.2.3-70-g09d2 From d017931cff77f2a1603c9c76e96477743abc9f41 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 23 Dec 2008 16:06:17 -0500 Subject: NFS: increment number of operations in each encode routine Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 417 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 247 insertions(+), 170 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 273d0010060..8f6c061bb6c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -541,6 +541,7 @@ static struct { struct compound_hdr { int32_t status; uint32_t nops; + __be32 * nops_p; uint32_t taglen; char * tag; }; @@ -578,7 +579,7 @@ static void encode_string(struct xdr_stream *xdr, unsigned int len, const char * xdr_encode_opaque(p, str, len); } -static int encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) +static void encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; @@ -588,8 +589,13 @@ static int encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr) WRITE32(hdr->taglen); WRITEMEM(hdr->tag, hdr->taglen); WRITE32(NFS4_MINOR_VERSION); + hdr->nops_p = p; WRITE32(hdr->nops); - return 0; +} + +static void encode_nops(struct compound_hdr *hdr) +{ + *hdr->nops_p = htonl(hdr->nops); } static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf) @@ -728,18 +734,19 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s return status; } -static int encode_access(struct xdr_stream *xdr, u32 access) +static int encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(8); WRITE32(OP_ACCESS); WRITE32(access); + hdr->nops++; return 0; } -static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) +static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -747,11 +754,12 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) WRITE32(OP_CLOSE); WRITE32(arg->seqid->sequence->counter); WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); + hdr->nops++; return 0; } -static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args) +static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -759,11 +767,12 @@ static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *arg WRITE32(OP_COMMIT); WRITE64(args->offset); WRITE32(args->count); + hdr->nops++; return 0; } -static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create) +static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) { __be32 *p; @@ -791,11 +800,12 @@ static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *c RESERVE_SPACE(4 + create->name->len); WRITE32(create->name->len); WRITEMEM(create->name->name, create->name->len); + hdr->nops++; return encode_attrs(xdr, create->attrs, create->server); } -static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap) +static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) { __be32 *p; @@ -803,10 
+813,11 @@ static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap) WRITE32(OP_GETATTR); WRITE32(1); WRITE32(bitmap); + hdr->nops++; return 0; } -static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1) +static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) { __be32 *p; @@ -815,40 +826,43 @@ static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1 WRITE32(2); WRITE32(bm0); WRITE32(bm1); + hdr->nops++; return 0; } -static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask) +static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { return encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0], - bitmask[1] & nfs4_fattr_bitmap[1]); + bitmask[1] & nfs4_fattr_bitmap[1], hdr); } -static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask) +static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], - bitmask[1] & nfs4_fsinfo_bitmap[1]); + bitmask[1] & nfs4_fsinfo_bitmap[1], hdr); } -static int encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask) +static int encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { return encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0], - bitmask[1] & nfs4_fs_locations_bitmap[1]); + bitmask[1] & nfs4_fs_locations_bitmap[1], + hdr); } -static int encode_getfh(struct xdr_stream *xdr) +static int encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_GETFH); + hdr->nops++; return 0; } -static int encode_link(struct xdr_stream *xdr, const struct qstr *name) +static int encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; @@ -856,6 +870,7 @@ static int encode_link(struct xdr_stream *xdr, const struct qstr *name) WRITE32(OP_LINK); WRITE32(name->len); WRITEMEM(name->name, name->len); + hdr->nops++; return 0; } @@ -878,7 +893,7 @@ static inline uint64_t nfs4_lock_length(struct file_lock *fl) * opcode,type,reclaim,offset,length,new_lock_owner = 32 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40 */ -static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args) +static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -904,11 +919,12 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args) WRITEMEM(args->lock_stateid->data, NFS4_STATEID_SIZE); WRITE32(args->lock_seqid->sequence->counter); } + hdr->nops++; return 0; } -static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args) +static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -921,11 +937,12 @@ static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *arg WRITE32(16); WRITEMEM("lock id:", 8); WRITE64(args->lock_owner.id); + hdr->nops++; return 0; } -static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args) +static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -936,11 +953,12 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *arg WRITEMEM(args->stateid->data, NFS4_STATEID_SIZE); WRITE64(args->fl->fl_start); 
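For context, the op-counting scheme these hunks introduce reduces to a small back-patching pattern: encode_compound_hdr() remembers where the operation count was written (hdr->nops_p), every encode_* helper bumps hdr->nops as it emits its opcode, and encode_nops() rewrites the placeholder once the real count is known. What follows is a simplified, stand-alone sketch of that pattern in plain C; the buffer handling, opcode value and types are stand-ins, not the kernel's XDR machinery.

#include <arpa/inet.h>		/* htonl(), ntohl() */
#include <stdint.h>
#include <stdio.h>

#define OP_EXAMPLE 22		/* stand-in opcode, payload omitted */

struct compound_hdr {
	uint32_t nops;		/* operations encoded so far */
	uint32_t *nops_p;	/* slot in the buffer to patch later */
};

static void encode_compound_hdr(uint32_t *buf, struct compound_hdr *hdr)
{
	hdr->nops_p = &buf[0];		/* remember where the count lives */
	buf[0] = htonl(hdr->nops);	/* placeholder, typically 0 */
}

static void encode_one_op(uint32_t *buf, unsigned int *pos,
			  struct compound_hdr *hdr)
{
	buf[(*pos)++] = htonl(OP_EXAMPLE);
	hdr->nops++;			/* each helper counts itself */
}

static void encode_nops(struct compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);	/* back-patch the real count */
}

int main(void)
{
	uint32_t buf[8];
	unsigned int pos = 1;		/* slot 0 holds the op count */
	struct compound_hdr hdr = { .nops = 0 };

	encode_compound_hdr(buf, &hdr);
	encode_one_op(buf, &pos, &hdr);
	encode_one_op(buf, &pos, &hdr);
	encode_nops(&hdr);

	printf("ops encoded: %u\n", ntohl(buf[0]));	/* prints 2 */
	return 0;
}

With this in place the nfs4_xdr_enc_* routines no longer need to hard-code .nops up front, which is why the hunks below switch every initializer to .nops = 0 and append encode_nops(&hdr) at the end of each encoder.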
WRITE64(nfs4_lock_length(args->fl)); + hdr->nops++; return 0; } -static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name) +static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; @@ -949,6 +967,7 @@ static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name) WRITE32(OP_LOOKUP); WRITE32(len); WRITEMEM(name->name, len); + hdr->nops++; return 0; } @@ -1072,7 +1091,7 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc encode_string(xdr, name->len, name->name); } -static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg) +static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) { encode_openhdr(xdr, arg); encode_opentype(xdr, arg); @@ -1089,10 +1108,11 @@ static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg) default: BUG(); } + hdr->nops++; return 0; } -static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg) +static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1100,11 +1120,12 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con WRITE32(OP_OPEN_CONFIRM); WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); WRITE32(arg->seqid->sequence->counter); + hdr->nops++; return 0; } -static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg) +static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1113,11 +1134,12 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); WRITE32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); + hdr->nops++; return 0; } static int -encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh) +encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { int len = fh->size; __be32 *p; @@ -1126,16 +1148,18 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh) WRITE32(OP_PUTFH); WRITE32(len); WRITEMEM(fh->data, len); + hdr->nops++; return 0; } -static int encode_putrootfh(struct xdr_stream *xdr) +static int encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_PUTROOTFH); + hdr->nops++; return 0; } @@ -1153,7 +1177,7 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); } -static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args) +static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -1165,11 +1189,12 @@ static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args) RESERVE_SPACE(12); WRITE64(args->offset); WRITE32(args->count); + hdr->nops++; return 0; } -static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req) +static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) { uint32_t attrs[2] = { FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, @@ -1191,6 +1216,7 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID; 
WRITE32(attrs[0] & readdir->bitmask[0]); WRITE32(attrs[1] & readdir->bitmask[1]); + hdr->nops++; dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", __func__, (unsigned long long)readdir->cookie, @@ -1202,17 +1228,18 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg return 0; } -static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req) +static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_READLINK); + hdr->nops++; return 0; } -static int encode_remove(struct xdr_stream *xdr, const struct qstr *name) +static int encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; @@ -1220,11 +1247,12 @@ static int encode_remove(struct xdr_stream *xdr, const struct qstr *name) WRITE32(OP_REMOVE); WRITE32(name->len); WRITEMEM(name->name, name->len); + hdr->nops++; return 0; } -static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname) +static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) { __be32 *p; @@ -1236,34 +1264,37 @@ static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, con RESERVE_SPACE(4 + newname->len); WRITE32(newname->len); WRITEMEM(newname->name, newname->len); + hdr->nops++; return 0; } -static int encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid) +static int encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(12); WRITE32(OP_RENEW); WRITE64(client_stateid->cl_clientid); + hdr->nops++; return 0; } static int -encode_restorefh(struct xdr_stream *xdr) +encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_RESTOREFH); + hdr->nops++; return 0; } static int -encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg) +encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1278,21 +1309,23 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg) RESERVE_SPACE(4); WRITE32(arg->acl_len); xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); + hdr->nops++; return 0; } static int -encode_savefh(struct xdr_stream *xdr) +encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_SAVEFH); + hdr->nops++; return 0; } -static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server) +static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) { int status; __be32 *p; @@ -1300,6 +1333,7 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs * RESERVE_SPACE(4+NFS4_STATEID_SIZE); WRITE32(OP_SETATTR); WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); + hdr->nops++; if ((status = encode_attrs(xdr, arg->iap, server))) return status; @@ -1307,7 +1341,7 @@ static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs * return 0; } -static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid) +static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct 
compound_hdr *hdr) { __be32 *p; @@ -1322,11 +1356,12 @@ static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclien encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); RESERVE_SPACE(4); WRITE32(setclientid->sc_cb_ident); + hdr->nops++; return 0; } -static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state) +static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr) { __be32 *p; @@ -1334,11 +1369,12 @@ static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_c WRITE32(OP_SETCLIENTID_CONFIRM); WRITE64(client_state->cl_clientid); WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); + hdr->nops++; return 0; } -static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args) +static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -1353,11 +1389,12 @@ static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args WRITE32(args->count); xdr_write_pages(xdr, args->pages, args->pgbase, args->count); + hdr->nops++; return 0; } -static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid) +static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) { __be32 *p; @@ -1365,6 +1402,7 @@ static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *statei WRITE32(OP_DELEGRETURN); WRITEMEM(stateid->data, NFS4_STATEID_SIZE); + hdr->nops++; return 0; } @@ -1379,20 +1417,21 @@ static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status != 0) goto out; - status = encode_access(&xdr, args->access); + status = encode_access(&xdr, args->access, &hdr); if (status != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1403,20 +1442,21 @@ static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 4, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) + if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) goto out; - if ((status = encode_lookup(&xdr, args->name)) != 0) + if ((status = encode_lookup(&xdr, args->name, &hdr)) != 0) goto out; - if ((status = encode_getfh(&xdr)) != 0) + if ((status = encode_getfh(&xdr, &hdr)) != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1427,17 +1467,18 @@ static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struc { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putrootfh(&xdr)) != 0) + if ((status = encode_putrootfh(&xdr, &hdr)) != 0) goto out; - if ((status = encode_getfh(&xdr)) == 0) - status = encode_getfattr(&xdr, args->bitmask); + if ((status = 
encode_getfh(&xdr, &hdr)) == 0) + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1448,18 +1489,19 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh)) != 0) + if ((status = encode_putfh(&xdr, args->fh, &hdr)) != 0) goto out; - if ((status = encode_remove(&xdr, &args->name)) != 0) + if ((status = encode_remove(&xdr, &args->name, &hdr)) != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1470,26 +1512,28 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 7, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->old_dir)) != 0) + if ((status = encode_putfh(&xdr, args->old_dir, &hdr)) != 0) goto out; - if ((status = encode_savefh(&xdr)) != 0) + if ((status = encode_savefh(&xdr, &hdr)) != 0) goto out; - if ((status = encode_putfh(&xdr, args->new_dir)) != 0) + if ((status = encode_putfh(&xdr, args->new_dir, &hdr)) != 0) goto out; - if ((status = encode_rename(&xdr, args->old_name, args->new_name)) != 0) + if ((status = encode_rename(&xdr, args->old_name, args->new_name, + &hdr)) != 0) goto out; - if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) goto out; - if ((status = encode_restorefh(&xdr)) != 0) + if ((status = encode_restorefh(&xdr, &hdr)) != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1500,26 +1544,27 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_ { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 7, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh)) != 0) + if ((status = encode_putfh(&xdr, args->fh, &hdr)) != 0) goto out; - if ((status = encode_savefh(&xdr)) != 0) + if ((status = encode_savefh(&xdr, &hdr)) != 0) goto out; - if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) + if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) goto out; - if ((status = encode_link(&xdr, args->name)) != 0) + if ((status = encode_link(&xdr, args->name, &hdr)) != 0) goto out; - if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) goto out; - if ((status = encode_restorefh(&xdr)) != 0) + if ((status = encode_restorefh(&xdr, &hdr)) != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1530,26 +1575,27 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 7, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) + if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) goto out; - if ((status 
= encode_savefh(&xdr)) != 0) + if ((status = encode_savefh(&xdr, &hdr)) != 0) goto out; - if ((status = encode_create(&xdr, args)) != 0) + if ((status = encode_create(&xdr, args, &hdr)) != 0) goto out; - if ((status = encode_getfh(&xdr)) != 0) + if ((status = encode_getfh(&xdr, &hdr)) != 0) goto out; - if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) goto out; - if ((status = encode_restorefh(&xdr)) != 0) + if ((status = encode_restorefh(&xdr, &hdr)) != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1568,14 +1614,15 @@ static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nf { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh)) == 0) - status = encode_getfattr(&xdr, args->bitmask); + if ((status = encode_putfh(&xdr, args->fh, &hdr)) == 0) + status = encode_getfattr(&xdr, args->bitmask, &hdr); + encode_nops(&hdr); return status; } @@ -1586,20 +1633,21 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closea { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_close(&xdr, args); + status = encode_close(&xdr, args, &hdr); if (status != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1610,32 +1658,33 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openarg { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 7, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_savefh(&xdr); + status = encode_savefh(&xdr, &hdr); if (status) goto out; - status = encode_open(&xdr, args); + status = encode_open(&xdr, args, &hdr); if (status) goto out; - status = encode_getfh(&xdr); + status = encode_getfh(&xdr, &hdr); if (status) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); if (status) goto out; - status = encode_restorefh(&xdr); + status = encode_restorefh(&xdr, &hdr); if (status) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1646,17 +1695,18 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_open_confirm(&xdr, args); + status = encode_open_confirm(&xdr, args, &hdr); out: + encode_nops(&hdr); return status; } @@ -1667,20 +1717,21 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_ { struct xdr_stream xdr; 
struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_open(&xdr, args); + status = encode_open(&xdr, args, &hdr); if (status) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1691,20 +1742,21 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_open_downgrade(&xdr, args); + status = encode_open_downgrade(&xdr, args, &hdr); if (status != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1715,17 +1767,18 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_ar { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_lock(&xdr, args); + status = encode_lock(&xdr, args, &hdr); out: + encode_nops(&hdr); return status; } @@ -1736,17 +1789,18 @@ static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_ { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_lockt(&xdr, args); + status = encode_lockt(&xdr, args, &hdr); out: + encode_nops(&hdr); return status; } @@ -1757,17 +1811,18 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_ { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_locku(&xdr, args); + status = encode_locku(&xdr, args, &hdr); out: + encode_nops(&hdr); return status; } @@ -1778,7 +1833,7 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; unsigned int replen; @@ -1786,10 +1841,10 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_readlink(&xdr, args, req); + status = encode_readlink(&xdr, args, req, &hdr); /* set up reply kvec * toplevel_status + taglen + rescount + OP_PUTFH + status @@ -1800,6 +1855,7 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n args->pgbase, args->pglen); out: 
+ encode_nops(&hdr); return status; } @@ -1810,7 +1866,7 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; int replen; @@ -1818,10 +1874,10 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_readdir(&xdr, args, req); + status = encode_readdir(&xdr, args, req, &hdr); /* set up reply kvec * toplevel_status + taglen + rescount + OP_PUTFH + status @@ -1835,6 +1891,7 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf args->pgbase, args->count); out: + encode_nops(&hdr); return status; } @@ -1846,16 +1903,16 @@ static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readarg struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int replen, status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_read(&xdr, args); + status = encode_read(&xdr, args, &hdr); if (status) goto out; @@ -1868,6 +1925,7 @@ static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readarg args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags |= XDRBUF_READ; out: + encode_nops(&hdr); return status; } @@ -1878,20 +1936,21 @@ static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_seta { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if(status) goto out; - status = encode_setattr(&xdr, args, args->server); + status = encode_setattr(&xdr, args, args->server, &hdr); if(status) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1905,21 +1964,22 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p, struct xdr_stream xdr; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int replen, status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0); + status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr); /* set up reply buffer: */ replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2; xdr_inline_pages(&req->rq_rcv_buf, replen, args->acl_pages, args->acl_pgbase, args->acl_len); out: + encode_nops(&hdr); return status; } @@ -1930,21 +1990,22 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writea { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; 
- status = encode_write(&xdr, args); + status = encode_write(&xdr, args, &hdr); if (status) goto out; req->rq_snd_buf.flags |= XDRBUF_WRITE; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1955,20 +2016,21 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_write { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_commit(&xdr, args); + status = encode_commit(&xdr, args, &hdr); if (status) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -1979,15 +2041,16 @@ static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsin { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (!status) - status = encode_fsinfo(&xdr, args->bitmask); + status = encode_fsinfo(&xdr, args->bitmask, &hdr); + encode_nops(&hdr); return status; } @@ -1998,16 +2061,18 @@ static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (!status) status = encode_getattr_one(&xdr, - args->bitmask[0] & nfs4_pathconf_bitmap[0]); + args->bitmask[0] & nfs4_pathconf_bitmap[0], + &hdr); + encode_nops(&hdr); return status; } @@ -2018,17 +2083,18 @@ static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status == 0) status = encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], - args->bitmask[1] & nfs4_statfs_bitmap[1]); + args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); + encode_nops(&hdr); return status; } @@ -2039,18 +2105,19 @@ static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, const struc { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, fhandle); + status = encode_putfh(&xdr, fhandle, &hdr); if (status == 0) status = encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS| FATTR4_WORD0_LINK_SUPPORT| FATTR4_WORD0_SYMLINK_SUPPORT| - FATTR4_WORD0_ACLSUPPORT); + FATTR4_WORD0_ACLSUPPORT, &hdr); + encode_nops(&hdr); return status; } @@ -2061,12 +2128,15 @@ static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 1, + .nops = 0, }; + int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - return encode_renew(&xdr, clp); + status = encode_renew(&xdr, clp, &hdr); + 
encode_nops(&hdr); + return status; } /* @@ -2076,12 +2146,15 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4 { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 1, + .nops = 0, }; + int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - return encode_setclientid(&xdr, sc); + status = encode_setclientid(&xdr, sc, &hdr); + encode_nops(&hdr); + return status; } /* @@ -2091,18 +2164,19 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_setclientid_confirm(&xdr, clp); + status = encode_setclientid_confirm(&xdr, clp, &hdr); if (!status) - status = encode_putrootfh(&xdr); + status = encode_putrootfh(&xdr, &hdr); if (!status) - status = encode_fsinfo(&xdr, lease_bitmap); + status = encode_fsinfo(&xdr, lease_bitmap, &hdr); + encode_nops(&hdr); return status; } @@ -2113,20 +2187,21 @@ static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struc { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fhandle); + status = encode_putfh(&xdr, args->fhandle, &hdr); if (status != 0) goto out; - status = encode_delegreturn(&xdr, args->stateid); + status = encode_delegreturn(&xdr, args->stateid, &hdr); if (status != 0) goto out; - status = encode_getfattr(&xdr, args->bitmask); + status = encode_getfattr(&xdr, args->bitmask, &hdr); out: + encode_nops(&hdr); return status; } @@ -2137,7 +2212,7 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 3, + .nops = 0, }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; int replen; @@ -2145,11 +2220,11 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) + if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) goto out; - if ((status = encode_lookup(&xdr, args->name)) != 0) + if ((status = encode_lookup(&xdr, args->name, &hdr)) != 0) goto out; - if ((status = encode_fs_locations(&xdr, args->bitmask)) != 0) + if ((status = encode_fs_locations(&xdr, args->bitmask, &hdr)) != 0) goto out; /* set up reply * toplevel_status + OP_PUTFH + status @@ -2159,6 +2234,7 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page, 0, PAGE_SIZE); out: + encode_nops(&hdr); return status; } @@ -4033,17 +4109,18 @@ nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 0, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh); + status = encode_putfh(&xdr, args->fh, &hdr); if (status) goto out; - status = encode_setacl(&xdr, args); + status = encode_setacl(&xdr, args, &hdr); out: + encode_nops(&hdr); return status; } -- cgit v1.2.3-70-g09d2 From cf8cdbe5bd662eeaece96b017a4d6676ae416537 Mon Sep 17 00:00:00 2001 From: Andy Adamson 
Date: Tue, 23 Dec 2008 16:06:18 -0500 Subject: NFS: remove unused status from encode routines Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 578 +++++++++++++++++-------------------------------------- 1 file changed, 180 insertions(+), 398 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 8f6c061bb6c..d1e4c8f8a0a 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -607,7 +607,7 @@ static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *ve xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE); } -static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server) +static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server) { char owner_name[IDMAP_NAMESZ]; char owner_group[IDMAP_NAMESZ]; @@ -618,7 +618,6 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s int len; uint32_t bmval0 = 0; uint32_t bmval1 = 0; - int status; /* * We reserve enough space to write the entire attribute buffer at once. @@ -729,12 +728,10 @@ static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const s *q++ = htonl(bmval1); *q++ = htonl(len); - status = 0; /* out: */ - return status; } -static int encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) +static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr) { __be32 *p; @@ -742,11 +739,9 @@ static int encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr WRITE32(OP_ACCESS); WRITE32(access); hdr->nops++; - - return 0; } -static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) +static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -755,11 +750,9 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, WRITE32(arg->seqid->sequence->counter); WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); hdr->nops++; - - return 0; } -static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) +static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -768,11 +761,9 @@ static int encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *arg WRITE64(args->offset); WRITE32(args->count); hdr->nops++; - - return 0; } -static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) +static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr) { __be32 *p; @@ -802,10 +793,10 @@ static int encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *c WRITEMEM(create->name->name, create->name->len); hdr->nops++; - return encode_attrs(xdr, create->attrs, create->server); + encode_attrs(xdr, create->attrs, create->server); } -static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) +static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr) { __be32 *p; @@ -814,10 +805,9 @@ static int encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct co WRITE32(1); WRITE32(bitmap); hdr->nops++; - return 0; } -static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) 
+static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr) { __be32 *p; @@ -827,42 +817,36 @@ static int encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1 WRITE32(bm0); WRITE32(bm1); hdr->nops++; - return 0; } -static int encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) +static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { - return encode_getattr_two(xdr, - bitmask[0] & nfs4_fattr_bitmap[0], - bitmask[1] & nfs4_fattr_bitmap[1], hdr); + encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0], + bitmask[1] & nfs4_fattr_bitmap[1], hdr); } -static int encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) +static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { - return encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], - bitmask[1] & nfs4_fsinfo_bitmap[1], hdr); + encode_getattr_two(xdr, bitmask[0] & nfs4_fsinfo_bitmap[0], + bitmask[1] & nfs4_fsinfo_bitmap[1], hdr); } -static int encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) +static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) { - return encode_getattr_two(xdr, - bitmask[0] & nfs4_fs_locations_bitmap[0], - bitmask[1] & nfs4_fs_locations_bitmap[1], - hdr); + encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0], + bitmask[1] & nfs4_fs_locations_bitmap[1], hdr); } -static int encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) +static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_GETFH); hdr->nops++; - - return 0; } -static int encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) +static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; @@ -871,8 +855,6 @@ static int encode_link(struct xdr_stream *xdr, const struct qstr *name, struct c WRITE32(name->len); WRITEMEM(name->name, name->len); hdr->nops++; - - return 0; } static inline int nfs4_lock_type(struct file_lock *fl, int block) @@ -893,7 +875,7 @@ static inline uint64_t nfs4_lock_length(struct file_lock *fl) * opcode,type,reclaim,offset,length,new_lock_owner = 32 * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40 */ -static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr) +static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -920,11 +902,9 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, WRITE32(args->lock_seqid->sequence->counter); } hdr->nops++; - - return 0; } -static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) +static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -938,11 +918,9 @@ static int encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *arg WRITEMEM("lock id:", 8); WRITE64(args->lock_owner.id); hdr->nops++; - - return 0; } -static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr) +static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr) { __be32 *p; @@ -954,11 
+932,9 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *arg WRITE64(args->fl->fl_start); WRITE64(nfs4_lock_length(args->fl)); hdr->nops++; - - return 0; } -static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) +static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { int len = name->len; __be32 *p; @@ -968,8 +944,6 @@ static int encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct WRITE32(len); WRITEMEM(name->name, len); hdr->nops++; - - return 0; } static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) @@ -1091,7 +1065,7 @@ static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struc encode_string(xdr, name->len, name->name); } -static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) +static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr) { encode_openhdr(xdr, arg); encode_opentype(xdr, arg); @@ -1109,10 +1083,9 @@ static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, s BUG(); } hdr->nops++; - return 0; } -static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) +static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1121,11 +1094,9 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE); WRITE32(arg->seqid->sequence->counter); hdr->nops++; - - return 0; } -static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) +static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1135,10 +1106,9 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea WRITE32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); hdr->nops++; - return 0; } -static int +static void encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr) { int len = fh->size; @@ -1149,19 +1119,15 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hd WRITE32(len); WRITEMEM(fh->data, len); hdr->nops++; - - return 0; } -static int encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) +static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_PUTROOTFH); hdr->nops++; - - return 0; } static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx) @@ -1177,7 +1143,7 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context WRITEMEM(zero_stateid.data, NFS4_STATEID_SIZE); } -static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) +static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -1190,11 +1156,9 @@ static int encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, WRITE64(args->offset); WRITE32(args->count); hdr->nops++; - - return 0; } -static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) +static void encode_readdir(struct xdr_stream *xdr, const 
struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr) { uint32_t attrs[2] = { FATTR4_WORD0_RDATTR_ERROR|FATTR4_WORD0_FILEID, @@ -1224,22 +1188,18 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg ((u32 *)readdir->verifier.data)[1], attrs[0] & readdir->bitmask[0], attrs[1] & readdir->bitmask[1]); - - return 0; } -static int encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) +static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr) { __be32 *p; RESERVE_SPACE(4); WRITE32(OP_READLINK); hdr->nops++; - - return 0; } -static int encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) +static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr) { __be32 *p; @@ -1248,11 +1208,9 @@ static int encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct WRITE32(name->len); WRITEMEM(name->name, name->len); hdr->nops++; - - return 0; } -static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) +static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr) { __be32 *p; @@ -1265,11 +1223,9 @@ static int encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, con WRITE32(newname->len); WRITEMEM(newname->name, newname->len); hdr->nops++; - - return 0; } -static int encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) +static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr) { __be32 *p; @@ -1277,11 +1233,9 @@ static int encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_ WRITE32(OP_RENEW); WRITE64(client_stateid->cl_clientid); hdr->nops++; - - return 0; } -static int +static void encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; @@ -1289,8 +1243,6 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) RESERVE_SPACE(4); WRITE32(OP_RESTOREFH); hdr->nops++; - - return 0; } static int @@ -1313,7 +1265,7 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun return 0; } -static int +static void encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; @@ -1321,27 +1273,20 @@ encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr) RESERVE_SPACE(4); WRITE32(OP_SAVEFH); hdr->nops++; - - return 0; } -static int encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) +static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr) { - int status; __be32 *p; RESERVE_SPACE(4+NFS4_STATEID_SIZE); WRITE32(OP_SETATTR); WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE); hdr->nops++; - - if ((status = encode_attrs(xdr, arg->iap, server))) - return status; - - return 0; + encode_attrs(xdr, arg->iap, server); } -static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr) +static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr) { __be32 *p; @@ -1357,11 +1302,9 @@ static int 
encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclien RESERVE_SPACE(4); WRITE32(setclientid->sc_cb_ident); hdr->nops++; - - return 0; } -static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr) +static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr) { __be32 *p; @@ -1370,11 +1313,9 @@ static int encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_c WRITE64(client_state->cl_clientid); WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE); hdr->nops++; - - return 0; } -static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) +static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) { __be32 *p; @@ -1390,11 +1331,9 @@ static int encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args xdr_write_pages(xdr, args->pages, args->pgbase, args->count); hdr->nops++; - - return 0; } -static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) +static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr) { __be32 *p; @@ -1403,8 +1342,6 @@ static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *statei WRITE32(OP_DELEGRETURN); WRITEMEM(stateid->data, NFS4_STATEID_SIZE); hdr->nops++; - return 0; - } /* * END OF "GENERIC" ENCODE ROUTINES. @@ -1419,20 +1356,14 @@ static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status != 0) - goto out; - status = encode_access(&xdr, args->access, &hdr); - if (status != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_access(&xdr, args->access, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1444,20 +1375,15 @@ static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) - goto out; - if ((status = encode_lookup(&xdr, args->name, &hdr)) != 0) - goto out; - if ((status = encode_getfh(&xdr, &hdr)) != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->dir_fh, &hdr); + encode_lookup(&xdr, args->name, &hdr); + encode_getfh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1469,17 +1395,14 @@ static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struc struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putrootfh(&xdr, &hdr)) != 0) - goto out; - if ((status = encode_getfh(&xdr, &hdr)) == 0) - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putrootfh(&xdr, &hdr); + encode_getfh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1491,18 +1414,14 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst 
*req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh, &hdr)) != 0) - goto out; - if ((status = encode_remove(&xdr, &args->name, &hdr)) != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_remove(&xdr, &args->name, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1514,27 +1433,18 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->old_dir, &hdr)) != 0) - goto out; - if ((status = encode_savefh(&xdr, &hdr)) != 0) - goto out; - if ((status = encode_putfh(&xdr, args->new_dir, &hdr)) != 0) - goto out; - if ((status = encode_rename(&xdr, args->old_name, args->new_name, - &hdr)) != 0) - goto out; - if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) - goto out; - if ((status = encode_restorefh(&xdr, &hdr)) != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->old_dir, &hdr); + encode_savefh(&xdr, &hdr); + encode_putfh(&xdr, args->new_dir, &hdr); + encode_rename(&xdr, args->old_name, args->new_name, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); + encode_restorefh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1546,26 +1456,18 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_ struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh, &hdr)) != 0) - goto out; - if ((status = encode_savefh(&xdr, &hdr)) != 0) - goto out; - if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) - goto out; - if ((status = encode_link(&xdr, args->name, &hdr)) != 0) - goto out; - if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) - goto out; - if ((status = encode_restorefh(&xdr, &hdr)) != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_savefh(&xdr, &hdr); + encode_putfh(&xdr, args->dir_fh, &hdr); + encode_link(&xdr, args->name, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); + encode_restorefh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1577,26 +1479,18 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) - goto out; - if ((status = encode_savefh(&xdr, &hdr)) != 0) - goto out; - if ((status = encode_create(&xdr, args, &hdr)) != 0) - goto out; - if ((status = encode_getfh(&xdr, &hdr)) != 0) - goto out; - if ((status = encode_getfattr(&xdr, args->bitmask, &hdr)) != 0) - goto out; - if ((status = encode_restorefh(&xdr, &hdr)) != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->dir_fh, &hdr); + encode_savefh(&xdr, &hdr); + encode_create(&xdr, args, &hdr); + encode_getfh(&xdr, &hdr); + encode_getfattr(&xdr, 
args->bitmask, &hdr); + encode_restorefh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1616,14 +1510,13 @@ static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nf struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh, &hdr)) == 0) - status = encode_getfattr(&xdr, args->bitmask, &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1635,20 +1528,14 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closea struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_close(&xdr, args, &hdr); - if (status != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_close(&xdr, args, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1660,32 +1547,18 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openarg struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_savefh(&xdr, &hdr); - if (status) - goto out; - status = encode_open(&xdr, args, &hdr); - if (status) - goto out; - status = encode_getfh(&xdr, &hdr); - if (status) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); - if (status) - goto out; - status = encode_restorefh(&xdr, &hdr); - if (status) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_savefh(&xdr, &hdr); + encode_open(&xdr, args, &hdr); + encode_getfh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); + encode_restorefh(&xdr, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1697,17 +1570,13 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_open_confirm(&xdr, args, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_open_confirm(&xdr, args, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1719,20 +1588,14 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_ struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_open(&xdr, args, &hdr); - if (status) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_open(&xdr, args, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1744,20 +1607,14 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct n struct compound_hdr hdr = { .nops = 0, }; - int status; 
xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_open_downgrade(&xdr, args, &hdr); - if (status != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_open_downgrade(&xdr, args, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1769,17 +1626,13 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_ar struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_lock(&xdr, args, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_lock(&xdr, args, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1791,17 +1644,13 @@ static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_ struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_lockt(&xdr, args, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_lockt(&xdr, args, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1813,17 +1662,13 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_ struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_locku(&xdr, args, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_locku(&xdr, args, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1837,14 +1682,11 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; unsigned int replen; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_readlink(&xdr, args, req, &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_readlink(&xdr, args, req, &hdr); /* set up reply kvec * toplevel_status + taglen + rescount + OP_PUTFH + status @@ -1853,10 +1695,8 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readlink_sz) << 2; xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen); - -out: encode_nops(&hdr); - return status; + return 0; } /* @@ -1870,14 +1710,11 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; int replen; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_readdir(&xdr, args, req, &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_readdir(&xdr, args, req, &hdr); /* set up reply kvec * toplevel_status + taglen + rescount + OP_PUTFH + status @@ -1889,10 +1726,8 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf dprintk("%s: inlined page args = (%u, %p, %u, %u)\n", __func__, replen, 
args->pages, args->pgbase, args->count); - -out: encode_nops(&hdr); - return status; + return 0; } /* @@ -1905,16 +1740,12 @@ static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readarg struct compound_hdr hdr = { .nops = 0, }; - int replen, status; + int replen; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_read(&xdr, args, &hdr); - if (status) - goto out; + encode_putfh(&xdr, args->fh, &hdr); + encode_read(&xdr, args, &hdr); /* set up reply kvec * toplevel status + taglen=0 + rescount + OP_PUTFH + status @@ -1924,9 +1755,8 @@ static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readarg xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags |= XDRBUF_READ; -out: encode_nops(&hdr); - return status; + return 0; } /* @@ -1938,20 +1768,14 @@ static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_seta struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if(status) - goto out; - status = encode_setattr(&xdr, args, args->server, &hdr); - if(status) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_setattr(&xdr, args, args->server, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -1966,21 +1790,19 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p, struct compound_hdr hdr = { .nops = 0, }; - int replen, status; + int replen; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr); + /* set up reply buffer: */ replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2; xdr_inline_pages(&req->rq_rcv_buf, replen, args->acl_pages, args->acl_pgbase, args->acl_len); -out: encode_nops(&hdr); - return status; + return 0; } /* @@ -1992,21 +1814,15 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writea struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_write(&xdr, args, &hdr); - if (status) - goto out; + encode_putfh(&xdr, args->fh, &hdr); + encode_write(&xdr, args, &hdr); req->rq_snd_buf.flags |= XDRBUF_WRITE; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2018,20 +1834,14 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_write struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; - status = encode_commit(&xdr, args, &hdr); - if (status) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fh, &hdr); + encode_commit(&xdr, args, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 
0; } /* @@ -2043,15 +1853,13 @@ static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsin struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (!status) - status = encode_fsinfo(&xdr, args->bitmask, &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_fsinfo(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2063,17 +1871,14 @@ static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct n struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (!status) - status = encode_getattr_one(&xdr, - args->bitmask[0] & nfs4_pathconf_bitmap[0], - &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0], + &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2085,17 +1890,14 @@ static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status == 0) - status = encode_getattr_two(&xdr, - args->bitmask[0] & nfs4_statfs_bitmap[0], - args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); + encode_putfh(&xdr, args->fh, &hdr); + encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], + args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2107,18 +1909,16 @@ static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, const struc struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, fhandle, &hdr); - if (status == 0) - status = encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS| - FATTR4_WORD0_LINK_SUPPORT| - FATTR4_WORD0_SYMLINK_SUPPORT| - FATTR4_WORD0_ACLSUPPORT, &hdr); + encode_putfh(&xdr, fhandle, &hdr); + encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS| + FATTR4_WORD0_LINK_SUPPORT| + FATTR4_WORD0_SYMLINK_SUPPORT| + FATTR4_WORD0_ACLSUPPORT, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2130,13 +1930,12 @@ static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_renew(&xdr, clp, &hdr); + encode_renew(&xdr, clp, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2148,13 +1947,12 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4 struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_setclientid(&xdr, sc, &hdr); + encode_setclientid(&xdr, sc, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2167,17 +1965,14 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str .nops = 0, }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_setclientid_confirm(&xdr, clp, &hdr); - if (!status) - status = encode_putrootfh(&xdr, &hdr); - if (!status) - 
status = encode_fsinfo(&xdr, lease_bitmap, &hdr); + encode_setclientid_confirm(&xdr, clp, &hdr); + encode_putrootfh(&xdr, &hdr); + encode_fsinfo(&xdr, lease_bitmap, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2189,20 +1984,14 @@ static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struc struct compound_hdr hdr = { .nops = 0, }; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fhandle, &hdr); - if (status != 0) - goto out; - status = encode_delegreturn(&xdr, args->stateid, &hdr); - if (status != 0) - goto out; - status = encode_getfattr(&xdr, args->bitmask, &hdr); -out: + encode_putfh(&xdr, args->fhandle, &hdr); + encode_delegreturn(&xdr, args->stateid, &hdr); + encode_getfattr(&xdr, args->bitmask, &hdr); encode_nops(&hdr); - return status; + return 0; } /* @@ -2216,16 +2005,13 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs }; struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth; int replen; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->dir_fh, &hdr)) != 0) - goto out; - if ((status = encode_lookup(&xdr, args->name, &hdr)) != 0) - goto out; - if ((status = encode_fs_locations(&xdr, args->bitmask, &hdr)) != 0) - goto out; + encode_putfh(&xdr, args->dir_fh, &hdr); + encode_lookup(&xdr, args->name, &hdr); + encode_fs_locations(&xdr, args->bitmask, &hdr); + /* set up reply * toplevel_status + OP_PUTFH + status * + OP_LOOKUP + status + OP_GETATTR + status = 7 @@ -2233,9 +2019,8 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs replen = (RPC_REPHDRSIZE + auth->au_rslack + 7) << 2; xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page, 0, PAGE_SIZE); -out: encode_nops(&hdr); - return status; + return 0; } /* @@ -4115,11 +3900,8 @@ nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - status = encode_putfh(&xdr, args->fh, &hdr); - if (status) - goto out; + encode_putfh(&xdr, args->fh, &hdr); status = encode_setacl(&xdr, args, &hdr); -out: encode_nops(&hdr); return status; } -- cgit v1.2.3-70-g09d2 From 68e76ad0baf8f5d5060377c2423ee6eed5c63057 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Tue, 23 Dec 2008 16:17:15 -0500 Subject: nfsd: pass client principal name in rsc downcall Two principals are involved in krb5 authentication: the target, who we authenticate *to* (normally the name of the server, like nfs/server.citi.umich.edu@CITI.UMICH.EDU), and the source, who we authenticate *as* (normally a user, like bfields@UMICH.EDU). In the case of NFSv4 callbacks, the target of the callback should be the source of the client's setclientid call, and the source should be the nfs server's own principal. Therefore we allow svcgssd to pass down the name of the principal that just authenticated, so that on setclientid we can store that principal name with the new client, to be used later on callbacks. Signed-off-by: Olga Kornievskaia Signed-off-by: J.
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4state.c | 11 +++++++++++ include/linux/nfsd/state.h | 1 + include/linux/sunrpc/svcauth_gss.h | 1 + net/sunrpc/auth_gss/svcauth_gss.c | 23 +++++++++++++++++++++++ 4 files changed, 36 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 1a052ac2bde..f3b9a8d064f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -54,6 +54,7 @@ #include #include #include +#include #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -377,6 +378,7 @@ free_client(struct nfs4_client *clp) shutdown_callback_client(clp); if (clp->cl_cred.cr_group_info) put_group_info(clp->cl_cred.cr_group_info); + kfree(clp->cl_principal); kfree(clp->cl_name.data); kfree(clp); } @@ -696,6 +698,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, unsigned int strhashval; struct nfs4_client *conf, *unconf, *new; __be32 status; + char *princ; char dname[HEXDIR_LEN]; if (!check_name(clname)) @@ -783,6 +786,14 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } copy_verf(new, &clverifier); new->cl_addr = sin->sin_addr.s_addr; + princ = svc_gss_principal(rqstp); + if (princ) { + new->cl_principal = kstrdup(princ, GFP_KERNEL); + if (new->cl_principal == NULL) { + free_client(new); + goto out; + } + } copy_cred(&new->cl_cred, &rqstp->rq_cred); gen_confirm(new); gen_callback(new, setclid); diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index d0fe2e37845..ce7cbf4b7c9 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -124,6 +124,7 @@ struct nfs4_client { nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ __be32 cl_addr; /* client ipaddress */ + char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */ diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index c9165d9771a..ca7d725861f 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,6 +20,7 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); u32 svcauth_gss_flavor(struct auth_domain *dom); +char *svc_gss_principal(struct svc_rqst *); #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 12803da95dc..e9baa6ebb1d 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -332,6 +332,7 @@ struct rsc { struct svc_cred cred; struct gss_svc_seq_data seqdata; struct gss_ctx *mechctx; + char *client_name; }; static struct cache_head *rsc_table[RSC_HASHMAX]; @@ -346,6 +347,7 @@ static void rsc_free(struct rsc *rsci) gss_delete_sec_context(&rsci->mechctx); if (rsci->cred.cr_group_info) put_group_info(rsci->cred.cr_group_info); + kfree(rsci->client_name); } static void rsc_put(struct kref *ref) @@ -383,6 +385,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp) tmp->handle.data = NULL; new->mechctx = NULL; new->cred.cr_group_info = NULL; + new->client_name = NULL; } static void @@ -397,6 +400,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp) spin_lock_init(&new->seqdata.sd_lock); new->cred = tmp->cred; tmp->cred.cr_group_info = NULL; + new->client_name = tmp->client_name; + tmp->client_name = 
NULL; } static struct cache_head * @@ -486,6 +491,15 @@ static int rsc_parse(struct cache_detail *cd, status = gss_import_sec_context(buf, len, gm, &rsci.mechctx); if (status) goto out; + + /* get client name */ + len = qword_get(&mesg, buf, mlen); + if (len > 0) { + rsci.client_name = kstrdup(buf, GFP_KERNEL); + if (!rsci.client_name) + goto out; + } + } rsci.h.expiry_time = expiry; rscp = rsc_update(&rsci, rscp); @@ -913,6 +927,15 @@ struct gss_svc_data { struct rsc *rsci; }; +char *svc_gss_principal(struct svc_rqst *rqstp) +{ + struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data; + + if (gd && gd->rsci) + return gd->rsci->client_name; + return NULL; +} + static int svcauth_gss_set_client(struct svc_rqst *rqstp) { -- cgit v1.2.3-70-g09d2 From 608207e8884e083ad8b8d33eda868da70f0d63e8 Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Tue, 23 Dec 2008 16:17:40 -0500 Subject: rpc: pass target name down to rpc level on callbacks The rpc client needs to know the principal that the setclientid was done as, so it can tell gssd who to authenticate to. Signed-off-by: Olga Kornievskaia Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4callback.c | 6 ++++++ include/linux/sunrpc/clnt.h | 2 ++ net/sunrpc/clnt.c | 16 ++++++++++++++++ 3 files changed, 24 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 094747a1227..3ca14178214 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -384,6 +384,7 @@ static int do_probe_callback(void *data) .version = nfs_cb_version[1]->number, .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */ .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), + .client_name = clp->cl_principal, }; struct rpc_message msg = { .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL], @@ -392,6 +393,11 @@ static int do_probe_callback(void *data) struct rpc_clnt *client; int status; + if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) { + status = nfserr_cb_path_down; + goto out_err; + } + /* Initialize address */ memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 6f0ee1b84a4..c39a21040dc 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -58,6 +58,7 @@ struct rpc_clnt { struct rpc_timeout cl_timeout_default; struct rpc_program * cl_program; char cl_inline_name[32]; + char *cl_principal; /* target to authenticate to */ }; /* @@ -108,6 +109,7 @@ struct rpc_create_args { u32 version; rpc_authflavor_t authflavor; unsigned long flags; + char *client_name; }; /* Values for "flags" field */ diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 4895c341e46..347f2a25abb 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -197,6 +197,12 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru clnt->cl_rtt = &clnt->cl_rtt_default; rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval); + clnt->cl_principal = NULL; + if (args->client_name) { + clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL); + if (!clnt->cl_principal) + goto out_no_principal; + } kref_init(&clnt->cl_kref); @@ -226,6 +232,8 @@ out_no_auth: rpc_put_mount(); } out_no_path: + kfree(clnt->cl_principal); +out_no_principal: rpc_free_iostats(clnt->cl_metrics); out_no_stats: if (clnt->cl_server != clnt->cl_inline_name) @@ -354,6 +362,11 @@ rpc_clone_client(struct rpc_clnt *clnt) new->cl_metrics = rpc_alloc_iostats(clnt); if (new->cl_metrics 
== NULL) goto out_no_stats; + if (clnt->cl_principal) { + new->cl_principal = kstrdup(clnt->cl_principal, GFP_KERNEL); + if (new->cl_principal == NULL) + goto out_no_principal; + } kref_init(&new->cl_kref); err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) @@ -366,6 +379,8 @@ rpc_clone_client(struct rpc_clnt *clnt) rpciod_up(); return new; out_no_path: + kfree(new->cl_principal); +out_no_principal: rpc_free_iostats(new->cl_metrics); out_no_stats: kfree(new); @@ -417,6 +432,7 @@ rpc_free_client(struct kref *kref) out_free: rpc_unregister_client(clnt); rpc_free_iostats(clnt->cl_metrics); + kfree(clnt->cl_principal); clnt->cl_metrics = NULL; xprt_put(clnt->cl_xprt); rpciod_down(); -- cgit v1.2.3-70-g09d2 From 945b34a7725a5f0741de7775132aafc58bfecfbb Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Tue, 23 Dec 2008 16:18:34 -0500 Subject: rpc: allow gss callbacks to client This patch adds client-side support to allow for callbacks other than AUTH_SYS. Signed-off-by: Olga Kornievskaia Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 36 +++++++++++++++++++++++++++++++----- net/sunrpc/auth_gss/svcauth_gss.c | 1 + 2 files changed, 32 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c2e9cfd9e5a..3e634f2a108 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -16,6 +16,7 @@ #include #include #include +#include #include @@ -182,10 +183,34 @@ void nfs_callback_down(void) mutex_unlock(&nfs_callback_mutex); } +static int check_gss_callback_principal(struct nfs_client *clp, + struct svc_rqst *rqstp) +{ + struct rpc_clnt *r = clp->cl_rpcclient; + char *p = svc_gss_principal(rqstp); + + /* + * It might just be a normal user principal, in which case + * userspace won't bother to tell us the name at all. + */ + if (p == NULL) + return SVC_DENIED; + + /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ + + if (memcmp(p, "nfs@", 4) != 0) + return SVC_DENIED; + p += 4; + if (strcmp(p, r->cl_server) != 0) + return SVC_DENIED; + return SVC_OK; +} + static int nfs_callback_authenticate(struct svc_rqst *rqstp) { struct nfs_client *clp; RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); + int ret = SVC_OK; /* Don't talk to strangers */ clp = nfs_find_client(svc_addr(rqstp), 4); @@ -194,21 +219,22 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp) dprintk("%s: %s NFSv4 callback!\n", __func__, svc_print_addr(rqstp, buf, sizeof(buf))); - nfs_put_client(clp); switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: if (rqstp->rq_proc != CB_NULL) - return SVC_DENIED; + ret = SVC_DENIED; break; case RPC_AUTH_UNIX: break; case RPC_AUTH_GSS: - /* FIXME: RPCSEC_GSS handling? 
*/ + ret = check_gss_callback_principal(clp, rqstp); + break; default: - return SVC_DENIED; + ret = SVC_DENIED; } - return SVC_OK; + nfs_put_client(clp); + return ret; } /* diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e9baa6ebb1d..2278a50c644 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -935,6 +935,7 @@ char *svc_gss_principal(struct svc_rqst *rqstp) return gd->rsci->client_name; return NULL; } +EXPORT_SYMBOL_GPL(svc_gss_principal); static int svcauth_gss_set_client(struct svc_rqst *rqstp) -- cgit v1.2.3-70-g09d2 From 61054b14d545e257b9415d5ca0cd5f43762b4d0c Mon Sep 17 00:00:00 2001 From: Olga Kornievskaia Date: Tue, 23 Dec 2008 16:19:00 -0500 Subject: nfsd: support callbacks with gss flavors This patch adds server-side support for callbacks other than AUTH_SYS. Signed-off-by: Olga Kornievskaia Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4callback.c | 3 ++- fs/nfsd/nfs4state.c | 1 + include/linux/nfsd/state.h | 1 + net/sunrpc/rpc_pipe.c | 5 +++++ 4 files changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3ca14178214..6d7d8c02c19 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -358,6 +358,7 @@ static struct rpc_program cb_program = { .nrvers = ARRAY_SIZE(nfs_cb_version), .version = nfs_cb_version, .stats = &cb_stats, + .pipe_dir_name = "/nfsd4_cb", }; /* Reference counting, callback cleanup, etc., all look racy as heck. @@ -382,7 +383,7 @@ static int do_probe_callback(void *data) .program = &cb_program, .prognumber = cb->cb_prog, .version = nfs_cb_version[1]->number, - .authflavor = RPC_AUTH_UNIX, /* XXX: need AUTH_GSS... */ + .authflavor = clp->cl_flavor, .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), .client_name = clp->cl_principal, }; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f3b9a8d064f..07db31568ac 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -786,6 +786,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } copy_verf(new, &clverifier); new->cl_addr = sin->sin_addr.s_addr; + new->cl_flavor = rqstp->rq_flavor; princ = svc_gss_principal(rqstp); if (princ) { new->cl_principal = kstrdup(princ, GFP_KERNEL); diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index ce7cbf4b7c9..128298c0362 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h @@ -124,6 +124,7 @@ struct nfs4_client { nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ __be32 cl_addr; /* client ipaddress */ + u32 cl_flavor; /* setclientid pseudoflavor */ char *cl_principal; /* setclientid principal name */ struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 3105efbb182..19245324887 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -407,6 +407,7 @@ enum { RPCAUTH_nfs, RPCAUTH_portmap, RPCAUTH_statd, + RPCAUTH_nfsd4_cb, RPCAUTH_RootEOF }; @@ -440,6 +441,10 @@ static struct rpc_filelist files[] = { .name = "statd", .mode = S_IFDIR | S_IRUGO | S_IXUGO, }, + [RPCAUTH_nfsd4_cb] = { + .name = "nfsd4_cb", + .mode = S_IFDIR | S_IRUGO | S_IXUGO, + }, }; enum { -- cgit v1.2.3-70-g09d2 From 25051158bbed127e8672b43396c71c5eb610e5f1 Mon Sep 17 00:00:00 2001 From: Lachlan McIlroy Date: Wed, 24 Dec 2008 14:07:32 +1100 Subject: [XFS] Fix 
race in xfs_write() between direct and buffered I/O with DMAPI The iolock is dropped and re-acquired around the call to XFS_SEND_NAMESP(). While the iolock is released the file can become cached. We then 'goto retry' and - if we are doing direct I/O - mapping->nrpages may now be non-zero but need_i_mutex will be zero and we will hit the WARN_ON(). Since we have dropped the I/O lock, the file size may have also changed, so what we need to do here is 'goto start' like we do for the XFS_SEND_DATA() DMAPI event. We also need to update the filesize before releasing the iolock, so that needs to be done before the XFS_SEND_NAMESP event. If we drop the iolock before setting the filesize, we could race with a truncate. Reviewed-by: Christoph Hellwig Signed-off-by: Lachlan McIlroy --- fs/xfs/linux-2.6/xfs_lrw.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 92ce34b3787..7e90daa0d1d 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -707,7 +707,6 @@ start: } } -retry: /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; @@ -763,6 +762,17 @@ retry: if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO)) ret = wait_on_sync_kiocb(iocb); + isize = i_size_read(inode); + if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize)) + *offset = isize; + + if (*offset > xip->i_size) { + xfs_ilock(xip, XFS_ILOCK_EXCL); + if (*offset > xip->i_size) + xip->i_size = *offset; + xfs_iunlock(xip, XFS_ILOCK_EXCL); + } + if (ret == -ENOSPC && DM_EVENT_ENABLED(xip, DM_EVENT_NOSPACE) && !(ioflags & IO_INVIS)) { xfs_iunlock(xip, iolock); @@ -776,20 +786,7 @@ retry: xfs_ilock(xip, iolock); if (error) goto out_unlock_internal; - pos = xip->i_size; - ret = 0; - goto retry; - } - - isize = i_size_read(inode); - if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize)) - *offset = isize; - - if (*offset > xip->i_size) { - xfs_ilock(xip, XFS_ILOCK_EXCL); - if (*offset > xip->i_size) - xip->i_size = *offset; - xfs_iunlock(xip, XFS_ILOCK_EXCL); + goto start; } error = -ret; -- cgit v1.2.3-70-g09d2 From fc5243d98ac2575ad14a974b3c097e9ba874c03d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 25 Dec 2008 13:38:35 +0100 Subject: [S390] arch_setup_additional_pages arguments arch_setup_additional_pages currently gets two arguments, the binary format description and an indication if the process uses an executable stack or not. The second argument is not used by anybody; it could be removed without replacement. What actually does make sense is to pass an indication if the process uses the elf interpreter or not. The glibc code will not use anything from the vdso if the process does not use the dynamic linker, so for statically linked binaries the architecture backend can choose not to map the vdso.
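To make the new calling convention concrete, a minimal sketch (not taken from this patch; map_vdso_pages() is a hypothetical stand-in for the existing per-arch mapping code) of a backend that skips the vdso for statically linked binaries:

#include <linux/binfmts.h>	/* struct linux_binprm */
#include <linux/mm_types.h>	/* struct mm_struct */
#include <linux/sched.h>	/* current */

static int map_vdso_pages(struct mm_struct *mm);	/* hypothetical per-arch helper */

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	/* Statically linked binary: the C library will never touch the
	 * vdso, so the backend may simply skip the mapping. */
	if (!uses_interp)
		return 0;

	return map_vdso_pages(current->mm);	/* dynamic case: map the vdso as before */
}

A dynamically linked binary still gets the vdso mapped exactly as it did when the second argument was the executable-stack flag.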
Acked-by: Ingo Molnar Signed-off-by: Martin Schwidefsky --- arch/powerpc/include/asm/elf.h | 2 +- arch/powerpc/kernel/vdso.c | 3 +-- arch/sh/include/asm/elf.h | 2 +- arch/sh/kernel/vsyscall/vsyscall.c | 3 +-- arch/x86/include/asm/elf.h | 2 +- arch/x86/vdso/vdso32-setup.c | 2 +- arch/x86/vdso/vma.c | 2 +- fs/binfmt_elf.c | 2 +- 8 files changed, 8 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index d812929390e..cd46f023ec6 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -267,7 +267,7 @@ extern int ucache_bsize; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 65639a43e64..f7ec7d0888f 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) * This is called from binfmt_elf, we create the special vma for the * vDSO and insert it into the mm struct tree */ -int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; struct page **vdso_pagelist; diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index 9eb9036a1bd..9381397ebeb 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h @@ -204,7 +204,7 @@ do { \ #define ARCH_HAS_SETUP_ADDITIONAL_PAGES struct linux_binprm; extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern unsigned int vdso_enabled; extern void __kernel_vsyscall; diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 95f4de0800e..3f7e415be86 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -59,8 +59,7 @@ int __init vsyscall_init(void) } /* Setup a VMA at program startup for the vsyscall page */ -int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 40ca1bea791..f51a3ddde01 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -325,7 +325,7 @@ struct linux_binprm; #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 extern int arch_setup_additional_pages(struct linux_binprm *bprm, - int executable_stack); + int uses_interp); extern int syscall32_setup_pages(struct linux_binprm *, int exstack); #define compat_arch_setup_additional_pages syscall32_setup_pages diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 513f330c583..1241f118ab5 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c @@ -310,7 +310,7 @@ int __init sysenter_setup(void) } /* Setup a VMA at program startup for the vsyscall page */ -int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 257ba4a10ab..9c98cc6ba97 100644 --- a/arch/x86/vdso/vma.c 
+++ b/arch/x86/vdso/vma.c @@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) /* Setup a VMA at program startup for the vsyscall page. Not called for compat tasks */ -int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mm_struct *mm = current->mm; unsigned long addr; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 8fcfa398d35..95a76ff9e01 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -949,7 +949,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) set_binfmt(&elf_format); #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES - retval = arch_setup_additional_pages(bprm, executable_stack); + retval = arch_setup_additional_pages(bprm, !!elf_interpreter); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; -- cgit v1.2.3-70-g09d2 From 24b9b06ba7ea53aa0c4d0b1c8c1e93aa1bd9fe72 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 07:09:34 -0500 Subject: cifs: remove unused SMB session pointer from struct mid_q_entry Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 1 - fs/cifs/transport.c | 13 ++++--------- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index c57c0565547..82e28f5515c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -422,7 +422,6 @@ struct mid_q_entry { unsigned long when_sent; /* time when smb send finished */ unsigned long when_received; /* when demux complete (taken off wire) */ #endif - struct cifsSesInfo *ses; /* smb was sent to this server */ struct task_struct *tsk; /* task waiting for response */ struct smb_hdr *resp_buf; /* response buffer */ int midState; /* wish this were enum but can not pass to wait_event */ diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index ff8243a8fe3..8e0d1c397c7 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -37,15 +37,11 @@ extern mempool_t *cifs_mid_poolp; extern struct kmem_cache *cifs_oplock_cachep; static struct mid_q_entry * -AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) +AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; - if (ses == NULL) { - cERROR(1, ("Null session passed in to AllocMidQEntry")); - return NULL; - } - if (ses->server == NULL) { + if (server == NULL) { cERROR(1, ("Null TCP session in AllocMidQEntry")); return NULL; } @@ -62,12 +58,11 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses) /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ /* when mid allocated can be before when sent */ temp->when_alloc = jiffies; - temp->ses = ses; temp->tsk = current; } spin_lock(&GlobalMid_Lock); - list_add_tail(&temp->qhead, &ses->server->pending_mid_q); + list_add_tail(&temp->qhead, &server->pending_mid_q); atomic_inc(&midCount); temp->midState = MID_REQUEST_ALLOCATED; spin_unlock(&GlobalMid_Lock); @@ -400,7 +395,7 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, return -EAGAIN; /* else ok - we are setting up session */ } - *ppmidQ = AllocMidQEntry(in_buf, ses); + *ppmidQ = AllocMidQEntry(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; return 0; -- cgit v1.2.3-70-g09d2 From 80909022ce966191e6140fcc15d8aff57a7df32e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 07:09:35 -0500 Subject: cifs: display addr and prefixpath options in /proc/mounts 
Have cifs_show_options display the addr and prefixpath options in /proc/mounts. Reduce struct dereferencing by adding some local variables. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 44 ++++++++++++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index d9cf467309e..7f87066ce29 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -337,39 +337,59 @@ static int cifs_show_options(struct seq_file *s, struct vfsmount *m) { struct cifs_sb_info *cifs_sb; + struct cifsTconInfo *tcon; + struct TCP_Server_Info *server; cifs_sb = CIFS_SB(m->mnt_sb); if (cifs_sb) { - if (cifs_sb->tcon) { + tcon = cifs_sb->tcon; + if (tcon) { /* BB add prepath to mount options displayed */ seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); - if (cifs_sb->tcon->ses) { - if (cifs_sb->tcon->ses->userName) + if (tcon->ses) { + if (tcon->ses->userName) seq_printf(s, ",username=%s", - cifs_sb->tcon->ses->userName); - if (cifs_sb->tcon->ses->domainName) + tcon->ses->userName); + if (tcon->ses->domainName) seq_printf(s, ",domain=%s", - cifs_sb->tcon->ses->domainName); + tcon->ses->domainName); + server = tcon->ses->server; + if (server) { + seq_printf(s, ",addr="); + switch (server->addr.sockAddr6. + sin6_family) { + case AF_INET6: + seq_printf(s, NIP6_FMT, + NIP6(server->addr.sockAddr6.sin6_addr)); + break; + case AF_INET: + seq_printf(s, NIPQUAD_FMT, + NIPQUAD(server->addr.sockAddr.sin_addr.s_addr)); + break; + } + } } if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) || - !(cifs_sb->tcon->unix_ext)) + !(tcon->unix_ext)) seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) || - !(cifs_sb->tcon->unix_ext)) + !(tcon->unix_ext)) seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); - if (!cifs_sb->tcon->unix_ext) { + if (!tcon->unix_ext) { seq_printf(s, ",file_mode=0%o,dir_mode=0%o", cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); } - if (cifs_sb->tcon->seal) + if (tcon->seal) seq_printf(s, ",seal"); - if (cifs_sb->tcon->nocase) + if (tcon->nocase) seq_printf(s, ",nocase"); - if (cifs_sb->tcon->retry) + if (tcon->retry) seq_printf(s, ",hard"); } + if (cifs_sb->prepath) + seq_printf(s, ",prepath=%s", cifs_sb->prepath); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) seq_printf(s, ",posixpaths"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) -- cgit v1.2.3-70-g09d2 From 0468a2cf914e79442b8309ce62e3f861599d8cab Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 07:09:35 -0500 Subject: cifs: take module reference when starting cifsd cifsd can outlive the last cifs mount. We need to hold a module reference until it exits to prevent someone from unplugging the module until we're ready. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index c7d34171458..701daf411a4 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -776,7 +776,7 @@ multi_t2_fnd: set_current_state(TASK_RUNNING); } - return 0; + module_put_and_exit(0); } /* extract the host portion of the UNC string */ @@ -2176,10 +2176,17 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, so no need to spinlock this init of tcpStatus */ srvTcp->tcpStatus = CifsNew; init_MUTEX(&srvTcp->tcpSem); + + /* + * since we're in a cifs function already, we know that + * this will succeed. No need for try_module_get(). 
+ */ + __module_get(THIS_MODULE); srvTcp->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, srvTcp, "cifsd"); if (IS_ERR(srvTcp->tsk)) { rc = PTR_ERR(srvTcp->tsk); cERROR(1, ("error %d create cifsd thread", rc)); + module_put(THIS_MODULE); srvTcp->tsk = NULL; sock_release(csocket); kfree(srvTcp->hostname); -- cgit v1.2.3-70-g09d2 From 72ca545b2d83ac7de671bf66d2dbc214528b4c0c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 07:09:36 -0500 Subject: cifs: convert tcpSem to a mutex Mutexes are preferred for single-holder semaphores... Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 2 +- fs/cifs/connect.c | 6 +++--- fs/cifs/transport.c | 28 ++++++++++++++-------------- 3 files changed, 18 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 82e28f5515c..9b002cca6bb 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -151,7 +151,7 @@ struct TCP_Server_Info { atomic_t num_waiters; /* blocked waiting to get in sendrecv */ #endif enum statusEnum tcpStatus; /* what we think the status is */ - struct semaphore tcpSem; + struct mutex srv_mutex; struct task_struct *tsk; char server_GUID[16]; char secMode; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 701daf411a4..61b68a0ee2f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -156,7 +156,7 @@ cifs_reconnect(struct TCP_Server_Info *server) } read_unlock(&cifs_tcp_ses_lock); /* do not want to be sending data on a socket we are freeing */ - down(&server->tcpSem); + mutex_lock(&server->srv_mutex); if (server->ssocket) { cFYI(1, ("State: 0x%x Flags: 0x%lx", server->ssocket->state, server->ssocket->flags)); @@ -182,7 +182,7 @@ cifs_reconnect(struct TCP_Server_Info *server) } } spin_unlock(&GlobalMid_Lock); - up(&server->tcpSem); + mutex_unlock(&server->srv_mutex); while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { @@ -2175,7 +2175,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, to the struct since the kernel thread not created yet so no need to spinlock this init of tcpStatus */ srvTcp->tcpStatus = CifsNew; - init_MUTEX(&srvTcp->tcpSem); + mutex_init(&srvTcp->srv_mutex); /* * since we're in a cifs function already, we know that diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 8e0d1c397c7..cd4ed65d6cd 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -516,11 +516,11 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, and avoid races inside tcp sendmsg code that could cause corruption of smb data */ - down(&ses->server->tcpSem); + mutex_lock(&ses->server->srv_mutex); rc = allocate_mid(ses, in_buf, &midQ); if (rc) { - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); cifs_small_buf_release(in_buf); /* Update # of requests on wire to server */ atomic_dec(&ses->server->inFlight); @@ -541,7 +541,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, midQ->when_sent = jiffies; #endif - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); cifs_small_buf_release(in_buf); if (rc < 0) @@ -698,11 +698,11 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, and avoid races inside tcp sendmsg code that could cause corruption of smb data */ - down(&ses->server->tcpSem); + mutex_lock(&ses->server->srv_mutex); rc = allocate_mid(ses, in_buf, &midQ); if (rc) { - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); /* Update # of requests on wire to server */ 
atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -713,7 +713,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, cERROR(1, ("Illegal length, greater than maximum frame, %d", in_buf->smb_buf_length)); DeleteMidQEntry(midQ); - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); /* Update # of requests on wire to server */ atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -733,7 +733,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; #endif - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); if (rc < 0) goto out; @@ -861,16 +861,16 @@ send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf, header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0); in_buf->Mid = mid; - down(&ses->server->tcpSem); + mutex_lock(&ses->server->srv_mutex); rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); if (rc) { - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); return rc; } rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length, (struct sockaddr *) &(ses->server->addr.sockAddr), ses->server->noblocksnd); - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); return rc; } @@ -936,16 +936,16 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, and avoid races inside tcp sendmsg code that could cause corruption of smb data */ - down(&ses->server->tcpSem); + mutex_lock(&ses->server->srv_mutex); rc = allocate_mid(ses, in_buf, &midQ); if (rc) { - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); return rc; } if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); cERROR(1, ("Illegal length, greater than maximum frame, %d", in_buf->smb_buf_length)); DeleteMidQEntry(midQ); @@ -965,7 +965,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, atomic_dec(&ses->server->inSend); midQ->when_sent = jiffies; #endif - up(&ses->server->tcpSem); + mutex_unlock(&ses->server->srv_mutex); if (rc < 0) { DeleteMidQEntry(midQ); -- cgit v1.2.3-70-g09d2 From 954d7a1cf12158fed23dd8b0f3f563d5a5c97f28 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 15:23:50 -0500 Subject: cifs: make dnotify thread experimental code Now that tasks sleeping in wait_for_response will time out on their own, we're not reliant on the dnotify thread to do this. Mark it as experimental code for now. 
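As background for the tcpSem conversion in the previous patch, the mapping from the semaphore API to the mutex API is roughly the following; this is an illustrative fragment, not taken from any patch in this series:

	struct semaphore tcpSem;	/* old: counting semaphore used as a lock */
	init_MUTEX(&tcpSem);		/* count initialized to 1 */
	down(&tcpSem);			/* acquire */
	up(&tcpSem);			/* release */

	struct mutex srv_mutex;		/* new: a true mutex, single holder enforced */
	mutex_init(&srv_mutex);
	mutex_lock(&srv_mutex);
	mutex_unlock(&srv_mutex);

Besides documenting the single-holder intent, the mutex form also gets lockdep and debug checking for free.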
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 7f87066ce29..061a1dca987 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -66,7 +66,9 @@ unsigned int sign_CIFS_PDUs = 1; extern struct task_struct *oplockThread; /* remove sparse warning */ struct task_struct *oplockThread = NULL; /* extern struct task_struct * dnotifyThread; remove sparse warning */ +#ifdef CONFIG_CIFS_EXPERIMENTAL static struct task_struct *dnotifyThread = NULL; +#endif static const struct super_operations cifs_super_ops; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; module_param(CIFSMaxBufSize, int, 0); @@ -1049,6 +1051,7 @@ static int cifs_oplock_thread(void *dummyarg) return 0; } +#ifdef CONFIG_CIFS_EXPERIMENTAL static int cifs_dnotify_thread(void *dummyarg) { struct list_head *tmp; @@ -1074,6 +1077,7 @@ static int cifs_dnotify_thread(void *dummyarg) return 0; } +#endif static int __init init_cifs(void) @@ -1151,16 +1155,20 @@ init_cifs(void) goto out_unregister_dfs_key_type; } +#ifdef CONFIG_CIFS_EXPERIMENTAL dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd"); if (IS_ERR(dnotifyThread)) { rc = PTR_ERR(dnotifyThread); cERROR(1, ("error %d create dnotify thread", rc)); goto out_stop_oplock_thread; } +#endif return 0; +#ifdef CONFIG_CIFS_EXPERIMENTAL out_stop_oplock_thread: +#endif kthread_stop(oplockThread); out_unregister_dfs_key_type: #ifdef CONFIG_CIFS_DFS_UPCALL @@ -1199,8 +1207,10 @@ exit_cifs(void) cifs_destroy_inodecache(); cifs_destroy_mids(); cifs_destroy_request_bufs(); - kthread_stop(oplockThread); +#ifdef CONFIG_CIFS_EXPERIMENTAL kthread_stop(dnotifyThread); +#endif + kthread_stop(oplockThread); } MODULE_AUTHOR("Steve French "); -- cgit v1.2.3-70-g09d2 From 8ecaf67a8ea58c8f131ff045475c74e9538d6b7a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 15:23:50 -0500 Subject: cifs: account for IPv6 in ses->serverName and clean up netbios name handling The current code for setting the session serverName is IPv4-specific. Allow it to be an IPv6 address as well. Use NIP* macros to set the format. This also entails increasing the length of the serverName field, so declare a new macro for RFC1001 name length and use it in the appropriate places. Finally, drop the unicode_server_Name field from TCP_Server_Info since it's not used. We can add it back later if needed, but for now it just wastes memory. 
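For reference, the 40 in the new SERVER_NAME_LENGTH corresponds to the longest possible NIP6_FMT rendering: eight zero-padded four-digit hex groups plus seven separating colons come to 39 characters, leaving one byte for the terminating NUL. A minimal fragment that exercises the sizing, assuming only the macros used in the patch below (the local variable names are illustrative):

	/* fragment only: sizing check for the new SERVER_NAME_LENGTH of 40 */
	struct in6_addr addr = IN6ADDR_LOOPBACK_INIT;	/* worst case is four hex digits per group */
	char name[SERVER_NAME_LEN_WITH_NULL];		/* SERVER_NAME_LENGTH + 1 per cifsglob.h */
	sprintf(name, NIP6_FMT, NIP6(addr));		/* at most 39 characters plus NUL */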
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 11 +++++++---- fs/cifs/connect.c | 38 ++++++++++++++++++++++++++------------ 2 files changed, 33 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 9b002cca6bb..0fb934d3623 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -47,7 +47,11 @@ */ #define CIFS_MAX_REQ 50 -#define SERVER_NAME_LENGTH 15 +#define RFC1001_NAME_LEN 15 +#define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) + +/* currently length of NIP6_FMT */ +#define SERVER_NAME_LENGTH 40 #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) /* used to define string lengths for reversing unicode strings */ @@ -125,8 +129,7 @@ struct TCP_Server_Info { struct list_head smb_ses_list; int srv_count; /* reference counter */ /* 15 character server name + 0x20 16th byte indicating type = srv */ - char server_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; - char unicode_server_Name[SERVER_NAME_LEN_WITH_NULL * 2]; + char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; char *hostname; /* hostname portion of UNC string */ struct socket *ssocket; union { @@ -171,7 +174,7 @@ struct TCP_Server_Info { __u16 CurrentMid; /* multiplex id - rotating counter */ char cryptKey[CIFS_CRYPTO_KEY_SIZE]; /* 16th byte of RFC1001 workstation name is always null */ - char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; + char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; __u32 sequence_number; /* needed for CIFS PDU signature */ struct mac_key mac_signing_key; char ntlmv2_hash[16]; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 61b68a0ee2f..65c12194006 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1690,22 +1690,30 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, if (ses_init_buf) { ses_init_buf->trailer.session_req.called_len = 32; if (target_name && (target_name[0] != 0)) { - rfc1002mangle(ses_init_buf->trailer.session_req.called_name, - target_name, 16); + rfc1002mangle(ses_init_buf->trailer. + session_req.called_name, + target_name, + RFC1001_NAME_LEN_WITH_NULL); } else { - rfc1002mangle(ses_init_buf->trailer.session_req.called_name, - DEFAULT_CIFS_CALLED_NAME, 16); + rfc1002mangle(ses_init_buf->trailer. + session_req.called_name, + DEFAULT_CIFS_CALLED_NAME, + RFC1001_NAME_LEN_WITH_NULL); } ses_init_buf->trailer.session_req.calling_len = 32; /* calling name ends in null (byte 16) from old smb convention. */ if (netbios_name && (netbios_name[0] != 0)) { - rfc1002mangle(ses_init_buf->trailer.session_req.calling_name, - netbios_name, 16); + rfc1002mangle(ses_init_buf->trailer. + session_req.calling_name, + netbios_name, + RFC1001_NAME_LEN_WITH_NULL); } else { - rfc1002mangle(ses_init_buf->trailer.session_req.calling_name, - "LINUX_CIFS_CLNT", 16); + rfc1002mangle(ses_init_buf->trailer. 
+ session_req.calling_name, + "LINUX_CIFS_CLNT", + RFC1001_NAME_LEN_WITH_NULL); } ses_init_buf->trailer.session_req.scope1 = 0; ses_init_buf->trailer.session_req.scope2 = 0; @@ -2194,9 +2202,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, } rc = 0; memcpy(srvTcp->workstation_RFC1001_name, - volume_info.source_rfc1001_name, 16); + volume_info.source_rfc1001_name, + RFC1001_NAME_LEN_WITH_NULL); memcpy(srvTcp->server_RFC1001_name, - volume_info.target_rfc1001_name, 16); + volume_info.target_rfc1001_name, + RFC1001_NAME_LEN_WITH_NULL); srvTcp->sequence_number = 0; INIT_LIST_HEAD(&srvTcp->tcp_ses_list); INIT_LIST_HEAD(&srvTcp->smb_ses_list); @@ -2235,8 +2245,12 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* new SMB session uses our srvTcp ref */ pSesInfo->server = srvTcp; - sprintf(pSesInfo->serverName, "%u.%u.%u.%u", - NIPQUAD(sin_server->sin_addr.s_addr)); + if (addr.sa_family == AF_INET6) + sprintf(pSesInfo->serverName, NIP6_FMT, + NIP6(sin_server6->sin6_addr)); + else + sprintf(pSesInfo->serverName, NIPQUAD_FMT, + NIPQUAD(sin_server->sin_addr.s_addr)); write_lock(&cifs_tcp_ses_lock); list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list); -- cgit v1.2.3-70-g09d2 From 63c038c29774476c5dae759e348c269342b4dbef Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 18:41:46 -0500 Subject: cifs: move allocation of new TCP_Server_Info into separate function Clean up cifs_mount a bit by moving the code that creates new TCP sessions into a separate function. Have that function search for an existing socket and then create a new one if one isn't found. Also reorganize the initializion of TCP_Server_Info a bit to prepare for cleanup of the socket connection code. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 282 +++++++++++++++++++++++++++++------------------------- 1 file changed, 150 insertions(+), 132 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 65c12194006..56095b24d5b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1417,6 +1417,148 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) force_sig(SIGKILL, task); } +static struct TCP_Server_Info * +cifs_get_tcp_session(struct smb_vol *volume_info) +{ + struct TCP_Server_Info *tcp_ses = NULL; + struct sockaddr addr; + struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; + struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr; + int rc; + + memset(&addr, 0, sizeof(struct sockaddr)); + + if (volume_info->UNCip && volume_info->UNC) { + rc = cifs_inet_pton(AF_INET, volume_info->UNCip, + &sin_server->sin_addr.s_addr); + + if (rc <= 0) { + /* not ipv4 address, try ipv6 */ + rc = cifs_inet_pton(AF_INET6, volume_info->UNCip, + &sin_server6->sin6_addr.in6_u); + if (rc > 0) + addr.sa_family = AF_INET6; + } else { + addr.sa_family = AF_INET; + } + + if (rc <= 0) { + /* we failed translating address */ + rc = -EINVAL; + goto out_err; + } + + cFYI(1, ("UNC: %s ip: %s", volume_info->UNC, + volume_info->UNCip)); + } else if (volume_info->UNCip) { + /* BB using ip addr as tcp_ses name to connect to the + DFS root below */ + cERROR(1, ("Connecting to DFS root not implemented yet")); + rc = -EINVAL; + goto out_err; + } else /* which tcp_sess DFS root would we conect to */ { + cERROR(1, + ("CIFS mount error: No UNC path (e.g. 
-o " + "unc=//192.168.1.100/public) specified")); + rc = -EINVAL; + goto out_err; + } + + /* see if we already have a matching tcp_ses */ + tcp_ses = cifs_find_tcp_session(&addr); + if (tcp_ses) + return tcp_ses; + + tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); + if (!tcp_ses) { + rc = -ENOMEM; + goto out_err; + } + + tcp_ses->hostname = extract_hostname(volume_info->UNC); + if (IS_ERR(tcp_ses->hostname)) { + rc = PTR_ERR(tcp_ses->hostname); + goto out_err; + } + + tcp_ses->noblocksnd = volume_info->noblocksnd; + tcp_ses->noautotune = volume_info->noautotune; + atomic_set(&tcp_ses->inFlight, 0); + init_waitqueue_head(&tcp_ses->response_q); + init_waitqueue_head(&tcp_ses->request_q); + INIT_LIST_HEAD(&tcp_ses->pending_mid_q); + mutex_init(&tcp_ses->srv_mutex); + memcpy(tcp_ses->workstation_RFC1001_name, + volume_info->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); + memcpy(tcp_ses->server_RFC1001_name, + volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); + tcp_ses->sequence_number = 0; + INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); + INIT_LIST_HEAD(&tcp_ses->smb_ses_list); + + /* + * at this point we are the only ones with the pointer + * to the struct since the kernel thread not created yet + * no need to spinlock this init of tcpStatus or srv_count + */ + tcp_ses->tcpStatus = CifsNew; + ++tcp_ses->srv_count; + + if (addr.sa_family == AF_INET6) { + cFYI(1, ("attempting ipv6 connect")); + /* BB should we allow ipv6 on port 139? */ + /* other OS never observed in Wild doing 139 with v6 */ + memcpy(&tcp_ses->addr.sockAddr6, sin_server6, + sizeof(struct sockaddr_in6)); + sin_server6->sin6_port = htons(volume_info->port); + rc = ipv6_connect(sin_server6, &tcp_ses->ssocket, + volume_info->noblocksnd); + } else { + memcpy(&tcp_ses->addr.sockAddr, sin_server, + sizeof(struct sockaddr_in)); + sin_server->sin_port = htons(volume_info->port); + rc = ipv4_connect(sin_server, &tcp_ses->ssocket, + volume_info->source_rfc1001_name, + volume_info->target_rfc1001_name, + volume_info->noblocksnd, + volume_info->noautotune); + } + if (rc < 0) { + cERROR(1, ("Error connecting to socket. Aborting operation")); + goto out_err; + } + + /* + * since we're in a cifs function already, we know that + * this will succeed. No need for try_module_get(). + */ + __module_get(THIS_MODULE); + tcp_ses->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, + tcp_ses, "cifsd"); + if (IS_ERR(tcp_ses->tsk)) { + rc = PTR_ERR(tcp_ses->tsk); + cERROR(1, ("error %d create cifsd thread", rc)); + module_put(THIS_MODULE); + goto out_err; + } + + /* thread spawned, put it on the list */ + write_lock(&cifs_tcp_ses_lock); + list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list); + write_unlock(&cifs_tcp_ses_lock); + + return tcp_ses; + +out_err: + if (tcp_ses) { + kfree(tcp_ses->hostname); + if (tcp_ses->ssocket) + sock_release(tcp_ses->ssocket); + kfree(tcp_ses); + } + return ERR_PTR(rc); +} + static struct cifsSesInfo * cifs_find_smb_ses(struct TCP_Server_Info *server, char *username) { @@ -2043,10 +2185,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, { int rc = 0; int xid; - struct socket *csocket = NULL; - struct sockaddr addr; - struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr; - struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr; struct smb_vol volume_info; struct cifsSesInfo *pSesInfo = NULL; struct cifsTconInfo *tcon = NULL; @@ -2056,7 +2194,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* cFYI(1, ("Entering cifs_mount. 
Xid: %d with: %s", xid, mount_data)); */ - memset(&addr, 0, sizeof(struct sockaddr)); memset(&volume_info, 0, sizeof(struct smb_vol)); if (cifs_parse_mount_options(mount_data, devname, &volume_info)) { rc = -EINVAL; @@ -2077,42 +2214,6 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, goto out; } - if (volume_info.UNCip && volume_info.UNC) { - rc = cifs_inet_pton(AF_INET, volume_info.UNCip, - &sin_server->sin_addr.s_addr); - - if (rc <= 0) { - /* not ipv4 address, try ipv6 */ - rc = cifs_inet_pton(AF_INET6, volume_info.UNCip, - &sin_server6->sin6_addr.in6_u); - if (rc > 0) - addr.sa_family = AF_INET6; - } else { - addr.sa_family = AF_INET; - } - - if (rc <= 0) { - /* we failed translating address */ - rc = -EINVAL; - goto out; - } - - cFYI(1, ("UNC: %s ip: %s", volume_info.UNC, volume_info.UNCip)); - /* success */ - rc = 0; - } else if (volume_info.UNCip) { - /* BB using ip addr as server name to connect to the - DFS root below */ - cERROR(1, ("Connecting to DFS root not implemented yet")); - rc = -EINVAL; - goto out; - } else /* which servers DFS root would we conect to */ { - cERROR(1, - ("CIFS mount error: No UNC path (e.g. -o " - "unc=//192.168.1.100/public) specified")); - rc = -EINVAL; - goto out; - } /* this is needed for ASCII cp to Unicode converts */ if (volume_info.iocharset == NULL) { @@ -2128,94 +2229,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, } } - srvTcp = cifs_find_tcp_session(&addr); - if (!srvTcp) { /* create socket */ - if (addr.sa_family == AF_INET6) { - cFYI(1, ("attempting ipv6 connect")); - /* BB should we allow ipv6 on port 139? */ - /* other OS never observed in Wild doing 139 with v6 */ - sin_server6->sin6_port = htons(volume_info.port); - rc = ipv6_connect(sin_server6, &csocket, - volume_info.noblocksnd); - } else { - sin_server->sin_port = htons(volume_info.port); - rc = ipv4_connect(sin_server, &csocket, - volume_info.source_rfc1001_name, - volume_info.target_rfc1001_name, - volume_info.noblocksnd, - volume_info.noautotune); - } - if (rc < 0) { - cERROR(1, ("Error connecting to socket. " - "Aborting operation")); - if (csocket != NULL) - sock_release(csocket); - goto out; - } - - srvTcp = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL); - if (!srvTcp) { - rc = -ENOMEM; - sock_release(csocket); - goto out; - } else { - srvTcp->noblocksnd = volume_info.noblocksnd; - srvTcp->noautotune = volume_info.noautotune; - if (addr.sa_family == AF_INET6) - memcpy(&srvTcp->addr.sockAddr6, sin_server6, - sizeof(struct sockaddr_in6)); - else - memcpy(&srvTcp->addr.sockAddr, sin_server, - sizeof(struct sockaddr_in)); - atomic_set(&srvTcp->inFlight, 0); - /* BB Add code for ipv6 case too */ - srvTcp->ssocket = csocket; - srvTcp->hostname = extract_hostname(volume_info.UNC); - if (IS_ERR(srvTcp->hostname)) { - rc = PTR_ERR(srvTcp->hostname); - sock_release(csocket); - goto out; - } - init_waitqueue_head(&srvTcp->response_q); - init_waitqueue_head(&srvTcp->request_q); - INIT_LIST_HEAD(&srvTcp->pending_mid_q); - /* at this point we are the only ones with the pointer - to the struct since the kernel thread not created yet - so no need to spinlock this init of tcpStatus */ - srvTcp->tcpStatus = CifsNew; - mutex_init(&srvTcp->srv_mutex); - - /* - * since we're in a cifs function already, we know that - * this will succeed. No need for try_module_get(). 
- */ - __module_get(THIS_MODULE); - srvTcp->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, srvTcp, "cifsd"); - if (IS_ERR(srvTcp->tsk)) { - rc = PTR_ERR(srvTcp->tsk); - cERROR(1, ("error %d create cifsd thread", rc)); - module_put(THIS_MODULE); - srvTcp->tsk = NULL; - sock_release(csocket); - kfree(srvTcp->hostname); - goto out; - } - rc = 0; - memcpy(srvTcp->workstation_RFC1001_name, - volume_info.source_rfc1001_name, - RFC1001_NAME_LEN_WITH_NULL); - memcpy(srvTcp->server_RFC1001_name, - volume_info.target_rfc1001_name, - RFC1001_NAME_LEN_WITH_NULL); - srvTcp->sequence_number = 0; - INIT_LIST_HEAD(&srvTcp->tcp_ses_list); - INIT_LIST_HEAD(&srvTcp->smb_ses_list); - ++srvTcp->srv_count; - write_lock(&cifs_tcp_ses_lock); - list_add(&srvTcp->tcp_ses_list, - &cifs_tcp_ses_list); - write_unlock(&cifs_tcp_ses_lock); - } + /* get a reference to a tcp session */ + srvTcp = cifs_get_tcp_session(&volume_info); + if (IS_ERR(srvTcp)) { + rc = PTR_ERR(srvTcp); + goto out; } pSesInfo = cifs_find_smb_ses(srvTcp, volume_info.username); @@ -2245,12 +2263,12 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* new SMB session uses our srvTcp ref */ pSesInfo->server = srvTcp; - if (addr.sa_family == AF_INET6) + if (srvTcp->addr.sockAddr6.sin6_family == AF_INET6) sprintf(pSesInfo->serverName, NIP6_FMT, - NIP6(sin_server6->sin6_addr)); + NIP6(srvTcp->addr.sockAddr6.sin6_addr)); else sprintf(pSesInfo->serverName, NIPQUAD_FMT, - NIPQUAD(sin_server->sin_addr.s_addr)); + NIPQUAD(srvTcp->addr.sockAddr.sin_addr.s_addr)); write_lock(&cifs_tcp_ses_lock); list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list); -- cgit v1.2.3-70-g09d2 From 7586b76585d15db767c19255ba0ecfb164df99f7 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 18:41:49 -0500 Subject: cifs: don't declare smb_vol info on the stack struct smb_vol is fairly large, it's probably best to kzalloc it... Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 91 ++++++++++++++++++++++++++++++------------------------- 1 file changed, 49 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 56095b24d5b..c2ed0f57a66 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2185,27 +2185,30 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, { int rc = 0; int xid; - struct smb_vol volume_info; + struct smb_vol *volume_info; struct cifsSesInfo *pSesInfo = NULL; struct cifsTconInfo *tcon = NULL; struct TCP_Server_Info *srvTcp = NULL; xid = GetXid(); -/* cFYI(1, ("Entering cifs_mount. 
Xid: %d with: %s", xid, mount_data)); */ + volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); + if (!volume_info) { + rc = -ENOMEM; + goto out; + } - memset(&volume_info, 0, sizeof(struct smb_vol)); - if (cifs_parse_mount_options(mount_data, devname, &volume_info)) { + if (cifs_parse_mount_options(mount_data, devname, volume_info)) { rc = -EINVAL; goto out; } - if (volume_info.nullauth) { + if (volume_info->nullauth) { cFYI(1, ("null user")); - volume_info.username = ""; - } else if (volume_info.username) { + volume_info->username = ""; + } else if (volume_info->username) { /* BB fixme parse for domain name here */ - cFYI(1, ("Username: %s", volume_info.username)); + cFYI(1, ("Username: %s", volume_info->username)); } else { cifserror("No username specified"); /* In userspace mount helper we can get user name from alternate @@ -2216,27 +2219,27 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* this is needed for ASCII cp to Unicode converts */ - if (volume_info.iocharset == NULL) { + if (volume_info->iocharset == NULL) { cifs_sb->local_nls = load_nls_default(); /* load_nls_default can not return null */ } else { - cifs_sb->local_nls = load_nls(volume_info.iocharset); + cifs_sb->local_nls = load_nls(volume_info->iocharset); if (cifs_sb->local_nls == NULL) { cERROR(1, ("CIFS mount error: iocharset %s not found", - volume_info.iocharset)); + volume_info->iocharset)); rc = -ELIBACC; goto out; } } /* get a reference to a tcp session */ - srvTcp = cifs_get_tcp_session(&volume_info); + srvTcp = cifs_get_tcp_session(volume_info); if (IS_ERR(srvTcp)) { rc = PTR_ERR(srvTcp); goto out; } - pSesInfo = cifs_find_smb_ses(srvTcp, volume_info.username); + pSesInfo = cifs_find_smb_ses(srvTcp, volume_info->username); if (pSesInfo) { cFYI(1, ("Existing smb sess found (status=%d)", pSesInfo->status)); @@ -2274,24 +2277,24 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, list_add(&pSesInfo->smb_ses_list, &srvTcp->smb_ses_list); write_unlock(&cifs_tcp_ses_lock); - /* volume_info.password freed at unmount */ - if (volume_info.password) { - pSesInfo->password = volume_info.password; + /* volume_info->password freed at unmount */ + if (volume_info->password) { + pSesInfo->password = volume_info->password; /* set to NULL to prevent freeing on exit */ - volume_info.password = NULL; + volume_info->password = NULL; } - if (volume_info.username) - strncpy(pSesInfo->userName, volume_info.username, + if (volume_info->username) + strncpy(pSesInfo->userName, volume_info->username, MAX_USERNAME_SIZE); - if (volume_info.domainname) { - int len = strlen(volume_info.domainname); + if (volume_info->domainname) { + int len = strlen(volume_info->domainname); pSesInfo->domainName = kmalloc(len + 1, GFP_KERNEL); if (pSesInfo->domainName) strcpy(pSesInfo->domainName, - volume_info.domainname); + volume_info->domainname); } - pSesInfo->linux_uid = volume_info.linux_uid; - pSesInfo->overrideSecFlg = volume_info.secFlg; + pSesInfo->linux_uid = volume_info->linux_uid; + pSesInfo->overrideSecFlg = volume_info->secFlg; down(&pSesInfo->sesSem); /* BB FIXME need to pass vol->secFlgs BB */ @@ -2302,14 +2305,14 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* search for existing tcon to this server share */ if (!rc) { - setup_cifs_sb(&volume_info, cifs_sb); + setup_cifs_sb(volume_info, cifs_sb); - tcon = cifs_find_tcon(pSesInfo, volume_info.UNC); + tcon = cifs_find_tcon(pSesInfo, volume_info->UNC); if (tcon) { cFYI(1, ("Found match on UNC path")); /* existing tcon already 
has a reference */ cifs_put_smb_ses(pSesInfo); - if (tcon->seal != volume_info.seal) + if (tcon->seal != volume_info->seal) cERROR(1, ("transport encryption setting " "conflicts with existing tid")); } else { @@ -2321,8 +2324,8 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, tcon->ses = pSesInfo; /* check for null share name ie connect to dfs root */ - if ((strchr(volume_info.UNC + 3, '\\') == NULL) - && (strchr(volume_info.UNC + 3, '/') == NULL)) { + if ((strchr(volume_info->UNC + 3, '\\') == NULL) + && (strchr(volume_info->UNC + 3, '/') == NULL)) { /* rc = connect_to_dfs_path(...) */ cFYI(1, ("DFS root not supported")); rc = -ENODEV; @@ -2331,10 +2334,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* BB Do we need to wrap sesSem around * this TCon call and Unix SetFS as * we do on SessSetup and reconnect? */ - rc = CIFSTCon(xid, pSesInfo, volume_info.UNC, + rc = CIFSTCon(xid, pSesInfo, volume_info->UNC, tcon, cifs_sb->local_nls); cFYI(1, ("CIFS Tcon rc = %d", rc)); - if (volume_info.nodfs) { + if (volume_info->nodfs) { tcon->Flags &= ~SMB_SHARE_IS_IN_DFS; cFYI(1, ("DFS disabled (%d)", tcon->Flags)); @@ -2342,7 +2345,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, } if (rc) goto mount_fail_check; - tcon->seal = volume_info.seal; + tcon->seal = volume_info->seal; write_lock(&cifs_tcp_ses_lock); list_add(&tcon->tcon_list, &pSesInfo->tcon_list); write_unlock(&cifs_tcp_ses_lock); @@ -2352,9 +2355,9 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, to a share so for resources mounted more than once to the same server share the last value passed in for the retry flag is used */ - tcon->retry = volume_info.retry; - tcon->nocase = volume_info.nocase; - tcon->local_lease = volume_info.local_lease; + tcon->retry = volume_info->retry; + tcon->nocase = volume_info->nocase; + tcon->local_lease = volume_info->local_lease; } if (pSesInfo) { if (pSesInfo->capabilities & CAP_LARGE_FILES) { @@ -2391,7 +2394,7 @@ mount_fail_check: if (tcon->ses->capabilities & CAP_UNIX) /* reset of caps checks mount to see if unix extensions disabled for just this mount */ - reset_cifs_unix_caps(xid, tcon, sb, &volume_info); + reset_cifs_unix_caps(xid, tcon, sb, volume_info); else tcon->unix_ext = 0; /* server does not support them */ @@ -2410,18 +2413,22 @@ mount_fail_check: cifs_sb->rsize = min(cifs_sb->rsize, (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); - /* volume_info.password is freed above when existing session found + /* volume_info->password is freed above when existing session found (in which case it is not needed anymore) but when new sesion is created the password ptr is put in the new session structure (in which case the password will be freed at unmount time) */ out: /* zero out password before freeing */ - if (volume_info.password != NULL) { - memset(volume_info.password, 0, strlen(volume_info.password)); - kfree(volume_info.password); + if (volume_info) { + if (volume_info->password != NULL) { + memset(volume_info->password, 0, + strlen(volume_info->password)); + kfree(volume_info->password); + } + kfree(volume_info->UNC); + kfree(volume_info->prepath); + kfree(volume_info); } - kfree(volume_info.UNC); - kfree(volume_info.prepath); FreeXid(xid); return rc; } -- cgit v1.2.3-70-g09d2 From bcf4b1063db246a90b9e09e0556f635d632eef36 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 18:42:15 -0500 Subject: cifs: make ipv4_connect take a TCP_Server_Info arg In order to unify the smb_send routines, we need to 
reorganize the routines that connect the sockets. Have ipv4_connect take a TCP_Server_Info pointer and get the necessary fields from that. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 146 ++++++++++++++++++++++++++---------------------------- 1 file changed, 69 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index c2ed0f57a66..a36f4bd5a76 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -101,12 +101,7 @@ struct smb_vol { char *prepath; }; -static int ipv4_connect(struct sockaddr_in *psin_server, - struct socket **csocket, - char *netb_name, - char *server_netb_name, - bool noblocksnd, - bool nosndbuf); /* ipv6 never set sndbuf size */ +static int ipv4_connect(struct TCP_Server_Info *server); static int ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket, bool noblocksnd); @@ -187,16 +182,11 @@ cifs_reconnect(struct TCP_Server_Info *server) while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { try_to_freeze(); - if (server->addr.sockAddr6.sin6_family == AF_INET6) { + if (server->addr.sockAddr6.sin6_family == AF_INET6) rc = ipv6_connect(&server->addr.sockAddr6, &server->ssocket, server->noautotune); - } else { - rc = ipv4_connect(&server->addr.sockAddr, - &server->ssocket, - server->workstation_RFC1001_name, - server->server_RFC1001_name, - server->noblocksnd, server->noautotune); - } + else + rc = ipv4_connect(server); if (rc) { cFYI(1, ("reconnect error %d", rc)); msleep(3000); @@ -1517,11 +1507,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) memcpy(&tcp_ses->addr.sockAddr, sin_server, sizeof(struct sockaddr_in)); sin_server->sin_port = htons(volume_info->port); - rc = ipv4_connect(sin_server, &tcp_ses->ssocket, - volume_info->source_rfc1001_name, - volume_info->target_rfc1001_name, - volume_info->noblocksnd, - volume_info->noautotune); + rc = ipv4_connect(tcp_ses); } if (rc < 0) { cERROR(1, ("Error connecting to socket. Aborting operation")); @@ -1735,93 +1721,96 @@ static void rfc1002mangle(char *target, char *source, unsigned int length) static int -ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, - char *netbios_name, char *target_name, - bool noblocksnd, bool noautotune) +ipv4_connect(struct TCP_Server_Info *server) { int rc = 0; - int connected = 0; + bool connected = false; __be16 orig_port = 0; + struct socket *socket = server->ssocket; - if (*csocket == NULL) { + if (socket == NULL) { rc = sock_create_kern(PF_INET, SOCK_STREAM, - IPPROTO_TCP, csocket); + IPPROTO_TCP, &socket); if (rc < 0) { cERROR(1, ("Error %d creating socket", rc)); - *csocket = NULL; return rc; - } else { - /* BB other socket options to set KEEPALIVE, NODELAY? */ - cFYI(1, ("Socket created")); - (*csocket)->sk->sk_allocation = GFP_NOFS; - cifs_reclassify_socket4(*csocket); } + + /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ + cFYI(1, ("Socket created")); + server->ssocket = socket; + socket->sk->sk_allocation = GFP_NOFS; + cifs_reclassify_socket4(socket); } - psin_server->sin_family = AF_INET; - if (psin_server->sin_port) { /* user overrode default port */ - rc = (*csocket)->ops->connect(*csocket, - (struct sockaddr *) psin_server, - sizeof(struct sockaddr_in), 0); + /* user overrode default port */ + if (server->addr.sockAddr.sin_port) { + rc = socket->ops->connect(socket, (struct sockaddr *) + &server->addr.sockAddr, + sizeof(struct sockaddr_in), 0); if (rc >= 0) - connected = 1; + connected = true; } if (!connected) { /* save original port so we can retry user specified port later if fall back ports fail this time */ - orig_port = psin_server->sin_port; + orig_port = server->addr.sockAddr.sin_port; /* do not retry on the same port we just failed on */ - if (psin_server->sin_port != htons(CIFS_PORT)) { - psin_server->sin_port = htons(CIFS_PORT); - - rc = (*csocket)->ops->connect(*csocket, - (struct sockaddr *) psin_server, - sizeof(struct sockaddr_in), 0); + if (server->addr.sockAddr.sin_port != htons(CIFS_PORT)) { + server->addr.sockAddr.sin_port = htons(CIFS_PORT); + rc = socket->ops->connect(socket, + (struct sockaddr *) + &server->addr.sockAddr, + sizeof(struct sockaddr_in), 0); if (rc >= 0) - connected = 1; + connected = true; } } if (!connected) { - psin_server->sin_port = htons(RFC1001_PORT); - rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *) - psin_server, + server->addr.sockAddr.sin_port = htons(RFC1001_PORT); + rc = socket->ops->connect(socket, (struct sockaddr *) + &server->addr.sockAddr, sizeof(struct sockaddr_in), 0); if (rc >= 0) - connected = 1; + connected = true; } /* give up here - unless we want to retry on different protocol families some day */ if (!connected) { if (orig_port) - psin_server->sin_port = orig_port; + server->addr.sockAddr.sin_port = orig_port; cFYI(1, ("Error %d connecting to server via ipv4", rc)); - sock_release(*csocket); - *csocket = NULL; + sock_release(socket); + server->ssocket = NULL; return rc; } - /* Eventually check for other socket options to change from - the default. sock_setsockopt not used because it expects - user space buffer */ - cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", - (*csocket)->sk->sk_sndbuf, - (*csocket)->sk->sk_rcvbuf, (*csocket)->sk->sk_rcvtimeo)); - (*csocket)->sk->sk_rcvtimeo = 7 * HZ; - if (!noblocksnd) - (*csocket)->sk->sk_sndtimeo = 3 * HZ; + + + /* + * Eventually check for other socket options to change from + * the default. 
sock_setsockopt not used because it expects + * user space buffer + */ + socket->sk->sk_rcvtimeo = 7 * HZ; + socket->sk->sk_sndtimeo = 3 * HZ; /* make the bufsizes depend on wsize/rsize and max requests */ - if (noautotune) { - if ((*csocket)->sk->sk_sndbuf < (200 * 1024)) - (*csocket)->sk->sk_sndbuf = 200 * 1024; - if ((*csocket)->sk->sk_rcvbuf < (140 * 1024)) - (*csocket)->sk->sk_rcvbuf = 140 * 1024; + if (server->noautotune) { + if (socket->sk->sk_sndbuf < (200 * 1024)) + socket->sk->sk_sndbuf = 200 * 1024; + if (socket->sk->sk_rcvbuf < (140 * 1024)) + socket->sk->sk_rcvbuf = 140 * 1024; } + cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx", + socket->sk->sk_sndbuf, + socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo)); + /* send RFC1001 sessinit */ - if (psin_server->sin_port == htons(RFC1001_PORT)) { + if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) { /* some servers require RFC1001 sessinit before sending negprot - BB check reconnection in case where second sessinit is sent but no second negprot */ @@ -1831,39 +1820,42 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket, GFP_KERNEL); if (ses_init_buf) { ses_init_buf->trailer.session_req.called_len = 32; - if (target_name && (target_name[0] != 0)) { + if (server->server_RFC1001_name && + server->server_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.called_name, - target_name, + server->server_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); - } else { + else rfc1002mangle(ses_init_buf->trailer. session_req.called_name, DEFAULT_CIFS_CALLED_NAME, RFC1001_NAME_LEN_WITH_NULL); - } ses_init_buf->trailer.session_req.calling_len = 32; + /* calling name ends in null (byte 16) from old smb convention. */ - if (netbios_name && (netbios_name[0] != 0)) { + if (server->workstation_RFC1001_name && + server->workstation_RFC1001_name[0] != 0) rfc1002mangle(ses_init_buf->trailer. session_req.calling_name, - netbios_name, + server->workstation_RFC1001_name, RFC1001_NAME_LEN_WITH_NULL); - } else { + else rfc1002mangle(ses_init_buf->trailer. 
session_req.calling_name, "LINUX_CIFS_CLNT", RFC1001_NAME_LEN_WITH_NULL); - } + ses_init_buf->trailer.session_req.scope1 = 0; ses_init_buf->trailer.session_req.scope2 = 0; smb_buf = (struct smb_hdr *)ses_init_buf; /* sizeof RFC1002_SESSION_REQUEST with no scope */ smb_buf->smb_buf_length = 0x81000044; - rc = smb_send(*csocket, smb_buf, 0x44, - (struct sockaddr *)psin_server, noblocksnd); + rc = smb_send(socket, smb_buf, 0x44, + (struct sockaddr *) &server->addr.sockAddr, + server->noblocksnd); kfree(ses_init_buf); msleep(1); /* RFC1001 layer in at least one server requires very short break before negprot -- cgit v1.2.3-70-g09d2 From d5c5605c27c92dac6de1a7a658af5b030847f949 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 1 Dec 2008 18:42:33 -0500 Subject: cifs: make ipv6_connect take a TCP_Server_Info arg Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 103 ++++++++++++++++++++++++++---------------------------- 1 file changed, 50 insertions(+), 53 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a36f4bd5a76..d6a3c1c8a6f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -102,19 +102,16 @@ struct smb_vol { }; static int ipv4_connect(struct TCP_Server_Info *server); -static int ipv6_connect(struct sockaddr_in6 *psin_server, - struct socket **csocket, bool noblocksnd); - - - /* - * cifs tcp session reconnection - * - * mark tcp session as reconnecting so temporarily locked - * mark all smb sessions as reconnecting for tcp session - * reconnect tcp session - * wake up waiters on reconnection? - (not needed currently) - */ +static int ipv6_connect(struct TCP_Server_Info *server); +/* + * cifs tcp session reconnection + * + * mark tcp session as reconnecting so temporarily locked + * mark all smb sessions as reconnecting for tcp session + * reconnect tcp session + * wake up waiters on reconnection? - (not needed currently) + */ static int cifs_reconnect(struct TCP_Server_Info *server) { @@ -183,8 +180,7 @@ cifs_reconnect(struct TCP_Server_Info *server) (server->tcpStatus != CifsGood)) { try_to_freeze(); if (server->addr.sockAddr6.sin6_family == AF_INET6) - rc = ipv6_connect(&server->addr.sockAddr6, - &server->ssocket, server->noautotune); + rc = ipv6_connect(server); else rc = ipv4_connect(server); if (rc) { @@ -1501,8 +1497,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) memcpy(&tcp_ses->addr.sockAddr6, sin_server6, sizeof(struct sockaddr_in6)); sin_server6->sin6_port = htons(volume_info->port); - rc = ipv6_connect(sin_server6, &tcp_ses->ssocket, - volume_info->noblocksnd); + rc = ipv6_connect(tcp_ses); } else { memcpy(&tcp_ses->addr.sockAddr, sin_server, sizeof(struct sockaddr_in)); @@ -1875,79 +1870,81 @@ ipv4_connect(struct TCP_Server_Info *server) } static int -ipv6_connect(struct sockaddr_in6 *psin_server, struct socket **csocket, - bool noblocksnd) +ipv6_connect(struct TCP_Server_Info *server) { int rc = 0; - int connected = 0; + bool connected = false; __be16 orig_port = 0; + struct socket *socket = server->ssocket; - if (*csocket == NULL) { + if (socket == NULL) { rc = sock_create_kern(PF_INET6, SOCK_STREAM, - IPPROTO_TCP, csocket); + IPPROTO_TCP, &socket); if (rc < 0) { cERROR(1, ("Error %d creating ipv6 socket", rc)); - *csocket = NULL; + socket = NULL; return rc; - } else { - /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ - cFYI(1, ("ipv6 Socket created")); - (*csocket)->sk->sk_allocation = GFP_NOFS; - cifs_reclassify_socket6(*csocket); } - } - psin_server->sin6_family = AF_INET6; + /* BB other socket options to set KEEPALIVE, NODELAY? */ + cFYI(1, ("ipv6 Socket created")); + server->ssocket = socket; + socket->sk->sk_allocation = GFP_NOFS; + cifs_reclassify_socket6(socket); + } - if (psin_server->sin6_port) { /* user overrode default port */ - rc = (*csocket)->ops->connect(*csocket, - (struct sockaddr *) psin_server, + /* user overrode default port */ + if (server->addr.sockAddr6.sin6_port) { + rc = socket->ops->connect(socket, + (struct sockaddr *) &server->addr.sockAddr6, sizeof(struct sockaddr_in6), 0); if (rc >= 0) - connected = 1; + connected = true; } if (!connected) { /* save original port so we can retry user specified port later if fall back ports fail this time */ - orig_port = psin_server->sin6_port; + orig_port = server->addr.sockAddr6.sin6_port; /* do not retry on the same port we just failed on */ - if (psin_server->sin6_port != htons(CIFS_PORT)) { - psin_server->sin6_port = htons(CIFS_PORT); - - rc = (*csocket)->ops->connect(*csocket, - (struct sockaddr *) psin_server, + if (server->addr.sockAddr6.sin6_port != htons(CIFS_PORT)) { + server->addr.sockAddr6.sin6_port = htons(CIFS_PORT); + rc = socket->ops->connect(socket, (struct sockaddr *) + &server->addr.sockAddr6, sizeof(struct sockaddr_in6), 0); if (rc >= 0) - connected = 1; + connected = true; } } if (!connected) { - psin_server->sin6_port = htons(RFC1001_PORT); - rc = (*csocket)->ops->connect(*csocket, (struct sockaddr *) - psin_server, sizeof(struct sockaddr_in6), 0); + server->addr.sockAddr6.sin6_port = htons(RFC1001_PORT); + rc = socket->ops->connect(socket, (struct sockaddr *) + &server->addr.sockAddr6, + sizeof(struct sockaddr_in6), 0); if (rc >= 0) - connected = 1; + connected = true; } /* give up here - unless we want to retry on different protocol families some day */ if (!connected) { if (orig_port) - psin_server->sin6_port = orig_port; + server->addr.sockAddr6.sin6_port = orig_port; cFYI(1, ("Error %d connecting to server via ipv6", rc)); - sock_release(*csocket); - *csocket = NULL; + sock_release(socket); + server->ssocket = NULL; return rc; } - /* Eventually check for other socket options to change from - the default. sock_setsockopt not used because it expects - user space buffer */ - (*csocket)->sk->sk_rcvtimeo = 7 * HZ; - if (!noblocksnd) - (*csocket)->sk->sk_sndtimeo = 3 * HZ; + /* + * Eventually check for other socket options to change from + * the default. sock_setsockopt not used because it expects + * user space buffer + */ + socket->sk->sk_rcvtimeo = 7 * HZ; + socket->sk->sk_sndtimeo = 3 * HZ; + server->ssocket = socket; return rc; } -- cgit v1.2.3-70-g09d2 From 13a6e42af8d90e2e8eb7fa50adf862a525b70518 Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 2 Dec 2008 17:24:33 +0000 Subject: [CIFS] add mount option to send mandatory rather than advisory locks Some applications/subsystems require mandatory byte range locks (as is used for Windows/DOS/OS2 etc). Sending advisory (posix style) byte range lock requests (instead of mandatory byte range locks) can lead to problems for these applications (which expect that other clients be prevented from writing to portions of the file which they have locked and are updating). 
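As a concrete illustration of the advisory semantics described above, consider the following userspace sketch (not part of the patch; the helper name is made up):

	#include <fcntl.h>

	/* Takes a POSIX (advisory) write lock on the first 4 KiB of fd.
	 * Only other processes that also call fcntl() on the same range are
	 * affected; a writer that never asks for the lock is not blocked,
	 * which is the behaviour that breaks DOS/Windows-style applications
	 * expecting mandatory byte-range locks. */
	static int lock_first_4k(int fd)
	{
		struct flock fl = {
			.l_type   = F_WRLCK,
			.l_whence = SEEK_SET,
			.l_start  = 0,
			.l_len    = 4096,
		};
		return fcntl(fd, F_SETLKW, &fl);	/* wait until the range is free */
	}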
This mount option allows mounting cifs with the new mount option "forcemand" (or "forcemandatorylock") in order to have the cifs client use mandatory byte range locks (ie SMB/CIFS/Windows/NTFS style locks) rather than posix byte range lock requests, even if the server would support posix byte range lock requests. This has no effect if the server does not support the CIFS Unix Extensions (since posix style locks require support for the CIFS Unix Extensions), but for mounts to Samba servers this can be helpful for Wine and applications that require mandatory byte range locks. Acked-by: Jeff Layton CC: Alexander Bokovoy Signed-off-by: Steve French --- fs/cifs/CHANGES | 6 ++++++ fs/cifs/README | 12 +++++++++++- fs/cifs/cifs_fs_sb.h | 5 +++-- fs/cifs/cifsfs.h | 2 +- fs/cifs/connect.c | 14 ++++++++++++++ fs/cifs/file.c | 25 +++++++++++++------------ 6 files changed, 48 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index e078b7aea14..3d848f463c4 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -1,3 +1,9 @@ +Version 1.56 +------------ +Add "forcemandatorylock" mount option to allow user to use mandatory +rather than posix (advisory) byte range locks, even though server would +support posix byte range locks. + Version 1.55 ------------ Various fixes to make delete of open files behavior more predictable diff --git a/fs/cifs/README b/fs/cifs/README index a439dc1739b..da4515e3be2 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -463,9 +463,19 @@ A partial list of the supported mount options follows: with cifs style mandatory byte range locks (and most cifs servers do not yet support requesting advisory byte range locks). + forcemandatorylock Even if the server supports posix (advisory) byte range + locking, send only mandatory lock requests. For some + (presumably rare) applications, originally coded for + DOS/Windows, which require Windows style mandatory byte range + locking, they may be able to take advantage of this option, + forcing the cifs client to only send mandatory locks + even if the cifs server would support posix advisory locks. + "forcemand" is accepted as a shorter form of this mount + option. nodfs Disable DFS (global name space support) even if the server claims to support it. This can help work around - a problem with parsing of DFS paths with Samba 3.0.24 server. + a problem with parsing of DFS paths with Samba server + versions 3.0.24 and 3.0.25. remount remount the share (often used to change from ro to rw mounts or vice versa) cifsacl Report mode bits (e.g. on stat) based on the Windows ACL for diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 877c85409f1..5edd5cc415a 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -20,7 +20,7 @@ #define CIFS_MOUNT_NO_PERM 1 /* do not do client vfs_perm check */ #define CIFS_MOUNT_SET_UID 2 /* set current->euid in create etc. 
*/ -#define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */ +#define CIFS_MOUNT_SERVER_INUM 4 /* inode numbers from uniqueid from server */ #define CIFS_MOUNT_DIRECT_IO 8 /* do not write nor read through page cache */ #define CIFS_MOUNT_NO_XATTR 0x10 /* if set - disable xattr support */ #define CIFS_MOUNT_MAP_SPECIAL_CHR 0x20 /* remap illegal chars in filenames */ @@ -30,7 +30,8 @@ #define CIFS_MOUNT_CIFS_ACL 0x200 /* send ACL requests to non-POSIX srv */ #define CIFS_MOUNT_OVERR_UID 0x400 /* override uid returned from server */ #define CIFS_MOUNT_OVERR_GID 0x800 /* override gid returned from server */ -#define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */ +#define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */ +#define CIFS_MOUNT_NOPOSIXBRL 0x2000 /* mandatory not posix byte range lock */ struct cifs_sb_info { struct cifsTconInfo *tcon; /* primary mount */ diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 074de0b5064..fb676076946 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -101,5 +101,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.55" +#define CIFS_VERSION "1.5666666" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index d6a3c1c8a6f..35194204283 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -89,6 +89,7 @@ struct smb_vol { bool nullauth:1; /* attempt to authenticate with null user */ bool nocase:1; /* request case insensitive filenames */ bool nobrl:1; /* disable sending byte range locks to srv */ + bool mand_lock:1; /* send mandatory not posix byte range lock reqs */ bool seal:1; /* request transport encryption on share */ bool nodfs:1; /* Do not request DFS, even if available */ bool local_lease:1; /* check leases only on local system, not remote */ @@ -1246,6 +1247,17 @@ cifs_parse_mount_options(char *options, const char *devname, if (vol->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP))) vol->file_mode = S_IALLUGO; + } else if (strnicmp(data, "forcemandatorylock", 9) == 0) { + /* will take the shorter form "forcemand" as well */ + /* This mount option will force use of mandatory + (DOS/Windows style) byte range locks, instead of + using posix advisory byte range locks, even if the + Unix extensions are available and posix locks would + be supported otherwise. 
If Unix extensions are not + negotiated this has no effect since mandatory locks + would be used (mandatory locks is all that those + those servers support) */ + vol->mand_lock = 1; } else if (strnicmp(data, "setuids", 7) == 0) { vol->setuids = 1; } else if (strnicmp(data, "nosetuids", 9) == 0) { @@ -2150,6 +2162,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; if (pvolume_info->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; + if (pvolume_info->mand_lock) + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; if (pvolume_info->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; if (pvolume_info->override_uid) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index f0a81e631ae..babd27a4699 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -644,10 +644,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) __u64 length; bool wait_flag = false; struct cifs_sb_info *cifs_sb; - struct cifsTconInfo *pTcon; + struct cifsTconInfo *tcon; __u16 netfid; __u8 lockType = LOCKING_ANDX_LARGE_FILES; - bool posix_locking; + bool posix_locking = 0; length = 1 + pfLock->fl_end - pfLock->fl_start; rc = -EACCES; @@ -698,7 +698,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) cFYI(1, ("Unknown type of lock")); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); - pTcon = cifs_sb->tcon; + tcon = cifs_sb->tcon; if (file->private_data == NULL) { FreeXid(xid); @@ -706,9 +706,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) } netfid = ((struct cifsFileInfo *)file->private_data)->netfid; - posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) && - (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability)); - + if ((tcon->ses->capabilities & CAP_UNIX) && + (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && + (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL == 0)) + posix_locking = 1; /* BB add code here to normalize offset and length to account for negative length which we can not accept over the wire */ @@ -719,7 +720,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) posix_lock_type = CIFS_RDLCK; else posix_lock_type = CIFS_WRLCK; - rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */, + rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, length, pfLock, posix_lock_type, wait_flag); FreeXid(xid); @@ -727,10 +728,10 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) } /* BB we could chain these into one lock request BB */ - rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start, + rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType, 0 /* wait flag */ ); if (rc == 0) { - rc = CIFSSMBLock(xid, pTcon, netfid, length, + rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1 /* numUnlock */ , 0 /* numLock */ , lockType, 0 /* wait flag */ ); @@ -767,7 +768,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if (numUnlock == 1) posix_lock_type = CIFS_UNLCK; - rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */, + rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, length, pfLock, posix_lock_type, wait_flag); } else { @@ -775,7 +776,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) (struct cifsFileInfo *)file->private_data; if (numLock) { - rc = CIFSSMBLock(xid, pTcon, netfid, length, + rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, numLock, lockType, wait_flag); @@ -796,7 +797,7 @@ int cifs_lock(struct file 
*file, int cmd, struct file_lock *pfLock) if (pfLock->fl_start <= li->offset && (pfLock->fl_start + length) >= (li->offset + li->length)) { - stored_rc = CIFSSMBLock(xid, pTcon, + stored_rc = CIFSSMBLock(xid, tcon, netfid, li->length, li->offset, 1, 0, li->type, false); -- cgit v1.2.3-70-g09d2 From acc18aa1e643519035abdab5e72dc75e534b5198 Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 2 Dec 2008 18:53:55 +0000 Subject: [CIFS] remove sparse warning Signed-off-by: Steve French --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index babd27a4699..b1e1fc6a6e6 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -708,7 +708,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if ((tcon->ses->capabilities & CAP_UNIX) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && - (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL == 0)) + ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) posix_locking = 1; /* BB add code here to normalize offset and length to account for negative length which we can not accept over the -- cgit v1.2.3-70-g09d2 From 3de2091ac722e7dbc37d87d9112ab19ec6a871de Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 2 Dec 2008 20:52:28 +0000 Subject: [CIFS] fix typo Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index fb676076946..2ce04c73d74 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -101,5 +101,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.5666666" +#define CIFS_VERSION "1.56" #endif /* _CIFSFS_H */ -- cgit v1.2.3-70-g09d2 From 61e748015866e48aff91284e3d300c6e3035a87a Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 3 Dec 2008 00:57:54 +0000 Subject: [CIFS] various minor cleanups pointed out by checkpatch script Signed-off-by: Steve French --- fs/cifs/AUTHORS | 2 ++ fs/cifs/cifsfs.c | 19 +++++++------------ fs/cifs/cifspdu.h | 2 +- fs/cifs/cifssmb.c | 16 ++++++++-------- fs/cifs/connect.c | 18 +++++++----------- fs/cifs/dir.c | 9 ++++----- 6 files changed, 29 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS index 9c136d7803d..7f7fa3c302a 100644 --- a/fs/cifs/AUTHORS +++ b/fs/cifs/AUTHORS @@ -36,7 +36,9 @@ Miklos Szeredi Kazeon team for various fixes especially for 2.4 version. 
Asser Ferno (Change Notify support) Shaggy (Dave Kleikamp) for inumerable small fs suggestions and some good cleanup +Gunter Kukkukk (testing and suggestions for support of old servers) Igor Mammedov (DFS support) +Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code) Test case and Bug Report contributors ------------------------------------- diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 061a1dca987..974c8f0e615 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -347,7 +347,6 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) if (cifs_sb) { tcon = cifs_sb->tcon; if (tcon) { -/* BB add prepath to mount options displayed */ seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName); if (tcon->ses) { if (tcon->ses->userName) @@ -439,9 +438,8 @@ int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid, xid = GetXid(); if (pTcon) { cFYI(1, ("set type: 0x%x id: %d", quota_type, qid)); - } else { + } else rc = -EIO; - } FreeXid(xid); return rc; @@ -463,9 +461,8 @@ int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid, xid = GetXid(); if (pTcon) { cFYI(1, ("set type: 0x%x id: %d", quota_type, qid)); - } else { + } else rc = -EIO; - } FreeXid(xid); return rc; @@ -486,9 +483,8 @@ int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation) xid = GetXid(); if (pTcon) { cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation)); - } else { + } else rc = -EIO; - } FreeXid(xid); return rc; @@ -501,17 +497,16 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats) struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifsTconInfo *pTcon; - if (cifs_sb) { + if (cifs_sb) pTcon = cifs_sb->tcon; - } else { + else return -EIO; - } + xid = GetXid(); if (pTcon) { cFYI(1, ("pqstats %p", qstats)); - } else { + } else rc = -EIO; - } FreeXid(xid); return rc; diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index d2a073edd1b..b4e2e9f0ee3 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -1922,7 +1922,7 @@ typedef struct smb_com_transaction2_get_dfs_refer_req { /* DFS server target type */ #define DFS_TYPE_LINK 0x0000 /* also for sysvol targets */ #define DFS_TYPE_ROOT 0x0001 - + /* Referral Entry Flags */ #define DFS_NAME_LIST_REF 0x0200 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6d51696dc76..ef9786b6bea 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1382,13 +1382,13 @@ openRetry: if (cpu_to_le32(FILE_CREATE) == pSMBr->CreateAction) *pOplock |= CIFS_CREATE_ACTION; if (pfile_info) { - memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime, - 36 /* CreationTime to Attributes */); - /* the file_info buf is endian converted by caller */ - pfile_info->AllocationSize = pSMBr->AllocationSize; - pfile_info->EndOfFile = pSMBr->EndOfFile; - pfile_info->NumberOfLinks = cpu_to_le32(1); - pfile_info->DeletePending = 0; + memcpy((char *)pfile_info, (char *)&pSMBr->CreationTime, + 36 /* CreationTime to Attributes */); + /* the file_info buf is endian converted by caller */ + pfile_info->AllocationSize = pSMBr->AllocationSize; + pfile_info->EndOfFile = pSMBr->EndOfFile; + pfile_info->NumberOfLinks = cpu_to_le32(1); + pfile_info->DeletePending = 0; } } @@ -1558,7 +1558,7 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, pSMB->DataOffset = cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); if (buf) - memcpy(pSMB->Data, buf, bytes_sent); + memcpy(pSMB->Data, buf, bytes_sent); else if (ubuf) { if (copy_from_user(pSMB->Data, ubuf, bytes_sent)) { cifs_buf_release(pSMB); diff --git 
a/fs/cifs/connect.c b/fs/cifs/connect.c index 35194204283..6107ee42b09 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2582,7 +2582,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, __u16 action = le16_to_cpu(pSMBr->resp.Action); __u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength); if (action & GUEST_LOGIN) - cFYI(1, (" Guest login")); /* BB mark SesInfo struct? */ + cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */ ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (little endian) */ cFYI(1, ("UID = %d ", ses->Suid)); @@ -2728,13 +2728,11 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses, len)); } } else { - cERROR(1, - (" Security Blob Length extends beyond " + cERROR(1, ("Security Blob Length extends beyond " "end of SMB")); } } else { - cERROR(1, - (" Invalid Word count %d: ", + cERROR(1, ("Invalid Word count %d: ", smb_buffer_response->WordCount)); rc = -EIO; } @@ -2892,7 +2890,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, __u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength); if (action & GUEST_LOGIN) - cFYI(1, (" Guest login")); + cFYI(1, ("Guest login")); /* Do we want to set anything in SesInfo struct when guest login? */ bcc_ptr = pByteArea(smb_buffer_response); @@ -2900,8 +2898,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, SecurityBlob2 = (PCHALLENGE_MESSAGE) bcc_ptr; if (SecurityBlob2->MessageType != NtLmChallenge) { - cFYI(1, - ("Unexpected NTLMSSP message type received %d", + cFYI(1, ("Unexpected NTLMSSP message type received %d", SecurityBlob2->MessageType)); } else if (ses) { ses->Suid = smb_buffer_response->Uid; /* UID left in le format */ @@ -3073,8 +3070,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid, cERROR(1, ("No session structure passed in.")); } } else { - cERROR(1, - (" Invalid Word count %d:", + cERROR(1, ("Invalid Word count %d:", smb_buffer_response->WordCount)); rc = -EIO; } @@ -3313,7 +3309,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses, __u16 action = le16_to_cpu(pSMBr->resp.Action); __u16 blob_len = le16_to_cpu(pSMBr->resp.SecurityBlobLength); if (action & GUEST_LOGIN) - cFYI(1, (" Guest login")); /* BB Should we set anything + cFYI(1, ("Guest login")); /* BB Should we set anything in SesInfo struct ? */ /* if (SecurityBlob2->MessageType != NtLm??) 
{ cFYI("Unexpected message type on auth response is %d")); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index e962e75e6f7..0592dbb3a6b 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -483,7 +483,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, xid = GetXid(); - cFYI(1, (" parent inode = 0x%p name is: %s and dentry = 0x%p", + cFYI(1, ("parent inode = 0x%p name is: %s and dentry = 0x%p", parent_dir_inode, direntry->d_name.name, direntry)); /* check whether path exists */ @@ -515,12 +515,11 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } if (direntry->d_inode != NULL) { - cFYI(1, (" non-NULL inode in lookup")); + cFYI(1, ("non-NULL inode in lookup")); } else { - cFYI(1, (" NULL inode in lookup")); + cFYI(1, ("NULL inode in lookup")); } - cFYI(1, - (" Full path: %s inode = 0x%p", full_path, direntry->d_inode)); + cFYI(1, ("Full path: %s inode = 0x%p", full_path, direntry->d_inode)); if (pTcon->unix_ext) rc = cifs_get_inode_info_unix(&newInode, full_path, -- cgit v1.2.3-70-g09d2 From 8be0ed44c2fa4afcf2c6d2fb3102c926e9f989df Mon Sep 17 00:00:00 2001 From: Steve French Date: Fri, 5 Dec 2008 19:14:12 +0000 Subject: [CIFS] Can not mount with prefixpath if root directory of share is inaccessible Windows allows you to deny access to the top of a share, but permit access to a directory lower in the path. With the prefixpath feature of cifs (ie mounting \\server\share\directory\subdirectory\etc.) this should have worked if the user specified a prefixpath which put the root of the mount at a directory to which he had access, but we still were doing a lookup on the root of the share (null path) when we should have been doing it on the prefixpath subdirectory. This fixes Samba bug # 5925 Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/CHANGES | 4 +++- fs/cifs/inode.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 56 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 3d848f463c4..5ab6bcce880 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -2,7 +2,9 @@ Version 1.56 ------------ Add "forcemandatorylock" mount option to allow user to use mandatory rather than posix (advisory) byte range locks, even though server would -support posix byte range locks. +support posix byte range locks. Fix query of root inode when prefixpath +specified and user does not have access to query information about the +top of the share. 
Version 1.55 ------------ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ff8c68de4a9..b8821b0e73d 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1,7 +1,7 @@ /* * fs/cifs/inode.c * - * Copyright (C) International Business Machines Corp., 2002,2007 + * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * This library is free software; you can redistribute it and/or modify @@ -621,6 +621,47 @@ static const struct inode_operations cifs_ipc_inode_ops = { .lookup = cifs_lookup, }; +static char *build_path_to_root(struct cifs_sb_info *cifs_sb) +{ + int pplen = cifs_sb->prepathlen; + int dfsplen; + char *full_path = NULL; + + /* if no prefix path, simply set path to the root of share to "" */ + if (pplen == 0) { + full_path = kmalloc(1, GFP_KERNEL); + if (full_path) + full_path[0] = 0; + return full_path; + } + + if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS)) + dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1); + else + dfsplen = 0; + + full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL); + if (full_path == NULL) + return full_path; + + if (dfsplen) { + strncpy(full_path, cifs_sb->tcon->treeName, dfsplen); + /* switch slash direction in prepath depending on whether + * windows or posix style path names + */ + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { + int i; + for (i = 0; i < dfsplen; i++) { + if (full_path[i] == '\\') + full_path[i] = '/'; + } + } + } + strncpy(full_path + dfsplen, cifs_sb->prepath, pplen); + full_path[dfsplen + pplen] = 0; /* add trailing null */ + return full_path; +} + /* gets root inode */ struct inode *cifs_iget(struct super_block *sb, unsigned long ino) { @@ -628,6 +669,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino) struct cifs_sb_info *cifs_sb; struct inode *inode; long rc; + char *full_path; inode = iget_locked(sb, ino); if (!inode) @@ -636,13 +678,17 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino) return inode; cifs_sb = CIFS_SB(inode->i_sb); - xid = GetXid(); + full_path = build_path_to_root(cifs_sb); + if (full_path == NULL) + return ERR_PTR(-ENOMEM); + xid = GetXid(); if (cifs_sb->tcon->unix_ext) - rc = cifs_get_inode_info_unix(&inode, "", inode->i_sb, xid); + rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, + xid); else - rc = cifs_get_inode_info(&inode, "", NULL, inode->i_sb, xid, - NULL); + rc = cifs_get_inode_info(&inode, full_path, NULL, inode->i_sb, + xid, NULL); if (rc && cifs_sb->tcon->ipc) { cFYI(1, ("ipc connection - fake read inode")); inode->i_mode |= S_IFDIR; @@ -652,6 +698,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino) inode->i_uid = cifs_sb->mnt_uid; inode->i_gid = cifs_sb->mnt_gid; } else if (rc) { + kfree(full_path); _FreeXid(xid); iget_failed(inode); return ERR_PTR(rc); @@ -659,6 +706,7 @@ struct inode *cifs_iget(struct super_block *sb, unsigned long ino) unlock_new_inode(inode); + kfree(full_path); /* can not call macro FreeXid here since in a void func * TODO: This is no longer true */ -- cgit v1.2.3-70-g09d2 From 85705524258f93a6086c3247a58f34a661b82b3d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 5 Dec 2008 20:41:21 -0500 Subject: cifs: fix wait_for_response to time out sleeping processes correctly cifs: fix wait_for_response to time out sleeping processes correctly The current scheme that CIFS uses to sleep and wait for a response is not quite what we want. 
After sending a request, wait_for_response puts the task to sleep with wait_event(). One of the conditions for wait_event is a timeout (using time_after()). The problem with this is that there is no guarantee that the process will ever be woken back up. If the server stops sending data, then cifs_demultiplex_thread will leave its response queue sleeping. I think the only thing that saves us here is the fact that cifs_dnotify_thread periodically (every 15s) wakes up sleeping processes on all response_q's that have calls in flight. This makes for unnecessary wakeups of some processes. It also means large variability in the timeouts since they're all woken up at once. Instead of this, put the tasks to sleep with wait_event_timeout. This makes them wake up on their own if they time out. With this change, cifs_dnotify_thread should no longer be needed. I've been testing this in conjunction with some other patches that I'm working on. It doesn't seem to affect performance at all with with heavy I/O. Identical iozone -ac runs complete in almost exactly the same time (<1% difference in times). Thanks to Wasrshi Nimara for initially pointing this out. Wasrshi, it would be nice to know whether this patch also helps your testcase. Signed-off-by: Jeff Layton Cc: Wasrshi Nimara Signed-off-by: Steve French --- fs/cifs/transport.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index cd4ed65d6cd..4d076be46d9 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -410,11 +410,8 @@ static int wait_for_response(struct cifsSesInfo *ses, for (;;) { curr_timeout = timeout + jiffies; - wait_event(ses->server->response_q, - (!(midQ->midState == MID_REQUEST_SUBMITTED)) || - time_after(jiffies, curr_timeout) || - ((ses->server->tcpStatus != CifsGood) && - (ses->server->tcpStatus != CifsNew))); + wait_event_timeout(ses->server->response_q, + midQ->midState != MID_REQUEST_SUBMITTED, timeout); if (time_after(jiffies, curr_timeout) && (midQ->midState == MID_REQUEST_SUBMITTED) && -- cgit v1.2.3-70-g09d2 From 55162dec9371a6f6ac63ff546c182cc6144a649e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 5 Dec 2008 20:41:21 -0500 Subject: cifs: zero out session password before freeing it cifs: zero out session password before freeing it ...just to be on the safe side. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/misc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 9ee3f689c2b..7c3f4b9230d 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -97,7 +97,10 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); - kfree(buf_to_free->password); + if (buf_to_free->password) { + memset(buf_to_free->password, 0, strlen(buf_to_free->password)); + kfree(buf_to_free->password); + } kfree(buf_to_free->domainName); kfree(buf_to_free); } -- cgit v1.2.3-70-g09d2 From 4e53a3fb98d3d5c2941d2e7199dab317a9d4ead3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 5 Dec 2008 20:41:21 -0500 Subject: cifs: have calc_lanman_hash take more granular args cifs: have calc_lanman_hash take more granular args We need to use this routine to encrypt passwords associated with the tcon too. Don't assume that the password will be attached to the smb_session. Also, make some of the values in the lower encryption functions const since they aren't changed. 
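As a rough, self-contained illustration of that interface change (plain userspace C, not the CIFS code; every type, field and function name below is a made-up stand-in), taking the password and crypt key as plain arguments lets a single hashing helper serve both a per-session and a per-share password:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define KEY_SIZE 16

struct session { char *password; char cryptkey[8]; bool encrypt; };
struct tcon    { struct session *ses; char *password; };

/* granular form: works for any password source, not just the session */
static void calc_hash(const char *password, const char *cryptkey,
                      bool encrypt, char *out)
{
    /* stand-in for the real LANMAN hashing: just zero-pad and copy */
    memset(out, 0, KEY_SIZE);
    if (password)
        strncpy(out, password, KEY_SIZE - 1);
    (void)cryptkey;
    (void)encrypt;
}

int main(void)
{
    struct session ses = { .password = "session-pw" };
    struct tcon tcon = { .ses = &ses, .password = "share-pw" };
    char key[KEY_SIZE];

    /* session setup path: hash the per-session password */
    calc_hash(ses.password, ses.cryptkey, ses.encrypt, key);
    printf("session key built from \"%s\"\n", key);

    /* tree connect path: same helper, per-share (tcon) password */
    calc_hash(tcon.password, tcon.ses->cryptkey, tcon.ses->encrypt, key);
    printf("tcon key built from \"%s\"\n", key);
    return 0;
}

A later patch in this series relies on exactly this shape to pass the tcon's own password through the same helper during tree connect.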
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsencrypt.c | 30 ++++++++++++++---------------- fs/cifs/cifsencrypt.h | 3 ++- fs/cifs/cifsproto.h | 3 ++- fs/cifs/connect.c | 5 ++++- fs/cifs/sess.c | 5 ++++- fs/cifs/smbdes.c | 5 +++-- fs/cifs/smbencrypt.c | 9 +++++---- 7 files changed, 34 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index bd5f13d3845..d4839cf0cb2 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -37,7 +37,7 @@ extern void mdfour(unsigned char *out, unsigned char *in, int n); extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); -extern void SMBencrypt(unsigned char *passwd, unsigned char *c8, +extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24); static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, @@ -280,25 +280,22 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses, } #ifdef CONFIG_CIFS_WEAK_PW_HASH -void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key) +void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, + char *lnm_session_key) { int i; char password_with_pad[CIFS_ENCPWD_SIZE]; - if (ses->server == NULL) - return; - memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); - if (ses->password) - strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE); - - if ((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0) - if (extended_security & CIFSSEC_MAY_PLNTXT) { - memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE); - memcpy(lnm_session_key, password_with_pad, - CIFS_ENCPWD_SIZE); - return; - } + if (password) + strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE); + + if (!encrypt && extended_security & CIFSSEC_MAY_PLNTXT) { + memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE); + memcpy(lnm_session_key, password_with_pad, + CIFS_ENCPWD_SIZE); + return; + } /* calculate old style session key */ /* calling toupper is less broken than repeatedly @@ -314,7 +311,8 @@ void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key) for (i = 0; i < CIFS_ENCPWD_SIZE; i++) password_with_pad[i] = toupper(password_with_pad[i]); - SMBencrypt(password_with_pad, ses->server->cryptKey, lnm_session_key); + SMBencrypt(password_with_pad, cryptkey, lnm_session_key); + /* clear password before we return/free memory */ memset(password_with_pad, 0, CIFS_ENCPWD_SIZE); } diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h index 152fa2dcfc6..15d2ec00647 100644 --- a/fs/cifs/cifsencrypt.h +++ b/fs/cifs/cifsencrypt.h @@ -26,7 +26,8 @@ extern void mdfour(unsigned char *out, unsigned char *in, int n); /* smbdes.c */ extern void E_P16(unsigned char *p14, unsigned char *p16); -extern void E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24); +extern void E_P24(unsigned char *p21, const unsigned char *c8, + unsigned char *p24); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6f21ecb85ce..f4861653607 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -330,7 +330,8 @@ extern void CalcNTLMv2_response(const struct cifsSesInfo *, char *); extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *, const struct nls_table *); #ifdef CONFIG_CIFS_WEAK_PW_HASH -extern void calc_lanman_hash(struct cifsSesInfo *ses, char *lnm_session_key); +extern void calc_lanman_hash(const char *password, const char *cryptkey, + bool encrypt, char *lnm_session_key); #endif /* CIFS_WEAK_PW_HASH */ extern int CIFSSMBCopy(int xid, struct cifsTconInfo *source_tcon, diff --git 
a/fs/cifs/connect.c b/fs/cifs/connect.c index 6107ee42b09..3a84a375cb6 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3533,7 +3533,10 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((extended_security & CIFSSEC_MAY_LANMAN) && (ses->server->secType == LANMAN)) - calc_lanman_hash(ses, bcc_ptr); + calc_lanman_hash(ses->password, ses->server->cryptKey, + ses->server->secMode & + SECMODE_PW_ENCRYPT ? true : false, + bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ SMBNTencrypt(ses->password, diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 2851d5da0c8..5f22de7b79a 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -417,7 +417,10 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time, /* BB calculate hash with password */ /* and copy into bcc */ - calc_lanman_hash(ses, lnm_session_key); + calc_lanman_hash(ses->password, ses->server->cryptKey, + ses->server->secMode & SECMODE_PW_ENCRYPT ? + true : false, lnm_session_key); + ses->flags |= CIFS_SES_LANMAN; memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); bcc_ptr += CIFS_SESS_KEY_SIZE; diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c index 04943c976f9..224a1f47896 100644 --- a/fs/cifs/smbdes.c +++ b/fs/cifs/smbdes.c @@ -318,7 +318,8 @@ str_to_key(unsigned char *str, unsigned char *key) } static void -smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw) +smbhash(unsigned char *out, const unsigned char *in, unsigned char *key, + int forw) { int i; char *outb; /* outb[64] */ @@ -363,7 +364,7 @@ E_P16(unsigned char *p14, unsigned char *p16) } void -E_P24(unsigned char *p21, unsigned char *c8, unsigned char *p24) +E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) { smbhash(p24, c8, p21, 1); smbhash(p24 + 8, c8, p21 + 7, 1); diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index ff3232fa101..93fb09a99c6 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -49,9 +49,10 @@ /*The following definitions come from libsmb/smbencrypt.c */ -void SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); +void SMBencrypt(unsigned char *passwd, const unsigned char *c8, + unsigned char *p24); void E_md4hash(const unsigned char *passwd, unsigned char *p16); -static void SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8, +static void SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8, unsigned char p24[24]); void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); @@ -61,7 +62,7 @@ void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); encrypted password into p24 */ /* Note that password must be uppercased and null terminated */ void -SMBencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24) +SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24) { unsigned char p14[15], p21[21]; @@ -212,7 +213,7 @@ ntv2_owf_gen(const unsigned char owf[16], const char *user_n, /* Does the des encryption from the NT or LM MD4 hash. */ static void -SMBOWFencrypt(unsigned char passwd[16], unsigned char *c8, +SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8, unsigned char p24[24]) { unsigned char p21[21]; -- cgit v1.2.3-70-g09d2 From 00e485b0198ea4f509341373f1d9adb0a5977a2f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 5 Dec 2008 20:41:21 -0500 Subject: cifs: store password in tcon cifs: store password in tcon Each tcon has its own password for share-level security. 
Store it in the tcon and wipe it clean and free it when freeing the tcon. When doing the tree connect with share-level security, use the tcon password instead of the session password. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 1 + fs/cifs/connect.c | 25 ++++++++++++++++++------- fs/cifs/misc.c | 4 ++++ 3 files changed, 23 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 0fb934d3623..94c1ca0ec95 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -242,6 +242,7 @@ struct cifsTconInfo { struct cifsSesInfo *ses; /* pointer to session associated with */ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ char *nativeFileSystem; + char *password; /* for share-level security */ __u16 tid; /* The 2 byte tree id */ __u16 Flags; /* optional support bits */ enum statusEnum tidStatus; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3a84a375cb6..3caadf12d76 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2282,9 +2282,12 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, /* volume_info->password freed at unmount */ if (volume_info->password) { - pSesInfo->password = volume_info->password; - /* set to NULL to prevent freeing on exit */ - volume_info->password = NULL; + pSesInfo->password = kstrdup(volume_info->password, + GFP_KERNEL); + if (!pSesInfo->password) { + rc = -ENOMEM; + goto mount_fail_check; + } } if (volume_info->username) strncpy(pSesInfo->userName, volume_info->username, @@ -2324,7 +2327,16 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, rc = -ENOMEM; goto mount_fail_check; } + tcon->ses = pSesInfo; + if (volume_info->password) { + tcon->password = kstrdup(volume_info->password, + GFP_KERNEL); + if (!tcon->password) { + rc = -ENOMEM; + goto mount_fail_check; + } + } /* check for null share name ie connect to dfs root */ if ((strchr(volume_info->UNC + 3, '\\') == NULL) @@ -3532,15 +3544,14 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, NTLMv2 password here) */ #ifdef CONFIG_CIFS_WEAK_PW_HASH if ((extended_security & CIFSSEC_MAY_LANMAN) && - (ses->server->secType == LANMAN)) - calc_lanman_hash(ses->password, ses->server->cryptKey, + (ses->server->secType == LANMAN)) + calc_lanman_hash(tcon->password, ses->server->cryptKey, ses->server->secMode & SECMODE_PW_ENCRYPT ? true : false, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ - SMBNTencrypt(ses->password, - ses->server->cryptKey, + SMBNTencrypt(tcon->password, ses->server->cryptKey, bcc_ptr); bcc_ptr += CIFS_SESS_KEY_SIZE; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 7c3f4b9230d..a0513605d7e 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -132,6 +132,10 @@ tconInfoFree(struct cifsTconInfo *buf_to_free) } atomic_dec(&tconInfoAllocCount); kfree(buf_to_free->nativeFileSystem); + if (buf_to_free->password) { + memset(buf_to_free->password, 0, strlen(buf_to_free->password)); + kfree(buf_to_free->password); + } kfree(buf_to_free); } -- cgit v1.2.3-70-g09d2 From 6d9c6d543165d1d492602c1371cb019040093584 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Mon, 8 Dec 2008 20:50:24 +0000 Subject: [CIFS] In SendReceive, move consistency check out of the mutexed region inbuf->smb_buf_length does not change in in wait_for_free_request() or in allocate_mid(), so we can check it early. 
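A minimal sketch of that pattern (illustrative only; the function, mutex and MAX_FRAME limit below are invented stand-ins, not the CIFS transport code): a value that cannot change while waiting for a request slot or allocating a mid entry can be rejected before the mutex is ever taken, so the failure path has nothing to unwind:

#include <pthread.h>
#include <stdio.h>

#define MAX_FRAME 4096

static pthread_mutex_t srv_mutex = PTHREAD_MUTEX_INITIALIZER;

static int send_receive(size_t frame_len)
{
    /* early validation: frame_len is fixed by the caller, so there is
     * no reason to hold srv_mutex (or allocate anything) to check it */
    if (frame_len > MAX_FRAME) {
        fprintf(stderr, "illegal length %zu, greater than maximum frame\n",
                frame_len);
        return -1;              /* nothing to unlock, free or re-wake */
    }

    pthread_mutex_lock(&srv_mutex);
    /* ... allocate mid entry, sign and send the SMB ... */
    pthread_mutex_unlock(&srv_mutex);
    return 0;
}

int main(void)
{
    printf("%d\n", send_receive(128));      /* accepted */
    printf("%d\n", send_receive(1 << 20));  /* rejected before locking */
    return 0;
}

In the actual diff below the same early check is applied in both SendReceive() and SendReceiveBlockingLock().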
Signed-off-by: Volker Lendecke Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 4d076be46d9..e80210693ff 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -687,6 +687,12 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, to the same server. We may make this configurable later or use ses->maxReq */ + if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { + cERROR(1, ("Illegal length, greater than maximum frame, %d", + in_buf->smb_buf_length)); + return -EIO; + } + rc = wait_for_free_request(ses, long_op); if (rc) return rc; @@ -706,17 +712,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, return rc; } - if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { - cERROR(1, ("Illegal length, greater than maximum frame, %d", - in_buf->smb_buf_length)); - DeleteMidQEntry(midQ); - mutex_unlock(&ses->server->srv_mutex); - /* Update # of requests on wire to server */ - atomic_dec(&ses->server->inFlight); - wake_up(&ses->server->request_q); - return -EIO; - } - rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); midQ->midState = MID_REQUEST_SUBMITTED; @@ -925,6 +920,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, to the same server. We may make this configurable later or use ses->maxReq */ + if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { + cERROR(1, ("Illegal length, greater than maximum frame, %d", + in_buf->smb_buf_length)); + return -EIO; + } + rc = wait_for_free_request(ses, CIFS_BLOCKING_OP); if (rc) return rc; @@ -941,14 +942,6 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, return rc; } - if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { - mutex_unlock(&ses->server->srv_mutex); - cERROR(1, ("Illegal length, greater than maximum frame, %d", - in_buf->smb_buf_length)); - DeleteMidQEntry(midQ); - return -EIO; - } - rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); midQ->midState = MID_REQUEST_SUBMITTED; -- cgit v1.2.3-70-g09d2 From 8fbbd365cc700e288fb6f9780b092c5afa4946e5 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 13:12:34 +0100 Subject: Simplify allocate_mid() slightly: Remove some unnecessary "else" branches Simplify allocate_mid() slightly: Remove some unnecessary "else" branches Signed-off-by: Volker Lendecke Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e80210693ff..c98f929315f 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -385,10 +385,14 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, { if (ses->server->tcpStatus == CifsExiting) { return -ENOENT; - } else if (ses->server->tcpStatus == CifsNeedReconnect) { + } + + if (ses->server->tcpStatus == CifsNeedReconnect) { cFYI(1, ("tcp session dead - return to caller to retry")); return -EAGAIN; - } else if (ses->status != CifsGood) { + } + + if (ses->status != CifsGood) { /* check if SMB session is bad because we are setting it up */ if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) -- cgit v1.2.3-70-g09d2 From 27a97a613b96688e59dd116cae3f0c94107b434c Mon Sep 17 00:00:00 2001 
From: Volker Lendecke Date: Mon, 8 Dec 2008 20:59:39 +0000 Subject: [CIFS] Slightly simplify wait_for_free_request(), remove an unnecessary "else" branch This is no functional change, because in the "if" branch we do an early "return 0;". Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c98f929315f..5f224e99958 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -344,37 +344,38 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) if (long_op == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ atomic_inc(&ses->server->inFlight); - } else { - spin_lock(&GlobalMid_Lock); - while (1) { - if (atomic_read(&ses->server->inFlight) >= - cifs_max_pending){ - spin_unlock(&GlobalMid_Lock); + return 0; + } + + spin_lock(&GlobalMid_Lock); + while (1) { + if (atomic_read(&ses->server->inFlight) >= + cifs_max_pending){ + spin_unlock(&GlobalMid_Lock); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->num_waiters); + atomic_inc(&ses->server->num_waiters); #endif - wait_event(ses->server->request_q, - atomic_read(&ses->server->inFlight) - < cifs_max_pending); + wait_event(ses->server->request_q, + atomic_read(&ses->server->inFlight) + < cifs_max_pending); #ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->num_waiters); + atomic_dec(&ses->server->num_waiters); #endif - spin_lock(&GlobalMid_Lock); - } else { - if (ses->server->tcpStatus == CifsExiting) { - spin_unlock(&GlobalMid_Lock); - return -ENOENT; - } - - /* can not count locking commands against total - as they are allowed to block on server */ - - /* update # of requests on the wire to server */ - if (long_op != CIFS_BLOCKING_OP) - atomic_inc(&ses->server->inFlight); + spin_lock(&GlobalMid_Lock); + } else { + if (ses->server->tcpStatus == CifsExiting) { spin_unlock(&GlobalMid_Lock); - break; + return -ENOENT; } + + /* can not count locking commands against total + as they are allowed to block on server */ + + /* update # of requests on the wire to server */ + if (long_op != CIFS_BLOCKING_OP) + atomic_inc(&ses->server->inFlight); + spin_unlock(&GlobalMid_Lock); + break; } } return 0; -- cgit v1.2.3-70-g09d2 From 4c3130efda1ef4f28d5f26819fae2e58c3945f0b Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 9 Dec 2008 00:28:16 +0000 Subject: [CIFS] Cleanup: Move the check for too large R/W requests This avoids an unnecessary else branch Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index ef9786b6bea..824df142f28 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1414,8 +1414,13 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, cFYI(1, ("Reading %d bytes on fid %d", count, netfid)); if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 12; - else + else { wct = 10; /* old style read */ + if ((lseek >> 32) > 0) { + /* can not handle this big offset for old */ + return -EIO; + } + } *nbytes = 0; rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **) &pSMB); @@ -1431,8 +1436,6 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF); if (wct == 12) pSMB->OffsetHigh = cpu_to_le32(lseek >> 32); - else if 
((lseek >> 32) > 0) /* can not handle this big offset for old */ - return -EIO; pSMB->Remaining = 0; pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); @@ -1519,8 +1522,13 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, if (tcon->ses->capabilities & CAP_LARGE_FILES) wct = 14; - else + else { wct = 12; + if ((offset >> 32) > 0) { + /* can not handle big offset for old srv */ + return -EIO; + } + } rc = smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB, (void **) &pSMBr); @@ -1535,8 +1543,6 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); - else if ((offset >> 32) > 0) /* can not handle big offset for old srv */ - return -EIO; pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; @@ -1621,10 +1627,15 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, cFYI(1, ("write2 at %lld %d bytes", (long long)offset, count)); - if (tcon->ses->capabilities & CAP_LARGE_FILES) + if (tcon->ses->capabilities & CAP_LARGE_FILES) { wct = 14; - else + } else { wct = 12; + if ((offset >> 32) > 0) { + /* can not handle big offset for old srv */ + return -EIO; + } + } rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); if (rc) return rc; @@ -1637,8 +1648,6 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); if (wct == 14) pSMB->OffsetHigh = cpu_to_le32(offset >> 32); - else if ((offset >> 32) > 0) /* can not handle big offset for old srv */ - return -EIO; pSMB->Reserved = 0xFFFFFFFF; pSMB->WriteMode = 0; pSMB->Remaining = 0; -- cgit v1.2.3-70-g09d2 From 829049cbb1d2ddda2be17ea008b6b3c457808d91 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 16:00:53 +0100 Subject: Check the return value of cifs_sign_smb[2] Check the return value of cifs_sign_smb[2] Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 5f224e99958..dc2d1b0bba5 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -530,6 +530,11 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, return rc; } rc = cifs_sign_smb2(iov, n_vec, ses->server, &midQ->sequence_number); + if (rc) { + mutex_unlock(&ses->server->srv_mutex); + cifs_small_buf_release(in_buf); + goto out; + } midQ->midState = MID_REQUEST_SUBMITTED; #ifdef CONFIG_CIFS_STATS2 @@ -718,6 +723,10 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, } rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); + if (rc) { + mutex_unlock(&ses->server->srv_mutex); + goto out; + } midQ->midState = MID_REQUEST_SUBMITTED; #ifdef CONFIG_CIFS_STATS2 @@ -948,6 +957,11 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, } rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); + if (rc) { + DeleteMidQEntry(midQ); + mutex_unlock(&ses->server->srv_mutex); + return rc; + } midQ->midState = MID_REQUEST_SUBMITTED; #ifdef CONFIG_CIFS_STATS2 -- cgit v1.2.3-70-g09d2 From 8e4f2e8a1e5cfa07c5b2731accee0e6eb4c64575 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 16:22:15 +0100 Subject: Slightly streamline SendReceive[2] Slightly streamline SendReceive[2] Remove an else branch by naming the error condition what it is Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 17 +++++++++-------- 1 file changed, 9 
insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index dc2d1b0bba5..01b3aa5f928 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -583,10 +583,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, wait_for_response(ses, midQ, timeout, 10 * HZ); spin_lock(&GlobalMid_Lock); - if (midQ->resp_buf) { - spin_unlock(&GlobalMid_Lock); - receive_len = midQ->resp_buf->smb_buf_length; - } else { + + if (midQ->resp_buf == NULL) { cERROR(1, ("No response to cmd %d mid %d", midQ->command, midQ->mid)); if (midQ->midState == MID_REQUEST_SUBMITTED) { @@ -614,6 +612,9 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, return rc; } + spin_unlock(&GlobalMid_Lock); + receive_len = midQ->resp_buf->smb_buf_length; + if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { cERROR(1, ("Frame too large received. Length: %d Xid: %d", receive_len, xid)); @@ -773,10 +774,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, wait_for_response(ses, midQ, timeout, 10 * HZ); spin_lock(&GlobalMid_Lock); - if (midQ->resp_buf) { - spin_unlock(&GlobalMid_Lock); - receive_len = midQ->resp_buf->smb_buf_length; - } else { + if (midQ->resp_buf == NULL) { cERROR(1, ("No response for cmd %d mid %d", midQ->command, midQ->mid)); if (midQ->midState == MID_REQUEST_SUBMITTED) { @@ -804,6 +802,9 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, return rc; } + spin_unlock(&GlobalMid_Lock); + receive_len = midQ->resp_buf->smb_buf_length; + if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { cERROR(1, ("Frame too large received. Length: %d Xid: %d", receive_len, xid)); -- cgit v1.2.3-70-g09d2 From 2b2bdfba7a3679f67b7c3aca4a4b08b24bb675a8 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 11 Dec 2008 17:26:54 +0000 Subject: [CIFS] Streamline SendReceive[2] by using "goto out:" in an error condition Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 139 +++++++++++++++++++++++++++------------------------- 1 file changed, 72 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 01b3aa5f928..ca015e60002 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -619,49 +619,52 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, cERROR(1, ("Frame too large received. 
Length: %d Xid: %d", receive_len, xid)); rc = -EIO; - } else { /* rcvd frame is ok */ - if (midQ->resp_buf && - (midQ->midState == MID_RESPONSE_RECEIVED)) { + goto out; + } - iov[0].iov_base = (char *)midQ->resp_buf; - if (midQ->largeBuf) - *pRespBufType = CIFS_LARGE_BUFFER; - else - *pRespBufType = CIFS_SMALL_BUFFER; - iov[0].iov_len = receive_len + 4; + /* rcvd frame is ok */ - dump_smb(midQ->resp_buf, 80); - /* convert the length into a more usable form */ - if ((receive_len > 24) && - (ses->server->secMode & (SECMODE_SIGN_REQUIRED | - SECMODE_SIGN_ENABLED))) { - rc = cifs_verify_signature(midQ->resp_buf, + if (midQ->resp_buf && + (midQ->midState == MID_RESPONSE_RECEIVED)) { + + iov[0].iov_base = (char *)midQ->resp_buf; + if (midQ->largeBuf) + *pRespBufType = CIFS_LARGE_BUFFER; + else + *pRespBufType = CIFS_SMALL_BUFFER; + iov[0].iov_len = receive_len + 4; + + dump_smb(midQ->resp_buf, 80); + /* convert the length into a more usable form */ + if ((receive_len > 24) && + (ses->server->secMode & (SECMODE_SIGN_REQUIRED | + SECMODE_SIGN_ENABLED))) { + rc = cifs_verify_signature(midQ->resp_buf, &ses->server->mac_signing_key, midQ->sequence_number+1); - if (rc) { - cERROR(1, ("Unexpected SMB signature")); - /* BB FIXME add code to kill session */ - } + if (rc) { + cERROR(1, ("Unexpected SMB signature")); + /* BB FIXME add code to kill session */ } - - /* BB special case reconnect tid and uid here? */ - rc = map_smb_to_linux_error(midQ->resp_buf, - flags & CIFS_LOG_ERROR); - - /* convert ByteCount if necessary */ - if (receive_len >= sizeof(struct smb_hdr) - 4 - /* do not count RFC1001 header */ + - (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ ) - BCC(midQ->resp_buf) = - le16_to_cpu(BCC_LE(midQ->resp_buf)); - if ((flags & CIFS_NO_RESP) == 0) - midQ->resp_buf = NULL; /* mark it so buf will - not be freed by - DeleteMidQEntry */ - } else { - rc = -EIO; - cFYI(1, ("Bad MID state?")); } + + /* BB special case reconnect tid and uid here? */ + rc = map_smb_to_linux_error(midQ->resp_buf, + flags & CIFS_LOG_ERROR); + + /* convert ByteCount if necessary */ + if (receive_len >= sizeof(struct smb_hdr) - 4 + /* do not count RFC1001 header */ + + (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ ) + BCC(midQ->resp_buf) = + le16_to_cpu(BCC_LE(midQ->resp_buf)); + if ((flags & CIFS_NO_RESP) == 0) + midQ->resp_buf = NULL; /* mark it so buf will + not be freed by + DeleteMidQEntry */ + } else { + rc = -EIO; + cFYI(1, ("Bad MID state?")); } out: @@ -809,43 +812,45 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, cERROR(1, ("Frame too large received. 
Length: %d Xid: %d", receive_len, xid)); rc = -EIO; - } else { /* rcvd frame is ok */ - - if (midQ->resp_buf && out_buf - && (midQ->midState == MID_RESPONSE_RECEIVED)) { - out_buf->smb_buf_length = receive_len; - memcpy((char *)out_buf + 4, - (char *)midQ->resp_buf + 4, - receive_len); + goto out; + } - dump_smb(out_buf, 92); - /* convert the length into a more usable form */ - if ((receive_len > 24) && - (ses->server->secMode & (SECMODE_SIGN_REQUIRED | - SECMODE_SIGN_ENABLED))) { - rc = cifs_verify_signature(out_buf, + /* rcvd frame is ok */ + + if (midQ->resp_buf && out_buf + && (midQ->midState == MID_RESPONSE_RECEIVED)) { + out_buf->smb_buf_length = receive_len; + memcpy((char *)out_buf + 4, + (char *)midQ->resp_buf + 4, + receive_len); + + dump_smb(out_buf, 92); + /* convert the length into a more usable form */ + if ((receive_len > 24) && + (ses->server->secMode & (SECMODE_SIGN_REQUIRED | + SECMODE_SIGN_ENABLED))) { + rc = cifs_verify_signature(out_buf, &ses->server->mac_signing_key, midQ->sequence_number+1); - if (rc) { - cERROR(1, ("Unexpected SMB signature")); - /* BB FIXME add code to kill session */ - } + if (rc) { + cERROR(1, ("Unexpected SMB signature")); + /* BB FIXME add code to kill session */ } + } - *pbytes_returned = out_buf->smb_buf_length; + *pbytes_returned = out_buf->smb_buf_length; - /* BB special case reconnect tid and uid here? */ - rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); + /* BB special case reconnect tid and uid here? */ + rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); - /* convert ByteCount if necessary */ - if (receive_len >= sizeof(struct smb_hdr) - 4 - /* do not count RFC1001 header */ + - (2 * out_buf->WordCount) + 2 /* bcc */ ) - BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); - } else { - rc = -EIO; - cERROR(1, ("Bad MID state?")); - } + /* convert ByteCount if necessary */ + if (receive_len >= sizeof(struct smb_hdr) - 4 + /* do not count RFC1001 header */ + + (2 * out_buf->WordCount) + 2 /* bcc */ ) + BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); + } else { + rc = -EIO; + cERROR(1, ("Bad MID state?")); } out: -- cgit v1.2.3-70-g09d2 From 17c8bfed8abbbed82937a751abfc40d2866e3196 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 16:38:19 +0100 Subject: Streamline SendReceiveBlockingLock: Use "goto out:" in an error condition Streamline SendReceiveBlockingLock: Use "goto out:" in an error condition Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 70 ++++++++++++++++++++++++++++------------------------- 1 file changed, 37 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index ca015e60002..0fe2527ce45 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -1062,44 +1062,48 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, cERROR(1, ("Frame too large received. 
Length: %d Xid: %d", receive_len, xid)); rc = -EIO; - } else { /* rcvd frame is ok */ - - if (midQ->resp_buf && out_buf - && (midQ->midState == MID_RESPONSE_RECEIVED)) { - out_buf->smb_buf_length = receive_len; - memcpy((char *)out_buf + 4, - (char *)midQ->resp_buf + 4, - receive_len); - - dump_smb(out_buf, 92); - /* convert the length into a more usable form */ - if ((receive_len > 24) && - (ses->server->secMode & (SECMODE_SIGN_REQUIRED | - SECMODE_SIGN_ENABLED))) { - rc = cifs_verify_signature(out_buf, - &ses->server->mac_signing_key, - midQ->sequence_number+1); - if (rc) { - cERROR(1, ("Unexpected SMB signature")); - /* BB FIXME add code to kill session */ - } - } + goto out; + } - *pbytes_returned = out_buf->smb_buf_length; + /* rcvd frame is ok */ - /* BB special case reconnect tid and uid here? */ - rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); + if (midQ->resp_buf && out_buf + && (midQ->midState == MID_RESPONSE_RECEIVED)) { + out_buf->smb_buf_length = receive_len; + memcpy((char *)out_buf + 4, + (char *)midQ->resp_buf + 4, + receive_len); - /* convert ByteCount if necessary */ - if (receive_len >= sizeof(struct smb_hdr) - 4 - /* do not count RFC1001 header */ + - (2 * out_buf->WordCount) + 2 /* bcc */ ) - BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); - } else { - rc = -EIO; - cERROR(1, ("Bad MID state?")); + dump_smb(out_buf, 92); + /* convert the length into a more usable form */ + if ((receive_len > 24) && + (ses->server->secMode & (SECMODE_SIGN_REQUIRED | + SECMODE_SIGN_ENABLED))) { + rc = cifs_verify_signature(out_buf, + &ses->server->mac_signing_key, + midQ->sequence_number+1); + if (rc) { + cERROR(1, ("Unexpected SMB signature")); + /* BB FIXME add code to kill session */ + } } + + *pbytes_returned = out_buf->smb_buf_length; + + /* BB special case reconnect tid and uid here? 
*/ + rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); + + /* convert ByteCount if necessary */ + if (receive_len >= sizeof(struct smb_hdr) - 4 + /* do not count RFC1001 header */ + + (2 * out_buf->WordCount) + 2 /* bcc */ ) + BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); + } else { + rc = -EIO; + cERROR(1, ("Bad MID state?")); } + +out: DeleteMidQEntry(midQ); if (rstart && rc == -EACCES) return -ERESTARTSYS; -- cgit v1.2.3-70-g09d2 From 698e96a826939bb24063f6a61801c174e19c32b1 Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 16:39:31 +0100 Subject: Streamline SendReceiveBlockingLock: Use "goto out:" in an error condition Streamline SendReceiveBlockingLock: Use "goto out:" in an error condition Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 61 +++++++++++++++++++++++++++-------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 0fe2527ce45..7e10b13d130 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -1067,41 +1067,42 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, /* rcvd frame is ok */ - if (midQ->resp_buf && out_buf - && (midQ->midState == MID_RESPONSE_RECEIVED)) { - out_buf->smb_buf_length = receive_len; - memcpy((char *)out_buf + 4, - (char *)midQ->resp_buf + 4, - receive_len); + if ((midQ->resp_buf == NULL) || (out_buf == NULL) + || (midQ->midState != MID_RESPONSE_RECEIVED)) { + rc = -EIO; + cERROR(1, ("Bad MID state?")); + goto out; + } - dump_smb(out_buf, 92); - /* convert the length into a more usable form */ - if ((receive_len > 24) && - (ses->server->secMode & (SECMODE_SIGN_REQUIRED | - SECMODE_SIGN_ENABLED))) { - rc = cifs_verify_signature(out_buf, - &ses->server->mac_signing_key, - midQ->sequence_number+1); - if (rc) { - cERROR(1, ("Unexpected SMB signature")); - /* BB FIXME add code to kill session */ - } + out_buf->smb_buf_length = receive_len; + memcpy((char *)out_buf + 4, + (char *)midQ->resp_buf + 4, + receive_len); + + dump_smb(out_buf, 92); + /* convert the length into a more usable form */ + if ((receive_len > 24) && + (ses->server->secMode & (SECMODE_SIGN_REQUIRED | + SECMODE_SIGN_ENABLED))) { + rc = cifs_verify_signature(out_buf, + &ses->server->mac_signing_key, + midQ->sequence_number+1); + if (rc) { + cERROR(1, ("Unexpected SMB signature")); + /* BB FIXME add code to kill session */ } + } - *pbytes_returned = out_buf->smb_buf_length; + *pbytes_returned = out_buf->smb_buf_length; - /* BB special case reconnect tid and uid here? */ - rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); + /* BB special case reconnect tid and uid here? 
*/ + rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); - /* convert ByteCount if necessary */ - if (receive_len >= sizeof(struct smb_hdr) - 4 - /* do not count RFC1001 header */ + - (2 * out_buf->WordCount) + 2 /* bcc */ ) - BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); - } else { - rc = -EIO; - cERROR(1, ("Bad MID state?")); - } + /* convert ByteCount if necessary */ + if (receive_len >= sizeof(struct smb_hdr) - 4 + /* do not count RFC1001 header */ + + (2 * out_buf->WordCount) + 2 /* bcc */ ) + BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); out: DeleteMidQEntry(midQ); -- cgit v1.2.3-70-g09d2 From ac6a3ef405f314c206906463ca9913a826a577ee Mon Sep 17 00:00:00 2001 From: Volker Lendecke Date: Sat, 6 Dec 2008 16:40:40 +0100 Subject: Remove an already-checked error condition in SendReceiveBlockingLock Remove an already-checked error condition in SendReceiveBlockingLock Signed-off-by: Volker Lendecke Signed-off-by: Steve French --- fs/cifs/transport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 7e10b13d130..7ebe6599ed3 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -1067,8 +1067,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, /* rcvd frame is ok */ - if ((midQ->resp_buf == NULL) || (out_buf == NULL) - || (midQ->midState != MID_RESPONSE_RECEIVED)) { + if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) { rc = -EIO; cERROR(1, ("Bad MID state?")); goto out; -- cgit v1.2.3-70-g09d2 From c6fbba0546d3ead18d4a623e76e28bcbaa66a325 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 18 Dec 2008 01:41:20 +0000 Subject: [CIFS] make sure that DFS pathnames are properly formed The paths in a DFS request are supposed to only have a single preceding backslash, but we are sending them with a double backslash. This is exposing a bug in Windows where it also sends a path in the response that has a double backslash. The existing code that builds the mount option string however expects a double backslash prefix in a couple of places when it tries to use the path returned by build_path_from_dentry. Fix compose_mount_options to expect properly formed DFS paths (single backslash at front). Also clean up error handling in that function. There was a possible NULL pointer dereference and situations where a partially built option string would be returned. Tested against Samba 3.0.28-ish server and Samba 3.3 and Win2k8. CC: Stable Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/CHANGES | 3 ++- fs/cifs/cifs_dfs_ref.c | 48 ++++++++++++++++++++++++++++++++++++------------ 2 files changed, 38 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES index 5ab6bcce880..080703a15f4 100644 --- a/fs/cifs/CHANGES +++ b/fs/cifs/CHANGES @@ -4,7 +4,8 @@ Add "forcemandatorylock" mount option to allow user to use mandatory rather than posix (advisory) byte range locks, even though server would support posix byte range locks. Fix query of root inode when prefixpath specified and user does not have access to query information about the -top of the share. +top of the share. Fix problem in 2.6.28 resolving DFS paths to +Samba servers (worked to Windows). 
Version 1.55 ------------ diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index e1c18362ba4..85c0a74d034 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -122,7 +122,7 @@ static char *compose_mount_options(const char *sb_mountdata, char **devname) { int rc; - char *mountdata; + char *mountdata = NULL; int md_len; char *tkn_e; char *srvIP = NULL; @@ -136,10 +136,9 @@ static char *compose_mount_options(const char *sb_mountdata, *devname = cifs_get_share_name(ref->node_name); rc = dns_resolve_server_name_to_ip(*devname, &srvIP); if (rc != 0) { - cERROR(1, ("%s: Failed to resolve server part of %s to IP", - __func__, *devname)); - mountdata = ERR_PTR(rc); - goto compose_mount_options_out; + cERROR(1, ("%s: Failed to resolve server part of %s to IP: %d", + __func__, *devname, rc));; + goto compose_mount_options_err; } /* md_len = strlen(...) + 12 for 'sep+prefixpath=' * assuming that we have 'unc=' and 'ip=' in @@ -149,8 +148,8 @@ static char *compose_mount_options(const char *sb_mountdata, strlen(ref->node_name) + 12; mountdata = kzalloc(md_len+1, GFP_KERNEL); if (mountdata == NULL) { - mountdata = ERR_PTR(-ENOMEM); - goto compose_mount_options_out; + rc = -ENOMEM; + goto compose_mount_options_err; } /* copy all options except of unc,ip,prefixpath */ @@ -197,18 +196,32 @@ static char *compose_mount_options(const char *sb_mountdata, /* find & copy prefixpath */ tkn_e = strchr(ref->node_name + 2, '\\'); - if (tkn_e == NULL) /* invalid unc, missing share name*/ - goto compose_mount_options_out; + if (tkn_e == NULL) { + /* invalid unc, missing share name*/ + rc = -EINVAL; + goto compose_mount_options_err; + } + /* + * this function gives us a path with a double backslash prefix. We + * require a single backslash for DFS. Temporarily increment fullpath + * to put it in the proper form and decrement before freeing it. + */ fullpath = build_path_from_dentry(dentry); + if (!fullpath) { + rc = -ENOMEM; + goto compose_mount_options_err; + } + ++fullpath; tkn_e = strchr(tkn_e + 1, '\\'); - if (tkn_e || strlen(fullpath) - (ref->path_consumed)) { + if (tkn_e || (strlen(fullpath) - ref->path_consumed)) { strncat(mountdata, &sep, 1); strcat(mountdata, "prefixpath="); if (tkn_e) strcat(mountdata, tkn_e + 1); - strcat(mountdata, fullpath + (ref->path_consumed)); + strcat(mountdata, fullpath + ref->path_consumed); } + --fullpath; kfree(fullpath); /*cFYI(1,("%s: parent mountdata: %s", __func__,sb_mountdata));*/ @@ -217,6 +230,11 @@ static char *compose_mount_options(const char *sb_mountdata, compose_mount_options_out: kfree(srvIP); return mountdata; + +compose_mount_options_err: + kfree(mountdata); + mountdata = ERR_PTR(rc); + goto compose_mount_options_out; } @@ -309,13 +327,19 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) goto out_err; } + /* + * The MSDFS spec states that paths in DFS referral requests and + * responses must be prefixed by a single '\' character instead of + * the double backslashes usually used in the UNC. This function + * gives us the latter, so we must adjust the result. 
+ */ full_path = build_path_from_dentry(dentry); if (full_path == NULL) { rc = -ENOMEM; goto out_err; } - rc = get_dfs_path(xid, ses , full_path, cifs_sb->local_nls, + rc = get_dfs_path(xid, ses , full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); -- cgit v1.2.3-70-g09d2 From 359d67d6ad054ae11ad459665fdfb883aca87782 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 22 Dec 2008 21:53:40 +0000 Subject: [CIFS] Remove redundant test In fs/cifs/cifssmb.c, pLockData is tested for being NULL at the beginning of the function, and not reassigned subsequently. A simplified version of the semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) Signed-off-by: Julia Lawall Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 824df142f28..552642a507c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1871,10 +1871,6 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, rc = -EIO; /* bad smb */ goto plk_err_exit; } - if (pLockData == NULL) { - rc = -EINVAL; - goto plk_err_exit; - } data_offset = le16_to_cpu(pSMBr->t2.DataOffset); data_count = le16_to_cpu(pSMBr->t2.DataCount); if (data_count < sizeof(struct cifs_posix_lock)) { -- cgit v1.2.3-70-g09d2 From 26ddd8d5cac8a563953d5febe8c6e40909f7bce1 Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Fri, 26 Dec 2008 14:24:10 +0900 Subject: proc: remove ifdef CONFIG_SPARSE_IRQ from stat.c Impact: cleanup irq_desc can be NULL when CONFIG_SPARSE_IRQ=y only. therefore, NULL checking can move into kstat_irqs_cpu() of SPARSE_IRQ version. Signed-off-by: KOSAKI Motohiro Acked-by: "Yinghai Lu" Signed-off-by: Ingo Molnar --- fs/proc/stat.c | 11 +---------- kernel/irq/handle.c | 2 +- 2 files changed, 2 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 3bb1cf1e742..f75efa22df5 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #ifndef arch_irq_stat_cpu @@ -45,10 +46,6 @@ static int show_stat(struct seq_file *p, void *v) steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); for_each_irq_nr(j) { -#ifdef CONFIG_SPARSE_IRQ - if (!irq_to_desc(j)) - continue; -#endif sum += kstat_irqs_cpu(j, i); } sum += arch_irq_stat_cpu(i); @@ -95,12 +92,6 @@ static int show_stat(struct seq_file *p, void *v) /* sum again ? it could be updated? */ for_each_irq_nr(j) { per_irq_sum = 0; -#ifdef CONFIG_SPARSE_IRQ - if (!irq_to_desc(j)) { - seq_printf(p, " %u", per_irq_sum); - continue; - } -#endif for_each_possible_cpu(i) per_irq_sum += kstat_irqs_cpu(j, i); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 4db7d2df86b..03479dfdebb 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -448,7 +448,7 @@ void early_init_irq_lock_class(void) unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); - return desc->kstat_irqs[cpu]; + return desc ? 
desc->kstat_irqs[cpu] : 0; } #endif EXPORT_SYMBOL(kstat_irqs_cpu); -- cgit v1.2.3-70-g09d2 From bf66542bef3771a42ad3b1d5dc503c804bc22b33 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 3 Dec 2008 13:49:23 +1100 Subject: cifs: update for new IP4/6 address printing Signed-off-by: Stephen Rothwell Signed-off-by: Linus Torvalds --- fs/cifs/cifsfs.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 974c8f0e615..0005a194a75 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -361,12 +361,12 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) switch (server->addr.sockAddr6. sin6_family) { case AF_INET6: - seq_printf(s, NIP6_FMT, - NIP6(server->addr.sockAddr6.sin6_addr)); + seq_printf(s, "%pI6", + &server->addr.sockAddr6.sin6_addr); break; case AF_INET: - seq_printf(s, NIPQUAD_FMT, - NIPQUAD(server->addr.sockAddr.sin_addr.s_addr)); + seq_printf(s, "%pI4", + &server->addr.sockAddr.sin_addr.s_addr); break; } } -- cgit v1.2.3-70-g09d2 From 08bafc0341f2f7920e9045bc32c40299cac8c21b Mon Sep 17 00:00:00 2001 From: Keith Mannthey Date: Tue, 25 Nov 2008 10:24:35 +0100 Subject: block: Supress Buffer I/O errors when SCSI REQ_QUIET flag set Allow the scsi request REQ_QUIET flag to be propagated to the buffer file system layer. The basic ideas is to pass the flag from the scsi request to the bio (block IO) and then to the buffer layer. The buffer layer can then suppress needless printks. This patch declutters the kernel log by removed the 40-50 (per lun) buffer io error messages seen during a boot in my multipath setup . It is a good chance any real errors will be missed in the "noise" it the logs without this patch. During boot I see blocks of messages like " __ratelimit: 211 callbacks suppressed Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242847 Buffer I/O error on device sdm, logical block 1 Buffer I/O error on device sdm, logical block 5242878 Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242879 Buffer I/O error on device sdm, logical block 5242872 " in my logs. My disk environment is multipath fiber channel using the SCSI_DH_RDAC code and multipathd. This topology includes an "active" and "ghost" path for each lun. IO's to the "ghost" path will never complete and the SCSI layer, via the scsi device handler rdac code, quick returns the IOs to theses paths and sets the REQ_QUIET scsi flag to suppress the scsi layer messages. I am wanting to extend the QUIET behavior to include the buffer file system layer to deal with these errors as well. I have been running this patch for a while now on several boxes without issue. A few runs of bonnie++ show no noticeable difference in performance in my setup. Thanks for John Stultz for the quiet_error finalization. 
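As a minimal sketch (not taken from this patch, and with a hypothetical function name), a submitter that expects failures on a passive path could mark its request quiet; with this change the hint then travels request -> bio (BIO_QUIET) -> buffer_head (BH_Quiet):

#include <linux/blkdev.h>

/*
 * Minimal sketch, not from this patch: mark a request so that error
 * completions on it are not printk'd by the layers below. REQ_QUIET is
 * the existing request flag tested in the hunks that follow; the
 * function name here is hypothetical.
 */
static void mark_request_quiet(struct request *rq)
{
	rq->cmd_flags |= REQ_QUIET;
}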
Submitted-by: Keith Mannthey Signed-off-by: Jens Axboe --- block/blk-core.c | 3 +++ fs/buffer.c | 19 +++++++++++++++---- include/linux/bio.h | 1 + include/linux/buffer_head.h | 1 + 4 files changed, 20 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/block/blk-core.c b/block/blk-core.c index 243d18b4ceb..20e1724ccb4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio, nbytes = bio->bi_size; } + if (unlikely(rq->cmd_flags & REQ_QUIET)) + set_bit(BIO_QUIET, &bio->bi_flags); + bio->bi_size -= nbytes; bio->bi_sector += (nbytes >> 9); diff --git a/fs/buffer.c b/fs/buffer.c index 10179cfa115..776ae091d3b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -99,10 +99,18 @@ __clear_page_buffers(struct page *page) page_cache_release(page); } + +static int quiet_error(struct buffer_head *bh) +{ + if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit()) + return 0; + return 1; +} + + static void buffer_io_error(struct buffer_head *bh) { char b[BDEVNAME_SIZE]; - printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr); @@ -144,7 +152,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (!buffer_eopnotsupp(bh) && printk_ratelimit()) { + if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", @@ -394,7 +402,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) set_buffer_uptodate(bh); } else { clear_buffer_uptodate(bh); - if (printk_ratelimit()) + if (!quiet_error(bh)) buffer_io_error(bh); SetPageError(page); } @@ -455,7 +463,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate) if (uptodate) { set_buffer_uptodate(bh); } else { - if (printk_ratelimit()) { + if (!quiet_error(bh)) { buffer_io_error(bh); printk(KERN_WARNING "lost page write due to " "I/O error on %s\n", @@ -2913,6 +2921,9 @@ static void end_bio_bh_io_sync(struct bio *bio, int err) set_bit(BH_Eopnotsupp, &bh->b_state); } + if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags))) + set_bit(BH_Quiet, &bh->b_state); + bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags)); bio_put(bio); } diff --git a/include/linux/bio.h b/include/linux/bio.h index 6a642098e5c..cf132bfbbac 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -117,6 +117,7 @@ struct bio { #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ +#define BIO_QUIET 11 /* Make BIO Quiet */ #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) /* diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 3ce64b90118..8605f8a74df 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -35,6 +35,7 @@ enum bh_state_bits { BH_Ordered, /* ordered write */ BH_Eopnotsupp, /* operation not supported (barrier) */ BH_Unwritten, /* Buffer is allocated on disk but not written */ + BH_Quiet, /* Buffer Error Prinks to be quiet */ BH_PrivateStart,/* not a state bit, but the first bit available * for private allocation by other entities -- cgit v1.2.3-70-g09d2 From 7ff9345ffac56743b5001561bc2dc1e041b79149 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 11 Dec 2008 11:53:43 +0100 Subject: bio: only mempool back the largest bio_vec slab 
cache We only very rarely need the mempool backing, so it makes sense to get rid of all but one of the mempool in a bio_set. So keep the largest bio_vec count mempool so we can always honor the largest allocation, and "upgrade" callers that fail. Signed-off-by: Jens Axboe --- fs/bio.c | 125 ++++++++++++++++++++++++++++++---------------------- include/linux/bio.h | 3 +- 2 files changed, 74 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index df99c882b80..eb6b4683a26 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -58,7 +58,8 @@ unsigned int bvec_nr_vecs(unsigned short idx) return bvec_slabs[idx].nr_vecs; } -struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) +struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, + struct bio_set *bs) { struct bio_vec *bvl; @@ -67,60 +68,90 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct * If not, this is a bio_kmalloc() allocation and just do a * kzalloc() for the exact number of vecs right away. */ - if (bs) { + if (!bs) + bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask); + + /* + * see comment near bvec_array define! + */ + switch (nr) { + case 1: + *idx = 0; + break; + case 2 ... 4: + *idx = 1; + break; + case 5 ... 16: + *idx = 2; + break; + case 17 ... 64: + *idx = 3; + break; + case 65 ... 128: + *idx = 4; + break; + case 129 ... BIO_MAX_PAGES: + *idx = 5; + break; + default: + return NULL; + } + + /* + * idx now points to the pool we want to allocate from. only the + * 1-vec entry pool is mempool backed. + */ + if (*idx == BIOVEC_MAX_IDX) { +fallback: + bvl = mempool_alloc(bs->bvec_pool, gfp_mask); + } else { + struct biovec_slab *bvs = bvec_slabs + *idx; + gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + /* - * see comment near bvec_array define! + * Make this allocation restricted and don't dump info on + * allocation failures, since we'll fallback to the mempool + * in case of failure. */ - switch (nr) { - case 1: - *idx = 0; - break; - case 2 ... 4: - *idx = 1; - break; - case 5 ... 16: - *idx = 2; - break; - case 17 ... 64: - *idx = 3; - break; - case 65 ... 128: - *idx = 4; - break; - case 129 ... BIO_MAX_PAGES: - *idx = 5; - break; - default: - return NULL; - } + __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; /* - * idx now points to the pool we want to allocate from + * Try a slab allocation. 
If this fails and __GFP_WAIT + * is set, retry with the 1-entry mempool */ - bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask); - if (bvl) - memset(bvl, 0, - bvec_nr_vecs(*idx) * sizeof(struct bio_vec)); - } else - bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask); + bvl = kmem_cache_alloc(bvs->slab, __gfp_mask); + if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) { + *idx = BIOVEC_MAX_IDX; + goto fallback; + } + } + + if (bvl) + memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec)); return bvl; } -void bio_free(struct bio *bio, struct bio_set *bio_set) +void bio_free(struct bio *bio, struct bio_set *bs) { if (bio->bi_io_vec) { const int pool_idx = BIO_POOL_IDX(bio); BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); - mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]); + if (pool_idx == BIOVEC_MAX_IDX) + mempool_free(bio->bi_io_vec, bs->bvec_pool); + else { + struct biovec_slab *bvs = bvec_slabs + pool_idx; + + kmem_cache_free(bvs->slab, bio->bi_io_vec); + } } if (bio_integrity(bio)) - bio_integrity_free(bio, bio_set); + bio_integrity_free(bio, bs); - mempool_free(bio, bio_set->bio_pool); + mempool_free(bio, bs->bio_pool); } /* @@ -1346,30 +1377,18 @@ EXPORT_SYMBOL(bio_sector_offset); */ static int biovec_create_pools(struct bio_set *bs, int pool_entries) { - int i; + struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; - for (i = 0; i < BIOVEC_NR_POOLS; i++) { - struct biovec_slab *bp = bvec_slabs + i; - mempool_t **bvp = bs->bvec_pools + i; + bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab); + if (!bs->bvec_pool) + return -ENOMEM; - *bvp = mempool_create_slab_pool(pool_entries, bp->slab); - if (!*bvp) - return -ENOMEM; - } return 0; } static void biovec_free_pools(struct bio_set *bs) { - int i; - - for (i = 0; i < BIOVEC_NR_POOLS; i++) { - mempool_t *bvp = bs->bvec_pools[i]; - - if (bvp) - mempool_destroy(bvp); - } - + mempool_destroy(bs->bvec_pool); } void bioset_free(struct bio_set *bs) diff --git a/include/linux/bio.h b/include/linux/bio.h index 3ed714eb54d..d76e4bf22f2 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -397,13 +397,14 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) */ #define BIO_POOL_SIZE 2 #define BIOVEC_NR_POOLS 6 +#define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { mempool_t *bio_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t *bio_integrity_pool; #endif - mempool_t *bvec_pools[BIOVEC_NR_POOLS]; + mempool_t *bvec_pool; }; struct biovec_slab { -- cgit v1.2.3-70-g09d2 From 1b4344986926da324b5cd10b683e5a1a5e1b7db3 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 22 Oct 2008 20:32:58 +0200 Subject: bio: move the slab pointer inside the bio_set In preparation for adding differently sized bios. 
Signed-off-by: Jens Axboe --- fs/bio.c | 7 +++++-- include/linux/bio.h | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index eb6b4683a26..1ab8986b041 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -1404,12 +1404,15 @@ void bioset_free(struct bio_set *bs) struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size) { - struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL); + struct bio_set *bs; + bs = kzalloc(sizeof(*bs), GFP_KERNEL); if (!bs) return NULL; - bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab); + bs->bio_slab = bio_slab; + + bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bs->bio_slab); if (!bs->bio_pool) goto bad; diff --git a/include/linux/bio.h b/include/linux/bio.h index d76e4bf22f2..9340098d75d 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -400,6 +400,7 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) #define BIOVEC_MAX_IDX (BIOVEC_NR_POOLS - 1) struct bio_set { + struct kmem_cache *bio_slab; mempool_t *bio_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t *bio_integrity_pool; -- cgit v1.2.3-70-g09d2 From bb799ca0202a360fa74d5f17039b9100caebdde7 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 10 Dec 2008 15:35:05 +0100 Subject: bio: allow individual slabs in the bio_set Instead of having a global bio slab cache, add a reference to one in each bio_set that is created. This allows for personalized slabs in each bio_set, so that they can have bios of different sizes. This means we can personalize the bios we return. File systems may want to embed the bio inside another structure, to avoid allocation more items (and stuffing them in ->bi_private) after the get a bio. Or we may want to embed a number of bio_vecs directly at the end of a bio, to avoid doing two allocations to return a bio. This is now possible. 
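The sketch below is an illustration of that idea, not code from this series; struct my_io and the function names are hypothetical. A caller could size the front pad from the offset of an embedded bio placed at the end of its own per-IO structure, then recover the container from the bio on completion:

#include <linux/kernel.h>
#include <linux/bio.h>

/* hypothetical per-IO wrapper; the bio must be the last member */
struct my_io {
	void *data;
	struct bio bio;
};

static struct bio_set *my_bio_set;

static int my_io_init(void)
{
	/* pool of 16 bios, each allocated with room for struct my_io in front */
	my_bio_set = bioset_create(16, offsetof(struct my_io, bio));
	return my_bio_set ? 0 : -ENOMEM;
}

static struct my_io *bio_to_my_io(struct bio *bio)
{
	/*
	 * bio_alloc_bioset() returns a bio that sits front_pad bytes into
	 * the allocation, so the enclosing struct my_io starts at the
	 * beginning of that allocation.
	 */
	return container_of(bio, struct my_io, bio);
}

Allocation itself would still go through bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set) as before; only the pad sizing and the container_of() recovery are new in this sketch.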
Signed-off-by: Jens Axboe --- drivers/md/dm-crypt.c | 2 +- drivers/md/dm-io.c | 2 +- drivers/md/dm.c | 2 +- fs/bio-integrity.c | 2 +- fs/bio.c | 191 ++++++++++++++++++++++++++++++++++++++++++-------- include/linux/bio.h | 6 +- 6 files changed, 170 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index ce26c84af06..3326750ec02 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_page_pool; } - cc->bs = bioset_create(MIN_IOS, MIN_IOS); + cc->bs = bioset_create(MIN_IOS, 0); if (!cc->bs) { ti->error = "Cannot allocate crypt bioset"; goto bad_bs; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2fd6d445063..a34338567a2 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages) if (!client->pool) goto bad; - client->bios = bioset_create(16, 16); + client->bios = bioset_create(16, 0); if (!client->bios) goto bad; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 343094c3fee..421c9f02d8c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor) if (!md->tio_pool) goto bad_tio_pool; - md->bs = bioset_create(16, 16); + md->bs = bioset_create(16, 0); if (!md->bs) goto bad_no_bioset; diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 19caf7c962a..77ebc3c263d 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs) && bip->bip_buf != NULL) kfree(bip->bip_buf); - mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]); + bvec_free_bs(bs, bip->bip_vec, bip->bip_pool); mempool_free(bip, bs->bio_integrity_pool); bio->bi_integrity = NULL; diff --git a/fs/bio.c b/fs/bio.c index 1ab8986b041..0146f80789e 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -31,8 +31,6 @@ DEFINE_TRACE(block_split); -static struct kmem_cache *bio_slab __read_mostly; - static mempool_t *bio_split_pool __read_mostly; /* @@ -40,9 +38,8 @@ static mempool_t *bio_split_pool __read_mostly; * break badly! 
cannot be bigger than what you can fit into an * unsigned short */ - #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) } -static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { +struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES), }; #undef BV @@ -53,11 +50,119 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = { */ struct bio_set *fs_bio_set; +/* + * Our slab pool management + */ +struct bio_slab { + struct kmem_cache *slab; + unsigned int slab_ref; + unsigned int slab_size; + char name[8]; +}; +static DEFINE_MUTEX(bio_slab_lock); +static struct bio_slab *bio_slabs; +static unsigned int bio_slab_nr, bio_slab_max; + +static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) +{ + unsigned int sz = sizeof(struct bio) + extra_size; + struct kmem_cache *slab = NULL; + struct bio_slab *bslab; + unsigned int i, entry = -1; + + mutex_lock(&bio_slab_lock); + + i = 0; + while (i < bio_slab_nr) { + struct bio_slab *bslab = &bio_slabs[i]; + + if (!bslab->slab && entry == -1) + entry = i; + else if (bslab->slab_size == sz) { + slab = bslab->slab; + bslab->slab_ref++; + break; + } + i++; + } + + if (slab) + goto out_unlock; + + if (bio_slab_nr == bio_slab_max && entry == -1) { + bio_slab_max <<= 1; + bio_slabs = krealloc(bio_slabs, + bio_slab_max * sizeof(struct bio_slab), + GFP_KERNEL); + if (!bio_slabs) + goto out_unlock; + } + if (entry == -1) + entry = bio_slab_nr++; + + bslab = &bio_slabs[entry]; + + snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); + slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL); + if (!slab) + goto out_unlock; + + printk("bio: create slab <%s> at %d\n", bslab->name, entry); + bslab->slab = slab; + bslab->slab_ref = 1; + bslab->slab_size = sz; +out_unlock: + mutex_unlock(&bio_slab_lock); + return slab; +} + +static void bio_put_slab(struct bio_set *bs) +{ + struct bio_slab *bslab = NULL; + unsigned int i; + + mutex_lock(&bio_slab_lock); + + for (i = 0; i < bio_slab_nr; i++) { + if (bs->bio_slab == bio_slabs[i].slab) { + bslab = &bio_slabs[i]; + break; + } + } + + if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) + goto out; + + WARN_ON(!bslab->slab_ref); + + if (--bslab->slab_ref) + goto out; + + kmem_cache_destroy(bslab->slab); + bslab->slab = NULL; + +out: + mutex_unlock(&bio_slab_lock); +} + unsigned int bvec_nr_vecs(unsigned short idx) { return bvec_slabs[idx].nr_vecs; } +void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx) +{ + BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); + + if (idx == BIOVEC_MAX_IDX) + mempool_free(bv, bs->bvec_pool); + else { + struct biovec_slab *bvs = bvec_slabs + idx; + + kmem_cache_free(bvs->slab, bv); + } +} + struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) { @@ -134,24 +239,22 @@ fallback: void bio_free(struct bio *bio, struct bio_set *bs) { - if (bio->bi_io_vec) { - const int pool_idx = BIO_POOL_IDX(bio); + void *p; - BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS); - - if (pool_idx == BIOVEC_MAX_IDX) - mempool_free(bio->bi_io_vec, bs->bvec_pool); - else { - struct biovec_slab *bvs = bvec_slabs + pool_idx; - - kmem_cache_free(bvs->slab, bio->bi_io_vec); - } - } + if (bio->bi_io_vec) + bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); if (bio_integrity(bio)) bio_integrity_free(bio, bs); - mempool_free(bio, bs->bio_pool); + /* + * If we have front padding, adjust the bio pointer before freeing + */ + p = bio; + if 
(bs->front_pad) + p -= bs->front_pad; + + mempool_free(p, bs->bio_pool); } /* @@ -188,16 +291,20 @@ void bio_init(struct bio *bio) * for a &struct bio to become free. If a %NULL @bs is passed in, we will * fall back to just using @kmalloc to allocate the required memory. * - * allocate bio and iovecs from the memory pools specified by the - * bio_set structure, or @kmalloc if none given. + * Note that the caller must set ->bi_destructor on succesful return + * of a bio, to do the appropriate freeing of the bio once the reference + * count drops to zero. **/ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) { - struct bio *bio; + struct bio *bio = NULL; + + if (bs) { + void *p = mempool_alloc(bs->bio_pool, gfp_mask); - if (bs) - bio = mempool_alloc(bs->bio_pool, gfp_mask); - else + if (p) + bio = p + bs->front_pad; + } else bio = kmalloc(sizeof(*bio), gfp_mask); if (likely(bio)) { @@ -1398,11 +1505,25 @@ void bioset_free(struct bio_set *bs) bioset_integrity_free(bs); biovec_free_pools(bs); + bio_put_slab(bs); kfree(bs); } -struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size) +/** + * bioset_create - Create a bio_set + * @pool_size: Number of bio and bio_vecs to cache in the mempool + * @front_pad: Number of bytes to allocate in front of the returned bio + * + * Description: + * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller + * to ask for a number of bytes to be allocated in front of the bio. + * Front pad allocation is useful for embedding the bio inside + * another structure, to avoid allocating extra data to go with the bio. + * Note that the bio must be embedded at the END of that structure always, + * or things will break badly. + */ +struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) { struct bio_set *bs; @@ -1410,16 +1531,22 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size) if (!bs) return NULL; - bs->bio_slab = bio_slab; + bs->front_pad = front_pad; - bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bs->bio_slab); + bs->bio_slab = bio_find_or_create_slab(front_pad); + if (!bs->bio_slab) { + kfree(bs); + return NULL; + } + + bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); if (!bs->bio_pool) goto bad; - if (bioset_integrity_create(bs, bio_pool_size)) + if (bioset_integrity_create(bs, pool_size)) goto bad; - if (!biovec_create_pools(bs, bvec_pool_size)) + if (!biovec_create_pools(bs, pool_size)) return bs; bad: @@ -1443,12 +1570,16 @@ static void __init biovec_init_slabs(void) static int __init init_bio(void) { - bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC); + bio_slab_max = 2; + bio_slab_nr = 0; + bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); + if (!bio_slabs) + panic("bio: can't allocate bios\n"); bio_integrity_init_slab(); biovec_init_slabs(); - fs_bio_set = bioset_create(BIO_POOL_SIZE, 2); + fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); if (!fs_bio_set) panic("bio: can't allocate bios\n"); diff --git a/include/linux/bio.h b/include/linux/bio.h index 9340098d75d..4b80d3537f9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -334,7 +334,7 @@ struct bio_pair { extern struct bio_pair *bio_split(struct bio *bi, int first_sectors); extern void bio_pair_release(struct bio_pair *dbio); -extern struct bio_set *bioset_create(int, int); +extern struct bio_set *bioset_create(unsigned int, unsigned int); extern void bioset_free(struct bio_set *); extern struct bio *bio_alloc(gfp_t, int); @@ -379,6 +379,7 
@@ extern struct bio *bio_copy_user_iov(struct request_queue *, extern int bio_uncopy_user(struct bio *); void zero_fill_bio(struct bio *bio); extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *); +extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); extern unsigned int bvec_nr_vecs(unsigned short idx); /* @@ -401,6 +402,8 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) struct bio_set { struct kmem_cache *bio_slab; + unsigned int front_pad; + mempool_t *bio_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) mempool_t *bio_integrity_pool; @@ -415,6 +418,7 @@ struct biovec_slab { }; extern struct bio_set *fs_bio_set; +extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly; /* * a small number of entries is fine, not going to be performance critical. -- cgit v1.2.3-70-g09d2 From 392ddc32982a5c661dd90dd49a3cb37f1c68b782 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 23 Dec 2008 12:42:54 +0100 Subject: bio: add support for inlining a number of bio_vecs inside the bio When we go and allocate a bio for IO, we actually do two allocations. One for the bio itself, and one for the bi_io_vec that holds the actual pages we are interested in. This feature inlines a definable amount of io vecs inside the bio itself, so we eliminate the bio_vec array allocation for IO's up to a certain size. It defaults to 4 vecs, which is typically 16k of IO. Signed-off-by: Jens Axboe --- fs/bio.c | 27 ++++++++++++++++++++++----- include/linux/bio.h | 12 ++++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index 0146f80789e..75e6be18ecd 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -31,6 +31,12 @@ DEFINE_TRACE(block_split); +/* + * Test patch to inline a certain number of bi_io_vec's inside the bio + * itself, to shrink a bio data allocation from two mempool calls to one + */ +#define BIO_INLINE_VECS 4 + static mempool_t *bio_split_pool __read_mostly; /* @@ -241,7 +247,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) { void *p; - if (bio->bi_io_vec) + if (bio_has_allocated_vec(bio)) bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); if (bio_integrity(bio)) @@ -267,7 +273,8 @@ static void bio_fs_destructor(struct bio *bio) static void bio_kmalloc_destructor(struct bio *bio) { - kfree(bio->bi_io_vec); + if (bio_has_allocated_vec(bio)) + kfree(bio->bi_io_vec); kfree(bio); } @@ -314,7 +321,16 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) if (likely(nr_iovecs)) { unsigned long uninitialized_var(idx); - bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); + if (nr_iovecs <= BIO_INLINE_VECS) { + idx = 0; + bvl = bio->bi_inline_vecs; + nr_iovecs = BIO_INLINE_VECS; + memset(bvl, 0, BIO_INLINE_VECS * sizeof(*bvl)); + } else { + bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, + bs); + nr_iovecs = bvec_nr_vecs(idx); + } if (unlikely(!bvl)) { if (bs) mempool_free(bio, bs->bio_pool); @@ -324,7 +340,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) goto out; } bio->bi_flags |= idx << BIO_POOL_OFFSET; - bio->bi_max_vecs = bvec_nr_vecs(idx); + bio->bi_max_vecs = nr_iovecs; } bio->bi_io_vec = bvl; } @@ -1525,6 +1541,7 @@ void bioset_free(struct bio_set *bs) */ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) { + unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); struct bio_set *bs; bs = kzalloc(sizeof(*bs), GFP_KERNEL); @@ -1533,7 +1550,7 @@ struct bio_set 
*bioset_create(unsigned int pool_size, unsigned int front_pad) bs->front_pad = front_pad; - bs->bio_slab = bio_find_or_create_slab(front_pad); + bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); if (!bs->bio_slab) { kfree(bs); return NULL; diff --git a/include/linux/bio.h b/include/linux/bio.h index 4b80d3537f9..18462c5b8ff 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -102,6 +102,13 @@ struct bio { #endif bio_destructor_t *bi_destructor; /* destructor */ + + /* + * We can inline a number of vecs at the end of the bio, to avoid + * double allocations for a small number of bio_vecs. This member + * MUST obviously be kept at the very end of the bio. + */ + struct bio_vec bi_inline_vecs[0]; }; /* @@ -213,6 +220,11 @@ static inline void *bio_data(struct bio *bio) return NULL; } +static inline int bio_has_allocated_vec(struct bio *bio) +{ + return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs; +} + /* * will die */ -- cgit v1.2.3-70-g09d2 From abf137dd7712132ee56d5b3143c2ff61a72a5faa Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 9 Dec 2008 08:11:22 +0100 Subject: aio: make the lookup_ioctx() lockless The mm->ioctx_list is currently protected by a reader-writer lock, so we always grab that lock on the read side for doing ioctx lookups. As the workload is extremely reader biased, turn this into an rcu hlist so we can make lookup_ioctx() lockless. Get rid of the rwlock and use a spinlock for providing update side exclusion. There's usually only 1 entry on this list, so it doesn't make sense to look into fancier data structures. Reviewed-by: Jeff Moyer Signed-off-by: Jens Axboe --- arch/s390/mm/pgtable.c | 4 +- fs/aio.c | 100 ++++++++++++++++++++++++++--------------------- include/linux/aio.h | 5 ++- include/linux/mm_types.h | 5 ++- kernel/fork.c | 4 +- 5 files changed, 67 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index ef3635b52fc..0767827540b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -263,7 +263,7 @@ int s390_enable_sie(void) /* lets check if we are allowed to replace the mm */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || - tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { + tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) { task_unlock(tsk); return -EINVAL; } @@ -279,7 +279,7 @@ int s390_enable_sie(void) /* Now lets check again if something happened */ task_lock(tsk); if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || - tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) { + tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) { mmput(mm); task_unlock(tsk); return -EINVAL; diff --git a/fs/aio.c b/fs/aio.c index f658441d566..d6f89d3c15e 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -191,6 +191,20 @@ static int aio_setup_ring(struct kioctx *ctx) kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \ } while(0) +static void ctx_rcu_free(struct rcu_head *head) +{ + struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); + unsigned nr_events = ctx->max_reqs; + + kmem_cache_free(kioctx_cachep, ctx); + + if (nr_events) { + spin_lock(&aio_nr_lock); + BUG_ON(aio_nr - nr_events > aio_nr); + aio_nr -= nr_events; + spin_unlock(&aio_nr_lock); + } +} /* __put_ioctx * Called when the last user of an aio context has gone away, @@ -198,8 +212,6 @@ static int aio_setup_ring(struct kioctx *ctx) */ static void __put_ioctx(struct kioctx *ctx) { - unsigned nr_events = ctx->max_reqs; - 
BUG_ON(ctx->reqs_active); cancel_delayed_work(&ctx->wq); @@ -208,14 +220,7 @@ static void __put_ioctx(struct kioctx *ctx) mmdrop(ctx->mm); ctx->mm = NULL; pr_debug("__put_ioctx: freeing %p\n", ctx); - kmem_cache_free(kioctx_cachep, ctx); - - if (nr_events) { - spin_lock(&aio_nr_lock); - BUG_ON(aio_nr - nr_events > aio_nr); - aio_nr -= nr_events; - spin_unlock(&aio_nr_lock); - } + call_rcu(&ctx->rcu_head, ctx_rcu_free); } #define get_ioctx(kioctx) do { \ @@ -235,6 +240,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) { struct mm_struct *mm; struct kioctx *ctx; + int did_sync = 0; /* Prevent overflows */ if ((nr_events > (0x10000000U / sizeof(struct io_event))) || @@ -267,21 +273,30 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) goto out_freectx; /* limit the number of system wide aios */ - spin_lock(&aio_nr_lock); - if (aio_nr + ctx->max_reqs > aio_max_nr || - aio_nr + ctx->max_reqs < aio_nr) - ctx->max_reqs = 0; - else - aio_nr += ctx->max_reqs; - spin_unlock(&aio_nr_lock); + do { + spin_lock_bh(&aio_nr_lock); + if (aio_nr + nr_events > aio_max_nr || + aio_nr + nr_events < aio_nr) + ctx->max_reqs = 0; + else + aio_nr += ctx->max_reqs; + spin_unlock_bh(&aio_nr_lock); + if (ctx->max_reqs || did_sync) + break; + + /* wait for rcu callbacks to have completed before giving up */ + synchronize_rcu(); + did_sync = 1; + ctx->max_reqs = nr_events; + } while (1); + if (ctx->max_reqs == 0) goto out_cleanup; /* now link into global list. */ - write_lock(&mm->ioctx_list_lock); - ctx->next = mm->ioctx_list; - mm->ioctx_list = ctx; - write_unlock(&mm->ioctx_list_lock); + spin_lock(&mm->ioctx_lock); + hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); + spin_unlock(&mm->ioctx_lock); dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", ctx, ctx->user_id, current->mm, ctx->ring_info.nr); @@ -375,11 +390,12 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb) */ void exit_aio(struct mm_struct *mm) { - struct kioctx *ctx = mm->ioctx_list; - mm->ioctx_list = NULL; - while (ctx) { - struct kioctx *next = ctx->next; - ctx->next = NULL; + struct kioctx *ctx; + + while (!hlist_empty(&mm->ioctx_list)) { + ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list); + hlist_del_rcu(&ctx->list); + aio_cancel_all(ctx); wait_for_all_aios(ctx); @@ -394,7 +410,6 @@ void exit_aio(struct mm_struct *mm) atomic_read(&ctx->users), ctx->dead, ctx->reqs_active); put_ioctx(ctx); - ctx = next; } } @@ -555,19 +570,21 @@ int aio_put_req(struct kiocb *req) static struct kioctx *lookup_ioctx(unsigned long ctx_id) { - struct kioctx *ioctx; - struct mm_struct *mm; + struct mm_struct *mm = current->mm; + struct kioctx *ctx = NULL; + struct hlist_node *n; - mm = current->mm; - read_lock(&mm->ioctx_list_lock); - for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next) - if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) { - get_ioctx(ioctx); + rcu_read_lock(); + + hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { + if (ctx->user_id == ctx_id && !ctx->dead) { + get_ioctx(ctx); break; } - read_unlock(&mm->ioctx_list_lock); + } - return ioctx; + rcu_read_unlock(); + return ctx; } /* @@ -1215,19 +1232,14 @@ out: static void io_destroy(struct kioctx *ioctx) { struct mm_struct *mm = current->mm; - struct kioctx **tmp; int was_dead; /* delete the entry from the list is someone else hasn't already */ - write_lock(&mm->ioctx_list_lock); + spin_lock(&mm->ioctx_lock); was_dead = ioctx->dead; ioctx->dead = 1; - for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx; - tmp = &(*tmp)->next) - ; - if (*tmp) - *tmp = 
ioctx->next; - write_unlock(&mm->ioctx_list_lock); + hlist_del_rcu(&ioctx->list); + spin_unlock(&mm->ioctx_lock); dprintk("aio_release(%p)\n", ioctx); if (likely(!was_dead)) diff --git a/include/linux/aio.h b/include/linux/aio.h index f6b8cf99b59..b16a957030f 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -183,7 +184,7 @@ struct kioctx { /* This needs improving */ unsigned long user_id; - struct kioctx *next; + struct hlist_node list; wait_queue_head_t wait; @@ -199,6 +200,8 @@ struct kioctx { struct aio_ring_info ring_info; struct delayed_work wq; + + struct rcu_head rcu_head; }; /* prototypes */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fe825471d5a..9cfc9b627fd 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -232,8 +232,9 @@ struct mm_struct { struct core_state *core_state; /* coredumping support */ /* aio bits */ - rwlock_t ioctx_list_lock; /* aio lock */ - struct kioctx *ioctx_list; + spinlock_t ioctx_lock; + struct hlist_head ioctx_list; + #ifdef CONFIG_MM_OWNER /* * "owner" points to a task that is regarded as the canonical diff --git a/kernel/fork.c b/kernel/fork.c index 6144b36cd89..43cbf30669e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) set_mm_counter(mm, file_rss, 0); set_mm_counter(mm, anon_rss, 0); spin_lock_init(&mm->page_table_lock); - rwlock_init(&mm->ioctx_list_lock); - mm->ioctx_list = NULL; + spin_lock_init(&mm->ioctx_lock); + INIT_HLIST_HEAD(&mm->ioctx_list); mm->free_area_cache = TASK_UNMAPPED_BASE; mm->cached_hole_size = ~0UL; mm_init_owner(mm, p); -- cgit v1.2.3-70-g09d2 From b3a6ffe16b5cc48abe7db8d04882dc45280eb693 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 12 Dec 2008 09:51:16 +0100 Subject: Get rid of CONFIG_LSF We have two seperate config entries for large devices/files. One is CONFIG_LBD that guards just the devices, the other is CONFIG_LSF that handles large files. This doesn't make a lot of sense, you typically want both or none. So get rid of CONFIG_LSF and change CONFIG_LBD wording to indicate that it covers both. Acked-by: Jean Delvare Signed-off-by: Jens Axboe --- block/Kconfig | 23 +++++------------------ fs/ext4/super.c | 8 ++++---- include/linux/types.h | 11 +++-------- 3 files changed, 12 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/block/Kconfig b/block/Kconfig index 290b219fad9..ac0956f7778 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -24,21 +24,17 @@ menuconfig BLOCK if BLOCK config LBD - bool "Support for Large Block Devices" + bool "Support for large block devices and files" depends on !64BIT help - Enable block devices of size 2TB and larger. + Enable block devices or files of size 2TB and larger. This option is required to support the full capacity of large (2TB+) block devices, including RAID, disk, Network Block Device, Logical Volume Manager (LVM) and loopback. - - For example, RAID devices are frequently bigger than the capacity - of the largest individual hard drive. - - This option is not required if you have individual disk drives - which total 2TB+ and you are not aggregating the capacity into - a large block device (e.g. using RAID or LVM). + + This option also enables support for single files larger than + 2TB. If unsure, say N. @@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE If unsure, say N. 
-config LSF - bool "Support for Large Single Files" - depends on !64BIT - help - Say Y here if you want to be able to handle very large files (2TB - and larger), otherwise say N. - - If unsure, say Y. - config BLK_DEV_BSG bool "Block layer SG support v4 (EXPERIMENTAL)" depends on EXPERIMENTAL diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e4a241c65db..04158ad74db 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1721,7 +1721,7 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files) /* small i_blocks in vfs inode? */ if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* - * CONFIG_LSF is not enabled implies the inode + * CONFIG_LBD is not enabled implies the inode * i_block represent total blocks in 512 bytes * 32 == size of vfs inode i_blocks * 8 */ @@ -1764,7 +1764,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files) if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) { /* - * !has_huge_files or CONFIG_LSF is not enabled + * !has_huge_files or CONFIG_LBD is not enabled * implies the inode i_block represent total blocks in * 512 bytes 32 == size of vfs inode i_blocks * 8 */ @@ -2021,13 +2021,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (has_huge_files) { /* * Large file size enabled file system can only be - * mount if kernel is build with CONFIG_LSF + * mount if kernel is build with CONFIG_LBD */ if (sizeof(root->i_blocks) < sizeof(u64) && !(sb->s_flags & MS_RDONLY)) { printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge " "files cannot be mounted read-write " - "without CONFIG_LSF.\n", sb->s_id); + "without CONFIG_LBD.\n", sb->s_id); goto failed_mount; } } diff --git a/include/linux/types.h b/include/linux/types.h index 1d98330b1f2..121f349cb7e 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -135,19 +135,14 @@ typedef __s64 int64_t; * * Linux always considers sectors to be 512 bytes long independently * of the devices real block size. + * + * blkcnt_t is the type of the inode's block count. */ #ifdef CONFIG_LBD typedef u64 sector_t; -#else -typedef unsigned long sector_t; -#endif - -/* - * The type of the inode's block count. - */ -#ifdef CONFIG_LSF typedef u64 blkcnt_t; #else +typedef unsigned long sector_t; typedef unsigned long blkcnt_t; #endif -- cgit v1.2.3-70-g09d2 From d3f761104b097738932afcc310fbbbbfb007ef92 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 23 Dec 2008 12:46:21 +0100 Subject: bio: get rid of bio_vec clearing We don't need to clear the memory used for adding bio_vec entries, since nobody should be looking at members unitialized. Any valid use should be below bio->bi_vcnt, and that members up until that count must be valid since they were added through bio_add_page(). Signed-off-by: Jens Axboe --- fs/bio.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index 75e6be18ecd..711cee10360 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -180,7 +180,7 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, * kzalloc() for the exact number of vecs right away. */ if (!bs) - bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask); + bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask); /* * see comment near bvec_array define! 
@@ -237,9 +237,6 @@ fallback: } } - if (bvl) - memset(bvl, 0, bvec_nr_vecs(*idx) * sizeof(struct bio_vec)); - return bvl; } @@ -325,7 +322,6 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) idx = 0; bvl = bio->bi_inline_vecs; nr_iovecs = BIO_INLINE_VECS; - memset(bvl, 0, BIO_INLINE_VECS * sizeof(*bvl)); } else { bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); -- cgit v1.2.3-70-g09d2 From cb78a0ce69fad2026825f957e24e2d9cda1ec9f1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 30 Dec 2008 09:05:14 +1030 Subject: bitmap: fix seq_bitmap and seq_cpumask to take const pointer Impact: cleanup seq_bitmap just calls bitmap_scnprintf on the bits: that arg can be const. Similarly, seq_cpumask just calls seq_bitmap. Signed-off-by: Rusty Russell --- fs/seq_file.c | 3 ++- include/linux/seq_file.h | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index 16c211558c2..c99358a5217 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -462,7 +462,8 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc) return -1; } -int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits) +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits) { if (m->count < m->size) { int len = bitmap_scnprintf(m->buf + m->count, diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index b3dfa72f13b..952e0187ba1 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -50,8 +50,9 @@ int seq_path(struct seq_file *, struct path *, char *); int seq_dentry(struct seq_file *, struct dentry *, char *); int seq_path_root(struct seq_file *m, struct path *path, struct path *root, char *esc); -int seq_bitmap(struct seq_file *m, unsigned long *bits, unsigned int nr_bits); -static inline int seq_cpumask(struct seq_file *m, cpumask_t *mask) +int seq_bitmap(struct seq_file *m, const unsigned long *bits, + unsigned int nr_bits); +static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) { return seq_bitmap(m, mask->bits, NR_CPUS); } -- cgit v1.2.3-70-g09d2 From 46f72f57d279688c4524df78edb5738db730eeef Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 30 Dec 2008 16:35:55 -0500 Subject: fs/nfs/nfs4proc.c: make nfs4_map_errors() static nfs4_map_errors() can become static. Signed-off-by: WANG Cong Cc: J. 
Bruce Fields Cc: Trond Myklebust Signed-off-by: Andrew Morton Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 1 - fs/nfs/nfs4proc.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index d5f5efaf2b2..4e4d3320437 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -189,7 +189,6 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t); /* nfs4proc.c */ -extern int nfs4_map_errors(int err); extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *); extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 49eebe25bba..8dde84b988d 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -67,7 +67,7 @@ static int _nfs4_proc_lookup(struct inode *dir, const struct qstr *name, struct static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); /* Prevent leaks of NFSv4 errors into userland */ -int nfs4_map_errors(int err) +static int nfs4_map_errors(int err) { if (err < -1000) { dprintk("%s could not handle NFSv4 error %d\n", -- cgit v1.2.3-70-g09d2 From 79807d075ab8d1ca3574f5f52421e0047c1f1256 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sat, 27 Dec 2008 19:18:00 +0200 Subject: UBIFS: fix constants initialization The c->min_idx_lebs constant depends on c->old_idx_sz, which is read from the master node. This means that we have to initialize c->min_idx_lebs only after we have read the master node. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index c3cefc84137..13097830e8b 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -602,7 +602,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) } /* - * init_constants_late - initialize UBIFS constants. + * init_constants_sb - initialize UBIFS constants. * @c: UBIFS file-system description object * * This is a helper function which initializes various UBIFS constants after @@ -610,7 +610,7 @@ static int bud_wbuf_callback(struct ubifs_info *c, int lnum, int free, int pad) * makes sure they are all right. Returns zero in case of success and a * negative error code in case of failure. */ -static int init_constants_late(struct ubifs_info *c) +static int init_constants_sb(struct ubifs_info *c) { int tmp, err; long long tmp64; @@ -687,6 +687,21 @@ static int init_constants_late(struct ubifs_info *c) if (err) return err; + return 0; +} + +/* + * init_constants_master - initialize UBIFS constants. + * @c: UBIFS file-system description object + * + * This is a helper function which initializes various UBIFS constants after + * the master node has been read. It also checks various UBIFS parameters and + * makes sure they are all right. 
+ */ +static void init_constants_master(struct ubifs_info *c) +{ + long long tmp64; + c->min_idx_lebs = ubifs_calc_min_idx_lebs(c); /* @@ -702,8 +717,6 @@ static int init_constants_late(struct ubifs_info *c) tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; - - return 0; } /** @@ -1138,7 +1151,7 @@ static int mount_ubifs(struct ubifs_info *c) goto out_free; } - err = init_constants_late(c); + err = init_constants_sb(c); if (err) goto out_free; @@ -1172,6 +1185,8 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_master; + init_constants_master(c); + if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; -- cgit v1.2.3-70-g09d2 From 304d427cd99eb645b44b08d77e70ce308e6bcd8c Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:04:17 +0200 Subject: UBIFS: fix file-system synchronization Argh. The ->sync_fs call is called _before_ all inodes are flushed. This means we first sync write buffers and commit, then all inodes are synced, and we end up with unflushed write buffers! Fix this by forcing synching all indoes from 'ubifs_sync_fs()'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 13097830e8b..471301799c5 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "ubifs.h" /* @@ -431,6 +432,23 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) struct ubifs_info *c = sb->s_fs_info; int i, ret = 0, err; long long bud_bytes; + struct writeback_control wbc = { + .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD, + .range_start = 0, + .range_end = LLONG_MAX, + .nr_to_write = LONG_MAX, + }; + + /* + * VFS calls '->sync_fs()' before synchronizing all dirty inodes and + * pages, so synchronize them first, then commit the journal. Strictly + * speaking, it is not necessary to commit the journal here, + * synchronizing write-buffers would be enough. But committing makes + * UBIFS free space predictions much more accurate, so we want to let + * the user be able to get more accurate results of 'statfs()' after + * they synchronize the file system. + */ + generic_sync_sb_inodes(sb, &wbc); if (c->jheads) { for (i = 0; i < c->jhead_cnt; i++) { -- cgit v1.2.3-70-g09d2 From f10383006c26b33539820759b9dc8656497b02a4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:16:32 +0200 Subject: UBIFS: always commit in sync_fs Always run commit in sync_fs, because even if the journal seems to be almost empty, there may be a deletion which removes a large file, which affects the index greatly. And because we want better free space predictions after 'sync_fs()', we have to commit. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 471301799c5..ee8e7749eae 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -429,9 +429,8 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt) static int ubifs_sync_fs(struct super_block *sb, int wait) { + int i, err; struct ubifs_info *c = sb->s_fs_info; - int i, ret = 0, err; - long long bud_bytes; struct writeback_control wbc = { .sync_mode = wait ? 
WB_SYNC_ALL : WB_SYNC_HOLD, .range_start = 0, @@ -439,6 +438,19 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) .nr_to_write = LONG_MAX, }; + if (sb->s_flags & MS_RDONLY) + return 0; + + /* + * Synchronize write buffers, because 'ubifs_run_commit()' does not + * do this if it waits for an already running commit. + */ + for (i = 0; i < c->jhead_cnt; i++) { + err = ubifs_wbuf_sync(&c->jheads[i].wbuf); + if (err) + return err; + } + /* * VFS calls '->sync_fs()' before synchronizing all dirty inodes and * pages, so synchronize them first, then commit the journal. Strictly @@ -450,30 +462,16 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) */ generic_sync_sb_inodes(sb, &wbc); - if (c->jheads) { - for (i = 0; i < c->jhead_cnt; i++) { - err = ubifs_wbuf_sync(&c->jheads[i].wbuf); - if (err && !ret) - ret = err; - } - - /* Commit the journal unless it has too little data */ - spin_lock(&c->buds_lock); - bud_bytes = c->bud_bytes; - spin_unlock(&c->buds_lock); - if (bud_bytes > c->leb_size) { - err = ubifs_run_commit(c); - if (err) - return err; - } - } + err = ubifs_run_commit(c); + if (err) + return err; /* * We ought to call sync for c->ubi but it does not have one. If it had * it would in turn call mtd->sync, however mtd operations are * synchronous anyway, so we don't lose any sleep here. */ - return ret; + return err; } /** -- cgit v1.2.3-70-g09d2 From cb5c6a2b2be59b480a3746c5113cb3411c053bff Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 08:18:43 +0200 Subject: UBIFS: use ubi_sync UBI now has (fake for now, though) synchronization call - use it. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ee8e7749eae..a14703e0a9a 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -466,12 +466,7 @@ static int ubifs_sync_fs(struct super_block *sb, int wait) if (err) return err; - /* - * We ought to call sync for c->ubi but it does not have one. If it had - * it would in turn call mtd->sync, however mtd operations are - * synchronous anyway, so we don't lose any sleep here. - */ - return err; + return ubi_sync(c->vi.ubi_num); } /** -- cgit v1.2.3-70-g09d2 From 26d05777b0a23062a39e83c369c0a3583918f164 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 09:11:02 +0200 Subject: UBIFS: always commit on unmount UBIFS commits on unmount to make the next mount faster. Currently, it commits only if there is more than LEB size bytes in the journal. This is not very good, because journal size may be large (512KiB). And there may be few deletions in the journal which do not take much journal space, but which do introduce a lot of TNC changes and make mount slow. Thus, jurt remove this condition and always commit. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a14703e0a9a..1c1bbe4135c 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1570,20 +1570,24 @@ out: * @c: UBIFS file-system description object * * This function is called during un-mounting and re-mounting, and it commits - * the journal unless the "fast unmount" mode is enabled. It also avoids - * committing the journal if it contains too few data. + * the journal unless the "fast unmount" mode is enabled. 
*/ static void commit_on_unmount(struct ubifs_info *c) { - if (!c->fast_unmount) { - long long bud_bytes; + struct super_block *sb = c->vfs_sb; + long long bud_bytes; - spin_lock(&c->buds_lock); - bud_bytes = c->bud_bytes; - spin_unlock(&c->buds_lock); - if (bud_bytes > c->leb_size) - ubifs_run_commit(c); - } + /* + * This function is called before the background thread is stopped, so + * we may race with ongoing commit, which means we have to take + * @c->bud_lock to access @c->bud_bytes. + */ + spin_lock(&c->buds_lock); + bud_bytes = c->bud_bytes; + spin_unlock(&c->buds_lock); + + if (!c->fast_unmount && !(sb->s_flags & MS_RDONLY) && bud_bytes) + ubifs_run_commit(c); } /** @@ -2009,7 +2013,7 @@ static void ubifs_kill_sb(struct super_block *sb) * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()' * in order to be outside BKL. */ - if (sb->s_root && !(sb->s_flags & MS_RDONLY)) + if (sb->s_root) commit_on_unmount(c); /* The un-mount routine is actually done in put_super() */ generic_shutdown_super(sb); -- cgit v1.2.3-70-g09d2 From 6edbfafda682b30ad984964cc432da6fa1c8fab5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 20:06:49 +0200 Subject: UBIFS: restore budg_uncommitted_idx UBIFS stores uncommitted index size in c->budg_uncommitted_idx, and this affect budgeting calculations. When mounting and replaying, this variable is not updated, so we may end up with "over-budgeting". This patch fixes the issue. Signed-off-by: Artem Bityutskiy --- fs/ubifs/replay.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 21f7d047c30..ce42a7b0ca5 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -144,7 +144,7 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r) /* * If the replay order was perfect the dirty space would now be * zero. The order is not perfect because the the journal heads - * race with eachother. This is not a problem but is does mean + * race with each other. This is not a problem but is does mean * that the dirty space may temporarily exceed c->leb_size * during the replay. */ @@ -656,7 +656,7 @@ out_dump: * @dirty: amount of dirty space from padding and deletion nodes * * This function inserts a reference node to the replay tree and returns zero - * in case of success ort a negative error code in case of failure. + * in case of success or a negative error code in case of failure. */ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs, unsigned long long sqnum, int free, int dirty) @@ -883,7 +883,7 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf) * This means that we reached end of log and now * look to the older log data, which was already * committed but the eraseblock was not erased (UBIFS - * only unmaps it). So this basically means we have to + * only un-maps it). So this basically means we have to * exit with "end of log" code. */ err = 1; @@ -1062,6 +1062,15 @@ int ubifs_replay_journal(struct ubifs_info *c) if (err) goto out; + /* + * UBIFS budgeting calculations use @c->budg_uncommitted_idx variable + * to roughly estimate index growth. Things like @c->min_idx_lebs + * depend on it. This means we have to initialize it to make sure + * budgeting works properly. 
+ */ + c->budg_uncommitted_idx = atomic_long_read(&c->dirty_zn_cnt); + c->budg_uncommitted_idx *= c->max_idx_node_sz; + ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, -- cgit v1.2.3-70-g09d2 From 2edc2025c2583a18eafe5cdbc7deb36e320aaec5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 22 Dec 2008 11:21:03 +0200 Subject: UBIFS: do not lie about used blocks Do not force UBIFS return 0 used space when it is empty. It leads to a situation when creating any file immediately produces tens of used blocks, which looks very weird. It is better to be honest and say that some blocks are used even if the FS is empty. And ext2 does the same. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 44cff803171..3715d011495 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -766,16 +766,6 @@ long long ubifs_get_free_space(struct ubifs_info *c) min_idx_lebs = c->min_idx_lebs; ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c)); outstanding = c->budg_data_growth + c->budg_dd_growth; - - /* - * Force the amount available to the total size reported if the used - * space is zero. - */ - if (c->lst.total_used <= UBIFS_INO_NODE_SZ && !outstanding) { - spin_unlock(&c->space_lock); - return (long long)c->block_cnt << UBIFS_BLOCK_SHIFT; - } - available = ubifs_calc_available(c, min_idx_lebs); /* -- cgit v1.2.3-70-g09d2 From 2acf80675800d5e6775990d1280cca5c2ffb30e6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 9 Dec 2008 11:04:40 -0500 Subject: UBIFS: simplify make_free_space The 'make_free_space()' function was too complex and this patch simplifies it. It also fixes a bug - the freespace test failed straight away on UBI volumes with 512 bytes LEB size. Signed-off-by: Artem Bityutskiy --- fs/ubifs/budget.c | 151 ++++++++++++++++++------------------------------------ 1 file changed, 49 insertions(+), 102 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 3715d011495..4d270f0a856 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -37,13 +37,10 @@ /* * When pessimistic budget calculations say that there is no enough space, * UBIFS starts writing back dirty inodes and pages, doing garbage collection, - * or committing. The below constants define maximum number of times UBIFS + * or committing. The below constant defines maximum number of times UBIFS * repeats the operations. */ -#define MAX_SHRINK_RETRIES 8 -#define MAX_GC_RETRIES 4 -#define MAX_CMT_RETRIES 2 -#define MAX_NOSPC_RETRIES 1 +#define MAX_MKSPC_RETRIES 3 /* * The below constant defines amount of dirty pages which should be written @@ -51,30 +48,6 @@ */ #define NR_TO_WRITE 16 -/** - * struct retries_info - information about re-tries while making free space. - * @prev_liability: previous liability - * @shrink_cnt: how many times the liability was shrinked - * @shrink_retries: count of liability shrink re-tries (increased when - * liability does not shrink) - * @try_gc: GC should be tried first - * @gc_retries: how many times GC was run - * @cmt_retries: how many times commit has been done - * @nospc_retries: how many times GC returned %-ENOSPC - * - * Since we consider budgeting to be the fast-path, and this structure has to - * be allocated on stack and zeroed out, we make it smaller using bit-fields. 
- */ -struct retries_info { - long long prev_liability; - unsigned int shrink_cnt; - unsigned int shrink_retries:5; - unsigned int try_gc:1; - unsigned int gc_retries:4; - unsigned int cmt_retries:3; - unsigned int nospc_retries:1; -}; - /** * shrink_liability - write-back some dirty pages/inodes. * @c: UBIFS file-system description object @@ -146,10 +119,26 @@ static int run_gc(struct ubifs_info *c) return 0; } +/** + * get_liability - calculate current liability. + * @c: UBIFS file-system description object + * + * This function calculates and returns current UBIFS liability, i.e. the + * amount of bytes UBIFS has "promised" to write to the media. + */ +static long long get_liability(struct ubifs_info *c) +{ + long long liab; + + spin_lock(&c->space_lock); + liab = c->budg_idx_growth + c->budg_data_growth + c->budg_dd_growth; + spin_unlock(&c->space_lock); + return liab; +} + /** * make_free_space - make more free space on the file-system. * @c: UBIFS file-system description object - * @ri: information about previous invocations of this function * * This function is called when an operation cannot be budgeted because there * is supposedly no free space. But in most cases there is some free space: @@ -165,87 +154,42 @@ static int run_gc(struct ubifs_info *c) * Returns %-ENOSPC if it couldn't do more free space, and other negative error * codes on failures. */ -static int make_free_space(struct ubifs_info *c, struct retries_info *ri) +static int make_free_space(struct ubifs_info *c) { - int err; - - /* - * If we have some dirty pages and inodes (liability), try to write - * them back unless this was tried too many times without effect - * already. - */ - if (ri->shrink_retries < MAX_SHRINK_RETRIES && !ri->try_gc) { - long long liability; - - spin_lock(&c->space_lock); - liability = c->budg_idx_growth + c->budg_data_growth + - c->budg_dd_growth; - spin_unlock(&c->space_lock); + int err, retries = 0; + long long liab1, liab2; - if (ri->prev_liability >= liability) { - /* Liability does not shrink, next time try GC then */ - ri->shrink_retries += 1; - if (ri->gc_retries < MAX_GC_RETRIES) - ri->try_gc = 1; - dbg_budg("liability did not shrink: retries %d of %d", - ri->shrink_retries, MAX_SHRINK_RETRIES); - } - - dbg_budg("force write-back (count %d)", ri->shrink_cnt); - shrink_liability(c, NR_TO_WRITE + ri->shrink_cnt); + do { + liab1 = get_liability(c); + /* + * We probably have some dirty pages or inodes (liability), try + * to write them back. + */ + dbg_budg("liability %lld, run write-back", liab1); + shrink_liability(c, NR_TO_WRITE); - ri->prev_liability = liability; - ri->shrink_cnt += 1; - return -EAGAIN; - } + liab2 = get_liability(c); + if (liab2 < liab1) + return -EAGAIN; - /* - * Try to run garbage collector unless it was already tried too many - * times. - */ - if (ri->gc_retries < MAX_GC_RETRIES) { - ri->gc_retries += 1; - dbg_budg("run GC, retries %d of %d", - ri->gc_retries, MAX_GC_RETRIES); + dbg_budg("new liability %lld (not shrinked)", liab2); - ri->try_gc = 0; + /* Liability did not shrink again, try GC */ + dbg_budg("Run GC"); err = run_gc(c); if (!err) return -EAGAIN; - if (err == -EAGAIN) { - dbg_budg("GC asked to commit"); - err = ubifs_run_commit(c); - if (err) - return err; - return -EAGAIN; - } - - if (err != -ENOSPC) + if (err != -EAGAIN && err != -ENOSPC) + /* Some real error happened */ return err; - /* - * GC could not make any progress. If this is the first time, - * then it makes sense to try to commit, because it might make - * some dirty space. 
- */ - dbg_budg("GC returned -ENOSPC, retries %d", - ri->nospc_retries); - if (ri->nospc_retries >= MAX_NOSPC_RETRIES) - return err; - ri->nospc_retries += 1; - } - - /* Neither GC nor write-back helped, try to commit */ - if (ri->cmt_retries < MAX_CMT_RETRIES) { - ri->cmt_retries += 1; - dbg_budg("run commit, retries %d of %d", - ri->cmt_retries, MAX_CMT_RETRIES); + dbg_budg("Run commit (retries %d)", retries); err = ubifs_run_commit(c); if (err) return err; - return -EAGAIN; - } + } while (retries++ < MAX_MKSPC_RETRIES); + return -ENOSPC; } @@ -523,8 +467,7 @@ static int calc_dd_growth(const struct ubifs_info *c, int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) { int uninitialized_var(cmt_retries), uninitialized_var(wb_retries); - int err, idx_growth, data_growth, dd_growth; - struct retries_info ri; + int err, idx_growth, data_growth, dd_growth, retried = 0; ubifs_assert(req->new_page <= 1); ubifs_assert(req->dirtied_page <= 1); @@ -542,7 +485,6 @@ int ubifs_budget_space(struct ubifs_info *c, struct ubifs_budget_req *req) if (!data_growth && !dd_growth) return 0; idx_growth = calc_idx_growth(c, req); - memset(&ri, 0, sizeof(struct retries_info)); again: spin_lock(&c->space_lock); @@ -580,12 +522,17 @@ again: return err; } - err = make_free_space(c, &ri); + err = make_free_space(c); + cond_resched(); if (err == -EAGAIN) { dbg_budg("try again"); - cond_resched(); goto again; } else if (err == -ENOSPC) { + if (!retried) { + retried = 1; + dbg_budg("-ENOSPC, but anyway try once again"); + goto again; + } dbg_budg("FS is full, -ENOSPC"); c->nospace = 1; if (can_use_rp(c) || c->rp_size == 0) -- cgit v1.2.3-70-g09d2 From 6a4a9b438fe43397f4652853838f284cddd629b5 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 11:00:55 +0200 Subject: UBIFS: fix sparse warnings fs/ubifs/compress.c:111:8: warning: incorrect type in argument 5 (different signedness) fs/ubifs/compress.c:111:8: expected unsigned int *dlen fs/ubifs/compress.c:111:8: got int *out_len fs/ubifs/compress.c:175:10: warning: incorrect type in argument 5 (different signedness) fs/ubifs/compress.c:175:10: expected unsigned int *dlen fs/ubifs/compress.c:175:10: got int *out_len Fix this by adding a cast to (unsigned int *). We guarantee that our lengths are small and no overflow is possible. Signed-off-by: Artem Bityutskiy --- fs/ubifs/compress.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 4c90ee2aef4..11e4132f314 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -108,7 +108,7 @@ void ubifs_compress(const void *in_buf, int in_len, void *out_buf, int *out_len, if (compr->comp_mutex) mutex_lock(compr->comp_mutex); err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, - out_len); + (unsigned int *)out_len); if (compr->comp_mutex) mutex_unlock(compr->comp_mutex); if (unlikely(err)) { @@ -172,7 +172,7 @@ int ubifs_decompress(const void *in_buf, int in_len, void *out_buf, if (compr->decomp_mutex) mutex_lock(compr->decomp_mutex); err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, - out_len); + (unsigned int *)out_len); if (compr->decomp_mutex) mutex_unlock(compr->decomp_mutex); if (err) -- cgit v1.2.3-70-g09d2 From f92b982680e4b4149c559789a54e1e9db190752a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Sun, 28 Dec 2008 11:34:26 +0200 Subject: UBIFS: fix checkpatch.pl warnings These are mostly long lines and wrong indentation warning fixes. 
But also there are two volatile variables and checkpatch.pl complains about them: WARNING: Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt + volatile int gc_seq; WARNING: Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt + volatile int gced_lnum; Well, we anyway use smp_wmb() for c->gc_seq and c->gced_lnum, so these 'volatile' modifiers can be just dropped. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 3 ++- fs/ubifs/file.c | 4 ++-- fs/ubifs/journal.c | 2 +- fs/ubifs/lpt_commit.c | 2 +- fs/ubifs/tnc.c | 31 +++++++++++++++---------------- fs/ubifs/ubifs.h | 8 ++++---- 6 files changed, 25 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index a2be11584ad..350fedecc83 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -703,7 +703,8 @@ void dbg_dump_lpt_info(struct ubifs_info *c) printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs); printk(KERN_DEBUG "\tLPT head is at %d:%d\n", c->nhead_lnum, c->nhead_offs); - printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", c->ltab_lnum, c->ltab_offs); + printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n", + c->ltab_lnum, c->ltab_offs); if (c->big_lpt) printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n", c->lsave_lnum, c->lsave_offs); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 7f1de98e609..fe82d2464d4 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -72,8 +72,8 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, return err; } - ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); - + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > + ubifs_inode(inode)->creat_sqnum); len = le32_to_cpu(dn->size); if (len <= 0 || len > UBIFS_BLOCK_SIZE) goto dump; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index f91b745908e..3b0fa704d55 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -1220,7 +1220,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, data_key_init(c, &key, inum, blk); bit = old_size & (UBIFS_BLOCK_SIZE - 1); - blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 0: 1); + blk = (old_size >> UBIFS_BLOCK_SHIFT) - (bit ? 
0 : 1); data_key_init(c, &to_key, inum, blk); err = ubifs_tnc_remove_range(c, &key, &to_key); diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index da60b5a0fab..b8a06079423 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -548,7 +548,7 @@ static int write_cnodes(struct ubifs_info *c) no_space: ubifs_err("LPT out of space mismatch"); dbg_err("LPT out of space mismatch at LEB %d:%d needing %d, done_ltab " - "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); + "%d, done_lsave %d", lnum, offs, len, done_ltab, done_lsave); dbg_dump_lpt_info(c); dbg_dump_lpt_lebs(c); dump_stack(); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 6eef5344a14..f7e36f54552 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2245,12 +2245,11 @@ int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key, if (found) { /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, - znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } zbr = &znode->zbranch[n]; lnc_free(zbr); @@ -2317,11 +2316,11 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key, /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } if (found == 1) { @@ -2627,11 +2626,11 @@ int ubifs_tnc_remove_range(struct ubifs_info *c, union ubifs_key *from_key, /* Ensure the znode is dirtied */ if (znode->cnext || !ubifs_zn_dirty(znode)) { - znode = dirty_cow_bottom_up(c, znode); - if (IS_ERR(znode)) { - err = PTR_ERR(znode); - goto out_unlock; - } + znode = dirty_cow_bottom_up(c, znode); + if (IS_ERR(znode)) { + err = PTR_ERR(znode); + goto out_unlock; + } } /* Remove all keys in range except the first */ diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a17dd794ae9..3275c89a358 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -481,8 +481,8 @@ struct ubifs_lprops { struct ubifs_lpt_lprops { int free; int dirty; - unsigned tgc : 1; - unsigned cmt : 1; + unsigned tgc:1; + unsigned cmt:1; }; /** @@ -1322,8 +1322,8 @@ struct ubifs_info { void *sbuf; struct list_head idx_gc; int idx_gc_cnt; - volatile int gc_seq; - volatile int gced_lnum; + int gc_seq; + int gced_lnum; struct list_head infos_list; struct mutex umount_mutex; -- cgit v1.2.3-70-g09d2 From a9f2fc0e251e71a51deb8059b181c375a4a5e979 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 23 Dec 2008 14:39:14 +0200 Subject: UBIFS: fix writing uncompressed files UBIFS does not disable compression if ui->flags is non-zero, e.g. if the file has "sync" flag. This is because of the typo which is fixed by this patch. The patch also adds a couple of useful debugging prints. 
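
[Editorial sketch, not part of the commit: it is worth spelling out why the one-character typo matters. With "&&" the condition is a logical AND of two values that are both non-zero whenever any inode flag at all is set, so the compression branch is never disabled. A standalone illustration, using a hypothetical flag value rather than the real UBIFS constant:]

    #include <stdio.h>

    #define COMPR_FL 0x01          /* hypothetical stand-in for UBIFS_COMPR_FL */

    int main(void)
    {
            unsigned int flags = 0x08;      /* some unrelated flag set, COMPR_FL clear */

            if (!(flags && COMPR_FL))       /* the typo: logical AND, non-zero for any flags */
                    printf("&&: compression disabled\n");   /* never printed */

            if (!(flags & COMPR_FL))        /* the fix: bitwise test of the actual bit */
                    printf("&: compression disabled\n");    /* printed, as intended */

            return 0;
    }
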
Signed-off-by: Artem Bityutskiy --- fs/ubifs/ioctl.c | 2 ++ fs/ubifs/journal.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c index 5e82cffe969..6db7a6be6c9 100644 --- a/fs/ubifs/ioctl.c +++ b/fs/ubifs/ioctl.c @@ -154,6 +154,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case FS_IOC_GETFLAGS: flags = ubifs2ioctl(ubifs_inode(inode)->flags); + dbg_gen("get flags: %#x, i_flags %#x", flags, inode->i_flags); return put_user(flags, (int __user *) arg); case FS_IOC_SETFLAGS: { @@ -176,6 +177,7 @@ long ubifs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = mnt_want_write(file->f_path.mnt); if (err) return err; + dbg_gen("set flags: %#x, i_flags %#x", flags, inode->i_flags); err = setflags(inode, flags); mnt_drop_write(file->f_path.mnt); return err; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 3b0fa704d55..10ae25b7d1d 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -704,7 +704,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, data->size = cpu_to_le32(len); zero_data_node_unused(data); - if (!(ui->flags && UBIFS_COMPR_FL)) + if (!(ui->flags & UBIFS_COMPR_FL)) /* Compression is disabled for this inode */ compr_type = UBIFS_COMPR_NONE; else -- cgit v1.2.3-70-g09d2 From 57a450e95932f7798677885b8a01443aca72fdc7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 16:23:34 +0200 Subject: UBIFS: allow mounting when short of space It is fine if there is not free space - we should still allow mounting this FS. This patch relaxes the free space requirements and adds info dumps. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 40 +++++++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1c1bbe4135c..2c91d6fa4e0 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1073,6 +1073,30 @@ again: } } +/** + * check_free_space - check if there is enough free space to mount. + * @c: UBIFS file-system description object + * + * This function makes sure UBIFS has enough free space to be mounted in + * read/write mode. UBIFS must always have some free space to allow deletions. + */ +static int check_free_space(struct ubifs_info *c) +{ + ubifs_assert(c->dark_wm > 0); + if (c->lst.total_free + c->lst.total_dirty < c->dark_wm) { + ubifs_err("insufficient free space to mount in read/write mode"); + dbg_dump_budg(c); + dbg_dump_lprops(c); + /* + * We return %-EINVAL instead of %-ENOSPC because it seems to + * be the closest error code mentioned in the mount function + * documentation. + */ + return -EINVAL; + } + return 0; +} + /** * mount_ubifs - mount UBIFS file-system. * @c: UBIFS file-system description object @@ -1154,7 +1178,7 @@ static int mount_ubifs(struct ubifs_info *c) /* * Make sure the compressor which is set as default in the superblock - * or overriden by mount options is actually compiled in. + * or overridden by mount options is actually compiled in. 
*/ if (!ubifs_compr_present(c->default_compr)) { ubifs_err("'compressor \"%s\" is not compiled in", @@ -1236,12 +1260,9 @@ static int mount_ubifs(struct ubifs_info *c) if (!mounted_read_only) { int lnum; - /* Check for enough free space */ - if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { - ubifs_err("insufficient available space"); - err = -EINVAL; + err = check_free_space(c); + if (err) goto out_orphans; - } /* Check for enough log space */ lnum = c->lhead_lnum + 1; @@ -1442,12 +1463,9 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->remounting_rw = 1; c->always_chk_crc = 1; - /* Check for enough free space */ - if (ubifs_calc_available(c, c->min_idx_lebs) <= 0) { - ubifs_err("insufficient available space"); - err = -EINVAL; + err = check_free_space(c); + if (err) goto out; - } if (c->old_leb_cnt != c->leb_cnt) { struct ubifs_sb_node *sup; -- cgit v1.2.3-70-g09d2 From 80736d41f895bc472b2433a1c27fa6d4afe6ca35 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 17:44:02 +0200 Subject: UBIFS: fix numerous spelling mistakes Signed-off-by: Artem Bityutskiy --- Documentation/filesystems/ubifs.txt | 6 +++--- fs/ubifs/budget.c | 14 +++++++------- fs/ubifs/lpt_commit.c | 8 ++++---- fs/ubifs/ubifs.h | 1 - 4 files changed, 14 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 2d0db5482d2..84da2a4ba25 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -95,9 +95,9 @@ no_chk_data_crc skip checking of CRCs on data nodes in order to of this option is that corruption of the contents of a file can go unnoticed. chk_data_crc (*) do not skip checking CRCs on data nodes -compr=none override defoult comressor and set it to "none" -compr=lzo override defoult comressor and set it to "lzo" -compr=zlib override defoult comressor and set it to "zlib" +compr=none override default compressor and set it to "none" +compr=lzo override default compressor and set it to "lzo" +compr=zlib override default compressor and set it to "zlib" Quick usage instructions diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index 4d270f0a856..31870d8dab8 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -652,9 +652,9 @@ void ubifs_release_dirty_inode_budget(struct ubifs_info *c, * user-space. User-space application tend to expect that if the file-system * (e.g., via the 'statfs()' call) reports that it has N bytes available, they * are able to write a file of size N. UBIFS attaches node headers to each data - * node and it has to write indexind nodes as well. This introduces additional - * overhead, and UBIFS has to report sligtly less free space to meet the above - * expectetions. + * node and it has to write indexing nodes as well. This introduces additional + * overhead, and UBIFS has to report slightly less free space to meet the above + * expectations. * * This function assumes free space is made up of uncompressed data nodes and * full index nodes (one per data node, tripled because we always allow enough @@ -677,7 +677,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) * of data nodes, f - fanout. Because effective UBIFS fanout is twice * as less than maximum fanout, we assume that each data node * introduces 3 * @c->max_idx_node_sz / (@c->fanout/2 - 1) bytes. - * Note, the multiplier 3 is because UBIFS reseves thrice as more space + * Note, the multiplier 3 is because UBIFS reserves thrice as more space * for the index. 
*/ f = c->fanout > 3 ? c->fanout >> 1 : 2; @@ -695,10 +695,10 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free) * This function calculates amount of free space to report to user-space. * * Because UBIFS may introduce substantial overhead (the index, node headers, - * alighment, wastage at the end of eraseblocks, etc), it cannot report real + * alignment, wastage at the end of eraseblocks, etc), it cannot report real * amount of free flash space it has (well, because not all dirty space is - * reclamable, UBIFS does not actually know the real amount). If UBIFS did so, - * it would bread user expectetion about what free space is. Users seem to + * reclaimable, UBIFS does not actually know the real amount). If UBIFS did so, + * it would bread user expectations about what free space is. Users seem to * accustomed to assume that if the file-system reports N bytes of free space, * they would be able to fit a file of N bytes to the FS. This almost works for * traditional file-systems, because they have way less overhead than UBIFS. diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index b8a06079423..96ca9570717 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -753,7 +753,7 @@ static void lpt_tgc_start(struct ubifs_info *c) * LPT trivial garbage collection is where a LPT LEB contains only dirty and * free space and so may be reused as soon as the next commit is completed. * This function is called after the commit is completed (master node has been - * written) and unmaps LPT LEBs that were marked for trivial GC. + * written) and un-maps LPT LEBs that were marked for trivial GC. */ static int lpt_tgc_end(struct ubifs_info *c) { @@ -1467,7 +1467,7 @@ void ubifs_lpt_free(struct ubifs_info *c, int wr_only) #ifdef CONFIG_UBIFS_FS_DEBUG /** - * dbg_is_all_ff - determine if a buffer contains only 0xff bytes. + * dbg_is_all_ff - determine if a buffer contains only 0xFF bytes. * @buf: buffer * @len: buffer length */ @@ -1492,7 +1492,7 @@ static int dbg_is_nnode_dirty(struct ubifs_info *c, int lnum, int offs) struct ubifs_nnode *nnode; int hght; - /* Entire tree is in memory so first_nnode / next_nnode are ok */ + /* Entire tree is in memory so first_nnode / next_nnode are OK */ nnode = first_nnode(c, &hght); for (; nnode; nnode = next_nnode(c, nnode, &hght)) { struct ubifs_nbranch *branch; @@ -1837,7 +1837,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) * This function dumps an LEB from LPT area. Nodes in this area are very * different to nodes in the main area (e.g., they do not have common headers, * they do not have 8-byte alignments, etc), so we have a separate function to - * dump LPT area LEBs. Note, LPT has to be locked by the coller. + * dump LPT area LEBs. Note, LPT has to be locked by the caller. 
*/ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) { diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 3275c89a358..fc2a4cc66d0 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1168,7 +1168,6 @@ struct ubifs_debug_info; * @mount_opts: UBIFS-specific mount options * * @dbg: debugging-related information - * @dfs: debugfs support-related information */ struct ubifs_info { struct super_block *vfs_sb; -- cgit v1.2.3-70-g09d2 From 5d38b3ac78e0e0e420fba134716fc3d20e6b978a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 17:58:42 +0200 Subject: UBIFS: print debugging messages properly We cannot use ubifs_err() macro with DBGKEY() and DBGKEY1(), because this is racy and holding dbg_lock is needed. Use dbg_err() instead, which does have the lock held. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 350fedecc83..792c5a16c18 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1010,20 +1010,20 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, err = 1; key_read(c, &dent1->key, &key); if (keys_cmp(c, &zbr1->key, &key)) { - ubifs_err("1st entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr1->key)); + dbg_err("1st entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + dbg_err("but it should have key %s according to tnc", + DBGKEY(&zbr1->key)); dbg_dump_node(c, dent1); goto out_free; } key_read(c, &dent2->key, &key); if (keys_cmp(c, &zbr2->key, &key)) { - ubifs_err("2nd entry at %d:%d has key %s", zbr1->lnum, - zbr1->offs, DBGKEY(&key)); - ubifs_err("but it should have key %s according to tnc", - DBGKEY(&zbr2->key)); + dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum, + zbr1->offs, DBGKEY(&key)); + dbg_err("but it should have key %s according to tnc", + DBGKEY(&zbr2->key)); dbg_dump_node(c, dent2); goto out_free; } @@ -1037,9 +1037,9 @@ static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1, goto out_free; } if (cmp == 0 && nlen1 == nlen2) - ubifs_err("2 xent/dent nodes with the same name"); + dbg_err("2 xent/dent nodes with the same name"); else - ubifs_err("bad order of colliding key %s", + dbg_err("bad order of colliding key %s", DBGKEY(&key)); ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs); -- cgit v1.2.3-70-g09d2 From 8e5033adc78ff4fbeab7052134e7af1f6ff04187 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 30 Dec 2008 18:37:45 +0200 Subject: UBIFS: add more useful debugging prints Print node sizes and maximum node sizes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 2c91d6fa4e0..0d7564b95f8 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1361,8 +1361,20 @@ static int mount_ubifs(struct ubifs_info *c) dbg_msg("tree fanout: %d", c->fanout); dbg_msg("reserved GC LEB: %d", c->gc_lnum); dbg_msg("first main LEB: %d", c->main_first); + dbg_msg("max. znode size %d", c->max_znode_sz); + dbg_msg("max. 
index node size %d", c->max_idx_node_sz); + dbg_msg("node sizes: data %zu, inode %zu, dentry %zu", + UBIFS_DATA_NODE_SZ, UBIFS_INO_NODE_SZ, UBIFS_DENT_NODE_SZ); + dbg_msg("node sizes: trun %zu, sb %zu, master %zu", + UBIFS_TRUN_NODE_SZ, UBIFS_SB_NODE_SZ, UBIFS_MST_NODE_SZ); + dbg_msg("node sizes: ref %zu, cmt. start %zu, orph %zu", + UBIFS_REF_NODE_SZ, UBIFS_CS_NODE_SZ, UBIFS_ORPH_NODE_SZ); + dbg_msg("max. node sizes: data %zu, inode %zu dentry %zu", + UBIFS_MAX_DATA_NODE_SZ, UBIFS_MAX_INO_NODE_SZ, + UBIFS_MAX_DENT_NODE_SZ); dbg_msg("dead watermark: %d", c->dead_wm); dbg_msg("dark watermark: %d", c->dark_wm); + dbg_msg("LEB overhead: %d", c->leb_overhead); x = (long long)c->main_lebs * c->dark_wm; dbg_msg("max. dark space: %lld (%lld KiB, %lld MiB)", x, x >> 10, x >> 20); -- cgit v1.2.3-70-g09d2 From e3a2a0d4e5ace731e60e2eff4fb7056ecb34adc1 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 2 Dec 2008 11:16:03 +0100 Subject: anon_inodes: use fops->owner for module refcount There is an imbalance for anonymous inodes. If the fops->owner field is set, the module reference count of owner is decreases on release. ("filp_close" --> "__fput" ---> "fops_put") On the other hand, anon_inode_getfd does not increase the module reference count of owner. This causes two problems: - if owner is set, the module refcount goes negative - if owner is not set, the module can be unloaded while code is running This patch changes anon_inode_getfd to be symmetric regarding fops->owner handling. I have checked all existing users of anon_inode_getfd. Noone sets fops->owner, thats why nobody has seen the module refcount negative. The refcounting was tested with a patched and unpatched KVM module.(see patch 2/2) I also did an epoll_open/close test. Signed-off-by: Christian Borntraeger Reviewed-by: Davide Libenzi Signed-off-by: Avi Kivity --- fs/anon_inodes.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index c16d9be1b01..3bbdb9d0237 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -79,9 +79,12 @@ int anon_inode_getfd(const char *name, const struct file_operations *fops, if (IS_ERR(anon_inode_inode)) return -ENODEV; + if (fops->owner && !try_module_get(fops->owner)) + return -ENOENT; + error = get_unused_fd_flags(flags); if (error < 0) - return error; + goto err_module; fd = error; /* @@ -128,6 +131,8 @@ err_dput: dput(dentry); err_put_unused_fd: put_unused_fd(fd); +err_module: + module_put(fops->owner); return error; } EXPORT_SYMBOL_GPL(anon_inode_getfd); -- cgit v1.2.3-70-g09d2 From be6d3e56a6b9b3a4ee44a0685e39e595073c6f0d Mon Sep 17 00:00:00 2001 From: Kentaro Takeda Date: Wed, 17 Dec 2008 13:24:15 +0900 Subject: introduce new LSM hooks where vfsmount is available. Add new LSM hooks for path-based checks. Call them on directory-modifying operations at the points where we still know the vfsmount involved. 
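
[Editorial sketch, not part of the commit: a rough idea of how a pathname-based security module might consume one of the new hooks. Every example_* name below is invented for illustration; the intended real users are TOMOYO/AppArmor-style LSMs.]

    #include <linux/security.h>
    #include <linux/dcache.h>
    #include <linux/err.h>
    #include <linux/gfp.h>

    /* hypothetical policy lookup, always permits */
    static int example_check_rule(const char *op, const char *name)
    {
            return 0;
    }

    static int example_path_unlink(struct path *dir, struct dentry *dentry)
    {
            struct path victim = { .mnt = dir->mnt, .dentry = dentry };
            char *buf, *name;
            int err = 0;

            buf = (char *)__get_free_page(GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            /* the vfsmount carried in @dir is what makes a full pathname possible */
            name = d_path(&victim, buf, PAGE_SIZE);
            if (!IS_ERR(name))
                    err = example_check_rule("unlink", name);
            free_page((unsigned long)buf);
            return err;
    }

    static struct security_operations example_ops = {
            .path_unlink    = example_path_unlink,
            /* .path_mkdir, .path_rename, ... as needed */
    };

[The point of the new hook signatures is visible in the sketch: because the caller still holds the parent's struct path, the module can rebuild the full pathname with d_path(), which the existing inode-based hooks cannot do.]
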
Signed-off-by: Kentaro Takeda Signed-off-by: Tetsuo Handa Signed-off-by: Toshiharu Harada Signed-off-by: Al Viro --- fs/namei.c | 36 +++++++++++++ fs/open.c | 5 ++ include/linux/security.h | 137 +++++++++++++++++++++++++++++++++++++++++++++++ net/unix/af_unix.c | 4 ++ security/Kconfig | 9 ++++ security/capability.c | 57 ++++++++++++++++++++ security/security.c | 66 +++++++++++++++++++++++ 7 files changed, 314 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index af3783fff1d..ab441af4196 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1556,6 +1556,9 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) * Refuse to truncate files with mandatory locks held on them. */ error = locks_verify_locked(inode); + if (!error) + error = security_path_truncate(&nd->path, 0, + ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { DQUOT_INIT(inode); @@ -1586,7 +1589,11 @@ static int __open_namei_create(struct nameidata *nd, struct path *path, if (!IS_POSIXACL(dir->d_inode)) mode &= ~current->fs->umask; + error = security_path_mknod(&nd->path, path->dentry, mode, 0); + if (error) + goto out_unlock; error = vfs_create(dir->d_inode, path->dentry, mode, nd); +out_unlock: mutex_unlock(&dir->d_inode->i_mutex); dput(nd->path.dentry); nd->path.dentry = path->dentry; @@ -1999,6 +2006,9 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_mknod(&nd.path, dentry, mode, dev); + if (error) + goto out_drop_write; switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd); @@ -2011,6 +2021,7 @@ asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0); break; } +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2070,7 +2081,11 @@ asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode) error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_mkdir(&nd.path, dentry, mode); + if (error) + goto out_drop_write; error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2180,7 +2195,11 @@ static long do_rmdir(int dfd, const char __user *pathname) error = mnt_want_write(nd.path.mnt); if (error) goto exit3; + error = security_path_rmdir(&nd.path, dentry); + if (error) + goto exit4; error = vfs_rmdir(nd.path.dentry->d_inode, dentry); +exit4: mnt_drop_write(nd.path.mnt); exit3: dput(dentry); @@ -2265,7 +2284,11 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = mnt_want_write(nd.path.mnt); if (error) goto exit2; + error = security_path_unlink(&nd.path, dentry); + if (error) + goto exit3; error = vfs_unlink(nd.path.dentry->d_inode, dentry); +exit3: mnt_drop_write(nd.path.mnt); exit2: dput(dentry); @@ -2346,7 +2369,11 @@ asmlinkage long sys_symlinkat(const char __user *oldname, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_symlink(&nd.path, dentry, from); + if (error) + goto out_drop_write; error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(dentry); @@ -2443,7 +2470,11 @@ asmlinkage long sys_linkat(int olddfd, const char __user *oldname, error = mnt_want_write(nd.path.mnt); if (error) goto out_dput; + error = security_path_link(old_path.dentry, &nd.path, new_dentry); + if (error) + goto out_drop_write; 
error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); +out_drop_write: mnt_drop_write(nd.path.mnt); out_dput: dput(new_dentry); @@ -2679,8 +2710,13 @@ asmlinkage long sys_renameat(int olddfd, const char __user *oldname, error = mnt_want_write(oldnd.path.mnt); if (error) goto exit5; + error = security_path_rename(&oldnd.path, old_dentry, + &newnd.path, new_dentry); + if (error) + goto exit6; error = vfs_rename(old_dir->d_inode, old_dentry, new_dir->d_inode, new_dentry); +exit6: mnt_drop_write(oldnd.path.mnt); exit5: dput(new_dentry); diff --git a/fs/open.c b/fs/open.c index c0a426d5766..1cd7d40e999 100644 --- a/fs/open.c +++ b/fs/open.c @@ -272,6 +272,8 @@ static long do_sys_truncate(const char __user *pathname, loff_t length) goto put_write_and_out; error = locks_verify_truncate(inode, NULL, length); + if (!error) + error = security_path_truncate(&path, length, 0); if (!error) { DQUOT_INIT(inode); error = do_truncate(path.dentry, length, 0, NULL); @@ -328,6 +330,9 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) goto out_putf; error = locks_verify_truncate(inode, file, length); + if (!error) + error = security_path_truncate(&file->f_path, length, + ATTR_MTIME|ATTR_CTIME); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file); out_putf: diff --git a/include/linux/security.h b/include/linux/security.h index 3416cb85e77..b92b5e453f6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -335,17 +335,37 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dir contains the inode structure of the parent directory of the new link. * @new_dentry contains the dentry structure for the new link. * Return 0 if permission is granted. + * @path_link: + * Check permission before creating a new hard link to a file. + * @old_dentry contains the dentry structure for an existing link + * to the file. + * @new_dir contains the path structure of the parent directory of + * the new link. + * @new_dentry contains the dentry structure for the new link. + * Return 0 if permission is granted. * @inode_unlink: * Check the permission to remove a hard link to a file. * @dir contains the inode structure of parent directory of the file. * @dentry contains the dentry structure for file to be unlinked. * Return 0 if permission is granted. + * @path_unlink: + * Check the permission to remove a hard link to a file. + * @dir contains the path structure of parent directory of the file. + * @dentry contains the dentry structure for file to be unlinked. + * Return 0 if permission is granted. * @inode_symlink: * Check the permission to create a symbolic link to a file. * @dir contains the inode structure of parent directory of the symbolic link. * @dentry contains the dentry structure of the symbolic link. * @old_name contains the pathname of file. * Return 0 if permission is granted. + * @path_symlink: + * Check the permission to create a symbolic link to a file. + * @dir contains the path structure of parent directory of + * the symbolic link. + * @dentry contains the dentry structure of the symbolic link. + * @old_name contains the pathname of file. + * Return 0 if permission is granted. * @inode_mkdir: * Check permissions to create a new directory in the existing directory * associated with inode strcture @dir. @@ -353,11 +373,25 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure of new directory. * @mode contains the mode of new directory. 
* Return 0 if permission is granted. + * @path_mkdir: + * Check permissions to create a new directory in the existing directory + * associated with path strcture @path. + * @dir containst the path structure of parent of the directory + * to be created. + * @dentry contains the dentry structure of new directory. + * @mode contains the mode of new directory. + * Return 0 if permission is granted. * @inode_rmdir: * Check the permission to remove a directory. * @dir contains the inode structure of parent of the directory to be removed. * @dentry contains the dentry structure of directory to be removed. * Return 0 if permission is granted. + * @path_rmdir: + * Check the permission to remove a directory. + * @dir contains the path structure of parent of the directory to be + * removed. + * @dentry contains the dentry structure of directory to be removed. + * Return 0 if permission is granted. * @inode_mknod: * Check permissions when creating a special file (or a socket or a fifo * file created via the mknod system call). Note that if mknod operation @@ -368,6 +402,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @mode contains the mode of the new file. * @dev contains the device number. * Return 0 if permission is granted. + * @path_mknod: + * Check permissions when creating a file. Note that this hook is called + * even if mknod operation is being done for a regular file. + * @dir contains the path structure of parent of the new file. + * @dentry contains the dentry structure of the new file. + * @mode contains the mode of the new file. + * @dev contains the undecoded device number. Use new_decode_dev() to get + * the decoded device number. + * Return 0 if permission is granted. * @inode_rename: * Check for permission to rename a file or directory. * @old_dir contains the inode structure for parent of the old link. @@ -375,6 +418,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @new_dir contains the inode structure for parent of the new link. * @new_dentry contains the dentry structure of the new link. * Return 0 if permission is granted. + * @path_rename: + * Check for permission to rename a file or directory. + * @old_dir contains the path structure for parent of the old link. + * @old_dentry contains the dentry structure of the old link. + * @new_dir contains the path structure for parent of the new link. + * @new_dentry contains the dentry structure of the new link. + * Return 0 if permission is granted. * @inode_readlink: * Check the permission to read the symbolic link. * @dentry contains the dentry structure for the file link. @@ -403,6 +453,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @dentry contains the dentry structure for the file. * @attr is the iattr structure containing the new file attributes. * Return 0 if permission is granted. + * @path_truncate: + * Check permission before truncating a file. + * @path contains the path structure for the file. + * @length is the new length of the file. + * @time_attrs is the flags passed to do_truncate(). + * Return 0 if permission is granted. * @inode_getattr: * Check permission before obtaining file attributes. 
* @mnt is the vfsmount where the dentry was looked up @@ -1331,6 +1387,22 @@ struct security_operations { struct super_block *newsb); int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); +#ifdef CONFIG_SECURITY_PATH + int (*path_unlink) (struct path *dir, struct dentry *dentry); + int (*path_mkdir) (struct path *dir, struct dentry *dentry, int mode); + int (*path_rmdir) (struct path *dir, struct dentry *dentry); + int (*path_mknod) (struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); + int (*path_truncate) (struct path *path, loff_t length, + unsigned int time_attrs); + int (*path_symlink) (struct path *dir, struct dentry *dentry, + const char *old_name); + int (*path_link) (struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); + int (*path_rename) (struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#endif + int (*inode_alloc_security) (struct inode *inode); void (*inode_free_security) (struct inode *inode); int (*inode_init_security) (struct inode *inode, struct inode *dir, @@ -2705,6 +2777,71 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi #endif /* CONFIG_SECURITY_NETWORK_XFRM */ +#ifdef CONFIG_SECURITY_PATH +int security_path_unlink(struct path *dir, struct dentry *dentry); +int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode); +int security_path_rmdir(struct path *dir, struct dentry *dentry); +int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, + unsigned int dev); +int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs); +int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name); +int security_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry); +int security_path_rename(struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry); +#else /* CONFIG_SECURITY_PATH */ +static inline int security_path_unlink(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mkdir(struct path *dir, struct dentry *dentry, + int mode) +{ + return 0; +} + +static inline int security_path_rmdir(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static inline int security_path_mknod(struct path *dir, struct dentry *dentry, + int mode, unsigned int dev) +{ + return 0; +} + +static inline int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs) +{ + return 0; +} + +static inline int security_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static inline int security_path_link(struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static inline int security_path_rename(struct path *old_dir, + struct dentry *old_dentry, + struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} +#endif /* CONFIG_SECURITY_PATH */ + #ifdef CONFIG_KEYS #ifdef CONFIG_SECURITY diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c6250d0055d..d1b89820ab4 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -836,7 +836,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = mnt_want_write(nd.path.mnt); if (err) goto out_mknod_dput; + err = security_path_mknod(&nd.path, dentry, mode, 0); + if (err) + goto out_mknod_drop_write; err = vfs_mknod(nd.path.dentry->d_inode, 
dentry, mode, 0); +out_mknod_drop_write: mnt_drop_write(nd.path.mnt); if (err) goto out_mknod_dput; diff --git a/security/Kconfig b/security/Kconfig index d9f47ce7e20..9438535d7fd 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -81,6 +81,15 @@ config SECURITY_NETWORK_XFRM IPSec. If you are unsure how to answer this question, answer N. +config SECURITY_PATH + bool "Security hooks for pathname based access control" + depends on SECURITY + help + This enables the security hooks for pathname based access control. + If enabled, a security module can use these hooks to + implement pathname based access controls. + If you are unsure how to answer this question, answer N. + config SECURITY_FILE_CAPABILITIES bool "File POSIX Capabilities" default n diff --git a/security/capability.c b/security/capability.c index 2dce66fcb99..c545bd1300b 100644 --- a/security/capability.c +++ b/security/capability.c @@ -263,6 +263,53 @@ static void cap_inode_getsecid(const struct inode *inode, u32 *secid) *secid = 0; } +#ifdef CONFIG_SECURITY_PATH +static int cap_path_mknod(struct path *dir, struct dentry *dentry, int mode, + unsigned int dev) +{ + return 0; +} + +static int cap_path_mkdir(struct path *dir, struct dentry *dentry, int mode) +{ + return 0; +} + +static int cap_path_rmdir(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static int cap_path_unlink(struct path *dir, struct dentry *dentry) +{ + return 0; +} + +static int cap_path_symlink(struct path *dir, struct dentry *dentry, + const char *old_name) +{ + return 0; +} + +static int cap_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry) +{ + return 0; +} + +static int cap_path_rename(struct path *old_path, struct dentry *old_dentry, + struct path *new_path, struct dentry *new_dentry) +{ + return 0; +} + +static int cap_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs) +{ + return 0; +} +#endif + static int cap_file_permission(struct file *file, int mask) { return 0; @@ -883,6 +930,16 @@ void security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, inode_setsecurity); set_to_cap_if_null(ops, inode_listsecurity); set_to_cap_if_null(ops, inode_getsecid); +#ifdef CONFIG_SECURITY_PATH + set_to_cap_if_null(ops, path_mknod); + set_to_cap_if_null(ops, path_mkdir); + set_to_cap_if_null(ops, path_rmdir); + set_to_cap_if_null(ops, path_unlink); + set_to_cap_if_null(ops, path_symlink); + set_to_cap_if_null(ops, path_link); + set_to_cap_if_null(ops, path_rename); + set_to_cap_if_null(ops, path_truncate); +#endif set_to_cap_if_null(ops, file_permission); set_to_cap_if_null(ops, file_alloc_security); set_to_cap_if_null(ops, file_free_security); diff --git a/security/security.c b/security/security.c index d85dbb37c97..678d4d07b85 100644 --- a/security/security.c +++ b/security/security.c @@ -355,6 +355,72 @@ int security_inode_init_security(struct inode *inode, struct inode *dir, } EXPORT_SYMBOL(security_inode_init_security); +#ifdef CONFIG_SECURITY_PATH +int security_path_mknod(struct path *path, struct dentry *dentry, int mode, + unsigned int dev) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_mknod(path, dentry, mode, dev); +} +EXPORT_SYMBOL(security_path_mknod); + +int security_path_mkdir(struct path *path, struct dentry *dentry, int mode) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_mkdir(path, dentry, mode); +} + +int security_path_rmdir(struct path *path, struct dentry 
*dentry) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_rmdir(path, dentry); +} + +int security_path_unlink(struct path *path, struct dentry *dentry) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_unlink(path, dentry); +} + +int security_path_symlink(struct path *path, struct dentry *dentry, + const char *old_name) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_symlink(path, dentry, old_name); +} + +int security_path_link(struct dentry *old_dentry, struct path *new_dir, + struct dentry *new_dentry) +{ + if (unlikely(IS_PRIVATE(old_dentry->d_inode))) + return 0; + return security_ops->path_link(old_dentry, new_dir, new_dentry); +} + +int security_path_rename(struct path *old_dir, struct dentry *old_dentry, + struct path *new_dir, struct dentry *new_dentry) +{ + if (unlikely(IS_PRIVATE(old_dentry->d_inode) || + (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode)))) + return 0; + return security_ops->path_rename(old_dir, old_dentry, new_dir, + new_dentry); +} + +int security_path_truncate(struct path *path, loff_t length, + unsigned int time_attrs) +{ + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) + return 0; + return security_ops->path_truncate(path, length, time_attrs); +} +#endif + int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) { if (unlikely(IS_PRIVATE(dir))) -- cgit v1.2.3-70-g09d2 From e2b689d82c0394e5239a3557a217f19e2f47f1be Mon Sep 17 00:00:00 2001 From: Richard Kennedy Date: Thu, 4 Dec 2008 11:17:47 +0000 Subject: fs: reorder struct inotify_device on 64bits to remove padding Reorder struct inotify_device to remove 8 bytes of padding on 64bit builds, reducing size to 128 bytes . Therefore allocating from a smaller slab & using one fewer cachelines. Signed-off-by: Richard Kennedy ---- Hi, patch against 2.6.28-rc7. built & tested on AMDX2 desktop. I've not been able to send this to the listed inotify maintainers, I just get mail failures. So I guessed filesystem was the best home for it, hope that's ok. regards Richard Signed-off-by: Al Viro --- fs/inotify_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/inotify_user.c b/fs/inotify_user.c index e2425bbd871..400f8064a54 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c @@ -76,10 +76,10 @@ struct inotify_device { struct mutex ev_mutex; /* protects event queue */ struct mutex up_mutex; /* synchronizes watch updates */ struct list_head events; /* list of queued events */ - atomic_t count; /* reference count */ struct user_struct *user; /* user who opened this dev */ struct inotify_handle *ih; /* inotify handle */ struct fasync_struct *fa; /* async notification */ + atomic_t count; /* reference count */ unsigned int queue_size; /* size of the queue (bytes) */ unsigned int event_count; /* number of pending events */ unsigned int max_events; /* maximum number of events */ -- cgit v1.2.3-70-g09d2 From c2452f32786159ed85f0e4b21fec09258f822fc8 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Mon, 1 Dec 2008 09:33:43 +0100 Subject: shrink struct dentry struct dentry is one of the most critical structures in the kernel. So it's sad to see it going neglected. With CONFIG_PROFILING turned on (which is probably the common case at least for distros and kernel developers), sizeof(struct dcache) == 208 here (64-bit). This gives 19 objects per slab. 
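
[Editorial aside: a quick check of the slab arithmetic quoted here and further down, assuming 4 KiB pages and ignoring per-slab metadata.]

    #include <stdio.h>

    int main(void)
    {
            printf("208-byte dentry: %d per page\n", 4096 / 208);   /* 19 */
            printf("192-byte dentry: %d per page\n", 4096 / 192);   /* 21 */
            return 0;
    }
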
I packed d_mounted into a hole, and took another 4 bytes off the inline name length to take the padding out from the end of the structure. This shinks it to 200 bytes. I could have gone the other way and increased the length to 40, but I'm aiming for a magic number, read on... I then got rid of the d_cookie pointer. This shrinks it to 192 bytes. Rant: why was this ever a good idea? The cookie system should increase its hash size or use a tree or something if lookups are a problem. Also the "fast dcookie lookups" in oprofile should be moved into the dcookie code -- how can oprofile possibly care about the dcookie_mutex? It gets dropped after get_dcookie() returns so it can't be providing any sort of protection. At 192 bytes, 21 objects fit into a 4K page, saving about 3MB on my system with ~140 000 entries allocated. 192 is also a multiple of 64, so we get nice cacheline alignment on 64 and 32 byte line systems -- any given dentry will now require 3 cachelines to touch all fields wheras previously it would require 4. I know the inline name size was chosen quite carefully, however with the reduction in cacheline footprint, it should actually be just about as fast to do a name lookup for a 36 character name as it was before the patch (and faster for other sizes). The memory footprint savings for names which are <= 32 or > 36 bytes long should more than make up for the memory cost for 33-36 byte names. Performance is a feature... Signed-off-by: Al Viro --- arch/powerpc/oprofile/cell/spu_task_sync.c | 2 +- drivers/oprofile/buffer_sync.c | 2 +- fs/dcache.c | 4 ---- fs/dcookies.c | 28 +++++++++++++++++++--------- include/linux/dcache.h | 21 ++++++++++++++------- 5 files changed, 35 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c index 2949126d28d..6b793aeda72 100644 --- a/arch/powerpc/oprofile/cell/spu_task_sync.c +++ b/arch/powerpc/oprofile/cell/spu_task_sync.c @@ -297,7 +297,7 @@ static inline unsigned long fast_get_dcookie(struct path *path) { unsigned long cookie; - if (path->dentry->d_cookie) + if (path->dentry->d_flags & DCACHE_COOKIE) return (unsigned long)path->dentry; get_dcookie(path, &cookie); return cookie; diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index 737bd948482..65e8294a9e2 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -200,7 +200,7 @@ static inline unsigned long fast_get_dcookie(struct path *path) { unsigned long cookie; - if (path->dentry->d_cookie) + if (path->dentry->d_flags & DCACHE_COOKIE) return (unsigned long)path->dentry; get_dcookie(path, &cookie); return cookie; diff --git a/fs/dcache.c b/fs/dcache.c index a1d86c7f3e6..fd244c7a7cc 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -34,7 +34,6 @@ #include #include "internal.h" - int sysctl_vfs_cache_pressure __read_mostly = 100; EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); @@ -948,9 +947,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) dentry->d_op = NULL; dentry->d_fsdata = NULL; dentry->d_mounted = 0; -#ifdef CONFIG_PROFILING - dentry->d_cookie = NULL; -#endif INIT_HLIST_NODE(&dentry->d_hash); INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); diff --git a/fs/dcookies.c b/fs/dcookies.c index 855d4b1d619..180e9fec4ad 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -93,10 +93,15 @@ static struct dcookie_struct *alloc_dcookie(struct path *path) { struct dcookie_struct *dcs = kmem_cache_alloc(dcookie_cache, 
GFP_KERNEL); + struct dentry *d; if (!dcs) return NULL; - path->dentry->d_cookie = dcs; + d = path->dentry; + spin_lock(&d->d_lock); + d->d_flags |= DCACHE_COOKIE; + spin_unlock(&d->d_lock); + dcs->path = *path; path_get(path); hash_dcookie(dcs); @@ -119,14 +124,14 @@ int get_dcookie(struct path *path, unsigned long *cookie) goto out; } - dcs = path->dentry->d_cookie; - - if (!dcs) + if (path->dentry->d_flags & DCACHE_COOKIE) { + dcs = find_dcookie((unsigned long)path->dentry); + } else { dcs = alloc_dcookie(path); - - if (!dcs) { - err = -ENOMEM; - goto out; + if (!dcs) { + err = -ENOMEM; + goto out; + } } *cookie = dcookie_value(dcs); @@ -251,7 +256,12 @@ out_kmem: static void free_dcookie(struct dcookie_struct * dcs) { - dcs->path.dentry->d_cookie = NULL; + struct dentry *d = dcs->path.dentry; + + spin_lock(&d->d_lock); + d->d_flags &= ~DCACHE_COOKIE; + spin_unlock(&d->d_lock); + path_put(&dcs->path); kmem_cache_free(dcookie_cache, dcs); } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a37359d0bad..c66d22487bf 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -75,14 +75,22 @@ full_name_hash(const unsigned char *name, unsigned int len) return end_name_hash(hash); } -struct dcookie_struct; - -#define DNAME_INLINE_LEN_MIN 36 +/* + * Try to keep struct dentry aligned on 64 byte cachelines (this will + * give reasonable cacheline footprint with larger lines without the + * large memory footprint increase). + */ +#ifdef CONFIG_64BIT +#define DNAME_INLINE_LEN_MIN 32 /* 192 bytes */ +#else +#define DNAME_INLINE_LEN_MIN 40 /* 128 bytes */ +#endif struct dentry { atomic_t d_count; unsigned int d_flags; /* protected by d_lock */ spinlock_t d_lock; /* per dentry lock */ + int d_mounted; struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ /* @@ -107,10 +115,7 @@ struct dentry { struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ void *d_fsdata; /* fs-specific data */ -#ifdef CONFIG_PROFILING - struct dcookie_struct *d_cookie; /* cookie, if any */ -#endif - int d_mounted; + unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */ }; @@ -177,6 +182,8 @@ d_iput: no no no yes #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ +#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ + extern spinlock_t dcache_lock; extern seqlock_t rename_lock; -- cgit v1.2.3-70-g09d2 From 5cc4a0341a1295ea56b2e62eb70d96d8fdb94ded Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 1 Dec 2008 14:34:51 -0800 Subject: fs/namespace.c: drop code after return The extra semicolon serves no purpose. 
Signed-off-by: Julia Lawall Reviewed-by: Richard Genoud Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 1c09cab8f7c..a40685d800a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1990,7 +1990,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, if (!new_ns->root) { up_write(&namespace_sem); kfree(new_ns); - return ERR_PTR(-ENOMEM);; + return ERR_PTR(-ENOMEM); } spin_lock(&vfsmount_lock); list_add_tail(&new_ns->list, &new_ns->root->mnt_list); -- cgit v1.2.3-70-g09d2 From a17d5232de7b53d34229de79ec22f4bb04adb7e4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:10 +0000 Subject: eCryptfs: check readlink result was not an error before using it The result from readlink is being used to index into the link name buffer without checking whether it is a valid length. If readlink returns an error this will fault or cause memory corruption. Cc: Tyler Hicks Cc: Dustin Kirkland Cc: ecryptfs-devel@lists.launchpad.net Signed-off-by: Duane Griffin Acked-by: Michael Halcrow Acked-by: Tyler Hicks Signed-off-by: Al Viro --- fs/ecryptfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 89209f00f9c..5e78fc17988 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -673,10 +673,11 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd) ecryptfs_printk(KERN_DEBUG, "Calling readlink w/ " "dentry->d_name.name = [%s]\n", dentry->d_name.name); rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); - buf[rc] = '\0'; set_fs(old_fs); if (rc < 0) goto out_free; + else + buf[rc] = '\0'; rc = 0; nd_set_link(nd, buf); goto out; -- cgit v1.2.3-70-g09d2 From ebd09abbd9699f328165aee50a070403fbf55a37 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:12 +0000 Subject: vfs: ensure page symlinks are NUL-terminated On-disk data corruption could cause a page link to have its i_size set to PAGE_SIZE (or a multiple thereof) and its contents all non-NUL. NUL-terminate the link name to ensure this doesn't cause further problems for the kernel. Cc: Al Viro Cc: Andrew Morton Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/namei.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index ab441af4196..9ed5e2818f8 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2786,13 +2786,16 @@ int vfs_follow_link(struct nameidata *nd, const char *link) /* get the link contents into pagecache */ static char *page_getlink(struct dentry * dentry, struct page **ppage) { - struct page * page; + char *kaddr; + struct page *page; struct address_space *mapping = dentry->d_inode->i_mapping; page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; *ppage = page; - return kmap(page); + kaddr = kmap(page); + nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1); + return kaddr; } int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) -- cgit v1.2.3-70-g09d2 From 8d6d0c4da2dbbe0a69fea3692146af39f139f8b4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:13 +0000 Subject: ext2: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. 
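
[Editorial note: this excerpt does not show the nd_terminate_link() helper that the whole symlink series calls. A minimal sketch consistent with how it is used here; the real definition lives in include/linux/namei.h and may differ in detail.]

    /*
     * Clamp the terminator at @maxlen so a corrupted i_size cannot write
     * the NUL past the end of the inline buffer (min() from <linux/kernel.h>).
     */
    static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
    {
            ((char *)name)[min(len, maxlen)] = '\0';
    }

[Callers pass the buffer size minus one as @maxlen, e.g. sizeof(ei->i_data) - 1 in the hunk below, so even a bogus i_size equal to the buffer size still lands the terminator inside the array.]
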
Cc: Andrew Morton Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext2/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 7658b33e265..02b39a5deb7 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "ext2.h" #include "acl.h" #include "xip.h" @@ -1286,9 +1287,11 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) else inode->i_mapping->a_ops = &ext2_aops; } else if (S_ISLNK(inode->i_mode)) { - if (ext2_inode_is_fast_symlink(inode)) + if (ext2_inode_is_fast_symlink(inode)) { inode->i_op = &ext2_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext2_symlink_inode_operations; if (test_opt(inode->i_sb, NOBH)) inode->i_mapping->a_ops = &ext2_nobh_aops; -- cgit v1.2.3-70-g09d2 From b5ed3112b5f74c8ec1c7aa03a76c596635e85197 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:14 +0000 Subject: ext3: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Andrew Morton Cc: Stephen Tweedie Cc: linux-ext4@vger.kernel.org Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext3/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index f8424ad8997..c4bdccf976b 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "xattr.h" #include "acl.h" @@ -2817,9 +2818,11 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino) inode->i_op = &ext3_dir_inode_operations; inode->i_fop = &ext3_dir_operations; } else if (S_ISLNK(inode->i_mode)) { - if (ext3_inode_is_fast_symlink(inode)) + if (ext3_inode_is_fast_symlink(inode)) { inode->i_op = &ext3_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext3_symlink_inode_operations; ext3_set_aops(inode); } -- cgit v1.2.3-70-g09d2 From e83c1397cafc4e44f868289db5e417463c0d09a4 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:15 +0000 Subject: ext4: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. 
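To see why the missing terminator is dangerous, recall how fast symlinks are resolved: ->follow_link() hands the inline i_data area straight to nd_set_link(), and the generic path walk then runs strlen() and copies over it. Roughly, paraphrasing fs/ext2/symlink.c (not part of this patch):

static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ext2_inode_info *ei = EXT2_I(dentry->d_inode);

	/* the target is the raw inline block-pointer area of the inode;
	 * without a guaranteed NUL, the walk would read past it */
	nd_set_link(nd, (char *)ei->i_data);
	return NULL;
}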
Cc: Andrew Morton Cc: Theodore Ts'o Cc: adilger@sun.com Cc: linux-ext4@vger.kernel.org Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/ext4/inode.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index be21a5ae33c..7c3325e0b00 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include "ext4_jbd2.h" @@ -4164,9 +4165,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) inode->i_op = &ext4_dir_inode_operations; inode->i_fop = &ext4_dir_operations; } else if (S_ISLNK(inode->i_mode)) { - if (ext4_inode_is_fast_symlink(inode)) + if (ext4_inode_is_fast_symlink(inode)) { inode->i_op = &ext4_fast_symlink_inode_operations; - else { + nd_terminate_link(ei->i_data, inode->i_size, + sizeof(ei->i_data) - 1); + } else { inode->i_op = &ext4_symlink_inode_operations; ext4_set_aops(inode); } -- cgit v1.2.3-70-g09d2 From 21acaf8e8da00235be59a3e489d5fa2a8721cafc Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:16 +0000 Subject: sysv: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Christoph Hellwig Cc: Al Viro Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/sysv/inode.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index df0d435baa4..3d81bf58dae 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "sysv.h" @@ -163,8 +164,11 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) if (inode->i_blocks) { inode->i_op = &sysv_symlink_inode_operations; inode->i_mapping->a_ops = &sysv_aops; - } else + } else { inode->i_op = &sysv_fast_symlink_inode_operations; + nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size, + sizeof(SYSV_I(inode)->i_data) - 1); + } } else init_special_inode(inode, inode->i_mode, rdev); } -- cgit v1.2.3-70-g09d2 From a63d0ff31a136bdf52350c4e6c2929eaf47ea2b2 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:17 +0000 Subject: freevxfs: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Christoph Hellwig Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/freevxfs/vxfs_inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index 9f3f2ceb73f..03a6ea5e99f 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c @@ -325,8 +325,10 @@ vxfs_iget(struct super_block *sbp, ino_t ino) if (!VXFS_ISIMMED(vip)) { ip->i_op = &page_symlink_inode_operations; ip->i_mapping->a_ops = &vxfs_aops; - } else + } else { ip->i_op = &vxfs_immed_symlink_iops; + vip->vii_immed.vi_immed[ip->i_size] = '\0'; + } } else init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev)); -- cgit v1.2.3-70-g09d2 From 7df5fa06de89a4ac311957e0cb9c1d87552b4325 Mon Sep 17 00:00:00 2001 From: Duane Griffin Date: Fri, 19 Dec 2008 20:47:18 +0000 Subject: befs: ensure fast symlinks are NUL-terminated Ensure fast symlink targets are NUL-terminated, even if corrupted on-disk. Cc: Sergey S. 
Kostyliov Signed-off-by: Duane Griffin Signed-off-by: Al Viro --- fs/befs/linuxvfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index b6dfee37c7b..d06cb023ad0 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -378,7 +378,8 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) inode->i_size = 0; inode->i_blocks = befs_sb->block_size / VFS_BLOCK_SIZE; strncpy(befs_ino->i_data.symlink, raw_inode->data.symlink, - BEFS_SYMLINK_LEN); + BEFS_SYMLINK_LEN - 1); + befs_ino->i_data.symlink[BEFS_SYMLINK_LEN - 1] = '\0'; } else { int num_blks; @@ -477,6 +478,8 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd) kfree(link); befs_error(sb, "Failed to read entire long symlink"); link = ERR_PTR(-EIO); + } else { + link[len - 1] = '\0'; } } else { link = befs_ino->i_data.symlink; -- cgit v1.2.3-70-g09d2 From dc711ca35f9d95a1eec02118e0c298b5e3068315 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 3 Nov 2008 15:03:50 -0500 Subject: fix switch_names() breakage in short-to-short case We want ->name.len to match the resulting name on *both* source and target Signed-off-by: Al Viro --- fs/dcache.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index fd244c7a7cc..eeafc14c2a1 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1616,8 +1616,11 @@ static void switch_names(struct dentry *dentry, struct dentry *target) */ memcpy(dentry->d_iname, target->d_name.name, target->d_name.len + 1); + dentry->d_name.len = target->d_name.len; + return; } } + do_switch(dentry->d_name.len, target->d_name.len); } /* @@ -1677,7 +1680,6 @@ already_unhashed: /* Switch the names.. */ switch_names(dentry, target); - do_switch(dentry->d_name.len, target->d_name.len); do_switch(dentry->d_name.hash, target->d_name.hash); /* ... and switch the parents */ @@ -1787,7 +1789,6 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) struct dentry *dparent, *aparent; switch_names(dentry, anon); - do_switch(dentry->d_name.len, anon->d_name.len); do_switch(dentry->d_name.hash, anon->d_name.hash); dparent = dentry->d_parent; -- cgit v1.2.3-70-g09d2 From be42c4c433c2c0d3f1583c08908fead00d36d222 Mon Sep 17 00:00:00 2001 From: Zhaolei Date: Mon, 1 Dec 2008 14:34:58 -0800 Subject: correct wrong function name of d_put in kernel document and source comment no function named d_put(), it should be dput(). Impact: fix document and comment, no functionality changed Signed-off-by: Zhao Lei Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- Documentation/filesystems/vfs.txt | 2 +- fs/dcache.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 5579bda58a6..041cb771d50 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -931,7 +931,7 @@ manipulate dentries: d_lookup: look up a dentry given its parent and path name component It looks up the child of that given name from the dcache hash table. If it is found, the reference count is incremented - and the dentry is returned. The caller must use d_put() + and the dentry is returned. The caller must use dput() to free the dentry when it finishes using it. 
For further information on dentry locking, please refer to the document diff --git a/fs/dcache.c b/fs/dcache.c index eeafc14c2a1..c231a639c2a 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1332,7 +1332,7 @@ err_out: * * Searches the children of the parent dentry for the name in question. If * the dentry is found its reference count is incremented and the dentry - * is returned. The caller must use d_put to free the entry when it has + * is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned on failure. * * __d_lookup is dcache_lock free. The hash list is protected using RCU. -- cgit v1.2.3-70-g09d2 From 52afeefb9dac9287429642189996426a2bfd6a25 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 1 Dec 2008 14:35:00 -0800 Subject: expand some comments (d_path / seq_path) Explain that you really need to use the return value of d_path rather than the buffer you passed into it. Also fix the comment for seq_path(), the function arguments changed recently but the comment hadn't been updated in sync. Signed-off-by: Arjan van de Ven Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/dcache.c | 8 ++++++-- fs/seq_file.c | 10 ++++++++-- 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index c231a639c2a..bdb3f50248a 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1908,7 +1908,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * - * Returns the buffer or an error code if the path was too long. + * Returns a pointer into the buffer or an error code if the + * path was too long. * * "buflen" should be positive. Caller holds the dcache_lock. * @@ -1984,7 +1985,10 @@ Elong: * Convert a dentry into an ASCII path name. If the entry has been deleted * the string " (deleted)" is appended. Note that this is ambiguous. * - * Returns the buffer or an error code if the path was too long. + * Returns a pointer into the buffer or an error code if the path was + * too long. Note: Callers should use the returned pointer, not the passed + * in buffer, to use the name! The implementation often starts at an offset + * into the buffer, and may leave 0 bytes at the start. * * "buflen" should be positive. */ diff --git a/fs/seq_file.c b/fs/seq_file.c index 16c211558c2..99d8b8cfc9b 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -389,8 +389,14 @@ char *mangle_path(char *s, char *p, char *esc) } EXPORT_SYMBOL(mangle_path); -/* - * return the absolute path of 'dentry' residing in mount 'mnt'. +/** + * seq_path - seq_file interface to print a pathname + * @m: the seq_file handle + * @path: the struct path to print + * @esc: set of characters to escape in the output + * + * return the absolute path of 'path', as represented by the + * dentry / mnt pair in the path parameter. */ int seq_path(struct seq_file *m, struct path *path, char *esc) { -- cgit v1.2.3-70-g09d2 From 66f221875dc10813aa2f06c83ad60d0eb1356406 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 5 Nov 2008 15:04:29 +0100 Subject: remove incorrect comment in inode_permission We now pass on all MAY_ flags to the filesystems permission routines, so remove the comment stating the contrary. 
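In other words, a filesystem ->permission() method can now rely on seeing bits such as MAY_APPEND in the mask. A hypothetical handler, illustrative only (examplefs_check_append() is a made-up helper, not from this patch):

static int examplefs_permission(struct inode *inode, int mask)
{
	/* the full MAY_* mask is passed through by inode_permission() now */
	if (mask & MAY_APPEND)
		return examplefs_check_append(inode, mask);

	return generic_permission(inode, mask, NULL);
}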
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9ed5e2818f8..631cfdd45c6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -247,7 +247,6 @@ int inode_permission(struct inode *inode, int mask) return -EACCES; } - /* Ordinary permission routines do not understand MAY_APPEND. */ if (inode->i_op && inode->i_op->permission) retval = inode->i_op->permission(inode, mask); else -- cgit v1.2.3-70-g09d2 From b4091d5f6fde28ab762e1094a1a26d81f3badfa5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 5 Nov 2008 15:07:21 +0100 Subject: kill walk_init_root walk_init_root is a tiny helper that is marked __always_inline, has just one caller and an unused argument. Just merge it into the caller. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 631cfdd45c6..d4d0b59ed2c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -526,18 +526,6 @@ out_unlock: return result; } -/* SMP-safe */ -static __always_inline void -walk_init_root(const char *name, struct nameidata *nd) -{ - struct fs_struct *fs = current->fs; - - read_lock(&fs->lock); - nd->path = fs->root; - path_get(&fs->root); - read_unlock(&fs->lock); -} - /* * Wrapper to retry pathname resolution whenever the underlying * file system returns an ESTALE. @@ -575,9 +563,16 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l goto fail; if (*link == '/') { + struct fs_struct *fs = current->fs; + path_put(&nd->path); - walk_init_root(link, nd); + + read_lock(&fs->lock); + nd->path = fs->root; + path_get(&fs->root); + read_unlock(&fs->lock); } + res = link_path_walk(link, nd); if (nd->depth || res || nd->last_type!=LAST_NORM) return res; -- cgit v1.2.3-70-g09d2 From 3fb64190aa3c23c10e6e9fd0124ac030115c99bf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Oct 2008 09:58:10 +0200 Subject: pass a struct path * to may_open No need for the nameidata in may_open - a struct path is enough. 
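With the nameidata gone, any code that holds a struct path can ask the question directly. An illustrative caller, not taken from the patch (kern_path() is exported by this tree, the path and mode are arbitrary):

struct path path;
int err;

err = kern_path("/dev/console", LOOKUP_FOLLOW, &path);
if (!err) {
	/* check read access and general openability of the object */
	err = may_open(&path, MAY_READ, O_RDONLY);
	path_put(&path);
}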
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/namei.c | 14 +++++++------- fs/nfsctl.c | 5 +++-- include/linux/fs.h | 2 +- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index d4d0b59ed2c..5cc0dc95a7a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1487,9 +1487,9 @@ int vfs_create(struct inode *dir, struct dentry *dentry, int mode, return error; } -int may_open(struct nameidata *nd, int acc_mode, int flag) +int may_open(struct path *path, int acc_mode, int flag) { - struct dentry *dentry = nd->path.dentry; + struct dentry *dentry = path->dentry; struct inode *inode = dentry->d_inode; int error; @@ -1510,13 +1510,13 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { flag &= ~O_TRUNC; } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { - if (nd->path.mnt->mnt_flags & MNT_NODEV) + if (path->mnt->mnt_flags & MNT_NODEV) return -EACCES; flag &= ~O_TRUNC; } - error = vfs_permission(nd, acc_mode); + error = inode_permission(inode, acc_mode); if (error) return error; /* @@ -1551,7 +1551,7 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) */ error = locks_verify_locked(inode); if (!error) - error = security_path_truncate(&nd->path, 0, + error = security_path_truncate(path, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN); if (!error) { DQUOT_INIT(inode); @@ -1594,7 +1594,7 @@ out_unlock: if (error) return error; /* Don't check for write permission, don't truncate */ - return may_open(nd, 0, flag & ~O_TRUNC); + return may_open(&nd->path, 0, flag & ~O_TRUNC); } /* @@ -1780,7 +1780,7 @@ ok: if (error) goto exit; } - error = may_open(&nd, acc_mode, flag); + error = may_open(&nd.path, acc_mode, flag); if (error) { if (will_write) mnt_drop_write(nd.path.mnt); diff --git a/fs/nfsctl.c b/fs/nfsctl.c index b1acbd6ab6f..b27451909df 100644 --- a/fs/nfsctl.c +++ b/fs/nfsctl.c @@ -38,9 +38,10 @@ static struct file *do_open(char *name, int flags) return ERR_PTR(error); if (flags == O_RDWR) - error = may_open(&nd,MAY_READ|MAY_WRITE,FMODE_READ|FMODE_WRITE); + error = may_open(&nd.path, MAY_READ|MAY_WRITE, + FMODE_READ|FMODE_WRITE); else - error = may_open(&nd, MAY_WRITE, FMODE_WRITE); + error = may_open(&nd.path, MAY_WRITE, FMODE_WRITE); if (!error) return dentry_open(nd.path.dentry, nd.path.mnt, flags, diff --git a/include/linux/fs.h b/include/linux/fs.h index c5e4c5c7403..3468df5a06e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1869,7 +1869,7 @@ extern void free_write_pipe(struct file *); extern struct file *do_filp_open(int dfd, const char *pathname, int open_flag, int mode); -extern int may_open(struct nameidata *, int, int); +extern int may_open(struct path *, int, int); extern int kernel_read(struct file *, unsigned long, char *, unsigned long); extern struct file * open_exec(const char *); -- cgit v1.2.3-70-g09d2 From cb23beb55100171646e69e248fb45f10db6e99a4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Oct 2008 09:59:29 +0200 Subject: kill vfs_permission With all the nameidata removal there's no point anymore for this helper. Of the three callers left two will go away with the next lookup series anyway. Also add proper kerneldoc to inode_permission as this is the main permission check routine now. 
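For orientation, a rough sketch of the permission-check entry points left after this change (masks here are illustrative):

/* mode bits (plus optional ACL callback), no ->permission() hook: */
err = generic_permission(inode, MAY_READ, NULL);

/* the main entry point: full check on an inode, including ->permission(): */
err = inode_permission(inode, MAY_READ | MAY_WRITE);

/* wrapper for an already-open file; the new kerneldoc steers new code away from it: */
err = file_permission(file, MAY_READ);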
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/exec.c | 5 +++-- fs/namei.c | 31 +++++++++++++------------------ include/linux/fs.h | 1 - 3 files changed, 16 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 02d2e120542..dfbf7009fbe 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -127,7 +127,8 @@ asmlinkage long sys_uselib(const char __user * library) if (nd.path.mnt->mnt_flags & MNT_NOEXEC) goto exit; - error = vfs_permission(&nd, MAY_READ | MAY_EXEC | MAY_OPEN); + error = inode_permission(nd.path.dentry->d_inode, + MAY_READ | MAY_EXEC | MAY_OPEN); if (error) goto exit; @@ -680,7 +681,7 @@ struct file *open_exec(const char *name) if (nd.path.mnt->mnt_flags & MNT_NOEXEC) goto out_path_put; - err = vfs_permission(&nd, MAY_EXEC | MAY_OPEN); + err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN); if (err) goto out_path_put; diff --git a/fs/namei.c b/fs/namei.c index 5cc0dc95a7a..3f88e043d45 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -226,6 +226,16 @@ int generic_permission(struct inode *inode, int mask, return -EACCES; } +/** + * inode_permission - check for access rights to a given inode + * @inode: inode to check permission on + * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) + * + * Used to check for read/write/execute permissions on an inode. + * We use "fsuid" for this, letting us set arbitrary permissions + * for filesystem access without changing the "normal" uids which + * are used for other things. + */ int inode_permission(struct inode *inode, int mask) { int retval; @@ -263,21 +273,6 @@ int inode_permission(struct inode *inode, int mask) mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND)); } -/** - * vfs_permission - check for access rights to a given path - * @nd: lookup result that describes the path - * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * - * Used to check for read/write/execute permissions on a path. - * We use "fsuid" for this, letting us set arbitrary permissions - * for filesystem access without changing the "normal" uids which - * are used for other things. - */ -int vfs_permission(struct nameidata *nd, int mask) -{ - return inode_permission(nd->path.dentry->d_inode, mask); -} - /** * file_permission - check for additional access rights to a given file * @file: file to check access rights for @@ -288,7 +283,7 @@ int vfs_permission(struct nameidata *nd, int mask) * * Note: * Do not use this function in new code. All access checks should - * be done using vfs_permission(). + * be done using inode_permission(). */ int file_permission(struct file *file, int mask) { @@ -853,7 +848,8 @@ static int __link_path_walk(const char *name, struct nameidata *nd) nd->flags |= LOOKUP_CONTINUE; err = exec_permission_lite(inode); if (err == -EAGAIN) - err = vfs_permission(nd, MAY_EXEC); + err = inode_permission(nd->path.dentry->d_inode, + MAY_EXEC); if (err) break; @@ -2882,7 +2878,6 @@ EXPORT_SYMBOL(path_lookup); EXPORT_SYMBOL(kern_path); EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(inode_permission); -EXPORT_SYMBOL(vfs_permission); EXPORT_SYMBOL(file_permission); EXPORT_SYMBOL(unlock_rename); EXPORT_SYMBOL(vfs_create); diff --git a/include/linux/fs.h b/include/linux/fs.h index 3468df5a06e..fd615986a41 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1212,7 +1212,6 @@ extern void unlock_super(struct super_block *); /* * VFS helper functions.. 
*/ -extern int vfs_permission(struct nameidata *, int); extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *); extern int vfs_mkdir(struct inode *, struct dentry *, int); extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t); -- cgit v1.2.3-70-g09d2 From 18d8fda7c3c9439be04d7ea2e82da2513b121acb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 26 Dec 2008 00:35:37 -0500 Subject: take init_fs to saner place Signed-off-by: Al Viro --- arch/alpha/kernel/init_task.c | 1 - arch/arm/kernel/init_task.c | 1 - arch/avr32/kernel/init_task.c | 1 - arch/blackfin/kernel/init_task.c | 1 - arch/cris/kernel/process.c | 1 - arch/frv/kernel/init_task.c | 1 - arch/h8300/kernel/init_task.c | 1 - arch/ia64/kernel/init_task.c | 1 - arch/m32r/kernel/init_task.c | 1 - arch/m68k/kernel/process.c | 1 - arch/m68knommu/kernel/init_task.c | 1 - arch/mips/kernel/init_task.c | 1 - arch/mn10300/kernel/init_task.c | 1 - arch/parisc/kernel/init_task.c | 1 - arch/powerpc/kernel/init_task.c | 1 - arch/s390/kernel/init_task.c | 1 - arch/sh/kernel/init_task.c | 1 - arch/sparc/kernel/init_task.c | 1 - arch/um/kernel/init_task.c | 1 - arch/x86/kernel/init_task.c | 1 - arch/xtensa/kernel/init_task.c | 1 - fs/namei.c | 7 +++++++ include/linux/fs_struct.h | 6 ------ include/linux/init_task.h | 1 + 24 files changed, 8 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c index 1f762189fa6..c2938e574a4 100644 --- a/arch/alpha/kernel/init_task.c +++ b/arch/alpha/kernel/init_task.c @@ -8,7 +8,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c index 0bbf8062539..e859af34946 100644 --- a/arch/arm/kernel/init_task.c +++ b/arch/arm/kernel/init_task.c @@ -12,7 +12,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/avr32/kernel/init_task.c b/arch/avr32/kernel/init_task.c index 44058469c6e..993d56ee3cf 100644 --- a/arch/avr32/kernel/init_task.c +++ b/arch/avr32/kernel/init_task.c @@ -13,7 +13,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 6bdba7b2110..2c228c02097 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c @@ -33,7 +33,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c index 5933656db5a..60816e87645 100644 --- a/arch/cris/kernel/process.c +++ b/arch/cris/kernel/process.c @@ -37,7 +37,6 @@ * setup. 
*/ -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/frv/kernel/init_task.c b/arch/frv/kernel/init_task.c index e2198815b63..29429a8b7f6 100644 --- a/arch/frv/kernel/init_task.c +++ b/arch/frv/kernel/init_task.c @@ -10,7 +10,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/h8300/kernel/init_task.c b/arch/h8300/kernel/init_task.c index 93a4899e46c..cb5dc552da9 100644 --- a/arch/h8300/kernel/init_task.c +++ b/arch/h8300/kernel/init_task.c @@ -12,7 +12,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index 9d7e1c66faf..5b0e830c6f3 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -17,7 +17,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m32r/kernel/init_task.c b/arch/m32r/kernel/init_task.c index 0d658dbb676..016885c6f26 100644 --- a/arch/m32r/kernel/init_task.c +++ b/arch/m32r/kernel/init_task.c @@ -11,7 +11,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 3042c2bc8c5..632ce016014 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -40,7 +40,6 @@ * alignment requirements and potentially different initial * setup. 
*/ -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/m68knommu/kernel/init_task.c b/arch/m68knommu/kernel/init_task.c index 344c01aede0..fe282de1d59 100644 --- a/arch/m68knommu/kernel/init_task.c +++ b/arch/m68knommu/kernel/init_task.c @@ -12,7 +12,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/mips/kernel/init_task.c b/arch/mips/kernel/init_task.c index d72487ad7c1..149cd914526 100644 --- a/arch/mips/kernel/init_task.c +++ b/arch/mips/kernel/init_task.c @@ -9,7 +9,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/mn10300/kernel/init_task.c b/arch/mn10300/kernel/init_task.c index af16f6e5c91..5ac3566f8c9 100644 --- a/arch/mn10300/kernel/init_task.c +++ b/arch/mn10300/kernel/init_task.c @@ -18,7 +18,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/parisc/kernel/init_task.c b/arch/parisc/kernel/init_task.c index f5941c08655..1e25a45d64c 100644 --- a/arch/parisc/kernel/init_task.c +++ b/arch/parisc/kernel/init_task.c @@ -34,7 +34,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 4c85b8d5647..688b329800b 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c @@ -7,7 +7,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c index e8071684361..7db95c0b869 100644 --- a/arch/s390/kernel/init_task.c +++ b/arch/s390/kernel/init_task.c @@ -16,7 +16,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c index b151a25cb14..80c35ff71d5 100644 --- a/arch/sh/kernel/init_task.c +++ b/arch/sh/kernel/init_task.c @@ -7,7 +7,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct pt_regs fake_swapper_regs; diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c index 62126e4cec5..f28cb8278e9 100644 --- a/arch/sparc/kernel/init_task.c 
+++ b/arch/sparc/kernel/init_task.c @@ -8,7 +8,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/um/kernel/init_task.c b/arch/um/kernel/init_task.c index 910eda8fca1..806d381947b 100644 --- a/arch/um/kernel/init_task.c +++ b/arch/um/kernel/init_task.c @@ -10,7 +10,6 @@ #include "linux/mqueue.h" #include "asm/uaccess.h" -static struct fs_struct init_fs = INIT_FS; struct mm_struct init_mm = INIT_MM(init_mm); static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c index d39918076bb..df3bf269bea 100644 --- a/arch/x86/kernel/init_task.c +++ b/arch/x86/kernel/init_task.c @@ -10,7 +10,6 @@ #include #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/arch/xtensa/kernel/init_task.c b/arch/xtensa/kernel/init_task.c index 3df469dbe81..e07f5c9fcd3 100644 --- a/arch/xtensa/kernel/init_task.c +++ b/arch/xtensa/kernel/init_task.c @@ -21,7 +21,6 @@ #include -static struct fs_struct init_fs = INIT_FS; static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); struct mm_struct init_mm = INIT_MM(init_mm); diff --git a/fs/namei.c b/fs/namei.c index 3f88e043d45..e203691b9d1 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2893,3 +2893,10 @@ EXPORT_SYMBOL(vfs_symlink); EXPORT_SYMBOL(vfs_unlink); EXPORT_SYMBOL(dentry_unhash); EXPORT_SYMBOL(generic_readlink); + +/* to be mentioned only in INIT_TASK */ +struct fs_struct init_fs = { + .count = ATOMIC_INIT(1), + .lock = RW_LOCK_UNLOCKED, + .umask = 0022, +}; diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 9e5a06e78d0..a97c053d3a9 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -10,12 +10,6 @@ struct fs_struct { struct path root, pwd; }; -#define INIT_FS { \ - .count = ATOMIC_INIT(1), \ - .lock = RW_LOCK_UNLOCKED, \ - .umask = 0022, \ -} - extern struct kmem_cache *fs_cachep; extern void exit_fs(struct task_struct *); diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 959f5522d10..2f3c2d4ef73 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -12,6 +12,7 @@ #include extern struct files_struct init_files; +extern struct fs_struct init_fs; #define INIT_KIOCTX(name, which_mm) \ { \ -- cgit v1.2.3-70-g09d2 From 1239f26c05899f1f3c541b41e719c59d58038786 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Wed, 10 Dec 2008 18:37:28 -0500 Subject: make INIT_FS use the __RW_LOCK_UNLOCKED initialization [AV: rediffed on top of unification of init_fs] Initialization of init_fs still uses the deprecated RW_LOCK_UNLOCKED macro. This patch updates it to use the __RW_LOCK_UNLOCKED(lock) macro. 
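The practical difference shows up mainly under lock debugging: the deprecated initializer leaves the lock anonymous, while the named form ties the initializer to the variable and so gives it its own lockdep identity. A minimal sketch of the two styles for a lock embedded in a struct, as in init_fs:

struct foo {
	rwlock_t lock;
	int val;
};

/* deprecated: anonymous initializer */
static struct foo a = { .lock = RW_LOCK_UNLOCKED, .val = 0 };

/* preferred: the initializer names the lock it initialises */
static struct foo b = { .lock = __RW_LOCK_UNLOCKED(b.lock), .val = 0 };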
Signed-off-by: Steven Rostedt Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index e203691b9d1..dd5c9f0bf82 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2897,6 +2897,6 @@ EXPORT_SYMBOL(generic_readlink); /* to be mentioned only in INIT_TASK */ struct fs_struct init_fs = { .count = ATOMIC_INIT(1), - .lock = RW_LOCK_UNLOCKED, + .lock = __RW_LOCK_UNLOCKED(init_fs.lock), .umask = 0022, }; -- cgit v1.2.3-70-g09d2 From b6b3fdead251d432f32f2cfce2a893ab8a658110 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 10 Dec 2008 09:35:45 -0800 Subject: filp_cachep can be static in fs/file_table.c Instead of creating the "filp" kmem_cache in vfs_caches_init(), we can do it a little bit later in files_init(), so that filp_cachep is static to fs/file_table.c Acked-by: Paul E. McKenney Signed-off-by: Eric Dumazet Signed-off-by: Al Viro --- fs/dcache.c | 6 ------ fs/file_table.c | 10 +++++++++- include/linux/fdtable.h | 2 -- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index bdb3f50248a..e88c23b85a3 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2314,9 +2314,6 @@ static void __init dcache_init(void) /* SLAB cache for __getname() consumers */ struct kmem_cache *names_cachep __read_mostly; -/* SLAB cache for file structures */ -struct kmem_cache *filp_cachep __read_mostly; - EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) @@ -2338,9 +2335,6 @@ void __init vfs_caches_init(unsigned long mempages) names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); - dcache_init(); inode_init(); files_init(mempages); diff --git a/fs/file_table.c b/fs/file_table.c index 0fbcacc3ea7..bbeeac6efa1 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -32,6 +32,9 @@ struct files_stat_struct files_stat = { /* public. Not pretty! */ __cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock); +/* SLAB cache for file structures */ +static struct kmem_cache *filp_cachep __read_mostly; + static struct percpu_counter nr_files __cacheline_aligned_in_smp; static inline void file_free_rcu(struct rcu_head *head) @@ -397,7 +400,12 @@ too_bad: void __init files_init(unsigned long mempages) { int n; - /* One file with associated inode and dcache is very roughly 1K. + + filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + + /* + * One file with associated inode and dcache is very roughly 1K. * Per default don't use more than 10% of our memory for files. */ diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 4aab6f12cfa..09d6c5bbddd 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -57,8 +57,6 @@ struct files_struct { #define files_fdtable(files) (rcu_dereference((files)->fdt)) -extern struct kmem_cache *filp_cachep; - struct file_operations; struct vfsmount; struct dentry; -- cgit v1.2.3-70-g09d2 From 6badd79bd002788aaec27b50a74ab69ef65ab8ee Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 26 Dec 2008 00:57:40 -0500 Subject: kill ->dir_notify() Remove the hopelessly misguided ->dir_notify().
The only instance (cifs) has been broken by design from the very beginning; the objects it creates are never destroyed, keep references to struct file they can outlive, nothing that could possibly evict them exists on close(2) path *and* no locking whatsoever is done to prevent races with close(), should the previous, er, deficiencies someday be dealt with. Signed-off-by: Al Viro --- Documentation/filesystems/Locking | 2 - Documentation/filesystems/vfs.txt | 3 - fs/bad_inode.c | 6 -- fs/cifs/Makefile | 2 +- fs/cifs/cifsfs.c | 7 --- fs/cifs/cifsfs.h | 1 - fs/cifs/fcntl.c | 118 -------------------------------------- fs/dnotify.c | 3 - include/linux/fs.h | 1 - 9 files changed, 1 insertion(+), 142 deletions(-) delete mode 100644 fs/cifs/fcntl.c (limited to 'fs') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 23d2f4460de..ccec5539438 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -394,7 +394,6 @@ prototypes: unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); - int (*dir_notify)(struct file *, unsigned long); }; locking rules: @@ -424,7 +423,6 @@ sendfile: no sendpage: no get_unmapped_area: no check_flags: no -dir_notify: no ->llseek() locking has moved from llseek to the individual llseek implementations. If your fs is not using generic_file_llseek, you diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 041cb771d50..ef19afa186a 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -733,7 +733,6 @@ struct file_operations { ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); - int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int); @@ -800,8 +799,6 @@ otherwise noted. check_flags: called by the fcntl(2) system call for F_SETFL command - dir_notify: called by the fcntl(2) system call for F_NOTIFY command - flock: called by the flock(2) system call splice_write: called by the VFS to splice data from a pipe to a file. 
This diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 5f1538c03b1..a05287a23f6 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -132,11 +132,6 @@ static int bad_file_check_flags(int flags) return -EIO; } -static int bad_file_dir_notify(struct file *file, unsigned long arg) -{ - return -EIO; -} - static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) { return -EIO; @@ -179,7 +174,6 @@ static const struct file_operations bad_file_ops = .sendpage = bad_file_sendpage, .get_unmapped_area = bad_file_get_unmapped_area, .check_flags = bad_file_check_flags, - .dir_notify = bad_file_dir_notify, .flock = bad_file_flock, .splice_write = bad_file_splice_write, .splice_read = bad_file_splice_read, diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 6ba43fb346f..9948c0030e8 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_CIFS) += cifs.o cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ - md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o \ + md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o cifsacl.o cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 0005a194a75..13ea53251dc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -747,7 +747,6 @@ const struct file_operations cifs_file_ops = { #endif /* CONFIG_CIFS_POSIX */ #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -768,7 +767,6 @@ const struct file_operations cifs_file_direct_ops = { #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -789,7 +787,6 @@ const struct file_operations cifs_file_nobrl_ops = { #endif /* CONFIG_CIFS_POSIX */ #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -809,7 +806,6 @@ const struct file_operations cifs_file_direct_nobrl_ops = { #endif /* CONFIG_CIFS_POSIX */ .llseek = cifs_llseek, #ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, .setlease = cifs_setlease, #endif /* CONFIG_CIFS_EXPERIMENTAL */ }; @@ -818,9 +814,6 @@ const struct file_operations cifs_dir_ops = { .readdir = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, -#ifdef CONFIG_CIFS_EXPERIMENTAL - .dir_notify = cifs_dir_notify, -#endif /* CONFIG_CIFS_EXPERIMENTAL */ .unlocked_ioctl = cifs_ioctl, .llseek = generic_file_llseek, }; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 2ce04c73d74..7ac481841f8 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -76,7 +76,6 @@ extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); -extern int cifs_dir_notify(struct file *, unsigned long arg); /* Functions related to dir entries */ extern struct dentry_operations cifs_dentry_ops; diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c deleted file mode 100644 index 5a57581eb4b..00000000000 --- a/fs/cifs/fcntl.c +++ /dev/null @@ -1,118 +0,0 @@ -/* - * fs/cifs/fcntl.c - * - * vfs operations that deal with the file control API - * - * Copyright (C) International Business 
Machines Corp., 2003,2004 - * Author(s): Steve French (sfrench@us.ibm.com) - * - * This library is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include -#include -#include -#include "cifsglob.h" -#include "cifsproto.h" -#include "cifs_unicode.h" -#include "cifs_debug.h" -#include "cifsfs.h" - -static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags) -{ - __u32 cifs_ntfy_flags = 0; - - /* No way on Linux VFS to ask to monitor xattr - changes (and no stream support either */ - if (fcntl_notify_flags & DN_ACCESS) - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS; - if (fcntl_notify_flags & DN_MODIFY) { - /* What does this mean on directories? */ - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE | - FILE_NOTIFY_CHANGE_SIZE; - } - if (fcntl_notify_flags & DN_CREATE) { - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION | - FILE_NOTIFY_CHANGE_LAST_WRITE; - } - if (fcntl_notify_flags & DN_DELETE) - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE; - if (fcntl_notify_flags & DN_RENAME) { - /* BB review this - checking various server behaviors */ - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME | - FILE_NOTIFY_CHANGE_FILE_NAME; - } - if (fcntl_notify_flags & DN_ATTRIB) { - cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_SECURITY | - FILE_NOTIFY_CHANGE_ATTRIBUTES; - } -/* if (fcntl_notify_flags & DN_MULTISHOT) { - cifs_ntfy_flags |= ; - } */ /* BB fixme - not sure how to handle this with CIFS yet */ - - return cifs_ntfy_flags; -} - -int cifs_dir_notify(struct file *file, unsigned long arg) -{ - int xid; - int rc = -EINVAL; - int oplock = 0; - struct cifs_sb_info *cifs_sb; - struct cifsTconInfo *pTcon; - char *full_path = NULL; - __u32 filter = FILE_NOTIFY_CHANGE_NAME | FILE_NOTIFY_CHANGE_ATTRIBUTES; - __u16 netfid; - - if (experimEnabled == 0) - return 0; - - xid = GetXid(); - cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); - pTcon = cifs_sb->tcon; - - full_path = build_path_from_dentry(file->f_path.dentry); - - if (full_path == NULL) { - rc = -ENOMEM; - } else { - cFYI(1, ("dir notify on file %s Arg 0x%lx", full_path, arg)); - rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN, - GENERIC_READ | SYNCHRONIZE, 0 /* create options */, - &netfid, &oplock, NULL, cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); - /* BB fixme - add this handle to a notify handle list */ - if (rc) { - cFYI(1, ("Could not open directory for notify")); - } else { - filter = convert_to_cifs_notify_flags(arg); - if (filter != 0) { - rc = CIFSSMBNotify(xid, pTcon, - 0 /* no subdirs */, netfid, - filter, file, arg & DN_MULTISHOT, - cifs_sb->local_nls); - } else { - rc = -EINVAL; - } - /* BB add code to close file eventually (at unmount - it would close automatically but may be a way - to do it easily when inode freed or when - notify info is cleared/changed */ - cFYI(1, ("notify rc %d", rc)); - } - } - - FreeXid(xid); - 
return rc; -} diff --git a/fs/dnotify.c b/fs/dnotify.c index 676073b8dda..b0aa2cde80b 100644 --- a/fs/dnotify.c +++ b/fs/dnotify.c @@ -115,9 +115,6 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) dn->dn_next = inode->i_dnotify; inode->i_dnotify = dn; spin_unlock(&inode->i_lock); - - if (filp->f_op && filp->f_op->dir_notify) - return filp->f_op->dir_notify(filp, arg); return 0; out_free: diff --git a/include/linux/fs.h b/include/linux/fs.h index fd615986a41..be16ce01fb1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1309,7 +1309,6 @@ struct file_operations { ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); int (*check_flags)(int); - int (*dir_notify)(struct file *filp, unsigned long arg); int (*flock) (struct file *, int, struct file_lock *); ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); -- cgit v1.2.3-70-g09d2 From c2acf7b90821785fe812cc0aa05148e5a1f84204 Mon Sep 17 00:00:00 2001 From: Denis ChengRq Date: Mon, 1 Dec 2008 14:34:56 -0800 Subject: fs/block_dev.c: __read_mostly improvement and sb_is_blkdev_sb utilization - iget5_locked in bdget really needs blockdev_superblock, instead of bd_mnt, so bd_mnt could be just a local variable; - blockdev_superblock really needs __read_mostly, while local var bd_mnt not; - make use of sb_is_blkdev_sb in bd_forget, instead of direct reference to blockdev_superblock. Signed-off-by: Denis ChengRq Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Al Viro --- fs/block_dev.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 99e0ae1a4c7..349a26c1000 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -326,12 +326,13 @@ static struct file_system_type bd_type = { .kill_sb = kill_anon_super, }; -static struct vfsmount *bd_mnt __read_mostly; -struct super_block *blockdev_superblock; +struct super_block *blockdev_superblock __read_mostly; void __init bdev_cache_init(void) { int err; + struct vfsmount *bd_mnt; + bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_PANIC), @@ -373,7 +374,7 @@ struct block_device *bdget(dev_t dev) struct block_device *bdev; struct inode *inode; - inode = iget5_locked(bd_mnt->mnt_sb, hash(dev), + inode = iget5_locked(blockdev_superblock, hash(dev), bdev_test, bdev_set, &dev); if (!inode) @@ -463,7 +464,7 @@ void bd_forget(struct inode *inode) spin_lock(&bdev_lock); if (inode->i_bdev) { - if (inode->i_sb != blockdev_superblock) + if (!sb_is_blkdev_sb(inode->i_sb)) bdev = inode->i_bdev; __bd_forget(inode); } -- cgit v1.2.3-70-g09d2 From 272eb01485dda98e3b8910c7c1a53d597616b0a0 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Wed, 17 Dec 2008 13:59:41 -0500 Subject: filesystem notification: create fs/notify to contain all fs notification Creating a generic filesystem notification interface, fsnotify, which will be used by inotify, dnotify, and eventually fanotify is really starting to clutter the fs directory. This patch simply moves inotify and dnotify into fs/notify/inotify and fs/notify/dnotify respectively to make both current fs/ and future notification tidier. 
Signed-off-by: Eric Paris Signed-off-by: Al Viro --- fs/Kconfig | 39 +- fs/Makefile | 5 +- fs/dnotify.c | 191 -------- fs/inotify.c | 913 --------------------------------------- fs/inotify_user.c | 778 --------------------------------- fs/notify/Kconfig | 2 + fs/notify/Makefile | 2 + fs/notify/dnotify/Kconfig | 10 + fs/notify/dnotify/Makefile | 1 + fs/notify/dnotify/dnotify.c | 191 ++++++++ fs/notify/inotify/Kconfig | 27 ++ fs/notify/inotify/Makefile | 2 + fs/notify/inotify/inotify.c | 913 +++++++++++++++++++++++++++++++++++++++ fs/notify/inotify/inotify_user.c | 778 +++++++++++++++++++++++++++++++++ 14 files changed, 1928 insertions(+), 1924 deletions(-) delete mode 100644 fs/dnotify.c delete mode 100644 fs/inotify.c delete mode 100644 fs/inotify_user.c create mode 100644 fs/notify/Kconfig create mode 100644 fs/notify/Makefile create mode 100644 fs/notify/dnotify/Kconfig create mode 100644 fs/notify/dnotify/Makefile create mode 100644 fs/notify/dnotify/dnotify.c create mode 100644 fs/notify/inotify/Kconfig create mode 100644 fs/notify/inotify/Makefile create mode 100644 fs/notify/inotify/inotify.c create mode 100644 fs/notify/inotify/inotify_user.c (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index 522469a7eca..ff0e8198020 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -270,44 +270,7 @@ config OCFS2_COMPAT_JBD endif # BLOCK -config DNOTIFY - bool "Dnotify support" - default y - help - Dnotify is a directory-based per-fd file change notification system - that uses signals to communicate events to user-space. There exist - superior alternatives, but some applications may still rely on - dnotify. - - If unsure, say Y. - -config INOTIFY - bool "Inotify file change notification support" - default y - ---help--- - Say Y here to enable inotify support. Inotify is a file change - notification system and a replacement for dnotify. Inotify fixes - numerous shortcomings in dnotify and introduces several new features - including multiple file events, one-shot support, and unmount - notification. - - For more information, see - - If unsure, say Y. - -config INOTIFY_USER - bool "Inotify support for userspace" - depends on INOTIFY - default y - ---help--- - Say Y here to enable inotify support for userspace, including the - associated system calls. Inotify allows monitoring of both files and - directories via a single open fd. Events are read from the file - descriptor, which is also select()- and poll()-able. - - For more information, see - - If unsure, say Y. +source "fs/notify/Kconfig" config QUOTA bool "Quota support" diff --git a/fs/Makefile b/fs/Makefile index d9f8afe6f0c..e6f423d1d22 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -20,8 +20,7 @@ obj-y += no-block.o endif obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o -obj-$(CONFIG_INOTIFY) += inotify.o -obj-$(CONFIG_INOTIFY_USER) += inotify_user.o +obj-y += notify/ obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_ANON_INODES) += anon_inodes.o obj-$(CONFIG_SIGNALFD) += signalfd.o @@ -57,8 +56,6 @@ obj-$(CONFIG_QFMT_V1) += quota_v1.o obj-$(CONFIG_QFMT_V2) += quota_v2.o obj-$(CONFIG_QUOTACTL) += quota.o -obj-$(CONFIG_DNOTIFY) += dnotify.o - obj-$(CONFIG_PROC_FS) += proc/ obj-y += partitions/ obj-$(CONFIG_SYSFS) += sysfs/ diff --git a/fs/dnotify.c b/fs/dnotify.c deleted file mode 100644 index b0aa2cde80b..00000000000 --- a/fs/dnotify.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Directory notifications for Linux. 
- * - * Copyright (C) 2000,2001,2002 Stephen Rothwell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -int dir_notify_enable __read_mostly = 1; - -static struct kmem_cache *dn_cache __read_mostly; - -static void redo_inode_mask(struct inode *inode) -{ - unsigned long new_mask; - struct dnotify_struct *dn; - - new_mask = 0; - for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) - new_mask |= dn->dn_mask & ~DN_MULTISHOT; - inode->i_dnotify_mask = new_mask; -} - -void dnotify_flush(struct file *filp, fl_owner_t id) -{ - struct dnotify_struct *dn; - struct dnotify_struct **prev; - struct inode *inode; - - inode = filp->f_path.dentry->d_inode; - if (!S_ISDIR(inode->i_mode)) - return; - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((dn = *prev) != NULL) { - if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { - *prev = dn->dn_next; - redo_inode_mask(inode); - kmem_cache_free(dn_cache, dn); - break; - } - prev = &dn->dn_next; - } - spin_unlock(&inode->i_lock); -} - -int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) -{ - struct dnotify_struct *dn; - struct dnotify_struct *odn; - struct dnotify_struct **prev; - struct inode *inode; - fl_owner_t id = current->files; - struct file *f; - int error = 0; - - if ((arg & ~DN_MULTISHOT) == 0) { - dnotify_flush(filp, id); - return 0; - } - if (!dir_notify_enable) - return -EINVAL; - inode = filp->f_path.dentry->d_inode; - if (!S_ISDIR(inode->i_mode)) - return -ENOTDIR; - dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); - if (dn == NULL) - return -ENOMEM; - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((odn = *prev) != NULL) { - if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { - odn->dn_fd = fd; - odn->dn_mask |= arg; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - goto out_free; - } - prev = &odn->dn_next; - } - - rcu_read_lock(); - f = fcheck(fd); - rcu_read_unlock(); - /* we'd lost the race with close(), sod off silently */ - /* note that inode->i_lock prevents reordering problems - * between accesses to descriptor table and ->i_dnotify */ - if (f != filp) - goto out_free; - - error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); - if (error) - goto out_free; - - dn->dn_mask = arg; - dn->dn_fd = fd; - dn->dn_filp = filp; - dn->dn_owner = id; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - dn->dn_next = inode->i_dnotify; - inode->i_dnotify = dn; - spin_unlock(&inode->i_lock); - return 0; - -out_free: - spin_unlock(&inode->i_lock); - kmem_cache_free(dn_cache, dn); - return error; -} - -void __inode_dir_notify(struct inode *inode, unsigned long event) -{ - struct dnotify_struct * dn; - struct dnotify_struct **prev; - struct fown_struct * fown; - int changed = 0; - - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((dn = *prev) != NULL) { - if ((dn->dn_mask & event) == 0) { - prev = &dn->dn_next; - continue; - } - fown = &dn->dn_filp->f_owner; - send_sigio(fown, dn->dn_fd, POLL_MSG); - if (dn->dn_mask & DN_MULTISHOT) - prev = &dn->dn_next; - else { 
- *prev = dn->dn_next; - changed = 1; - kmem_cache_free(dn_cache, dn); - } - } - if (changed) - redo_inode_mask(inode); - spin_unlock(&inode->i_lock); -} - -EXPORT_SYMBOL(__inode_dir_notify); - -/* - * This is hopelessly wrong, but unfixable without API changes. At - * least it doesn't oops the kernel... - * - * To safely access ->d_parent we need to keep d_move away from it. Use the - * dentry's d_lock for this. - */ -void dnotify_parent(struct dentry *dentry, unsigned long event) -{ - struct dentry *parent; - - if (!dir_notify_enable) - return; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - if (parent->d_inode->i_dnotify_mask & event) { - dget(parent); - spin_unlock(&dentry->d_lock); - __inode_dir_notify(parent->d_inode, event); - dput(parent); - } else { - spin_unlock(&dentry->d_lock); - } -} -EXPORT_SYMBOL_GPL(dnotify_parent); - -static int __init dnotify_init(void) -{ - dn_cache = kmem_cache_create("dnotify_cache", - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); - return 0; -} - -module_init(dnotify_init) diff --git a/fs/inotify.c b/fs/inotify.c deleted file mode 100644 index dae3f28f30d..00000000000 --- a/fs/inotify.c +++ /dev/null @@ -1,913 +0,0 @@ -/* - * fs/inotify.c - inode-based file event notifications - * - * Authors: - * John McCutchan - * Robert Love - * - * Kernel API added by: Amy Griffis - * - * Copyright (C) 2005 John McCutchan - * Copyright 2006 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static atomic_t inotify_cookie; - -/* - * Lock ordering: - * - * dentry->d_lock (used to keep d_move() away from dentry->d_parent) - * iprune_mutex (synchronize shrink_icache_memory()) - * inode_lock (protects the super_block->s_inodes list) - * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) - * inotify_handle->mutex (protects inotify_handle and watches->h_list) - * - * The inode->inotify_mutex and inotify_handle->mutex and held during execution - * of a caller's event handler. Thus, the caller must not hold any locks - * taken in their event handler while calling any of the published inotify - * interfaces. - */ - -/* - * Lifetimes of the three main data structures--inotify_handle, inode, and - * inotify_watch--are managed by reference count. - * - * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). - * Additional references can bump the count via get_inotify_handle() and drop - * the count via put_inotify_handle(). - * - * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() - * to remove_watch_no_event(). Additional references can bump the count via - * get_inotify_watch() and drop the count via put_inotify_watch(). The caller - * is reponsible for the final put after receiving IN_IGNORED, or when using - * IN_ONESHOT after receiving the first event. Inotify does the final put if - * inotify_destroy() is called. 
- * - * inode: Pinned so long as the inode is associated with a watch, from - * inotify_add_watch() to the final put_inotify_watch(). - */ - -/* - * struct inotify_handle - represents an inotify instance - * - * This structure is protected by the mutex 'mutex'. - */ -struct inotify_handle { - struct idr idr; /* idr mapping wd -> watch */ - struct mutex mutex; /* protects this bad boy */ - struct list_head watches; /* list of watches */ - atomic_t count; /* reference count */ - u32 last_wd; /* the last wd allocated */ - const struct inotify_operations *in_ops; /* inotify caller operations */ -}; - -static inline void get_inotify_handle(struct inotify_handle *ih) -{ - atomic_inc(&ih->count); -} - -static inline void put_inotify_handle(struct inotify_handle *ih) -{ - if (atomic_dec_and_test(&ih->count)) { - idr_destroy(&ih->idr); - kfree(ih); - } -} - -/** - * get_inotify_watch - grab a reference to an inotify_watch - * @watch: watch to grab - */ -void get_inotify_watch(struct inotify_watch *watch) -{ - atomic_inc(&watch->count); -} -EXPORT_SYMBOL_GPL(get_inotify_watch); - -int pin_inotify_watch(struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - spin_lock(&sb_lock); - if (sb->s_count >= S_BIAS) { - atomic_inc(&sb->s_active); - spin_unlock(&sb_lock); - atomic_inc(&watch->count); - return 1; - } - spin_unlock(&sb_lock); - return 0; -} - -/** - * put_inotify_watch - decrements the ref count on a given watch. cleans up - * watch references if the count reaches zero. inotify_watch is freed by - * inotify callers via the destroy_watch() op. - * @watch: watch to release - */ -void put_inotify_watch(struct inotify_watch *watch) -{ - if (atomic_dec_and_test(&watch->count)) { - struct inotify_handle *ih = watch->ih; - - iput(watch->inode); - ih->in_ops->destroy_watch(watch); - put_inotify_handle(ih); - } -} -EXPORT_SYMBOL_GPL(put_inotify_watch); - -void unpin_inotify_watch(struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - put_inotify_watch(watch); - deactivate_super(sb); -} - -/* - * inotify_handle_get_wd - returns the next WD for use by the given handle - * - * Callers must hold ih->mutex. This function can sleep. - */ -static int inotify_handle_get_wd(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - int ret; - - do { - if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) - return -ENOSPC; - ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); - } while (ret == -EAGAIN); - - if (likely(!ret)) - ih->last_wd = watch->wd; - - return ret; -} - -/* - * inotify_inode_watched - returns nonzero if there are watches on this inode - * and zero otherwise. We call this lockless, we do not care if we race. - */ -static inline int inotify_inode_watched(struct inode *inode) -{ - return !list_empty(&inode->inotify_watches); -} - -/* - * Get child dentry flag into synch with parent inode. - * Flag should always be clear for negative dentrys. 
- */ -static void set_dentry_child_flags(struct inode *inode, int watched) -{ - struct dentry *alias; - - spin_lock(&dcache_lock); - list_for_each_entry(alias, &inode->i_dentry, d_alias) { - struct dentry *child; - - list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { - if (!child->d_inode) - continue; - - spin_lock(&child->d_lock); - if (watched) - child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - else - child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; - spin_unlock(&child->d_lock); - } - } - spin_unlock(&dcache_lock); -} - -/* - * inotify_find_handle - find the watch associated with the given inode and - * handle - * - * Callers must hold inode->inotify_mutex. - */ -static struct inotify_watch *inode_find_handle(struct inode *inode, - struct inotify_handle *ih) -{ - struct inotify_watch *watch; - - list_for_each_entry(watch, &inode->inotify_watches, i_list) { - if (watch->ih == ih) - return watch; - } - - return NULL; -} - -/* - * remove_watch_no_event - remove watch without the IN_IGNORED event. - * - * Callers must hold both inode->inotify_mutex and ih->mutex. - */ -static void remove_watch_no_event(struct inotify_watch *watch, - struct inotify_handle *ih) -{ - list_del(&watch->i_list); - list_del(&watch->h_list); - - if (!inotify_inode_watched(watch->inode)) - set_dentry_child_flags(watch->inode, 0); - - idr_remove(&ih->idr, watch->wd); -} - -/** - * inotify_remove_watch_locked - Remove a watch from both the handle and the - * inode. Sends the IN_IGNORED event signifying that the inode is no longer - * watched. May be invoked from a caller's event handler. - * @ih: inotify handle associated with watch - * @watch: watch to remove - * - * Callers must hold both inode->inotify_mutex and ih->mutex. - */ -void inotify_remove_watch_locked(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - remove_watch_no_event(watch, ih); - ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); -} -EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); - -/* Kernel API for producing events */ - -/* - * inotify_d_instantiate - instantiate dcache entry for inode - */ -void inotify_d_instantiate(struct dentry *entry, struct inode *inode) -{ - struct dentry *parent; - - if (!inode) - return; - - spin_lock(&entry->d_lock); - parent = entry->d_parent; - if (parent->d_inode && inotify_inode_watched(parent->d_inode)) - entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - spin_unlock(&entry->d_lock); -} - -/* - * inotify_d_move - dcache entry has been moved - */ -void inotify_d_move(struct dentry *entry) -{ - struct dentry *parent; - - parent = entry->d_parent; - if (inotify_inode_watched(parent->d_inode)) - entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; - else - entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; -} - -/** - * inotify_inode_queue_event - queue an event to all watches on this inode - * @inode: inode event is originating from - * @mask: event mask describing this event - * @cookie: cookie for synchronization, or zero - * @name: filename, if any - * @n_inode: inode associated with name - */ -void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, - const char *name, struct inode *n_inode) -{ - struct inotify_watch *watch, *next; - - if (!inotify_inode_watched(inode)) - return; - - mutex_lock(&inode->inotify_mutex); - list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { - u32 watch_mask = watch->mask; - if (watch_mask & mask) { - struct inotify_handle *ih= watch->ih; - mutex_lock(&ih->mutex); - if (watch_mask & IN_ONESHOT) - 
remove_watch_no_event(watch, ih); - ih->in_ops->handle_event(watch, watch->wd, mask, cookie, - name, n_inode); - mutex_unlock(&ih->mutex); - } - } - mutex_unlock(&inode->inotify_mutex); -} -EXPORT_SYMBOL_GPL(inotify_inode_queue_event); - -/** - * inotify_dentry_parent_queue_event - queue an event to a dentry's parent - * @dentry: the dentry in question, we queue against this dentry's parent - * @mask: event mask describing this event - * @cookie: cookie for synchronization, or zero - * @name: filename, if any - */ -void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, - u32 cookie, const char *name) -{ - struct dentry *parent; - struct inode *inode; - - if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED)) - return; - - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - inode = parent->d_inode; - - if (inotify_inode_watched(inode)) { - dget(parent); - spin_unlock(&dentry->d_lock); - inotify_inode_queue_event(inode, mask, cookie, name, - dentry->d_inode); - dput(parent); - } else - spin_unlock(&dentry->d_lock); -} -EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); - -/** - * inotify_get_cookie - return a unique cookie for use in synchronizing events. - */ -u32 inotify_get_cookie(void) -{ - return atomic_inc_return(&inotify_cookie); -} -EXPORT_SYMBOL_GPL(inotify_get_cookie); - -/** - * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. - * @list: list of inodes being unmounted (sb->s_inodes) - * - * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. - * We temporarily drop inode_lock, however, and CAN block. - */ -void inotify_unmount_inodes(struct list_head *list) -{ - struct inode *inode, *next_i, *need_iput = NULL; - - list_for_each_entry_safe(inode, next_i, list, i_sb_list) { - struct inotify_watch *watch, *next_w; - struct inode *need_iput_tmp; - struct list_head *watches; - - /* - * If i_count is zero, the inode cannot have any watches and - * doing an __iget/iput with MS_ACTIVE clear would actually - * evict all inodes with zero i_count from icache which is - * unnecessarily violent and may in fact be illegal to do. - */ - if (!atomic_read(&inode->i_count)) - continue; - - /* - * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or - * I_WILL_FREE which is fine because by that point the inode - * cannot have any associated watches. - */ - if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) - continue; - - need_iput_tmp = need_iput; - need_iput = NULL; - /* In case inotify_remove_watch_locked() drops a reference. */ - if (inode != need_iput_tmp) - __iget(inode); - else - need_iput_tmp = NULL; - /* In case the dropping of a reference would nuke next_i. */ - if ((&next_i->i_sb_list != list) && - atomic_read(&next_i->i_count) && - !(next_i->i_state & (I_CLEAR | I_FREEING | - I_WILL_FREE))) { - __iget(next_i); - need_iput = next_i; - } - - /* - * We can safely drop inode_lock here because we hold - * references on both inode and next_i. Also no new inodes - * will be added since the umount has begun. Finally, - * iprune_mutex keeps shrink_icache_memory() away. 
- */ - spin_unlock(&inode_lock); - - if (need_iput_tmp) - iput(need_iput_tmp); - - /* for each watch, send IN_UNMOUNT and then remove it */ - mutex_lock(&inode->inotify_mutex); - watches = &inode->inotify_watches; - list_for_each_entry_safe(watch, next_w, watches, i_list) { - struct inotify_handle *ih= watch->ih; - get_inotify_watch(watch); - mutex_lock(&ih->mutex); - ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, - NULL, NULL); - inotify_remove_watch_locked(ih, watch); - mutex_unlock(&ih->mutex); - put_inotify_watch(watch); - } - mutex_unlock(&inode->inotify_mutex); - iput(inode); - - spin_lock(&inode_lock); - } -} -EXPORT_SYMBOL_GPL(inotify_unmount_inodes); - -/** - * inotify_inode_is_dead - an inode has been deleted, cleanup any watches - * @inode: inode that is about to be removed - */ -void inotify_inode_is_dead(struct inode *inode) -{ - struct inotify_watch *watch, *next; - - mutex_lock(&inode->inotify_mutex); - list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { - struct inotify_handle *ih = watch->ih; - mutex_lock(&ih->mutex); - inotify_remove_watch_locked(ih, watch); - mutex_unlock(&ih->mutex); - } - mutex_unlock(&inode->inotify_mutex); -} -EXPORT_SYMBOL_GPL(inotify_inode_is_dead); - -/* Kernel Consumer API */ - -/** - * inotify_init - allocate and initialize an inotify instance - * @ops: caller's inotify operations - */ -struct inotify_handle *inotify_init(const struct inotify_operations *ops) -{ - struct inotify_handle *ih; - - ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); - if (unlikely(!ih)) - return ERR_PTR(-ENOMEM); - - idr_init(&ih->idr); - INIT_LIST_HEAD(&ih->watches); - mutex_init(&ih->mutex); - ih->last_wd = 0; - ih->in_ops = ops; - atomic_set(&ih->count, 0); - get_inotify_handle(ih); - - return ih; -} -EXPORT_SYMBOL_GPL(inotify_init); - -/** - * inotify_init_watch - initialize an inotify watch - * @watch: watch to initialize - */ -void inotify_init_watch(struct inotify_watch *watch) -{ - INIT_LIST_HEAD(&watch->h_list); - INIT_LIST_HEAD(&watch->i_list); - atomic_set(&watch->count, 0); - get_inotify_watch(watch); /* initial get */ -} -EXPORT_SYMBOL_GPL(inotify_init_watch); - -/* - * Watch removals suck violently. To kick the watch out we need (in this - * order) inode->inotify_mutex and ih->mutex. That's fine if we have - * a hold on inode; however, for all other cases we need to make damn sure - * we don't race with umount. We can *NOT* just grab a reference to a - * watch - inotify_unmount_inodes() will happily sail past it and we'll end - * with reference to inode potentially outliving its superblock. Ideally - * we just want to grab an active reference to superblock if we can; that - * will make sure we won't go into inotify_umount_inodes() until we are - * done. Cleanup is just deactivate_super(). However, that leaves a messy - * case - what if we *are* racing with umount() and active references to - * superblock can't be acquired anymore? We can bump ->s_count, grab - * ->s_umount, which will almost certainly wait until the superblock is shut - * down and the watch in question is pining for fjords. That's fine, but - * there is a problem - we might have hit the window between ->s_active - * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock - * is past the point of no return and is heading for shutdown) and the - * moment when deactivate_super() acquires ->s_umount. We could just do - * drop_super() yield() and retry, but that's rather antisocial and this - * stuff is luser-triggerable. 
OTOH, having grabbed ->s_umount and having - * found that we'd got there first (i.e. that ->s_root is non-NULL) we know - * that we won't race with inotify_umount_inodes(). So we could grab a - * reference to watch and do the rest as above, just with drop_super() instead - * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we - * could grab ->s_umount. So the watch could've been gone already. - * - * That still can be dealt with - we need to save watch->wd, do idr_find() - * and compare its result with our pointer. If they match, we either have - * the damn thing still alive or we'd lost not one but two races at once, - * the watch had been killed and a new one got created with the same ->wd - * at the same address. That couldn't have happened in inotify_destroy(), - * but inotify_rm_wd() could run into that. Still, "new one got created" - * is not a problem - we have every right to kill it or leave it alone, - * whatever's more convenient. - * - * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as - * "grab it and kill it" check. If it's been our original watch, we are - * fine, if it's a newcomer - nevermind, just pretend that we'd won the - * race and kill the fscker anyway; we are safe since we know that its - * superblock won't be going away. - * - * And yes, this is far beyond mere "not very pretty"; so's the entire - * concept of inotify to start with. - */ - -/** - * pin_to_kill - pin the watch down for removal - * @ih: inotify handle - * @watch: watch to kill - * - * Called with ih->mutex held, drops it. Possible return values: - * 0 - nothing to do, it has died - * 1 - remove it, drop the reference and deactivate_super() - * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid - * that variant, since it involved a lot of PITA, but that's the best that - * could've been done. - */ -static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch) -{ - struct super_block *sb = watch->inode->i_sb; - s32 wd = watch->wd; - - spin_lock(&sb_lock); - if (sb->s_count >= S_BIAS) { - atomic_inc(&sb->s_active); - spin_unlock(&sb_lock); - get_inotify_watch(watch); - mutex_unlock(&ih->mutex); - return 1; /* the best outcome */ - } - sb->s_count++; - spin_unlock(&sb_lock); - mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */ - down_read(&sb->s_umount); - if (likely(!sb->s_root)) { - /* fs is already shut down; the watch is dead */ - drop_super(sb); - return 0; - } - /* raced with the final deactivate_super() */ - mutex_lock(&ih->mutex); - if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) { - /* the watch is dead */ - mutex_unlock(&ih->mutex); - drop_super(sb); - return 0; - } - /* still alive or freed and reused with the same sb and wd; kill */ - get_inotify_watch(watch); - mutex_unlock(&ih->mutex); - return 2; -} - -static void unpin_and_kill(struct inotify_watch *watch, int how) -{ - struct super_block *sb = watch->inode->i_sb; - put_inotify_watch(watch); - switch (how) { - case 1: - deactivate_super(sb); - break; - case 2: - drop_super(sb); - } -} - -/** - * inotify_destroy - clean up and destroy an inotify instance - * @ih: inotify handle - */ -void inotify_destroy(struct inotify_handle *ih) -{ - /* - * Destroy all of the watches for this handle. Unfortunately, not very - * pretty. We cannot do a simple iteration over the list, because we - * do not know the inode until we iterate to the watch. But we need to - * hold inode->inotify_mutex before ih->mutex. The following works. 
- * - * AV: it had to become even uglier to start working ;-/ - */ - while (1) { - struct inotify_watch *watch; - struct list_head *watches; - struct super_block *sb; - struct inode *inode; - int how; - - mutex_lock(&ih->mutex); - watches = &ih->watches; - if (list_empty(watches)) { - mutex_unlock(&ih->mutex); - break; - } - watch = list_first_entry(watches, struct inotify_watch, h_list); - sb = watch->inode->i_sb; - how = pin_to_kill(ih, watch); - if (!how) - continue; - - inode = watch->inode; - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* make sure we didn't race with another list removal */ - if (likely(idr_find(&ih->idr, watch->wd))) { - remove_watch_no_event(watch, ih); - put_inotify_watch(watch); - } - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - unpin_and_kill(watch, how); - } - - /* free this handle: the put matching the get in inotify_init() */ - put_inotify_handle(ih); -} -EXPORT_SYMBOL_GPL(inotify_destroy); - -/** - * inotify_find_watch - find an existing watch for an (ih,inode) pair - * @ih: inotify handle - * @inode: inode to watch - * @watchp: pointer to existing inotify_watch - * - * Caller must pin given inode (via nameidata). - */ -s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, - struct inotify_watch **watchp) -{ - struct inotify_watch *old; - int ret = -ENOENT; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - old = inode_find_handle(inode, ih); - if (unlikely(old)) { - get_inotify_watch(old); /* caller must put watch */ - *watchp = old; - ret = old->wd; - } - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - - return ret; -} -EXPORT_SYMBOL_GPL(inotify_find_watch); - -/** - * inotify_find_update_watch - find and update the mask of an existing watch - * @ih: inotify handle - * @inode: inode's watch to update - * @mask: mask of events to watch - * - * Caller must pin given inode (via nameidata). - */ -s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, - u32 mask) -{ - struct inotify_watch *old; - int mask_add = 0; - int ret; - - if (mask & IN_MASK_ADD) - mask_add = 1; - - /* don't allow invalid bits: we don't want flags set */ - mask &= IN_ALL_EVENTS | IN_ONESHOT; - if (unlikely(!mask)) - return -EINVAL; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* - * Handle the case of re-adding a watch on an (inode,ih) pair that we - * are already watching. We just update the mask and return its wd. - */ - old = inode_find_handle(inode, ih); - if (unlikely(!old)) { - ret = -ENOENT; - goto out; - } - - if (mask_add) - old->mask |= mask; - else - old->mask = mask; - ret = old->wd; -out: - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(inotify_find_update_watch); - -/** - * inotify_add_watch - add a watch to an inotify instance - * @ih: inotify handle - * @watch: caller allocated watch structure - * @inode: inode to watch - * @mask: mask of events to watch - * - * Caller must pin given inode (via nameidata). - * Caller must ensure it only calls inotify_add_watch() once per watch. - * Calls inotify_handle_get_wd() so may sleep. 
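Taken together with the lifetime rules spelled out earlier in this file, the expected calling pattern for an in-kernel user of this API looks roughly like the sketch below. It is illustrative only: my_watch, my_ops and my_watch_inode() are hypothetical names, error handling is minimal, and the caller is assumed to already hold a reference pinning the inode, as the kerneldoc above requires.

#include <linux/inotify.h>
#include <linux/slab.h>
#include <linux/fs.h>

/* Hypothetical consumer of the API defined in this file. */
struct my_watch {
	struct inotify_watch wdata;		/* must embed the core watch */
	/* consumer-private state goes here */
};

static void my_handle_event(struct inotify_watch *w, u32 wd, u32 mask,
			    u32 cookie, const char *name, struct inode *inode)
{
	/* ... consume the event ... */
	if (mask & IN_IGNORED)
		put_inotify_watch(w);		/* the final put described above */
}

static void my_destroy_watch(struct inotify_watch *w)
{
	kfree(container_of(w, struct my_watch, wdata));
}

static const struct inotify_operations my_ops = {
	.handle_event	= my_handle_event,
	.destroy_watch	= my_destroy_watch,
};

static struct inotify_handle *my_ih;		/* obtained once via inotify_init(&my_ops) */

static s32 my_watch_inode(struct inode *inode)
{
	struct my_watch *w = kmalloc(sizeof(*w), GFP_KERNEL);
	s32 wd;

	if (!w)
		return -ENOMEM;
	inotify_init_watch(&w->wdata);		/* takes the initial reference */
	wd = inotify_add_watch(my_ih, &w->wdata, inode,
			       IN_MODIFY | IN_DELETE_SELF);
	if (wd < 0)
		kfree(w);			/* never registered, free directly */
	return wd;
}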
- */ -s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, - struct inode *inode, u32 mask) -{ - int ret = 0; - int newly_watched; - - /* don't allow invalid bits: we don't want flags set */ - mask &= IN_ALL_EVENTS | IN_ONESHOT; - if (unlikely(!mask)) - return -EINVAL; - watch->mask = mask; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* Initialize a new watch */ - ret = inotify_handle_get_wd(ih, watch); - if (unlikely(ret)) - goto out; - ret = watch->wd; - - /* save a reference to handle and bump the count to make it official */ - get_inotify_handle(ih); - watch->ih = ih; - - /* - * Save a reference to the inode and bump the ref count to make it - * official. We hold a reference to nameidata, which makes this safe. - */ - watch->inode = igrab(inode); - - /* Add the watch to the handle's and the inode's list */ - newly_watched = !inotify_inode_watched(inode); - list_add(&watch->h_list, &ih->watches); - list_add(&watch->i_list, &inode->inotify_watches); - /* - * Set child flags _after_ adding the watch, so there is no race - * windows where newly instantiated children could miss their parent's - * watched flag. - */ - if (newly_watched) - set_dentry_child_flags(inode, 1); - -out: - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(inotify_add_watch); - -/** - * inotify_clone_watch - put the watch next to existing one - * @old: already installed watch - * @new: new watch - * - * Caller must hold the inotify_mutex of inode we are dealing with; - * it is expected to remove the old watch before unlocking the inode. - */ -s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new) -{ - struct inotify_handle *ih = old->ih; - int ret = 0; - - new->mask = old->mask; - new->ih = ih; - - mutex_lock(&ih->mutex); - - /* Initialize a new watch */ - ret = inotify_handle_get_wd(ih, new); - if (unlikely(ret)) - goto out; - ret = new->wd; - - get_inotify_handle(ih); - - new->inode = igrab(old->inode); - - list_add(&new->h_list, &ih->watches); - list_add(&new->i_list, &old->inode->inotify_watches); -out: - mutex_unlock(&ih->mutex); - return ret; -} - -void inotify_evict_watch(struct inotify_watch *watch) -{ - get_inotify_watch(watch); - mutex_lock(&watch->ih->mutex); - inotify_remove_watch_locked(watch->ih, watch); - mutex_unlock(&watch->ih->mutex); -} - -/** - * inotify_rm_wd - remove a watch from an inotify instance - * @ih: inotify handle - * @wd: watch descriptor to remove - * - * Can sleep. - */ -int inotify_rm_wd(struct inotify_handle *ih, u32 wd) -{ - struct inotify_watch *watch; - struct super_block *sb; - struct inode *inode; - int how; - - mutex_lock(&ih->mutex); - watch = idr_find(&ih->idr, wd); - if (unlikely(!watch)) { - mutex_unlock(&ih->mutex); - return -EINVAL; - } - sb = watch->inode->i_sb; - how = pin_to_kill(ih, watch); - if (!how) - return 0; - - inode = watch->inode; - - mutex_lock(&inode->inotify_mutex); - mutex_lock(&ih->mutex); - - /* make sure that we did not race */ - if (likely(idr_find(&ih->idr, wd) == watch)) - inotify_remove_watch_locked(ih, watch); - - mutex_unlock(&ih->mutex); - mutex_unlock(&inode->inotify_mutex); - unpin_and_kill(watch, how); - - return 0; -} -EXPORT_SYMBOL_GPL(inotify_rm_wd); - -/** - * inotify_rm_watch - remove a watch from an inotify instance - * @ih: inotify handle - * @watch: watch to remove - * - * Can sleep. 
- */ -int inotify_rm_watch(struct inotify_handle *ih, - struct inotify_watch *watch) -{ - return inotify_rm_wd(ih, watch->wd); -} -EXPORT_SYMBOL_GPL(inotify_rm_watch); - -/* - * inotify_setup - core initialization function - */ -static int __init inotify_setup(void) -{ - atomic_set(&inotify_cookie, 0); - - return 0; -} - -module_init(inotify_setup); diff --git a/fs/inotify_user.c b/fs/inotify_user.c deleted file mode 100644 index 400f8064a54..00000000000 --- a/fs/inotify_user.c +++ /dev/null @@ -1,778 +0,0 @@ -/* - * fs/inotify_user.c - inotify support for userspace - * - * Authors: - * John McCutchan - * Robert Love - * - * Copyright (C) 2005 John McCutchan - * Copyright 2006 Hewlett-Packard Development Company, L.P. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2, or (at your option) any - * later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -static struct kmem_cache *watch_cachep __read_mostly; -static struct kmem_cache *event_cachep __read_mostly; - -static struct vfsmount *inotify_mnt __read_mostly; - -/* these are configurable via /proc/sys/fs/inotify/ */ -static int inotify_max_user_instances __read_mostly; -static int inotify_max_user_watches __read_mostly; -static int inotify_max_queued_events __read_mostly; - -/* - * Lock ordering: - * - * inotify_dev->up_mutex (ensures we don't re-add the same watch) - * inode->inotify_mutex (protects inode's watch list) - * inotify_handle->mutex (protects inotify_handle's watch list) - * inotify_dev->ev_mutex (protects device's event queue) - */ - -/* - * Lifetimes of the main data structures: - * - * inotify_device: Lifetime is managed by reference count, from - * sys_inotify_init() until release. Additional references can bump the count - * via get_inotify_dev() and drop the count via put_inotify_dev(). - * - * inotify_user_watch: Lifetime is from create_watch() to the receipt of an - * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the - * first event, or to inotify_destroy(). - */ - -/* - * struct inotify_device - represents an inotify instance - * - * This structure is protected by the mutex 'mutex'. - */ -struct inotify_device { - wait_queue_head_t wq; /* wait queue for i/o */ - struct mutex ev_mutex; /* protects event queue */ - struct mutex up_mutex; /* synchronizes watch updates */ - struct list_head events; /* list of queued events */ - struct user_struct *user; /* user who opened this dev */ - struct inotify_handle *ih; /* inotify handle */ - struct fasync_struct *fa; /* async notification */ - atomic_t count; /* reference count */ - unsigned int queue_size; /* size of the queue (bytes) */ - unsigned int event_count; /* number of pending events */ - unsigned int max_events; /* maximum number of events */ -}; - -/* - * struct inotify_kernel_event - An inotify event, originating from a watch and - * queued for user-space. A list of these is attached to each instance of the - * device. In read(), this list is walked and all events that can fit in the - * buffer are returned. 
- * - * Protected by dev->ev_mutex of the device in which we are queued. - */ -struct inotify_kernel_event { - struct inotify_event event; /* the user-space event */ - struct list_head list; /* entry in inotify_device's list */ - char *name; /* filename, if any */ -}; - -/* - * struct inotify_user_watch - our version of an inotify_watch, we add - * a reference to the associated inotify_device. - */ -struct inotify_user_watch { - struct inotify_device *dev; /* associated device */ - struct inotify_watch wdata; /* inotify watch data */ -}; - -#ifdef CONFIG_SYSCTL - -#include - -static int zero; - -ctl_table inotify_table[] = { - { - .ctl_name = INOTIFY_MAX_USER_INSTANCES, - .procname = "max_user_instances", - .data = &inotify_max_user_instances, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - { - .ctl_name = INOTIFY_MAX_USER_WATCHES, - .procname = "max_user_watches", - .data = &inotify_max_user_watches, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero, - }, - { - .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, - .procname = "max_queued_events", - .data = &inotify_max_queued_events, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .strategy = &sysctl_intvec, - .extra1 = &zero - }, - { .ctl_name = 0 } -}; -#endif /* CONFIG_SYSCTL */ - -static inline void get_inotify_dev(struct inotify_device *dev) -{ - atomic_inc(&dev->count); -} - -static inline void put_inotify_dev(struct inotify_device *dev) -{ - if (atomic_dec_and_test(&dev->count)) { - atomic_dec(&dev->user->inotify_devs); - free_uid(dev->user); - kfree(dev); - } -} - -/* - * free_inotify_user_watch - cleans up the watch and its references - */ -static void free_inotify_user_watch(struct inotify_watch *w) -{ - struct inotify_user_watch *watch; - struct inotify_device *dev; - - watch = container_of(w, struct inotify_user_watch, wdata); - dev = watch->dev; - - atomic_dec(&dev->user->inotify_watches); - put_inotify_dev(dev); - kmem_cache_free(watch_cachep, watch); -} - -/* - * kernel_event - create a new kernel event with the given parameters - * - * This function can sleep. - */ -static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, - const char *name) -{ - struct inotify_kernel_event *kevent; - - kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); - if (unlikely(!kevent)) - return NULL; - - /* we hand this out to user-space, so zero it just in case */ - memset(&kevent->event, 0, sizeof(struct inotify_event)); - - kevent->event.wd = wd; - kevent->event.mask = mask; - kevent->event.cookie = cookie; - - INIT_LIST_HEAD(&kevent->list); - - if (name) { - size_t len, rem, event_size = sizeof(struct inotify_event); - - /* - * We need to pad the filename so as to properly align an - * array of inotify_event structures. Because the structure is - * small and the common case is a small filename, we just round - * up to the next multiple of the structure's sizeof. This is - * simple and safe for all architectures. 
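To make the rounding concrete (sizeof(struct inotify_event) is 16 bytes: four 32-bit fields ahead of the flexible name array), a couple of worked cases for the code that follows:

	len = 4  ("foo" plus NUL)  ->  rem = 12, event.len = 16
	len = 16                   ->  rem = 0,  event.len = 16
	len = 20                   ->  rem = 12, event.len = 32

In other words, event.len always ends up as len rounded up to a multiple of sizeof(struct inotify_event).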
- */ - len = strlen(name) + 1; - rem = event_size - len; - if (len > event_size) { - rem = event_size - (len % event_size); - if (len % event_size == 0) - rem = 0; - } - - kevent->name = kmalloc(len + rem, GFP_KERNEL); - if (unlikely(!kevent->name)) { - kmem_cache_free(event_cachep, kevent); - return NULL; - } - memcpy(kevent->name, name, len); - if (rem) - memset(kevent->name + len, 0, rem); - kevent->event.len = len + rem; - } else { - kevent->event.len = 0; - kevent->name = NULL; - } - - return kevent; -} - -/* - * inotify_dev_get_event - return the next event in the given dev's queue - * - * Caller must hold dev->ev_mutex. - */ -static inline struct inotify_kernel_event * -inotify_dev_get_event(struct inotify_device *dev) -{ - return list_entry(dev->events.next, struct inotify_kernel_event, list); -} - -/* - * inotify_dev_get_last_event - return the last event in the given dev's queue - * - * Caller must hold dev->ev_mutex. - */ -static inline struct inotify_kernel_event * -inotify_dev_get_last_event(struct inotify_device *dev) -{ - if (list_empty(&dev->events)) - return NULL; - return list_entry(dev->events.prev, struct inotify_kernel_event, list); -} - -/* - * inotify_dev_queue_event - event handler registered with core inotify, adds - * a new event to the given device - * - * Can sleep (calls kernel_event()). - */ -static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, - u32 cookie, const char *name, - struct inode *ignored) -{ - struct inotify_user_watch *watch; - struct inotify_device *dev; - struct inotify_kernel_event *kevent, *last; - - watch = container_of(w, struct inotify_user_watch, wdata); - dev = watch->dev; - - mutex_lock(&dev->ev_mutex); - - /* we can safely put the watch as we don't reference it while - * generating the event - */ - if (mask & IN_IGNORED || w->mask & IN_ONESHOT) - put_inotify_watch(w); /* final put */ - - /* coalescing: drop this event if it is a dupe of the previous */ - last = inotify_dev_get_last_event(dev); - if (last && last->event.mask == mask && last->event.wd == wd && - last->event.cookie == cookie) { - const char *lastname = last->name; - - if (!name && !lastname) - goto out; - if (name && lastname && !strcmp(lastname, name)) - goto out; - } - - /* the queue overflowed and we already sent the Q_OVERFLOW event */ - if (unlikely(dev->event_count > dev->max_events)) - goto out; - - /* if the queue overflows, we need to notify user space */ - if (unlikely(dev->event_count == dev->max_events)) - kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); - else - kevent = kernel_event(wd, mask, cookie, name); - - if (unlikely(!kevent)) - goto out; - - /* queue the event and wake up anyone waiting */ - dev->event_count++; - dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; - list_add_tail(&kevent->list, &dev->events); - wake_up_interruptible(&dev->wq); - kill_fasync(&dev->fa, SIGIO, POLL_IN); - -out: - mutex_unlock(&dev->ev_mutex); -} - -/* - * remove_kevent - cleans up the given kevent - * - * Caller must hold dev->ev_mutex. - */ -static void remove_kevent(struct inotify_device *dev, - struct inotify_kernel_event *kevent) -{ - list_del(&kevent->list); - - dev->event_count--; - dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; -} - -/* - * free_kevent - frees the given kevent. 
- */ -static void free_kevent(struct inotify_kernel_event *kevent) -{ - kfree(kevent->name); - kmem_cache_free(event_cachep, kevent); -} - -/* - * inotify_dev_event_dequeue - destroy an event on the given device - * - * Caller must hold dev->ev_mutex. - */ -static void inotify_dev_event_dequeue(struct inotify_device *dev) -{ - if (!list_empty(&dev->events)) { - struct inotify_kernel_event *kevent; - kevent = inotify_dev_get_event(dev); - remove_kevent(dev, kevent); - free_kevent(kevent); - } -} - -/* - * find_inode - resolve a user-given path to a specific inode - */ -static int find_inode(const char __user *dirname, struct path *path, - unsigned flags) -{ - int error; - - error = user_path_at(AT_FDCWD, dirname, flags, path); - if (error) - return error; - /* you can only watch an inode if you have read permissions on it */ - error = inode_permission(path->dentry->d_inode, MAY_READ); - if (error) - path_put(path); - return error; -} - -/* - * create_watch - creates a watch on the given device. - * - * Callers must hold dev->up_mutex. - */ -static int create_watch(struct inotify_device *dev, struct inode *inode, - u32 mask) -{ - struct inotify_user_watch *watch; - int ret; - - if (atomic_read(&dev->user->inotify_watches) >= - inotify_max_user_watches) - return -ENOSPC; - - watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); - if (unlikely(!watch)) - return -ENOMEM; - - /* save a reference to device and bump the count to make it official */ - get_inotify_dev(dev); - watch->dev = dev; - - atomic_inc(&dev->user->inotify_watches); - - inotify_init_watch(&watch->wdata); - ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask); - if (ret < 0) - free_inotify_user_watch(&watch->wdata); - - return ret; -} - -/* Device Interface */ - -static unsigned int inotify_poll(struct file *file, poll_table *wait) -{ - struct inotify_device *dev = file->private_data; - int ret = 0; - - poll_wait(file, &dev->wq, wait); - mutex_lock(&dev->ev_mutex); - if (!list_empty(&dev->events)) - ret = POLLIN | POLLRDNORM; - mutex_unlock(&dev->ev_mutex); - - return ret; -} - -static ssize_t inotify_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - size_t event_size = sizeof (struct inotify_event); - struct inotify_device *dev; - char __user *start; - int ret; - DEFINE_WAIT(wait); - - start = buf; - dev = file->private_data; - - while (1) { - - prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); - - mutex_lock(&dev->ev_mutex); - if (!list_empty(&dev->events)) { - ret = 0; - break; - } - mutex_unlock(&dev->ev_mutex); - - if (file->f_flags & O_NONBLOCK) { - ret = -EAGAIN; - break; - } - - if (signal_pending(current)) { - ret = -EINTR; - break; - } - - schedule(); - } - - finish_wait(&dev->wq, &wait); - if (ret) - return ret; - - while (1) { - struct inotify_kernel_event *kevent; - - ret = buf - start; - if (list_empty(&dev->events)) - break; - - kevent = inotify_dev_get_event(dev); - if (event_size + kevent->event.len > count) { - if (ret == 0 && count > 0) { - /* - * could not get a single event because we - * didn't have enough buffer space. - */ - ret = -EINVAL; - } - break; - } - remove_kevent(dev, kevent); - - /* - * Must perform the copy_to_user outside the mutex in order - * to avoid a lock order reversal with mmap_sem. 
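Seen from the other end of this copy_to_user(), read() on the inotify fd returns a packed stream of struct inotify_event records, each followed by event.len bytes of NUL-padded name. Below is a minimal userspace consumer, sketched against the standard inotify(7)/glibc interface; the watched path and buffer size are arbitrary choices and error handling is omitted.

#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init();                  /* sys_inotify_init() above */
	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	ssize_t n = read(fd, buf, sizeof(buf));   /* blocks in inotify_read() */
	char *p = buf;

	while (n > 0 && p < buf + n) {
		struct inotify_event *ev = (struct inotify_event *)p;

		printf("wd=%d mask=%#x name=%s\n",
		       ev->wd, (unsigned)ev->mask, ev->len ? ev->name : "");
		p += sizeof(*ev) + ev->len;       /* len already includes the padding */
	}
	close(fd);
	return wd < 0 ? 1 : 0;
}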
- */ - mutex_unlock(&dev->ev_mutex); - - if (copy_to_user(buf, &kevent->event, event_size)) { - ret = -EFAULT; - break; - } - buf += event_size; - count -= event_size; - - if (kevent->name) { - if (copy_to_user(buf, kevent->name, kevent->event.len)){ - ret = -EFAULT; - break; - } - buf += kevent->event.len; - count -= kevent->event.len; - } - - free_kevent(kevent); - - mutex_lock(&dev->ev_mutex); - } - mutex_unlock(&dev->ev_mutex); - - return ret; -} - -static int inotify_fasync(int fd, struct file *file, int on) -{ - struct inotify_device *dev = file->private_data; - - return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; -} - -static int inotify_release(struct inode *ignored, struct file *file) -{ - struct inotify_device *dev = file->private_data; - - inotify_destroy(dev->ih); - - /* destroy all of the events on this device */ - mutex_lock(&dev->ev_mutex); - while (!list_empty(&dev->events)) - inotify_dev_event_dequeue(dev); - mutex_unlock(&dev->ev_mutex); - - /* free this device: the put matching the get in inotify_init() */ - put_inotify_dev(dev); - - return 0; -} - -static long inotify_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct inotify_device *dev; - void __user *p; - int ret = -ENOTTY; - - dev = file->private_data; - p = (void __user *) arg; - - switch (cmd) { - case FIONREAD: - ret = put_user(dev->queue_size, (int __user *) p); - break; - } - - return ret; -} - -static const struct file_operations inotify_fops = { - .poll = inotify_poll, - .read = inotify_read, - .fasync = inotify_fasync, - .release = inotify_release, - .unlocked_ioctl = inotify_ioctl, - .compat_ioctl = inotify_ioctl, -}; - -static const struct inotify_operations inotify_user_ops = { - .handle_event = inotify_dev_queue_event, - .destroy_watch = free_inotify_user_watch, -}; - -asmlinkage long sys_inotify_init1(int flags) -{ - struct inotify_device *dev; - struct inotify_handle *ih; - struct user_struct *user; - struct file *filp; - int fd, ret; - - /* Check the IN_* constants for consistency. 
*/ - BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); - BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); - - if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) - return -EINVAL; - - fd = get_unused_fd_flags(flags & O_CLOEXEC); - if (fd < 0) - return fd; - - filp = get_empty_filp(); - if (!filp) { - ret = -ENFILE; - goto out_put_fd; - } - - user = get_current_user(); - if (unlikely(atomic_read(&user->inotify_devs) >= - inotify_max_user_instances)) { - ret = -EMFILE; - goto out_free_uid; - } - - dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); - if (unlikely(!dev)) { - ret = -ENOMEM; - goto out_free_uid; - } - - ih = inotify_init(&inotify_user_ops); - if (IS_ERR(ih)) { - ret = PTR_ERR(ih); - goto out_free_dev; - } - dev->ih = ih; - dev->fa = NULL; - - filp->f_op = &inotify_fops; - filp->f_path.mnt = mntget(inotify_mnt); - filp->f_path.dentry = dget(inotify_mnt->mnt_root); - filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; - filp->f_mode = FMODE_READ; - filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); - filp->private_data = dev; - - INIT_LIST_HEAD(&dev->events); - init_waitqueue_head(&dev->wq); - mutex_init(&dev->ev_mutex); - mutex_init(&dev->up_mutex); - dev->event_count = 0; - dev->queue_size = 0; - dev->max_events = inotify_max_queued_events; - dev->user = user; - atomic_set(&dev->count, 0); - - get_inotify_dev(dev); - atomic_inc(&user->inotify_devs); - fd_install(fd, filp); - - return fd; -out_free_dev: - kfree(dev); -out_free_uid: - free_uid(user); - put_filp(filp); -out_put_fd: - put_unused_fd(fd); - return ret; -} - -asmlinkage long sys_inotify_init(void) -{ - return sys_inotify_init1(0); -} - -asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask) -{ - struct inode *inode; - struct inotify_device *dev; - struct path path; - struct file *filp; - int ret, fput_needed; - unsigned flags = 0; - - filp = fget_light(fd, &fput_needed); - if (unlikely(!filp)) - return -EBADF; - - /* verify that this is indeed an inotify instance */ - if (unlikely(filp->f_op != &inotify_fops)) { - ret = -EINVAL; - goto fput_and_out; - } - - if (!(mask & IN_DONT_FOLLOW)) - flags |= LOOKUP_FOLLOW; - if (mask & IN_ONLYDIR) - flags |= LOOKUP_DIRECTORY; - - ret = find_inode(pathname, &path, flags); - if (unlikely(ret)) - goto fput_and_out; - - /* inode held in place by reference to path; dev by fget on fd */ - inode = path.dentry->d_inode; - dev = filp->private_data; - - mutex_lock(&dev->up_mutex); - ret = inotify_find_update_watch(dev->ih, inode, mask); - if (ret == -ENOENT) - ret = create_watch(dev, inode, mask); - mutex_unlock(&dev->up_mutex); - - path_put(&path); -fput_and_out: - fput_light(filp, fput_needed); - return ret; -} - -asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) -{ - struct file *filp; - struct inotify_device *dev; - int ret, fput_needed; - - filp = fget_light(fd, &fput_needed); - if (unlikely(!filp)) - return -EBADF; - - /* verify that this is indeed an inotify instance */ - if (unlikely(filp->f_op != &inotify_fops)) { - ret = -EINVAL; - goto out; - } - - dev = filp->private_data; - - /* we free our watch data when we get IN_IGNORED */ - ret = inotify_rm_wd(dev->ih, wd); - -out: - fput_light(filp, fput_needed); - return ret; -} - -static int -inotify_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, struct vfsmount *mnt) -{ - return get_sb_pseudo(fs_type, "inotify", NULL, - INOTIFYFS_SUPER_MAGIC, mnt); -} - -static struct file_system_type inotify_fs_type = { - .name = "inotifyfs", - .get_sb = inotify_get_sb, - .kill_sb = 
kill_anon_super, -}; - -/* - * inotify_user_setup - Our initialization function. Note that we cannnot return - * error because we have compiled-in VFS hooks. So an (unlikely) failure here - * must result in panic(). - */ -static int __init inotify_user_setup(void) -{ - int ret; - - ret = register_filesystem(&inotify_fs_type); - if (unlikely(ret)) - panic("inotify: register_filesystem returned %d!\n", ret); - - inotify_mnt = kern_mount(&inotify_fs_type); - if (IS_ERR(inotify_mnt)) - panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); - - inotify_max_queued_events = 16384; - inotify_max_user_instances = 128; - inotify_max_user_watches = 8192; - - watch_cachep = kmem_cache_create("inotify_watch_cache", - sizeof(struct inotify_user_watch), - 0, SLAB_PANIC, NULL); - event_cachep = kmem_cache_create("inotify_event_cache", - sizeof(struct inotify_kernel_event), - 0, SLAB_PANIC, NULL); - - return 0; -} - -module_init(inotify_user_setup); diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig new file mode 100644 index 00000000000..50914d7303c --- /dev/null +++ b/fs/notify/Kconfig @@ -0,0 +1,2 @@ +source "fs/notify/dnotify/Kconfig" +source "fs/notify/inotify/Kconfig" diff --git a/fs/notify/Makefile b/fs/notify/Makefile new file mode 100644 index 00000000000..5a95b6010ce --- /dev/null +++ b/fs/notify/Makefile @@ -0,0 +1,2 @@ +obj-y += dnotify/ +obj-y += inotify/ diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig new file mode 100644 index 00000000000..26adf5dfa64 --- /dev/null +++ b/fs/notify/dnotify/Kconfig @@ -0,0 +1,10 @@ +config DNOTIFY + bool "Dnotify support" + default y + help + Dnotify is a directory-based per-fd file change notification system + that uses signals to communicate events to user-space. There exist + superior alternatives, but some applications may still rely on + dnotify. + + If unsure, say Y. diff --git a/fs/notify/dnotify/Makefile b/fs/notify/dnotify/Makefile new file mode 100644 index 00000000000..f145251dcad --- /dev/null +++ b/fs/notify/dnotify/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DNOTIFY) += dnotify.o diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c new file mode 100644 index 00000000000..b0aa2cde80b --- /dev/null +++ b/fs/notify/dnotify/dnotify.c @@ -0,0 +1,191 @@ +/* + * Directory notifications for Linux. + * + * Copyright (C) 2000,2001,2002 Stephen Rothwell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +int dir_notify_enable __read_mostly = 1; + +static struct kmem_cache *dn_cache __read_mostly; + +static void redo_inode_mask(struct inode *inode) +{ + unsigned long new_mask; + struct dnotify_struct *dn; + + new_mask = 0; + for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) + new_mask |= dn->dn_mask & ~DN_MULTISHOT; + inode->i_dnotify_mask = new_mask; +} + +void dnotify_flush(struct file *filp, fl_owner_t id) +{ + struct dnotify_struct *dn; + struct dnotify_struct **prev; + struct inode *inode; + + inode = filp->f_path.dentry->d_inode; + if (!S_ISDIR(inode->i_mode)) + return; + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((dn = *prev) != NULL) { + if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { + *prev = dn->dn_next; + redo_inode_mask(inode); + kmem_cache_free(dn_cache, dn); + break; + } + prev = &dn->dn_next; + } + spin_unlock(&inode->i_lock); +} + +int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) +{ + struct dnotify_struct *dn; + struct dnotify_struct *odn; + struct dnotify_struct **prev; + struct inode *inode; + fl_owner_t id = current->files; + struct file *f; + int error = 0; + + if ((arg & ~DN_MULTISHOT) == 0) { + dnotify_flush(filp, id); + return 0; + } + if (!dir_notify_enable) + return -EINVAL; + inode = filp->f_path.dentry->d_inode; + if (!S_ISDIR(inode->i_mode)) + return -ENOTDIR; + dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); + if (dn == NULL) + return -ENOMEM; + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((odn = *prev) != NULL) { + if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { + odn->dn_fd = fd; + odn->dn_mask |= arg; + inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; + goto out_free; + } + prev = &odn->dn_next; + } + + rcu_read_lock(); + f = fcheck(fd); + rcu_read_unlock(); + /* we'd lost the race with close(), sod off silently */ + /* note that inode->i_lock prevents reordering problems + * between accesses to descriptor table and ->i_dnotify */ + if (f != filp) + goto out_free; + + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); + if (error) + goto out_free; + + dn->dn_mask = arg; + dn->dn_fd = fd; + dn->dn_filp = filp; + dn->dn_owner = id; + inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; + dn->dn_next = inode->i_dnotify; + inode->i_dnotify = dn; + spin_unlock(&inode->i_lock); + return 0; + +out_free: + spin_unlock(&inode->i_lock); + kmem_cache_free(dn_cache, dn); + return error; +} + +void __inode_dir_notify(struct inode *inode, unsigned long event) +{ + struct dnotify_struct * dn; + struct dnotify_struct **prev; + struct fown_struct * fown; + int changed = 0; + + spin_lock(&inode->i_lock); + prev = &inode->i_dnotify; + while ((dn = *prev) != NULL) { + if ((dn->dn_mask & event) == 0) { + prev = &dn->dn_next; + continue; + } + fown = &dn->dn_filp->f_owner; + send_sigio(fown, dn->dn_fd, POLL_MSG); + if (dn->dn_mask & DN_MULTISHOT) + prev = &dn->dn_next; + else { + *prev = dn->dn_next; + changed = 1; + kmem_cache_free(dn_cache, dn); + } + } + if (changed) + redo_inode_mask(inode); + spin_unlock(&inode->i_lock); +} + +EXPORT_SYMBOL(__inode_dir_notify); + +/* + * This is hopelessly wrong, but unfixable without API changes. At + * least it doesn't oops the kernel... + * + * To safely access ->d_parent we need to keep d_move away from it. Use the + * dentry's d_lock for this. 
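For reference, the userspace side of fcntl_dirnotify() above is the fcntl()-based dnotify interface: the watcher gets a signal (SIGIO by default, or a queued realtime signal chosen with F_SETSIG, in which case si_fd identifies the directory). A small illustrative sketch; the watched path and the choice of SIGRTMIN are arbitrary and error handling is omitted.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t notified_fd = -1;

static void on_dnotify(int sig, siginfo_t *si, void *ctx)
{
	notified_fd = si->si_fd;	/* directory whose contents changed */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = on_dnotify,
				.sa_flags = SA_SIGINFO };
	int dirfd = open("/tmp", O_RDONLY);

	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);
	fcntl(dirfd, F_SETSIG, SIGRTMIN);	/* RT signal instead of SIGIO */
	fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_DELETE | DN_MULTISHOT);

	pause();				/* wait for a notification */
	printf("change in directory fd %d\n", (int)notified_fd);
	close(dirfd);
	return 0;
}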
+ */ +void dnotify_parent(struct dentry *dentry, unsigned long event) +{ + struct dentry *parent; + + if (!dir_notify_enable) + return; + + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + if (parent->d_inode->i_dnotify_mask & event) { + dget(parent); + spin_unlock(&dentry->d_lock); + __inode_dir_notify(parent->d_inode, event); + dput(parent); + } else { + spin_unlock(&dentry->d_lock); + } +} +EXPORT_SYMBOL_GPL(dnotify_parent); + +static int __init dnotify_init(void) +{ + dn_cache = kmem_cache_create("dnotify_cache", + sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); + return 0; +} + +module_init(dnotify_init) diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig new file mode 100644 index 00000000000..44679284102 --- /dev/null +++ b/fs/notify/inotify/Kconfig @@ -0,0 +1,27 @@ +config INOTIFY + bool "Inotify file change notification support" + default y + ---help--- + Say Y here to enable inotify support. Inotify is a file change + notification system and a replacement for dnotify. Inotify fixes + numerous shortcomings in dnotify and introduces several new features + including multiple file events, one-shot support, and unmount + notification. + + For more information, see + + If unsure, say Y. + +config INOTIFY_USER + bool "Inotify support for userspace" + depends on INOTIFY + default y + ---help--- + Say Y here to enable inotify support for userspace, including the + associated system calls. Inotify allows monitoring of both files and + directories via a single open fd. Events are read from the file + descriptor, which is also select()- and poll()-able. + + For more information, see + + If unsure, say Y. diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile new file mode 100644 index 00000000000..e290f3bb9d8 --- /dev/null +++ b/fs/notify/inotify/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INOTIFY) += inotify.o +obj-$(CONFIG_INOTIFY_USER) += inotify_user.o diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c new file mode 100644 index 00000000000..dae3f28f30d --- /dev/null +++ b/fs/notify/inotify/inotify.c @@ -0,0 +1,913 @@ +/* + * fs/inotify.c - inode-based file event notifications + * + * Authors: + * John McCutchan + * Robert Love + * + * Kernel API added by: Amy Griffis + * + * Copyright (C) 2005 John McCutchan + * Copyright 2006 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static atomic_t inotify_cookie; + +/* + * Lock ordering: + * + * dentry->d_lock (used to keep d_move() away from dentry->d_parent) + * iprune_mutex (synchronize shrink_icache_memory()) + * inode_lock (protects the super_block->s_inodes list) + * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list) + * inotify_handle->mutex (protects inotify_handle and watches->h_list) + * + * The inode->inotify_mutex and inotify_handle->mutex and held during execution + * of a caller's event handler. 
Thus, the caller must not hold any locks + * taken in their event handler while calling any of the published inotify + * interfaces. + */ + +/* + * Lifetimes of the three main data structures--inotify_handle, inode, and + * inotify_watch--are managed by reference count. + * + * inotify_handle: Lifetime is from inotify_init() to inotify_destroy(). + * Additional references can bump the count via get_inotify_handle() and drop + * the count via put_inotify_handle(). + * + * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch() + * to remove_watch_no_event(). Additional references can bump the count via + * get_inotify_watch() and drop the count via put_inotify_watch(). The caller + * is reponsible for the final put after receiving IN_IGNORED, or when using + * IN_ONESHOT after receiving the first event. Inotify does the final put if + * inotify_destroy() is called. + * + * inode: Pinned so long as the inode is associated with a watch, from + * inotify_add_watch() to the final put_inotify_watch(). + */ + +/* + * struct inotify_handle - represents an inotify instance + * + * This structure is protected by the mutex 'mutex'. + */ +struct inotify_handle { + struct idr idr; /* idr mapping wd -> watch */ + struct mutex mutex; /* protects this bad boy */ + struct list_head watches; /* list of watches */ + atomic_t count; /* reference count */ + u32 last_wd; /* the last wd allocated */ + const struct inotify_operations *in_ops; /* inotify caller operations */ +}; + +static inline void get_inotify_handle(struct inotify_handle *ih) +{ + atomic_inc(&ih->count); +} + +static inline void put_inotify_handle(struct inotify_handle *ih) +{ + if (atomic_dec_and_test(&ih->count)) { + idr_destroy(&ih->idr); + kfree(ih); + } +} + +/** + * get_inotify_watch - grab a reference to an inotify_watch + * @watch: watch to grab + */ +void get_inotify_watch(struct inotify_watch *watch) +{ + atomic_inc(&watch->count); +} +EXPORT_SYMBOL_GPL(get_inotify_watch); + +int pin_inotify_watch(struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + spin_lock(&sb_lock); + if (sb->s_count >= S_BIAS) { + atomic_inc(&sb->s_active); + spin_unlock(&sb_lock); + atomic_inc(&watch->count); + return 1; + } + spin_unlock(&sb_lock); + return 0; +} + +/** + * put_inotify_watch - decrements the ref count on a given watch. cleans up + * watch references if the count reaches zero. inotify_watch is freed by + * inotify callers via the destroy_watch() op. + * @watch: watch to release + */ +void put_inotify_watch(struct inotify_watch *watch) +{ + if (atomic_dec_and_test(&watch->count)) { + struct inotify_handle *ih = watch->ih; + + iput(watch->inode); + ih->in_ops->destroy_watch(watch); + put_inotify_handle(ih); + } +} +EXPORT_SYMBOL_GPL(put_inotify_watch); + +void unpin_inotify_watch(struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + put_inotify_watch(watch); + deactivate_super(sb); +} + +/* + * inotify_handle_get_wd - returns the next WD for use by the given handle + * + * Callers must hold ih->mutex. This function can sleep. 
+ */ +static int inotify_handle_get_wd(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + int ret; + + do { + if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL))) + return -ENOSPC; + ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd); + } while (ret == -EAGAIN); + + if (likely(!ret)) + ih->last_wd = watch->wd; + + return ret; +} + +/* + * inotify_inode_watched - returns nonzero if there are watches on this inode + * and zero otherwise. We call this lockless, we do not care if we race. + */ +static inline int inotify_inode_watched(struct inode *inode) +{ + return !list_empty(&inode->inotify_watches); +} + +/* + * Get child dentry flag into synch with parent inode. + * Flag should always be clear for negative dentrys. + */ +static void set_dentry_child_flags(struct inode *inode, int watched) +{ + struct dentry *alias; + + spin_lock(&dcache_lock); + list_for_each_entry(alias, &inode->i_dentry, d_alias) { + struct dentry *child; + + list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) { + if (!child->d_inode) + continue; + + spin_lock(&child->d_lock); + if (watched) + child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + else + child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED; + spin_unlock(&child->d_lock); + } + } + spin_unlock(&dcache_lock); +} + +/* + * inotify_find_handle - find the watch associated with the given inode and + * handle + * + * Callers must hold inode->inotify_mutex. + */ +static struct inotify_watch *inode_find_handle(struct inode *inode, + struct inotify_handle *ih) +{ + struct inotify_watch *watch; + + list_for_each_entry(watch, &inode->inotify_watches, i_list) { + if (watch->ih == ih) + return watch; + } + + return NULL; +} + +/* + * remove_watch_no_event - remove watch without the IN_IGNORED event. + * + * Callers must hold both inode->inotify_mutex and ih->mutex. + */ +static void remove_watch_no_event(struct inotify_watch *watch, + struct inotify_handle *ih) +{ + list_del(&watch->i_list); + list_del(&watch->h_list); + + if (!inotify_inode_watched(watch->inode)) + set_dentry_child_flags(watch->inode, 0); + + idr_remove(&ih->idr, watch->wd); +} + +/** + * inotify_remove_watch_locked - Remove a watch from both the handle and the + * inode. Sends the IN_IGNORED event signifying that the inode is no longer + * watched. May be invoked from a caller's event handler. + * @ih: inotify handle associated with watch + * @watch: watch to remove + * + * Callers must hold both inode->inotify_mutex and ih->mutex. 
+ */ +void inotify_remove_watch_locked(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + remove_watch_no_event(watch, ih); + ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL); +} +EXPORT_SYMBOL_GPL(inotify_remove_watch_locked); + +/* Kernel API for producing events */ + +/* + * inotify_d_instantiate - instantiate dcache entry for inode + */ +void inotify_d_instantiate(struct dentry *entry, struct inode *inode) +{ + struct dentry *parent; + + if (!inode) + return; + + spin_lock(&entry->d_lock); + parent = entry->d_parent; + if (parent->d_inode && inotify_inode_watched(parent->d_inode)) + entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + spin_unlock(&entry->d_lock); +} + +/* + * inotify_d_move - dcache entry has been moved + */ +void inotify_d_move(struct dentry *entry) +{ + struct dentry *parent; + + parent = entry->d_parent; + if (inotify_inode_watched(parent->d_inode)) + entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED; + else + entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED; +} + +/** + * inotify_inode_queue_event - queue an event to all watches on this inode + * @inode: inode event is originating from + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + * @n_inode: inode associated with name + */ +void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie, + const char *name, struct inode *n_inode) +{ + struct inotify_watch *watch, *next; + + if (!inotify_inode_watched(inode)) + return; + + mutex_lock(&inode->inotify_mutex); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + u32 watch_mask = watch->mask; + if (watch_mask & mask) { + struct inotify_handle *ih= watch->ih; + mutex_lock(&ih->mutex); + if (watch_mask & IN_ONESHOT) + remove_watch_no_event(watch, ih); + ih->in_ops->handle_event(watch, watch->wd, mask, cookie, + name, n_inode); + mutex_unlock(&ih->mutex); + } + } + mutex_unlock(&inode->inotify_mutex); +} +EXPORT_SYMBOL_GPL(inotify_inode_queue_event); + +/** + * inotify_dentry_parent_queue_event - queue an event to a dentry's parent + * @dentry: the dentry in question, we queue against this dentry's parent + * @mask: event mask describing this event + * @cookie: cookie for synchronization, or zero + * @name: filename, if any + */ +void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask, + u32 cookie, const char *name) +{ + struct dentry *parent; + struct inode *inode; + + if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED)) + return; + + spin_lock(&dentry->d_lock); + parent = dentry->d_parent; + inode = parent->d_inode; + + if (inotify_inode_watched(inode)) { + dget(parent); + spin_unlock(&dentry->d_lock); + inotify_inode_queue_event(inode, mask, cookie, name, + dentry->d_inode); + dput(parent); + } else + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event); + +/** + * inotify_get_cookie - return a unique cookie for use in synchronizing events. + */ +u32 inotify_get_cookie(void) +{ + return atomic_inc_return(&inotify_cookie); +} +EXPORT_SYMBOL_GPL(inotify_get_cookie); + +/** + * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes. + * @list: list of inodes being unmounted (sb->s_inodes) + * + * Called with inode_lock held, protecting the unmounting super block's list + * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. + * We temporarily drop inode_lock, however, and CAN block. 
+ */ +void inotify_unmount_inodes(struct list_head *list) +{ + struct inode *inode, *next_i, *need_iput = NULL; + + list_for_each_entry_safe(inode, next_i, list, i_sb_list) { + struct inotify_watch *watch, *next_w; + struct inode *need_iput_tmp; + struct list_head *watches; + + /* + * If i_count is zero, the inode cannot have any watches and + * doing an __iget/iput with MS_ACTIVE clear would actually + * evict all inodes with zero i_count from icache which is + * unnecessarily violent and may in fact be illegal to do. + */ + if (!atomic_read(&inode->i_count)) + continue; + + /* + * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or + * I_WILL_FREE which is fine because by that point the inode + * cannot have any associated watches. + */ + if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE)) + continue; + + need_iput_tmp = need_iput; + need_iput = NULL; + /* In case inotify_remove_watch_locked() drops a reference. */ + if (inode != need_iput_tmp) + __iget(inode); + else + need_iput_tmp = NULL; + /* In case the dropping of a reference would nuke next_i. */ + if ((&next_i->i_sb_list != list) && + atomic_read(&next_i->i_count) && + !(next_i->i_state & (I_CLEAR | I_FREEING | + I_WILL_FREE))) { + __iget(next_i); + need_iput = next_i; + } + + /* + * We can safely drop inode_lock here because we hold + * references on both inode and next_i. Also no new inodes + * will be added since the umount has begun. Finally, + * iprune_mutex keeps shrink_icache_memory() away. + */ + spin_unlock(&inode_lock); + + if (need_iput_tmp) + iput(need_iput_tmp); + + /* for each watch, send IN_UNMOUNT and then remove it */ + mutex_lock(&inode->inotify_mutex); + watches = &inode->inotify_watches; + list_for_each_entry_safe(watch, next_w, watches, i_list) { + struct inotify_handle *ih= watch->ih; + get_inotify_watch(watch); + mutex_lock(&ih->mutex); + ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0, + NULL, NULL); + inotify_remove_watch_locked(ih, watch); + mutex_unlock(&ih->mutex); + put_inotify_watch(watch); + } + mutex_unlock(&inode->inotify_mutex); + iput(inode); + + spin_lock(&inode_lock); + } +} +EXPORT_SYMBOL_GPL(inotify_unmount_inodes); + +/** + * inotify_inode_is_dead - an inode has been deleted, cleanup any watches + * @inode: inode that is about to be removed + */ +void inotify_inode_is_dead(struct inode *inode) +{ + struct inotify_watch *watch, *next; + + mutex_lock(&inode->inotify_mutex); + list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) { + struct inotify_handle *ih = watch->ih; + mutex_lock(&ih->mutex); + inotify_remove_watch_locked(ih, watch); + mutex_unlock(&ih->mutex); + } + mutex_unlock(&inode->inotify_mutex); +} +EXPORT_SYMBOL_GPL(inotify_inode_is_dead); + +/* Kernel Consumer API */ + +/** + * inotify_init - allocate and initialize an inotify instance + * @ops: caller's inotify operations + */ +struct inotify_handle *inotify_init(const struct inotify_operations *ops) +{ + struct inotify_handle *ih; + + ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL); + if (unlikely(!ih)) + return ERR_PTR(-ENOMEM); + + idr_init(&ih->idr); + INIT_LIST_HEAD(&ih->watches); + mutex_init(&ih->mutex); + ih->last_wd = 0; + ih->in_ops = ops; + atomic_set(&ih->count, 0); + get_inotify_handle(ih); + + return ih; +} +EXPORT_SYMBOL_GPL(inotify_init); + +/** + * inotify_init_watch - initialize an inotify watch + * @watch: watch to initialize + */ +void inotify_init_watch(struct inotify_watch *watch) +{ + INIT_LIST_HEAD(&watch->h_list); + INIT_LIST_HEAD(&watch->i_list); + 
atomic_set(&watch->count, 0); + get_inotify_watch(watch); /* initial get */ +} +EXPORT_SYMBOL_GPL(inotify_init_watch); + +/* + * Watch removals suck violently. To kick the watch out we need (in this + * order) inode->inotify_mutex and ih->mutex. That's fine if we have + * a hold on inode; however, for all other cases we need to make damn sure + * we don't race with umount. We can *NOT* just grab a reference to a + * watch - inotify_unmount_inodes() will happily sail past it and we'll end + * with reference to inode potentially outliving its superblock. Ideally + * we just want to grab an active reference to superblock if we can; that + * will make sure we won't go into inotify_umount_inodes() until we are + * done. Cleanup is just deactivate_super(). However, that leaves a messy + * case - what if we *are* racing with umount() and active references to + * superblock can't be acquired anymore? We can bump ->s_count, grab + * ->s_umount, which will almost certainly wait until the superblock is shut + * down and the watch in question is pining for fjords. That's fine, but + * there is a problem - we might have hit the window between ->s_active + * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock + * is past the point of no return and is heading for shutdown) and the + * moment when deactivate_super() acquires ->s_umount. We could just do + * drop_super() yield() and retry, but that's rather antisocial and this + * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having + * found that we'd got there first (i.e. that ->s_root is non-NULL) we know + * that we won't race with inotify_umount_inodes(). So we could grab a + * reference to watch and do the rest as above, just with drop_super() instead + * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we + * could grab ->s_umount. So the watch could've been gone already. + * + * That still can be dealt with - we need to save watch->wd, do idr_find() + * and compare its result with our pointer. If they match, we either have + * the damn thing still alive or we'd lost not one but two races at once, + * the watch had been killed and a new one got created with the same ->wd + * at the same address. That couldn't have happened in inotify_destroy(), + * but inotify_rm_wd() could run into that. Still, "new one got created" + * is not a problem - we have every right to kill it or leave it alone, + * whatever's more convenient. + * + * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as + * "grab it and kill it" check. If it's been our original watch, we are + * fine, if it's a newcomer - nevermind, just pretend that we'd won the + * race and kill the fscker anyway; we are safe since we know that its + * superblock won't be going away. + * + * And yes, this is far beyond mere "not very pretty"; so's the entire + * concept of inotify to start with. + */ + +/** + * pin_to_kill - pin the watch down for removal + * @ih: inotify handle + * @watch: watch to kill + * + * Called with ih->mutex held, drops it. Possible return values: + * 0 - nothing to do, it has died + * 1 - remove it, drop the reference and deactivate_super() + * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid + * that variant, since it involved a lot of PITA, but that's the best that + * could've been done. 
+ */ +static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch) +{ + struct super_block *sb = watch->inode->i_sb; + s32 wd = watch->wd; + + spin_lock(&sb_lock); + if (sb->s_count >= S_BIAS) { + atomic_inc(&sb->s_active); + spin_unlock(&sb_lock); + get_inotify_watch(watch); + mutex_unlock(&ih->mutex); + return 1; /* the best outcome */ + } + sb->s_count++; + spin_unlock(&sb_lock); + mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */ + down_read(&sb->s_umount); + if (likely(!sb->s_root)) { + /* fs is already shut down; the watch is dead */ + drop_super(sb); + return 0; + } + /* raced with the final deactivate_super() */ + mutex_lock(&ih->mutex); + if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) { + /* the watch is dead */ + mutex_unlock(&ih->mutex); + drop_super(sb); + return 0; + } + /* still alive or freed and reused with the same sb and wd; kill */ + get_inotify_watch(watch); + mutex_unlock(&ih->mutex); + return 2; +} + +static void unpin_and_kill(struct inotify_watch *watch, int how) +{ + struct super_block *sb = watch->inode->i_sb; + put_inotify_watch(watch); + switch (how) { + case 1: + deactivate_super(sb); + break; + case 2: + drop_super(sb); + } +} + +/** + * inotify_destroy - clean up and destroy an inotify instance + * @ih: inotify handle + */ +void inotify_destroy(struct inotify_handle *ih) +{ + /* + * Destroy all of the watches for this handle. Unfortunately, not very + * pretty. We cannot do a simple iteration over the list, because we + * do not know the inode until we iterate to the watch. But we need to + * hold inode->inotify_mutex before ih->mutex. The following works. + * + * AV: it had to become even uglier to start working ;-/ + */ + while (1) { + struct inotify_watch *watch; + struct list_head *watches; + struct super_block *sb; + struct inode *inode; + int how; + + mutex_lock(&ih->mutex); + watches = &ih->watches; + if (list_empty(watches)) { + mutex_unlock(&ih->mutex); + break; + } + watch = list_first_entry(watches, struct inotify_watch, h_list); + sb = watch->inode->i_sb; + how = pin_to_kill(ih, watch); + if (!how) + continue; + + inode = watch->inode; + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* make sure we didn't race with another list removal */ + if (likely(idr_find(&ih->idr, watch->wd))) { + remove_watch_no_event(watch, ih); + put_inotify_watch(watch); + } + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + unpin_and_kill(watch, how); + } + + /* free this handle: the put matching the get in inotify_init() */ + put_inotify_handle(ih); +} +EXPORT_SYMBOL_GPL(inotify_destroy); + +/** + * inotify_find_watch - find an existing watch for an (ih,inode) pair + * @ih: inotify handle + * @inode: inode to watch + * @watchp: pointer to existing inotify_watch + * + * Caller must pin given inode (via nameidata). 
+ */ +s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, + struct inotify_watch **watchp) +{ + struct inotify_watch *old; + int ret = -ENOENT; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + old = inode_find_handle(inode, ih); + if (unlikely(old)) { + get_inotify_watch(old); /* caller must put watch */ + *watchp = old; + ret = old->wd; + } + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(inotify_find_watch); + +/** + * inotify_find_update_watch - find and update the mask of an existing watch + * @ih: inotify handle + * @inode: inode's watch to update + * @mask: mask of events to watch + * + * Caller must pin given inode (via nameidata). + */ +s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode, + u32 mask) +{ + struct inotify_watch *old; + int mask_add = 0; + int ret; + + if (mask & IN_MASK_ADD) + mask_add = 1; + + /* don't allow invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS | IN_ONESHOT; + if (unlikely(!mask)) + return -EINVAL; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* + * Handle the case of re-adding a watch on an (inode,ih) pair that we + * are already watching. We just update the mask and return its wd. + */ + old = inode_find_handle(inode, ih); + if (unlikely(!old)) { + ret = -ENOENT; + goto out; + } + + if (mask_add) + old->mask |= mask; + else + old->mask = mask; + ret = old->wd; +out: + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(inotify_find_update_watch); + +/** + * inotify_add_watch - add a watch to an inotify instance + * @ih: inotify handle + * @watch: caller allocated watch structure + * @inode: inode to watch + * @mask: mask of events to watch + * + * Caller must pin given inode (via nameidata). + * Caller must ensure it only calls inotify_add_watch() once per watch. + * Calls inotify_handle_get_wd() so may sleep. + */ +s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch, + struct inode *inode, u32 mask) +{ + int ret = 0; + int newly_watched; + + /* don't allow invalid bits: we don't want flags set */ + mask &= IN_ALL_EVENTS | IN_ONESHOT; + if (unlikely(!mask)) + return -EINVAL; + watch->mask = mask; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* Initialize a new watch */ + ret = inotify_handle_get_wd(ih, watch); + if (unlikely(ret)) + goto out; + ret = watch->wd; + + /* save a reference to handle and bump the count to make it official */ + get_inotify_handle(ih); + watch->ih = ih; + + /* + * Save a reference to the inode and bump the ref count to make it + * official. We hold a reference to nameidata, which makes this safe. + */ + watch->inode = igrab(inode); + + /* Add the watch to the handle's and the inode's list */ + newly_watched = !inotify_inode_watched(inode); + list_add(&watch->h_list, &ih->watches); + list_add(&watch->i_list, &inode->inotify_watches); + /* + * Set child flags _after_ adding the watch, so there is no race + * windows where newly instantiated children could miss their parent's + * watched flag. 
+ */ + if (newly_watched) + set_dentry_child_flags(inode, 1); + +out: + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(inotify_add_watch); + +/** + * inotify_clone_watch - put the watch next to existing one + * @old: already installed watch + * @new: new watch + * + * Caller must hold the inotify_mutex of inode we are dealing with; + * it is expected to remove the old watch before unlocking the inode. + */ +s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new) +{ + struct inotify_handle *ih = old->ih; + int ret = 0; + + new->mask = old->mask; + new->ih = ih; + + mutex_lock(&ih->mutex); + + /* Initialize a new watch */ + ret = inotify_handle_get_wd(ih, new); + if (unlikely(ret)) + goto out; + ret = new->wd; + + get_inotify_handle(ih); + + new->inode = igrab(old->inode); + + list_add(&new->h_list, &ih->watches); + list_add(&new->i_list, &old->inode->inotify_watches); +out: + mutex_unlock(&ih->mutex); + return ret; +} + +void inotify_evict_watch(struct inotify_watch *watch) +{ + get_inotify_watch(watch); + mutex_lock(&watch->ih->mutex); + inotify_remove_watch_locked(watch->ih, watch); + mutex_unlock(&watch->ih->mutex); +} + +/** + * inotify_rm_wd - remove a watch from an inotify instance + * @ih: inotify handle + * @wd: watch descriptor to remove + * + * Can sleep. + */ +int inotify_rm_wd(struct inotify_handle *ih, u32 wd) +{ + struct inotify_watch *watch; + struct super_block *sb; + struct inode *inode; + int how; + + mutex_lock(&ih->mutex); + watch = idr_find(&ih->idr, wd); + if (unlikely(!watch)) { + mutex_unlock(&ih->mutex); + return -EINVAL; + } + sb = watch->inode->i_sb; + how = pin_to_kill(ih, watch); + if (!how) + return 0; + + inode = watch->inode; + + mutex_lock(&inode->inotify_mutex); + mutex_lock(&ih->mutex); + + /* make sure that we did not race */ + if (likely(idr_find(&ih->idr, wd) == watch)) + inotify_remove_watch_locked(ih, watch); + + mutex_unlock(&ih->mutex); + mutex_unlock(&inode->inotify_mutex); + unpin_and_kill(watch, how); + + return 0; +} +EXPORT_SYMBOL_GPL(inotify_rm_wd); + +/** + * inotify_rm_watch - remove a watch from an inotify instance + * @ih: inotify handle + * @watch: watch to remove + * + * Can sleep. + */ +int inotify_rm_watch(struct inotify_handle *ih, + struct inotify_watch *watch) +{ + return inotify_rm_wd(ih, watch->wd); +} +EXPORT_SYMBOL_GPL(inotify_rm_watch); + +/* + * inotify_setup - core initialization function + */ +static int __init inotify_setup(void) +{ + atomic_set(&inotify_cookie, 0); + + return 0; +} + +module_init(inotify_setup); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c new file mode 100644 index 00000000000..400f8064a54 --- /dev/null +++ b/fs/notify/inotify/inotify_user.c @@ -0,0 +1,778 @@ +/* + * fs/inotify_user.c - inotify support for userspace + * + * Authors: + * John McCutchan + * Robert Love + * + * Copyright (C) 2005 John McCutchan + * Copyright 2006 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct kmem_cache *watch_cachep __read_mostly; +static struct kmem_cache *event_cachep __read_mostly; + +static struct vfsmount *inotify_mnt __read_mostly; + +/* these are configurable via /proc/sys/fs/inotify/ */ +static int inotify_max_user_instances __read_mostly; +static int inotify_max_user_watches __read_mostly; +static int inotify_max_queued_events __read_mostly; + +/* + * Lock ordering: + * + * inotify_dev->up_mutex (ensures we don't re-add the same watch) + * inode->inotify_mutex (protects inode's watch list) + * inotify_handle->mutex (protects inotify_handle's watch list) + * inotify_dev->ev_mutex (protects device's event queue) + */ + +/* + * Lifetimes of the main data structures: + * + * inotify_device: Lifetime is managed by reference count, from + * sys_inotify_init() until release. Additional references can bump the count + * via get_inotify_dev() and drop the count via put_inotify_dev(). + * + * inotify_user_watch: Lifetime is from create_watch() to the receipt of an + * IN_IGNORED event from inotify, or when using IN_ONESHOT, to receipt of the + * first event, or to inotify_destroy(). + */ + +/* + * struct inotify_device - represents an inotify instance + * + * This structure is protected by the mutex 'mutex'. + */ +struct inotify_device { + wait_queue_head_t wq; /* wait queue for i/o */ + struct mutex ev_mutex; /* protects event queue */ + struct mutex up_mutex; /* synchronizes watch updates */ + struct list_head events; /* list of queued events */ + struct user_struct *user; /* user who opened this dev */ + struct inotify_handle *ih; /* inotify handle */ + struct fasync_struct *fa; /* async notification */ + atomic_t count; /* reference count */ + unsigned int queue_size; /* size of the queue (bytes) */ + unsigned int event_count; /* number of pending events */ + unsigned int max_events; /* maximum number of events */ +}; + +/* + * struct inotify_kernel_event - An inotify event, originating from a watch and + * queued for user-space. A list of these is attached to each instance of the + * device. In read(), this list is walked and all events that can fit in the + * buffer are returned. + * + * Protected by dev->ev_mutex of the device in which we are queued. + */ +struct inotify_kernel_event { + struct inotify_event event; /* the user-space event */ + struct list_head list; /* entry in inotify_device's list */ + char *name; /* filename, if any */ +}; + +/* + * struct inotify_user_watch - our version of an inotify_watch, we add + * a reference to the associated inotify_device. 
+ */ +struct inotify_user_watch { + struct inotify_device *dev; /* associated device */ + struct inotify_watch wdata; /* inotify watch data */ +}; + +#ifdef CONFIG_SYSCTL + +#include + +static int zero; + +ctl_table inotify_table[] = { + { + .ctl_name = INOTIFY_MAX_USER_INSTANCES, + .procname = "max_user_instances", + .data = &inotify_max_user_instances, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_USER_WATCHES, + .procname = "max_user_watches", + .data = &inotify_max_user_watches, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero, + }, + { + .ctl_name = INOTIFY_MAX_QUEUED_EVENTS, + .procname = "max_queued_events", + .data = &inotify_max_queued_events, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &zero + }, + { .ctl_name = 0 } +}; +#endif /* CONFIG_SYSCTL */ + +static inline void get_inotify_dev(struct inotify_device *dev) +{ + atomic_inc(&dev->count); +} + +static inline void put_inotify_dev(struct inotify_device *dev) +{ + if (atomic_dec_and_test(&dev->count)) { + atomic_dec(&dev->user->inotify_devs); + free_uid(dev->user); + kfree(dev); + } +} + +/* + * free_inotify_user_watch - cleans up the watch and its references + */ +static void free_inotify_user_watch(struct inotify_watch *w) +{ + struct inotify_user_watch *watch; + struct inotify_device *dev; + + watch = container_of(w, struct inotify_user_watch, wdata); + dev = watch->dev; + + atomic_dec(&dev->user->inotify_watches); + put_inotify_dev(dev); + kmem_cache_free(watch_cachep, watch); +} + +/* + * kernel_event - create a new kernel event with the given parameters + * + * This function can sleep. + */ +static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie, + const char *name) +{ + struct inotify_kernel_event *kevent; + + kevent = kmem_cache_alloc(event_cachep, GFP_NOFS); + if (unlikely(!kevent)) + return NULL; + + /* we hand this out to user-space, so zero it just in case */ + memset(&kevent->event, 0, sizeof(struct inotify_event)); + + kevent->event.wd = wd; + kevent->event.mask = mask; + kevent->event.cookie = cookie; + + INIT_LIST_HEAD(&kevent->list); + + if (name) { + size_t len, rem, event_size = sizeof(struct inotify_event); + + /* + * We need to pad the filename so as to properly align an + * array of inotify_event structures. Because the structure is + * small and the common case is a small filename, we just round + * up to the next multiple of the structure's sizeof. This is + * simple and safe for all architectures. + */ + len = strlen(name) + 1; + rem = event_size - len; + if (len > event_size) { + rem = event_size - (len % event_size); + if (len % event_size == 0) + rem = 0; + } + + kevent->name = kmalloc(len + rem, GFP_KERNEL); + if (unlikely(!kevent->name)) { + kmem_cache_free(event_cachep, kevent); + return NULL; + } + memcpy(kevent->name, name, len); + if (rem) + memset(kevent->name + len, 0, rem); + kevent->event.len = len + rem; + } else { + kevent->event.len = 0; + kevent->name = NULL; + } + + return kevent; +} + +/* + * inotify_dev_get_event - return the next event in the given dev's queue + * + * Caller must hold dev->ev_mutex. 
+ */ +static inline struct inotify_kernel_event * +inotify_dev_get_event(struct inotify_device *dev) +{ + return list_entry(dev->events.next, struct inotify_kernel_event, list); +} + +/* + * inotify_dev_get_last_event - return the last event in the given dev's queue + * + * Caller must hold dev->ev_mutex. + */ +static inline struct inotify_kernel_event * +inotify_dev_get_last_event(struct inotify_device *dev) +{ + if (list_empty(&dev->events)) + return NULL; + return list_entry(dev->events.prev, struct inotify_kernel_event, list); +} + +/* + * inotify_dev_queue_event - event handler registered with core inotify, adds + * a new event to the given device + * + * Can sleep (calls kernel_event()). + */ +static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask, + u32 cookie, const char *name, + struct inode *ignored) +{ + struct inotify_user_watch *watch; + struct inotify_device *dev; + struct inotify_kernel_event *kevent, *last; + + watch = container_of(w, struct inotify_user_watch, wdata); + dev = watch->dev; + + mutex_lock(&dev->ev_mutex); + + /* we can safely put the watch as we don't reference it while + * generating the event + */ + if (mask & IN_IGNORED || w->mask & IN_ONESHOT) + put_inotify_watch(w); /* final put */ + + /* coalescing: drop this event if it is a dupe of the previous */ + last = inotify_dev_get_last_event(dev); + if (last && last->event.mask == mask && last->event.wd == wd && + last->event.cookie == cookie) { + const char *lastname = last->name; + + if (!name && !lastname) + goto out; + if (name && lastname && !strcmp(lastname, name)) + goto out; + } + + /* the queue overflowed and we already sent the Q_OVERFLOW event */ + if (unlikely(dev->event_count > dev->max_events)) + goto out; + + /* if the queue overflows, we need to notify user space */ + if (unlikely(dev->event_count == dev->max_events)) + kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL); + else + kevent = kernel_event(wd, mask, cookie, name); + + if (unlikely(!kevent)) + goto out; + + /* queue the event and wake up anyone waiting */ + dev->event_count++; + dev->queue_size += sizeof(struct inotify_event) + kevent->event.len; + list_add_tail(&kevent->list, &dev->events); + wake_up_interruptible(&dev->wq); + kill_fasync(&dev->fa, SIGIO, POLL_IN); + +out: + mutex_unlock(&dev->ev_mutex); +} + +/* + * remove_kevent - cleans up the given kevent + * + * Caller must hold dev->ev_mutex. + */ +static void remove_kevent(struct inotify_device *dev, + struct inotify_kernel_event *kevent) +{ + list_del(&kevent->list); + + dev->event_count--; + dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; +} + +/* + * free_kevent - frees the given kevent. + */ +static void free_kevent(struct inotify_kernel_event *kevent) +{ + kfree(kevent->name); + kmem_cache_free(event_cachep, kevent); +} + +/* + * inotify_dev_event_dequeue - destroy an event on the given device + * + * Caller must hold dev->ev_mutex. 
+ */ +static void inotify_dev_event_dequeue(struct inotify_device *dev) +{ + if (!list_empty(&dev->events)) { + struct inotify_kernel_event *kevent; + kevent = inotify_dev_get_event(dev); + remove_kevent(dev, kevent); + free_kevent(kevent); + } +} + +/* + * find_inode - resolve a user-given path to a specific inode + */ +static int find_inode(const char __user *dirname, struct path *path, + unsigned flags) +{ + int error; + + error = user_path_at(AT_FDCWD, dirname, flags, path); + if (error) + return error; + /* you can only watch an inode if you have read permissions on it */ + error = inode_permission(path->dentry->d_inode, MAY_READ); + if (error) + path_put(path); + return error; +} + +/* + * create_watch - creates a watch on the given device. + * + * Callers must hold dev->up_mutex. + */ +static int create_watch(struct inotify_device *dev, struct inode *inode, + u32 mask) +{ + struct inotify_user_watch *watch; + int ret; + + if (atomic_read(&dev->user->inotify_watches) >= + inotify_max_user_watches) + return -ENOSPC; + + watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL); + if (unlikely(!watch)) + return -ENOMEM; + + /* save a reference to device and bump the count to make it official */ + get_inotify_dev(dev); + watch->dev = dev; + + atomic_inc(&dev->user->inotify_watches); + + inotify_init_watch(&watch->wdata); + ret = inotify_add_watch(dev->ih, &watch->wdata, inode, mask); + if (ret < 0) + free_inotify_user_watch(&watch->wdata); + + return ret; +} + +/* Device Interface */ + +static unsigned int inotify_poll(struct file *file, poll_table *wait) +{ + struct inotify_device *dev = file->private_data; + int ret = 0; + + poll_wait(file, &dev->wq, wait); + mutex_lock(&dev->ev_mutex); + if (!list_empty(&dev->events)) + ret = POLLIN | POLLRDNORM; + mutex_unlock(&dev->ev_mutex); + + return ret; +} + +static ssize_t inotify_read(struct file *file, char __user *buf, + size_t count, loff_t *pos) +{ + size_t event_size = sizeof (struct inotify_event); + struct inotify_device *dev; + char __user *start; + int ret; + DEFINE_WAIT(wait); + + start = buf; + dev = file->private_data; + + while (1) { + + prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); + + mutex_lock(&dev->ev_mutex); + if (!list_empty(&dev->events)) { + ret = 0; + break; + } + mutex_unlock(&dev->ev_mutex); + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + if (signal_pending(current)) { + ret = -EINTR; + break; + } + + schedule(); + } + + finish_wait(&dev->wq, &wait); + if (ret) + return ret; + + while (1) { + struct inotify_kernel_event *kevent; + + ret = buf - start; + if (list_empty(&dev->events)) + break; + + kevent = inotify_dev_get_event(dev); + if (event_size + kevent->event.len > count) { + if (ret == 0 && count > 0) { + /* + * could not get a single event because we + * didn't have enough buffer space. + */ + ret = -EINVAL; + } + break; + } + remove_kevent(dev, kevent); + + /* + * Must perform the copy_to_user outside the mutex in order + * to avoid a lock order reversal with mmap_sem. 
+ */ + mutex_unlock(&dev->ev_mutex); + + if (copy_to_user(buf, &kevent->event, event_size)) { + ret = -EFAULT; + break; + } + buf += event_size; + count -= event_size; + + if (kevent->name) { + if (copy_to_user(buf, kevent->name, kevent->event.len)){ + ret = -EFAULT; + break; + } + buf += kevent->event.len; + count -= kevent->event.len; + } + + free_kevent(kevent); + + mutex_lock(&dev->ev_mutex); + } + mutex_unlock(&dev->ev_mutex); + + return ret; +} + +static int inotify_fasync(int fd, struct file *file, int on) +{ + struct inotify_device *dev = file->private_data; + + return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO; +} + +static int inotify_release(struct inode *ignored, struct file *file) +{ + struct inotify_device *dev = file->private_data; + + inotify_destroy(dev->ih); + + /* destroy all of the events on this device */ + mutex_lock(&dev->ev_mutex); + while (!list_empty(&dev->events)) + inotify_dev_event_dequeue(dev); + mutex_unlock(&dev->ev_mutex); + + /* free this device: the put matching the get in inotify_init() */ + put_inotify_dev(dev); + + return 0; +} + +static long inotify_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct inotify_device *dev; + void __user *p; + int ret = -ENOTTY; + + dev = file->private_data; + p = (void __user *) arg; + + switch (cmd) { + case FIONREAD: + ret = put_user(dev->queue_size, (int __user *) p); + break; + } + + return ret; +} + +static const struct file_operations inotify_fops = { + .poll = inotify_poll, + .read = inotify_read, + .fasync = inotify_fasync, + .release = inotify_release, + .unlocked_ioctl = inotify_ioctl, + .compat_ioctl = inotify_ioctl, +}; + +static const struct inotify_operations inotify_user_ops = { + .handle_event = inotify_dev_queue_event, + .destroy_watch = free_inotify_user_watch, +}; + +asmlinkage long sys_inotify_init1(int flags) +{ + struct inotify_device *dev; + struct inotify_handle *ih; + struct user_struct *user; + struct file *filp; + int fd, ret; + + /* Check the IN_* constants for consistency. 
*/ + BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC); + BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK); + + if (flags & ~(IN_CLOEXEC | IN_NONBLOCK)) + return -EINVAL; + + fd = get_unused_fd_flags(flags & O_CLOEXEC); + if (fd < 0) + return fd; + + filp = get_empty_filp(); + if (!filp) { + ret = -ENFILE; + goto out_put_fd; + } + + user = get_current_user(); + if (unlikely(atomic_read(&user->inotify_devs) >= + inotify_max_user_instances)) { + ret = -EMFILE; + goto out_free_uid; + } + + dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL); + if (unlikely(!dev)) { + ret = -ENOMEM; + goto out_free_uid; + } + + ih = inotify_init(&inotify_user_ops); + if (IS_ERR(ih)) { + ret = PTR_ERR(ih); + goto out_free_dev; + } + dev->ih = ih; + dev->fa = NULL; + + filp->f_op = &inotify_fops; + filp->f_path.mnt = mntget(inotify_mnt); + filp->f_path.dentry = dget(inotify_mnt->mnt_root); + filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping; + filp->f_mode = FMODE_READ; + filp->f_flags = O_RDONLY | (flags & O_NONBLOCK); + filp->private_data = dev; + + INIT_LIST_HEAD(&dev->events); + init_waitqueue_head(&dev->wq); + mutex_init(&dev->ev_mutex); + mutex_init(&dev->up_mutex); + dev->event_count = 0; + dev->queue_size = 0; + dev->max_events = inotify_max_queued_events; + dev->user = user; + atomic_set(&dev->count, 0); + + get_inotify_dev(dev); + atomic_inc(&user->inotify_devs); + fd_install(fd, filp); + + return fd; +out_free_dev: + kfree(dev); +out_free_uid: + free_uid(user); + put_filp(filp); +out_put_fd: + put_unused_fd(fd); + return ret; +} + +asmlinkage long sys_inotify_init(void) +{ + return sys_inotify_init1(0); +} + +asmlinkage long sys_inotify_add_watch(int fd, const char __user *pathname, u32 mask) +{ + struct inode *inode; + struct inotify_device *dev; + struct path path; + struct file *filp; + int ret, fput_needed; + unsigned flags = 0; + + filp = fget_light(fd, &fput_needed); + if (unlikely(!filp)) + return -EBADF; + + /* verify that this is indeed an inotify instance */ + if (unlikely(filp->f_op != &inotify_fops)) { + ret = -EINVAL; + goto fput_and_out; + } + + if (!(mask & IN_DONT_FOLLOW)) + flags |= LOOKUP_FOLLOW; + if (mask & IN_ONLYDIR) + flags |= LOOKUP_DIRECTORY; + + ret = find_inode(pathname, &path, flags); + if (unlikely(ret)) + goto fput_and_out; + + /* inode held in place by reference to path; dev by fget on fd */ + inode = path.dentry->d_inode; + dev = filp->private_data; + + mutex_lock(&dev->up_mutex); + ret = inotify_find_update_watch(dev->ih, inode, mask); + if (ret == -ENOENT) + ret = create_watch(dev, inode, mask); + mutex_unlock(&dev->up_mutex); + + path_put(&path); +fput_and_out: + fput_light(filp, fput_needed); + return ret; +} + +asmlinkage long sys_inotify_rm_watch(int fd, u32 wd) +{ + struct file *filp; + struct inotify_device *dev; + int ret, fput_needed; + + filp = fget_light(fd, &fput_needed); + if (unlikely(!filp)) + return -EBADF; + + /* verify that this is indeed an inotify instance */ + if (unlikely(filp->f_op != &inotify_fops)) { + ret = -EINVAL; + goto out; + } + + dev = filp->private_data; + + /* we free our watch data when we get IN_IGNORED */ + ret = inotify_rm_wd(dev->ih, wd); + +out: + fput_light(filp, fput_needed); + return ret; +} + +static int +inotify_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_pseudo(fs_type, "inotify", NULL, + INOTIFYFS_SUPER_MAGIC, mnt); +} + +static struct file_system_type inotify_fs_type = { + .name = "inotifyfs", + .get_sb = inotify_get_sb, + .kill_sb = 
kill_anon_super, +}; + +/* + * inotify_user_setup - Our initialization function. Note that we cannnot return + * error because we have compiled-in VFS hooks. So an (unlikely) failure here + * must result in panic(). + */ +static int __init inotify_user_setup(void) +{ + int ret; + + ret = register_filesystem(&inotify_fs_type); + if (unlikely(ret)) + panic("inotify: register_filesystem returned %d!\n", ret); + + inotify_mnt = kern_mount(&inotify_fs_type); + if (IS_ERR(inotify_mnt)) + panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt)); + + inotify_max_queued_events = 16384; + inotify_max_user_instances = 128; + inotify_max_user_watches = 8192; + + watch_cachep = kmem_cache_create("inotify_watch_cache", + sizeof(struct inotify_user_watch), + 0, SLAB_PANIC, NULL); + event_cachep = kmem_cache_create("inotify_event_cache", + sizeof(struct inotify_kernel_event), + 0, SLAB_PANIC, NULL); + + return 0; +} + +module_init(inotify_user_setup); -- cgit v1.2.3-70-g09d2 From 261bca86ed4f7f391d1938167624e78da61dcc6b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 01:48:21 -0500 Subject: nfsd/create race fixes, infrastructure new helpers - insert_inode_locked() and insert_inode_locked4(). Hash new inode, making sure that there's no such inode in icache already. If there is and it does not end up unhashed (as would happen if we have nfsd trying to resolve a bogus fhandle), fail. Otherwise insert our inode into hash and succeed. In either case have i_state set to new+locked; cleanup ends up being simpler with such calling conventions. Signed-off-by: Al Viro --- fs/inode.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fs.h | 2 ++ 2 files changed, 61 insertions(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 098a2443196..7de1cda9248 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1032,6 +1032,65 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino) EXPORT_SYMBOL(iget_locked); +int insert_inode_locked(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + ino_t ino = inode->i_ino; + struct hlist_head *head = inode_hashtable + hash(sb, ino); + struct inode *old; + + inode->i_state |= I_LOCK|I_NEW; + while (1) { + spin_lock(&inode_lock); + old = find_inode_fast(sb, head, ino); + if (likely(!old)) { + hlist_add_head(&inode->i_hash, head); + spin_unlock(&inode_lock); + return 0; + } + __iget(old); + spin_unlock(&inode_lock); + wait_on_inode(old); + if (unlikely(!hlist_unhashed(&old->i_hash))) { + iput(old); + return -EBUSY; + } + iput(old); + } +} + +EXPORT_SYMBOL(insert_inode_locked); + +int insert_inode_locked4(struct inode *inode, unsigned long hashval, + int (*test)(struct inode *, void *), void *data) +{ + struct super_block *sb = inode->i_sb; + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + struct inode *old; + + inode->i_state |= I_LOCK|I_NEW; + + while (1) { + spin_lock(&inode_lock); + old = find_inode(sb, head, test, data); + if (likely(!old)) { + hlist_add_head(&inode->i_hash, head); + spin_unlock(&inode_lock); + return 0; + } + __iget(old); + spin_unlock(&inode_lock); + wait_on_inode(old); + if (unlikely(!hlist_unhashed(&old->i_hash))) { + iput(old); + return -EBUSY; + } + iput(old); + } +} + +EXPORT_SYMBOL(insert_inode_locked4); + /** * __insert_inode_hash - hash an inode * @inode: unhashed inode diff --git a/include/linux/fs.h b/include/linux/fs.h index be16ce01fb1..e2170ee21e1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1902,6 +1902,8 @@ extern struct inode *ilookup(struct 
super_block *sb, unsigned long ino); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); +extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); +extern int insert_inode_locked(struct inode *); extern void unlock_new_inode(struct inode *); extern void __iget(struct inode * inode); -- cgit v1.2.3-70-g09d2 From 41080b5a240113328c607f22b849f653373db0ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 01:52:35 -0500 Subject: nfsd race fixes: ext2 * make ext2_new_inode() put the inode into icache in locked state * do not unlock until the inode is fully set up; otherwise nfsd might pick it in half-baked state. * make sure that ext2_new_inode() does *not* lead to two inodes with the same inumber hashed at the same time; otherwise a bogus fhandle coming from nfsd might race with inode creation: nfsd: iget_locked() creates inode nfsd: try to read from disk, block on that. ext2_new_inode(): allocate inode with that inumber ext2_new_inode(): insert it into icache, set it up and dirty ext2_write_inode(): get the relevant part of inode table in cache, set the entry for our inode (and start writing to disk) nfsd: get CPU again, look into inode table, see nice and sane on-disk inode, set the in-core inode from it oops - we have two in-core inodes with the same inumber live in icache, both used for IO. Welcome to fs corruption... Signed-off-by: Al Viro --- fs/ext2/ialloc.c | 6 +++++- fs/ext2/namei.c | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 8d0add62587..c454d5db28a 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -585,7 +585,10 @@ got: spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } if (DQUOT_ALLOC_INODE(inode)) { err = -EDQUOT; @@ -612,6 +615,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); return ERR_PTR(err); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2a747252ec1..90ea17998a7 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -41,9 +41,11 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) int err = ext2_add_link(dentry, inode); if (!err) { d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } inode_dec_link_count(inode); + unlock_new_inode(inode); iput(inode); return err; } @@ -170,6 +172,7 @@ out: out_fail: inode_dec_link_count(inode); + unlock_new_inode(inode); iput (inode); goto out; } @@ -178,6 +181,7 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; + int err; if (inode->i_nlink >= EXT2_LINK_MAX) return -EMLINK; @@ -186,7 +190,14 @@ static int ext2_link (struct dentry * old_dentry, struct inode * dir, inode_inc_link_count(inode); atomic_inc(&inode->i_count); - return ext2_add_nondir(dentry, inode); + err = ext2_add_link(dentry, inode); + if (!err) { + d_instantiate(dentry, inode); + return 0; + } + inode_dec_link_count(inode); + iput(inode); + return err; } static int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) @@ -222,12 +233,14 @@ static 
int ext2_mkdir(struct inode * dir, struct dentry * dentry, int mode) goto out_fail; d_instantiate(dentry, inode); + unlock_new_inode(inode); out: return err; out_fail: inode_dec_link_count(inode); inode_dec_link_count(inode); + unlock_new_inode(inode); iput(inode); out_dir: inode_dec_link_count(dir); -- cgit v1.2.3-70-g09d2 From c38012daa7ad902a39a4213ba2b3fe50e81157ea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:02:50 -0500 Subject: nfsd race fixes: ext3 ext3 analog of the previous patch Signed-off-by: Al Viro --- fs/ext3/ialloc.c | 6 +++++- fs/ext3/namei.c | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 490bd0ed789..5655fbcbd11 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -579,7 +579,10 @@ got: ext3_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); @@ -627,6 +630,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); brelse(bitmap_bh); return ERR_PTR(err); diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 3e5edc92aa0..297ea8dfac7 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1652,9 +1652,11 @@ static int ext3_add_nondir(handle_t *handle, if (!err) { ext3_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); + unlock_new_inode(inode); iput(inode); return err; } @@ -1765,6 +1767,7 @@ retry: dir_block = ext3_bread (handle, inode, 0, 1, &err); if (!dir_block) { drop_nlink(inode); /* is this nlink == 0? 
*/ + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -1792,6 +1795,7 @@ retry: err = ext3_add_entry (handle, dentry, inode); if (err) { inode->i_nlink = 0; + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -1800,6 +1804,7 @@ retry: ext3_update_dx_flag(dir); ext3_mark_inode_dirty(handle, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: ext3_journal_stop(handle); if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) @@ -2174,6 +2179,7 @@ retry: mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); if (err) { drop_nlink(inode); + unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); iput (inode); goto out_stop; @@ -2221,7 +2227,14 @@ retry: inc_nlink(inode); atomic_inc(&inode->i_count); - err = ext3_add_nondir(handle, dentry, inode); + err = ext3_add_entry(handle, dentry, inode); + if (!err) { + ext3_mark_inode_dirty(handle, inode); + d_instantiate(dentry, inode); + } else { + drop_nlink(inode); + iput(inode); + } ext3_journal_stop(handle); if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit v1.2.3-70-g09d2 From 6b38e842bb832a3dbeb17e382404aef3c40ac5f9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:03:31 -0500 Subject: nfsd race fixes: ext4 Signed-off-by: Al Viro --- fs/ext4/ialloc.c | 6 +++++- fs/ext4/namei.c | 14 +++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 08cac9fcace..6e6052879aa 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -826,7 +826,10 @@ got: ext4_set_inode_flags(inode); if (IS_DIRSYNC(inode)) handle->h_sync = 1; - insert_inode_hash(inode); + if (insert_inode_locked(inode) < 0) { + err = -EINVAL; + goto fail_drop; + } spin_lock(&sbi->s_next_gen_lock); inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); @@ -881,6 +884,7 @@ fail_drop: DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; + unlock_new_inode(inode); iput(inode); brelse(bitmap_bh); return ERR_PTR(err); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 63adcb79298..da98a9012fa 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1693,9 +1693,11 @@ static int ext4_add_nondir(handle_t *handle, if (!err) { ext4_mark_inode_dirty(handle, inode); d_instantiate(dentry, inode); + unlock_new_inode(inode); return 0; } drop_nlink(inode); + unlock_new_inode(inode); iput(inode); return err; } @@ -1830,6 +1832,7 @@ retry: if (err) { out_clear_inode: clear_nlink(inode); + unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; @@ -1838,6 +1841,7 @@ out_clear_inode: ext4_update_dx_flag(dir); ext4_mark_inode_dirty(handle, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); out_stop: ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) @@ -2212,6 +2216,7 @@ retry: mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); if (err) { clear_nlink(inode); + unlock_new_inode(inode); ext4_mark_inode_dirty(handle, inode); iput(inode); goto out_stop; @@ -2262,7 +2267,14 @@ retry: ext4_inc_count(handle, inode); atomic_inc(&inode->i_count); - err = ext4_add_nondir(handle, dentry, inode); + err = ext4_add_entry(handle, dentry, inode); + if (!err) { + ext4_mark_inode_dirty(handle, inode); + d_instantiate(dentry, inode); + } else { + drop_nlink(inode); + iput(inode); + } ext4_journal_stop(handle); if (err == -ENOSPC && 
ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit v1.2.3-70-g09d2 From c1eaa26b671299b3ec01d40c6c71ee19a4f81517 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 30 Dec 2008 02:03:58 -0500 Subject: nfsd race fixes: reiserfs ... and the same for reiserfs. The difference here is that we need insert_inode_locked4() to match iget5_locked(). Signed-off-by: Al Viro --- fs/reiserfs/inode.c | 15 ++++++++++----- fs/reiserfs/namei.c | 8 ++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 6c4c2c69449..145c2d3e5e0 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1753,6 +1753,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, struct inode *inode) { struct super_block *sb; + struct reiserfs_iget_args args; INITIALIZE_PATH(path_to_key); struct cpu_key key; struct item_head ih; @@ -1780,6 +1781,14 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, err = -ENOMEM; goto out_bad_inode; } + args.objectid = inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); + memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); + args.dirid = le32_to_cpu(ih.ih_key.k_dir_id); + if (insert_inode_locked4(inode, args.objectid, + reiserfs_find_actor, &args) < 0) { + err = -EINVAL; + goto out_bad_inode; + } if (old_format_only(sb)) /* not a perfect generation count, as object ids can be reused, but ** this is as good as reiserfs can do right now. @@ -1859,13 +1868,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, } else { inode2sd(&sd, inode, inode->i_size); } - // these do not go to on-disk stat data - inode->i_ino = le32_to_cpu(ih.ih_key.k_objectid); - // store in in-core inode the key of stat data and version all // object items will have (directory items will have old offset // format, other new objects will consist of new items) - memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE); if (old_format_only(sb) || S_ISDIR(mode) || S_ISLNK(mode)) set_inode_item_key_version(inode, KEY_FORMAT_3_5); else @@ -1929,7 +1934,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, reiserfs_mark_inode_private(inode); } - insert_inode_hash(inode); reiserfs_update_sd(th, inode); reiserfs_check_path(&path_to_key); @@ -1956,6 +1960,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, out_inserted_sd: inode->i_nlink = 0; th->t_trans_id = 0; /* so the caller can't use this handle later */ + unlock_new_inode(inode); /* OK to do even if we hadn't locked it */ /* If we were inheriting an ACL, we need to release the lock so that * iput doesn't deadlock in reiserfs_delete_xattrs. 
The locking diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 4f322e5ed84..738967f6c8e 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -646,6 +646,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } @@ -653,6 +654,7 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, int mode, reiserfs_update_inode_transaction(dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -727,11 +729,13 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, int mode, err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: @@ -812,6 +816,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) err = journal_end(&th, dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } @@ -819,6 +824,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) reiserfs_update_sd(&th, dir); d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, dir->i_sb, jbegin_count); out_failed: if (locked) @@ -1096,11 +1102,13 @@ static int reiserfs_symlink(struct inode *parent_dir, err = journal_end(&th, parent_dir->i_sb, jbegin_count); if (err) retval = err; + unlock_new_inode(inode); iput(inode); goto out_failed; } d_instantiate(dentry, inode); + unlock_new_inode(inode); retval = journal_end(&th, parent_dir->i_sb, jbegin_count); out_failed: reiserfs_write_unlock(parent_dir->i_sb); -- cgit v1.2.3-70-g09d2 From 1f3403fa640f9f7b135dee79f2d39d01c8ad4a08 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Tue, 30 Dec 2008 22:08:37 -0600 Subject: nfsd race fixes: jfs jfs version of Al Viro's nfsd race patches Signed-off-by: Dave Kleikamp Signed-off-by: Al Viro --- fs/jfs/jfs_inode.c | 29 +++++++++++++++++++++-------- fs/jfs/namei.c | 24 ++++++++++++++++-------- 2 files changed, 37 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index 70022fd1c53..d4d142c2edd 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -79,7 +79,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode) inode = new_inode(sb); if (!inode) { jfs_warn("ialloc: new_inode returned NULL!"); - return ERR_PTR(-ENOMEM); + rc = -ENOMEM; + goto fail; } jfs_inode = JFS_IP(inode); @@ -89,8 +90,12 @@ struct inode *ialloc(struct inode *parent, umode_t mode) jfs_warn("ialloc: diAlloc returned %d!", rc); if (rc == -EIO) make_bad_inode(inode); - iput(inode); - return ERR_PTR(rc); + goto fail_put; + } + + if (insert_inode_locked(inode) < 0) { + rc = -EINVAL; + goto fail_unlock; } inode->i_uid = current_fsuid(); @@ -112,11 +117,8 @@ struct inode *ialloc(struct inode *parent, umode_t mode) * Allocate inode to quota. 
*/ if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); - inode->i_flags |= S_NOQUOTA; - inode->i_nlink = 0; - iput(inode); - return ERR_PTR(-EDQUOT); + rc = -EDQUOT; + goto fail_drop; } inode->i_mode = mode; @@ -158,4 +160,15 @@ struct inode *ialloc(struct inode *parent, umode_t mode) jfs_info("ialloc returns inode = 0x%p\n", inode); return inode; + +fail_drop: + DQUOT_DROP(inode); + inode->i_flags |= S_NOQUOTA; +fail_unlock: + inode->i_nlink = 0; + unlock_new_inode(inode); +fail_put: + iput(inode); +fail: + return ERR_PTR(rc); } diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index cc3cedffbfa..b4de56b851e 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -155,7 +155,6 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, ip->i_fop = &jfs_file_operations; ip->i_mapping->a_ops = &jfs_aops; - insert_inode_hash(ip); mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; @@ -171,9 +170,12 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, int mode, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -289,7 +291,6 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) ip->i_op = &jfs_dir_inode_operations; ip->i_fop = &jfs_dir_operations; - insert_inode_hash(ip); mark_inode_dirty(ip); /* update parent directory inode */ @@ -306,9 +307,12 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, int mode) if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -1019,7 +1023,6 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, goto out3; } - insert_inode_hash(ip); mark_inode_dirty(ip); dip->i_ctime = dip->i_mtime = CURRENT_TIME; @@ -1039,9 +1042,12 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out2: free_UCSname(&dname); @@ -1399,7 +1405,6 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, jfs_ip->dev = new_encode_dev(rdev); init_special_inode(ip, ip->i_mode, rdev); - insert_inode_hash(ip); mark_inode_dirty(ip); dir->i_ctime = dir->i_mtime = CURRENT_TIME; @@ -1417,9 +1422,12 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry, if (rc) { free_ea_wmap(ip); ip->i_nlink = 0; + unlock_new_inode(ip); iput(ip); - } else + } else { d_instantiate(dentry, ip); + unlock_new_inode(ip); + } out1: free_UCSname(&dname); -- cgit v1.2.3-70-g09d2 From 59e55e6cf86eb472e8373831c4234252916c53ef Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:11 +0000 Subject: Remove devpts_root global Remove the 'devpts_root' global variable and find the root dentry using the super_block. The super-block can be found from the device inode, using the new wrapper, pts_sb_from_inode(). Changelog: This patch is based on an earlier patchset from Serge Hallyn and Matt Helsley. 
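For readers skimming the log, the lookup pattern this patch introduces, condensed from the hunks below (an illustrative sketch, not part of the patch itself):

        /* inode -> super_block -> root dentry, instead of the removed global */
        struct super_block *sb = pts_sb_from_inode(inode);
        struct dentry *root = sb->s_root;

        mutex_lock(&root->d_inode->i_mutex);
        /* ... create or remove the pty node under root ... */
        mutex_unlock(&root->d_inode->i_mutex);

Both devpts_pty_new() and devpts_pty_kill() in the diff follow this shape.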
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 5d61b7c06e1..f96e10a109f 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -34,7 +34,6 @@ static DEFINE_IDA(allocated_ptys); static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; -static struct dentry *devpts_root; static struct { int setuid; @@ -56,6 +55,14 @@ static const match_table_t tokens = { {Opt_err, NULL} }; +static inline struct super_block *pts_sb_from_inode(struct inode *inode) +{ + if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) + return inode->i_sb; + + return devpts_mnt->mnt_sb; +} + static int devpts_remount(struct super_block *sb, int *flags, char *data) { char *p; @@ -142,7 +149,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) inode->i_fop = &simple_dir_operations; inode->i_nlink = 2; - devpts_root = s->s_root = d_alloc_root(inode); + s->s_root = d_alloc_root(inode); if (s->s_root) return 0; @@ -211,7 +218,9 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) struct tty_driver *driver = tty->driver; dev_t device = MKDEV(driver->major, driver->minor_start+number); struct dentry *dentry; - struct inode *inode = new_inode(devpts_mnt->mnt_sb); + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct inode *inode = new_inode(sb); + struct dentry *root = sb->s_root; char s[12]; /* We're supposed to be given the slave end of a pty */ @@ -231,15 +240,15 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) sprintf(s, "%d", number); - mutex_lock(&devpts_root->d_inode->i_mutex); + mutex_lock(&root->d_inode->i_mutex); - dentry = d_alloc_name(devpts_root, s); + dentry = d_alloc_name(root, s); if (!IS_ERR(dentry)) { d_add(dentry, inode); - fsnotify_create(devpts_root->d_inode, dentry); + fsnotify_create(root->d_inode, dentry); } - mutex_unlock(&devpts_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); return 0; } @@ -256,11 +265,13 @@ struct tty_struct *devpts_get_tty(struct inode *pts_inode, int number) void devpts_pty_kill(struct tty_struct *tty) { struct inode *inode = tty->driver_data; + struct super_block *sb = pts_sb_from_inode(inode); + struct dentry *root = sb->s_root; struct dentry *dentry; BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - mutex_lock(&devpts_root->d_inode->i_mutex); + mutex_lock(&root->d_inode->i_mutex); dentry = d_find_alias(inode); if (dentry && !IS_ERR(dentry)) { @@ -269,7 +280,7 @@ void devpts_pty_kill(struct tty_struct *tty) dput(dentry); } - mutex_unlock(&devpts_root->d_inode->i_mutex); + mutex_unlock(&root->d_inode->i_mutex); } static int __init init_devpts_fs(void) -- cgit v1.2.3-70-g09d2 From e76b7c01e598d2d14ddfdb6ae5c6afe45245d0de Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:21 +0000 Subject: Per-mount allocated_ptys To enable multiple mounts of devpts, 'allocated_ptys' must be a per-mount variable rather than a global variable. Move 'allocated_ptys' into the super_block's s_fs_info. Changelog[v2]: Define and use DEVPTS_SB() wrapper. 
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 48 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index f96e10a109f..49d879d911b 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -30,7 +30,6 @@ #define PTMX_MINOR 2 extern int pty_limit; /* Config limit on Unix98 ptys */ -static DEFINE_IDA(allocated_ptys); static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; @@ -55,6 +54,15 @@ static const match_table_t tokens = { {Opt_err, NULL} }; +struct pts_fs_info { + struct ida allocated_ptys; +}; + +static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) +{ + return sb->s_fs_info; +} + static inline struct super_block *pts_sb_from_inode(struct inode *inode) { if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) @@ -126,6 +134,19 @@ static const struct super_operations devpts_sops = { .show_options = devpts_show_options, }; +static void *new_pts_fs_info(void) +{ + struct pts_fs_info *fsi; + + fsi = kzalloc(sizeof(struct pts_fs_info), GFP_KERNEL); + if (!fsi) + return NULL; + + ida_init(&fsi->allocated_ptys); + + return fsi; +} + static int devpts_fill_super(struct super_block *s, void *data, int silent) { @@ -137,9 +158,13 @@ devpts_fill_super(struct super_block *s, void *data, int silent) s->s_op = &devpts_sops; s->s_time_gran = 1; + s->s_fs_info = new_pts_fs_info(); + if (!s->s_fs_info) + goto fail; + inode = new_inode(s); if (!inode) - goto fail; + goto free_fsi; inode->i_ino = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; inode->i_blocks = 0; @@ -155,6 +180,9 @@ devpts_fill_super(struct super_block *s, void *data, int silent) printk("devpts: get root dentry failed\n"); iput(inode); + +free_fsi: + kfree(s->s_fs_info); fail: return -ENOMEM; } @@ -165,11 +193,19 @@ static int devpts_get_sb(struct file_system_type *fs_type, return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); } +static void devpts_kill_sb(struct super_block *sb) +{ + struct pts_fs_info *fsi = DEVPTS_SB(sb); + + kfree(fsi); + kill_anon_super(sb); +} + static struct file_system_type devpts_fs_type = { .owner = THIS_MODULE, .name = "devpts", .get_sb = devpts_get_sb, - .kill_sb = kill_anon_super, + .kill_sb = devpts_kill_sb, }; /* @@ -179,16 +215,18 @@ static struct file_system_type devpts_fs_type = { int devpts_new_index(struct inode *ptmx_inode) { + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct pts_fs_info *fsi = DEVPTS_SB(sb); int index; int ida_ret; retry: - if (!ida_pre_get(&allocated_ptys, GFP_KERNEL)) { + if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) { return -ENOMEM; } mutex_lock(&allocated_ptys_lock); - ida_ret = ida_get_new(&allocated_ptys, &index); + ida_ret = ida_get_new(&fsi->allocated_ptys, &index); if (ida_ret < 0) { mutex_unlock(&allocated_ptys_lock); if (ida_ret == -EAGAIN) @@ -197,7 +235,7 @@ retry: } if (index >= pty_limit) { - ida_remove(&allocated_ptys, index); + ida_remove(&fsi->allocated_ptys, index); mutex_unlock(&allocated_ptys_lock); return -EIO; } @@ -207,8 +245,11 @@ retry: void devpts_kill_index(struct inode *ptmx_inode, int idx) { + struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct pts_fs_info *fsi = DEVPTS_SB(sb); + mutex_lock(&allocated_ptys_lock); - ida_remove(&allocated_ptys, idx); + ida_remove(&fsi->allocated_ptys, idx); mutex_unlock(&allocated_ptys_lock); } -- cgit v1.2.3-70-g09d2 From 
31af0abbdafb66ad8e27e3df878faec2ebe1132e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:33 +0000 Subject: Per-mount 'config' object With support for multiple mounts of devpts, the 'config' structure really represents per-mount options rather than config parameters. Rename 'config' structure to 'pts_mount_opts' and store it in the super-block. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 45 +++++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 49d879d911b..b793e6e3c21 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -34,13 +34,13 @@ static DEFINE_MUTEX(allocated_ptys_lock); static struct vfsmount *devpts_mnt; -static struct { +struct pts_mount_opts { int setuid; int setgid; uid_t uid; gid_t gid; umode_t mode; -} config = {.mode = DEVPTS_DEFAULT_MODE}; +}; enum { Opt_uid, Opt_gid, Opt_mode, @@ -56,6 +56,7 @@ static const match_table_t tokens = { struct pts_fs_info { struct ida allocated_ptys; + struct pts_mount_opts mount_opts; }; static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) @@ -74,12 +75,14 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) static int devpts_remount(struct super_block *sb, int *flags, char *data) { char *p; + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; - config.setuid = 0; - config.setgid = 0; - config.uid = 0; - config.gid = 0; - config.mode = DEVPTS_DEFAULT_MODE; + opts->setuid = 0; + opts->setgid = 0; + opts->uid = 0; + opts->gid = 0; + opts->mode = DEVPTS_DEFAULT_MODE; while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -94,19 +97,19 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) case Opt_uid: if (match_int(&args[0], &option)) return -EINVAL; - config.uid = option; - config.setuid = 1; + opts->uid = option; + opts->setuid = 1; break; case Opt_gid: if (match_int(&args[0], &option)) return -EINVAL; - config.gid = option; - config.setgid = 1; + opts->gid = option; + opts->setgid = 1; break; case Opt_mode: if (match_octal(&args[0], &option)) return -EINVAL; - config.mode = option & S_IALLUGO; + opts->mode = option & S_IALLUGO; break; default: printk(KERN_ERR "devpts: called with bogus options\n"); @@ -119,11 +122,14 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) { - if (config.setuid) - seq_printf(seq, ",uid=%u", config.uid); - if (config.setgid) - seq_printf(seq, ",gid=%u", config.gid); - seq_printf(seq, ",mode=%03o", config.mode); + struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + if (opts->setuid) + seq_printf(seq, ",uid=%u", opts->uid); + if (opts->setgid) + seq_printf(seq, ",gid=%u", opts->gid); + seq_printf(seq, ",mode=%03o", opts->mode); return 0; } @@ -143,6 +149,7 @@ static void *new_pts_fs_info(void) return NULL; ida_init(&fsi->allocated_ptys); + fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; return fsi; } @@ -262,6 +269,8 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) struct super_block *sb = pts_sb_from_inode(ptmx_inode); struct inode *inode = new_inode(sb); struct dentry *root = sb->s_root; + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; char s[12]; /* We're supposed 
to be given the slave end of a pty */ @@ -275,7 +284,7 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) inode->i_uid = config.setuid ? config.uid : current_fsuid(); inode->i_gid = config.setgid ? config.gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - init_special_inode(inode, S_IFCHR|config.mode, device); + init_special_inode(inode, S_IFCHR|opts->mode, device); inode->i_private = tty; tty->driver_data = inode; -- cgit v1.2.3-70-g09d2 From 53af8ee4094d80ddaac7efefb572b1c22ae49367 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:41:47 +0000 Subject: Extract option parsing to new function Move code to parse mount options into a separate function so it can (later) be shared between mount and remount operations. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b793e6e3c21..00530e82673 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -72,11 +72,9 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) return devpts_mnt->mnt_sb; } -static int devpts_remount(struct super_block *sb, int *flags, char *data) +static int parse_mount_options(char *data, struct pts_mount_opts *opts) { char *p; - struct pts_fs_info *fsi = DEVPTS_SB(sb); - struct pts_mount_opts *opts = &fsi->mount_opts; opts->setuid = 0; opts->setgid = 0; @@ -120,6 +118,14 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) return 0; } +static int devpts_remount(struct super_block *sb, int *flags, char *data) +{ + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + return parse_mount_options(data, opts); +} + static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) { struct pts_fs_info *fsi = DEVPTS_SB(vfs->mnt_sb); -- cgit v1.2.3-70-g09d2 From 1f8f1e296583f9f832c2fe7b5a219675b74bf43e Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:02 +0000 Subject: Define mknod_ptmx() /dev/ptmx is closely tied to the devpts filesystem. An open of /dev/ptmx allocates the next pty index and the associated device shows up in the devpts fs as /dev/pts/n. With multiple instances of the devpts filesystem, during an open of /dev/ptmx we would be unable to determine which instance of the devpts is being accessed. So we move the 'ptmx' node into /dev/pts and use the inode of the 'ptmx' node to identify the superblock and hence the devpts instance. This patch adds the ability for the kernel to internally create the [ptmx, c, 5:2] device when mounting the devpts filesystem. Since the ptmx node in devpts is new and may surprise some userspace scripts, the default permissions for the new node are 0000. These permissions can be changed either using chmod or by remounting with the new '-o ptmxmode=0666' mount option. Changelog[v5]: - [Serge Hallyn bugfix]: Letting new_inode() assign inode number to ptmx can collide with hand-assigning inode numbers to ptys. So, hand-assign specific inode number to ptmx node also. - [Serge Hallyn]: Maybe safer to grab root dentry mutex while creating ptmx node - [Bugfix with Serge Hallyn] Replace lookup_one_len() in mknod_ptmx() with d_alloc_name() (lookup during ->get_sb() locks up system). To simplify patchset, fold the ptmx_dentry patch into this. Changelog[v4]: - Change default permissions of pts/ptmx node to 0000.
- Move code for ptmxmode under #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES. Changelog[v3]: - Rename ptmx_mode to ptmxmode (for consistency with 'newinstance') Changelog[v2]: - [H. Peter Anvin] Remove mknod() system call support and create the ptmx node internally. Changelog[v1]: - Earlier version of this patch enabled creating /dev/pts/tty as well. As pointed out by Al Viro and H. Peter Anvin, that is not really necessary. Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 115 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 110 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 00530e82673..8ee9dc2f9e4 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -27,6 +27,13 @@ #define DEVPTS_SUPER_MAGIC 0x1cd1 #define DEVPTS_DEFAULT_MODE 0600 +/* + * ptmx is a new node in /dev/pts and will be unused in legacy (single- + * instance) mode. To prevent surprises in user space, set permissions of + * ptmx to 0. Use 'chmod' or remount with '-o ptmxmode' to set meaningful + * permissions. + */ +#define DEVPTS_DEFAULT_PTMX_MODE 0000 #define PTMX_MINOR 2 extern int pty_limit; /* Config limit on Unix98 ptys */ @@ -40,10 +47,11 @@ struct pts_mount_opts { uid_t uid; gid_t gid; umode_t mode; + umode_t ptmxmode; }; enum { - Opt_uid, Opt_gid, Opt_mode, + Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_err }; @@ -51,12 +59,16 @@ static const match_table_t tokens = { {Opt_uid, "uid=%u"}, {Opt_gid, "gid=%u"}, {Opt_mode, "mode=%o"}, +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + {Opt_ptmxmode, "ptmxmode=%o"}, +#endif {Opt_err, NULL} }; struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; + struct dentry *ptmx_dentry; }; static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) @@ -81,6 +93,7 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) opts->uid = 0; opts->gid = 0; opts->mode = DEVPTS_DEFAULT_MODE; + opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -109,6 +122,13 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return -EINVAL; opts->mode = option & S_IALLUGO; break; +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + case Opt_ptmxmode: + if (match_octal(&args[0], &option)) + return -EINVAL; + opts->ptmxmode = option & S_IALLUGO; + break; +#endif default: printk(KERN_ERR "devpts: called with bogus options\n"); return -EINVAL; @@ -118,12 +138,93 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return 0; } +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES +static int mknod_ptmx(struct super_block *sb) +{ + int mode; + int rc = -ENOMEM; + struct dentry *dentry; + struct inode *inode; + struct dentry *root = sb->s_root; + struct pts_fs_info *fsi = DEVPTS_SB(sb); + struct pts_mount_opts *opts = &fsi->mount_opts; + + mutex_lock(&root->d_inode->i_mutex); + + /* If we have already created ptmx node, return */ + if (fsi->ptmx_dentry) { + rc = 0; + goto out; + } + + dentry = d_alloc_name(root, "ptmx"); + if (!dentry) { + printk(KERN_NOTICE "Unable to alloc dentry for ptmx node\n"); + goto out; + } + + /* + * Create a new 'ptmx' node in this mount of devpts. 
+ */ + inode = new_inode(sb); + if (!inode) { + printk(KERN_ERR "Unable to alloc inode for ptmx node\n"); + dput(dentry); + goto out; + } + + inode->i_ino = 2; + inode->i_uid = inode->i_gid = 0; + inode->i_blocks = 0; + inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + mode = S_IFCHR|opts->ptmxmode; + init_special_inode(inode, mode, MKDEV(TTYAUX_MAJOR, 2)); + + d_add(dentry, inode); + + fsi->ptmx_dentry = dentry; + rc = 0; + + printk(KERN_DEBUG "Created ptmx node in devpts ino %lu\n", + inode->i_ino); +out: + mutex_unlock(&root->d_inode->i_mutex); + return rc; +} + +static void update_ptmx_mode(struct pts_fs_info *fsi) +{ + struct inode *inode; + if (fsi->ptmx_dentry) { + inode = fsi->ptmx_dentry->d_inode; + inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; + } +} +#else +static inline void update_ptmx_mode(struct pts_fs_info *fsi) +{ + return; +} +#endif + static int devpts_remount(struct super_block *sb, int *flags, char *data) { + int err; struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; - return parse_mount_options(data, opts); + err = parse_mount_options(data, opts); + + /* + * parse_mount_options() restores options to default values + * before parsing and may have changed ptmxmode. So, update the + * mode in the inode too. Bogus options don't fail the remount, + * so do this even on error return. + */ + update_ptmx_mode(fsi); + + return err; } static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) @@ -136,6 +237,9 @@ static int devpts_show_options(struct seq_file *seq, struct vfsmount *vfs) if (opts->setgid) seq_printf(seq, ",gid=%u", opts->gid); seq_printf(seq, ",mode=%03o", opts->mode); +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES + seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); +#endif return 0; } @@ -156,6 +260,7 @@ static void *new_pts_fs_info(void) ida_init(&fsi->allocated_ptys); fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; + fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; return fsi; } @@ -163,7 +268,7 @@ static void *new_pts_fs_info(void) static int devpts_fill_super(struct super_block *s, void *data, int silent) { - struct inode * inode; + struct inode *inode; s->s_blocksize = 1024; s->s_blocksize_bits = 10; @@ -190,7 +295,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) s->s_root = d_alloc_root(inode); if (s->s_root) return 0; - + printk("devpts: get root dentry failed\n"); iput(inode); @@ -211,7 +316,7 @@ static void devpts_kill_sb(struct super_block *sb) struct pts_fs_info *fsi = DEVPTS_SB(sb); kfree(fsi); - kill_anon_super(sb); + kill_litter_super(sb); } static struct file_system_type devpts_fs_type = { -- cgit v1.2.3-70-g09d2 From d4076ac55bf8755ce6c5706478631c1726cf0179 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:19 +0000 Subject: Define get_init_pts_sb() See comments in the function header for details. The new interface will be used in a follow-on patch. 
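Condensed, the heart of the new interface is an sget() call with a compare callback that only ever matches the initial kernel mount (sketch; the full version with error handling is in the diff below):

        static int compare_init_pts_sb(struct super_block *s, void *p)
        {
                /* match only the superblock of the initial kernel mount */
                return devpts_mnt ? devpts_mnt->mnt_sb == s : 0;
        }

        s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);

so a plain mount of devpts keeps binding to the initial superblock even after 'newinstance' superblocks exist.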
Changelog [v2]: [Dave Hansen] Replace get_sb_ref() in fs/super.c with get_init_pts_sb() and make the new interface private to devpts Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 8ee9dc2f9e4..2d0eb2cf99e 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -305,10 +305,63 @@ fail: return -ENOMEM; } +static int compare_init_pts_sb(struct super_block *s, void *p) +{ + if (devpts_mnt) + return devpts_mnt->mnt_sb == s; + + return 0; +} + +/* + * get_init_pts_sb() + * + * This interface is needed to support multiple namespace semantics in + * devpts while preserving backward compatibility of the current 'single- + * namespace' semantics. i.e all mounts of devpts without the 'newinstance' + * mount option should bind to the initial kernel mount, like + * get_sb_single(). + * + * Mounts with 'newinstance' option create a new private namespace. + * + * But for single-mount semantics, devpts cannot use get_sb_single(), + * because get_sb_single()/sget() find and use the super-block from + * the most recent mount of devpts. But that recent mount may be a + * 'newinstance' mount and get_sb_single() would pick the newinstance + * super-block instead of the initial super-block. + * + * This interface is identical to get_sb_single() except that it + * consistently selects the 'single-namespace' superblock even in the + * presence of the private namespace (i.e 'newinstance') super-blocks. + */ +static int get_init_pts_sb(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + struct super_block *s; + int error; + + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); + if (IS_ERR(s)) + return PTR_ERR(s); + + if (!s->s_root) { + s->s_flags = flags; + error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + up_write(&s->s_umount); + deactivate_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + } + do_remount_sb(s, flags, data, 0); + return simple_set_mnt(mnt, s); +} + static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); + return get_init_pts_sb(fs_type, flags, data, mnt); } static void devpts_kill_sb(struct super_block *sb) -- cgit v1.2.3-70-g09d2 From 2a1b2dc0c83bbfc24d72cafd5e69810a149b44e4 Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Fri, 2 Jan 2009 13:42:27 +0000 Subject: Enable multiple instances of devpts To support containers, allow multiple instances of devpts filesystem, such that indices of ptys allocated in one instance are independent of ptys allocated in other instances of devpts. But to preserve backward compatibility, enable this support for multiple instances only if: - CONFIG_DEVPTS_MULTIPLE_INSTANCES is set to Y, and - '-o newinstance' mount option is specified while mounting devpts To use multi-instance mount, a container startup script could: $ ns_exec -cm /bin/bash $ umount /dev/pts $ mount -t devpts -o newinstance lxcpts /dev/pts $ mount -o bind /dev/pts/ptmx /dev/ptmx $ /usr/sbin/sshd -p 1234 where 'ns_exec -cm /bin/bash' is calls clone() with CLONE_NEWNS flag and execs /bin/bash in the child process. A pty created by the sshd is not visible in the original mount of /dev/pts. 
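The same setup can also be sketched at the syscall level; a minimal illustration assuming the mount namespace has already been unshared (paths, mode and error handling are examples, not mandated by the patch):

        #include <sys/mount.h>

        /* private devpts instance: ptys created here are invisible to the host */
        if (mount("devpts", "/dev/pts", "devpts", 0,
                  "newinstance,ptmxmode=0666") < 0)
                return -1;

        /* make opens of /dev/ptmx reach this instance's ptmx node */
        if (mount("/dev/pts/ptmx", "/dev/ptmx", NULL, MS_BIND, NULL) < 0)
                return -1;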
USER-SPACE-IMPACT: - See Documentation/fs/devpts.txt (included in next patch) for user- space impact in multi-instance and mixed-mode operation. TODO: - Update mount(8), pts(4) man pages. Highlight impact of not redirecting /dev/ptmx to /dev/pts/ptmx after a multi-instance mount. Changelog[v6]: - [Dave Hansen] Use new get_init_pts_sb() interface - [Serge Hallyn] Don't bother displaying 'newinstance' in show_options - [Serge Hallyn] Use macros (PARSE_REMOUNT/PARSE_MOUNT) instead of 0/1. - [Serge Hallyn] Check error return from get_sb_single() (now get_init_pts_sb()) - devpts_pty_kill(): don't dput error dentries Changelog[v5]: - Move get_sb_ref() definition to earlier patch - Move usage info to Documentation/filesystems/devpts.txt (next patch) - Make ptmx node even in init_pts_ns, now that default mode is 0000 (defined in earlier patch, enabled here). - Cache ptmx dentry and use to update mode during remount (defined in earlier patch, enabled here). - Bugfix: explicitly ignore newinstance on remount (if newinstance was specified on remount of initial mount, it would be ignored but /proc/mounts would imply that the option was set) Changelog[v4]: - Update patch description to address H. Peter Anvin's comments - Consolidate multi-instance mode code under new config token, CONFIG_DEVPTS_MULTIPLE_INSTANCE. - Move usage-details from patch description to Documentation/fs/devpts.txt Changelog[v3]: - Rename new mount option to 'newinstance' - Create ptmx nodes only in 'newinstance' mounts - Bugfix: parse_mount_options() modifies @data but since we need to parse the @data twice (once in devpts_get_sb() and once during do_remount_sb()), parse a local copy of @data in devpts_get_sb(). (restructured code in devpts_get_sb() to fix this) Changelog[v2]: - Support both single-mount and multiple-mount semantics and provide '-onewmnt' option to select the semantics. 
Signed-off-by: Sukadev Bhattiprolu Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 170 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 163 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 2d0eb2cf99e..b4a89fa2167 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -48,10 +48,11 @@ struct pts_mount_opts { gid_t gid; umode_t mode; umode_t ptmxmode; + int newinstance; }; enum { - Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, + Opt_uid, Opt_gid, Opt_mode, Opt_ptmxmode, Opt_newinstance, Opt_err }; @@ -61,6 +62,7 @@ static const match_table_t tokens = { {Opt_mode, "mode=%o"}, #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES {Opt_ptmxmode, "ptmxmode=%o"}, + {Opt_newinstance, "newinstance"}, #endif {Opt_err, NULL} }; @@ -78,13 +80,17 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) static inline struct super_block *pts_sb_from_inode(struct inode *inode) { +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) return inode->i_sb; - +#endif return devpts_mnt->mnt_sb; } -static int parse_mount_options(char *data, struct pts_mount_opts *opts) +#define PARSE_MOUNT 0 +#define PARSE_REMOUNT 1 + +static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) { char *p; @@ -95,6 +101,10 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) opts->mode = DEVPTS_DEFAULT_MODE; opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; + /* newinstance makes sense only on initial mount */ + if (op == PARSE_MOUNT) + opts->newinstance = 0; + while ((p = strsep(&data, ",")) != NULL) { substring_t args[MAX_OPT_ARGS]; int token; @@ -128,6 +138,11 @@ static int parse_mount_options(char *data, struct pts_mount_opts *opts) return -EINVAL; opts->ptmxmode = option & S_IALLUGO; break; + case Opt_newinstance: + /* newinstance makes sense only on initial mount */ + if (op == PARSE_MOUNT) + opts->newinstance = 1; + break; #endif default: printk(KERN_ERR "devpts: called with bogus options\n"); @@ -214,7 +229,7 @@ static int devpts_remount(struct super_block *sb, int *flags, char *data) struct pts_fs_info *fsi = DEVPTS_SB(sb); struct pts_mount_opts *opts = &fsi->mount_opts; - err = parse_mount_options(data, opts); + err = parse_mount_options(data, PARSE_REMOUNT, opts); /* * parse_mount_options() restores options to default values @@ -309,8 +324,100 @@ static int compare_init_pts_sb(struct super_block *s, void *p) { if (devpts_mnt) return devpts_mnt->mnt_sb == s; + return 0; +} + +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES +/* + * Safely parse the mount options in @data and update @opts. + * + * devpts ends up parsing options two times during mount, due to the + * two modes of operation it supports. The first parse occurs in + * devpts_get_sb() when determining the mode (single-instance or + * multi-instance mode). The second parse happens in devpts_remount() + * or new_pts_mount() depending on the mode. + * + * Parsing of options modifies the @data making subsequent parsing + * incorrect. So make a local copy of @data and parse it. + * + * Return: 0 On success, -errno on error + */ +static int safe_parse_mount_options(void *data, struct pts_mount_opts *opts) +{ + int rc; + void *datacp; + + if (!data) + return 0; + + /* Use kstrdup() ? 
*/ + datacp = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!datacp) + return -ENOMEM; + + memcpy(datacp, data, PAGE_SIZE); + rc = parse_mount_options((char *)datacp, PARSE_MOUNT, opts); + kfree(datacp); + + return rc; +} + +/* + * Mount a new (private) instance of devpts. PTYs created in this + * instance are independent of the PTYs in other devpts instances. + */ +static int new_pts_mount(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + int err; + struct pts_fs_info *fsi; + struct pts_mount_opts *opts; + + printk(KERN_NOTICE "devpts: newinstance mount\n"); + + err = get_sb_nodev(fs_type, flags, data, devpts_fill_super, mnt); + if (err) + return err; + + fsi = DEVPTS_SB(mnt->mnt_sb); + opts = &fsi->mount_opts; + + err = parse_mount_options(data, PARSE_MOUNT, opts); + if (err) + goto fail; + + err = mknod_ptmx(mnt->mnt_sb); + if (err) + goto fail; return 0; + +fail: + dput(mnt->mnt_sb->s_root); + deactivate_super(mnt->mnt_sb); + return err; +} + +/* + * Check if 'newinstance' mount option was specified in @data. + * + * Return: -errno on error (eg: invalid mount options specified) + * : 1 if 'newinstance' mount option was specified + * : 0 if 'newinstance' mount option was NOT specified + */ +static int is_new_instance_mount(void *data) +{ + int rc; + struct pts_mount_opts opts; + + if (!data) + return 0; + + rc = safe_parse_mount_options(data, &opts); + if (!rc) + rc = opts.newinstance; + + return rc; } /* @@ -358,11 +465,54 @@ static int get_init_pts_sb(struct file_system_type *fs_type, int flags, return simple_set_mnt(mnt, s); } +/* + * Mount or remount the initial kernel mount of devpts. This type of + * mount maintains the legacy, single-instance semantics, while the + * kernel still allows multiple-instances. + */ +static int init_pts_mount(struct file_system_type *fs_type, int flags, + void *data, struct vfsmount *mnt) +{ + int err; + + err = get_init_pts_sb(fs_type, flags, data, mnt); + if (err) + return err; + + err = mknod_ptmx(mnt->mnt_sb); + if (err) { + dput(mnt->mnt_sb->s_root); + deactivate_super(mnt->mnt_sb); + } + + return err; +} + static int devpts_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - return get_init_pts_sb(fs_type, flags, data, mnt); + int new; + + new = is_new_instance_mount(data); + if (new < 0) + return new; + + if (new) + return new_pts_mount(fs_type, flags, data, mnt); + + return init_pts_mount(fs_type, flags, data, mnt); } +#else +/* + * This supports only the legacy single-instance semantics (no + * multiple-instance semantics) + */ +static int devpts_get_sb(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, struct vfsmount *mnt) +{ + return get_sb_single(fs_type, flags, data, devpts_fill_super, mnt); +} +#endif static void devpts_kill_sb(struct super_block *sb) { @@ -488,12 +638,18 @@ void devpts_pty_kill(struct tty_struct *tty) mutex_lock(&root->d_inode->i_mutex); dentry = d_find_alias(inode); - if (dentry && !IS_ERR(dentry)) { + if (IS_ERR(dentry)) + goto out; + + if (dentry) { inode->i_nlink--; d_delete(dentry); - dput(dentry); + dput(dentry); // d_alloc_name() in devpts_pty_new() } + dput(dentry); // d_find_alias above + +out: mutex_unlock(&root->d_inode->i_mutex); } -- cgit v1.2.3-70-g09d2 From 835aa440f1c3fe16a622015bc1b52dffedf6d90e Mon Sep 17 00:00:00 2001 From: Alan Cox Date: Fri, 2 Jan 2009 13:42:48 +0000 Subject: devpts: Coding style clean up Just nail the oddments now while this code is being touched Signed-off-by: 
Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 53 ++++++++++++++++++++++++++--------------------------- 1 file changed, 26 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b4a89fa2167..b02c24313d5 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -311,7 +311,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) if (s->s_root) return 0; - printk("devpts: get root dentry failed\n"); + printk(KERN_ERR "devpts: get root dentry failed\n"); iput(inode); free_fsi: @@ -444,25 +444,25 @@ static int is_new_instance_mount(void *data) static int get_init_pts_sb(struct file_system_type *fs_type, int flags, void *data, struct vfsmount *mnt) { - struct super_block *s; - int error; - - s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); - if (IS_ERR(s)) - return PTR_ERR(s); - - if (!s->s_root) { - s->s_flags = flags; - error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); - if (error) { - up_write(&s->s_umount); - deactivate_super(s); - return error; - } - s->s_flags |= MS_ACTIVE; - } - do_remount_sb(s, flags, data, 0); - return simple_set_mnt(mnt, s); + struct super_block *s; + int error; + + s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL); + if (IS_ERR(s)) + return PTR_ERR(s); + + if (!s->s_root) { + s->s_flags = flags; + error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0); + if (error) { + up_write(&s->s_umount); + deactivate_super(s); + return error; + } + s->s_flags |= MS_ACTIVE; + } + do_remount_sb(s, flags, data, 0); + return simple_set_mnt(mnt, s); } /* @@ -477,7 +477,7 @@ static int init_pts_mount(struct file_system_type *fs_type, int flags, err = get_init_pts_sb(fs_type, flags, data, mnt); if (err) - return err; + return err; err = mknod_ptmx(mnt->mnt_sb); if (err) { @@ -542,9 +542,8 @@ int devpts_new_index(struct inode *ptmx_inode) int ida_ret; retry: - if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) { + if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) return -ENOMEM; - } mutex_lock(&allocated_ptys_lock); ida_ret = ida_get_new(&fsi->allocated_ptys, &index); @@ -576,7 +575,8 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) { - int number = tty->index; /* tty layer puts index from devpts_new_index() in here */ + /* tty layer puts index from devpts_new_index() in here */ + int number = tty->index; struct tty_driver *driver = tty->driver; dev_t device = MKDEV(driver->major, driver->minor_start+number); struct dentry *dentry; @@ -644,11 +644,10 @@ void devpts_pty_kill(struct tty_struct *tty) if (dentry) { inode->i_nlink--; d_delete(dentry); - dput(dentry); // d_alloc_name() in devpts_pty_new() + dput(dentry); /* d_alloc_name() in devpts_pty_new() */ } - dput(dentry); // d_find_alias above - + dput(dentry); /* d_find_alias above */ out: mutex_unlock(&root->d_inode->i_mutex); } -- cgit v1.2.3-70-g09d2 From 8c056e5b148498192832678cf2957760945e8c71 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 2 Jan 2009 13:44:12 +0000 Subject: devpts: fix unused function warning fs/devpts/inode.c:324: warning: 'compare_init_pts_sb' defined but not used Signed-off-by: Andrew Morton Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b02c24313d5..3f309f181de 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -320,6 +320,7 @@ fail: 
return -ENOMEM; } +#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES static int compare_init_pts_sb(struct super_block *s, void *p) { if (devpts_mnt) @@ -327,7 +328,6 @@ static int compare_init_pts_sb(struct super_block *s, void *p) return 0; } -#ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES /* * Safely parse the mount options in @data and update @opts. * -- cgit v1.2.3-70-g09d2 From d0eafc7db8f170d534a16b5f04617e98ae2025de Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 2 Jan 2009 13:44:49 +0000 Subject: CRED: Wrap task credential accesses in the devpts filesystem Wrap access to task credentials so that they can be separated more easily from the task_struct during the introduction of COW creds. Change most current->(|e|s|fs)[ug]id to current_(|e|s|fs)[ug]id(). Change some task->e?[ug]id to task_e?[ug]id(). In some places it makes more sense to use RCU directly rather than a convenient wrapper; these will be addressed by later patches. Signed-off-by: David Howells Signed-off-by: Alan Cox Signed-off-by: Linus Torvalds --- fs/devpts/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 3f309f181de..fff96e152c0 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -594,9 +594,9 @@ int devpts_pty_new(struct inode *ptmx_inode, struct tty_struct *tty) if (!inode) return -ENOMEM; - inode->i_ino = number+2; - inode->i_uid = config.setuid ? config.uid : current_fsuid(); - inode->i_gid = config.setgid ? config.gid : current_fsgid(); + inode->i_ino = number + 3; + inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); + inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; init_special_inode(inode, S_IFCHR|opts->mode, device); inode->i_private = tty; -- cgit v1.2.3-70-g09d2 From fe30af971d896c144ef4708f97cf9d3186303c42 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:13 +0000 Subject: remove the rudiment of a.out for sparc it's been used only in sunos compat Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/binfmt_aout.c | 65 +------------------------------------------------------- 1 file changed, 1 insertion(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index f1f3f4192a6..8a3b32f5b78 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -99,88 +99,53 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u # define START_DATA(u) (u.start_data) #elif defined(__arm__) # define START_DATA(u) ((u.u_tsize << PAGE_SHIFT) + u.start_code) -#elif defined(__sparc__) -# define START_DATA(u) (u.u_tsize) #elif defined(__i386__) || defined(__mc68000__) || defined(__arch_um__) # define START_DATA(u) (u.u_tsize << PAGE_SHIFT) #endif -#ifdef __sparc__ -# define START_STACK(u) ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1)) -#else # define START_STACK(u) (u.start_stack) -#endif fs = get_fs(); set_fs(KERNEL_DS); has_dumped = 1; current->flags |= PF_DUMPCORE; strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm)); -#ifndef __sparc__ dump.u_ar0 = offsetof(struct user, regs); -#endif dump.signal = signr; aout_dump_thread(regs, &dump); /* If the size of the dump file exceeds the rlimit, then see what would happen if we wrote the stack, but not the data area. 
*/ -#ifdef __sparc__ - if ((dump.u_dsize + dump.u_ssize) > limit) - dump.u_dsize = 0; -#else if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit) dump.u_dsize = 0; -#endif /* Make sure we have enough room to write the stack and data areas. */ -#ifdef __sparc__ - if (dump.u_ssize > limit) - dump.u_ssize = 0; -#else if ((dump.u_ssize + 1) * PAGE_SIZE > limit) dump.u_ssize = 0; -#endif /* make sure we actually have a data and stack area to dump */ set_fs(USER_DS); -#ifdef __sparc__ - if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize)) - dump.u_dsize = 0; - if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize)) - dump.u_ssize = 0; -#else if (!access_ok(VERIFY_READ, (void __user *)START_DATA(dump), dump.u_dsize << PAGE_SHIFT)) dump.u_dsize = 0; if (!access_ok(VERIFY_READ, (void __user *)START_STACK(dump), dump.u_ssize << PAGE_SHIFT)) dump.u_ssize = 0; -#endif set_fs(KERNEL_DS); /* struct user */ DUMP_WRITE(&dump,sizeof(dump)); /* Now dump all of the user data. Include malloced stuff as well */ -#ifndef __sparc__ DUMP_SEEK(PAGE_SIZE); -#endif /* now we start writing out the user space info */ set_fs(USER_DS); /* Dump the data area */ if (dump.u_dsize != 0) { dump_start = START_DATA(dump); -#ifdef __sparc__ - dump_size = dump.u_dsize; -#else dump_size = dump.u_dsize << PAGE_SHIFT; -#endif DUMP_WRITE(dump_start,dump_size); } /* Now prepare to dump the stack area */ if (dump.u_ssize != 0) { dump_start = START_STACK(dump); -#ifdef __sparc__ - dump_size = dump.u_ssize; -#else dump_size = dump.u_ssize << PAGE_SHIFT; -#endif DUMP_WRITE(dump_start,dump_size); } /* Finally dump the task struct. Not be used by gdb, but could be useful */ @@ -205,11 +170,6 @@ static unsigned long __user *create_aout_tables(char __user *p, struct linux_bin int envc = bprm->envc; sp = (void __user *)((-(unsigned long)sizeof(char *)) & (unsigned long) p); -#ifdef __sparc__ - /* This imposes the proper stack alignment for a new process. */ - sp = (void __user *) (((unsigned long) sp) & ~7); - if ((envc+argc+3)&1) --sp; -#endif #ifdef __alpha__ /* whee.. test-programs are so much fun. */ put_user(0, --sp); @@ -302,11 +262,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) /* OK, This is the point of no return */ #if defined(__alpha__) SET_AOUT_PERSONALITY(bprm, ex); -#elif defined(__sparc__) - set_personality(PER_SUNOS); -#if !defined(__sparc_v9__) - memcpy(¤t->thread.core_exec, &ex, sizeof(struct exec)); -#endif #else set_personality(PER_LINUX); #endif @@ -322,24 +277,6 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) install_exec_creds(bprm); current->flags &= ~PF_FORKNOEXEC; -#ifdef __sparc__ - if (N_MAGIC(ex) == NMAGIC) { - loff_t pos = fd_offset; - /* Fuck me plenty... 
*/ - /* */ - down_write(¤t->mm->mmap_sem); - error = do_brk(N_TXTADDR(ex), ex.a_text); - up_write(¤t->mm->mmap_sem); - bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex), - ex.a_text, &pos); - down_write(¤t->mm->mmap_sem); - error = do_brk(N_DATADDR(ex), ex.a_data); - up_write(¤t->mm->mmap_sem); - bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex), - ex.a_data, &pos); - goto beyond_if; - } -#endif if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; @@ -347,7 +284,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) text_addr = N_TXTADDR(ex); -#if defined(__alpha__) || defined(__sparc__) +#ifdef __alpha__ pos = fd_offset; map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1; #else -- cgit v1.2.3-70-g09d2 From 17580d7f2f632ff8c9786d609508c35c9f56e1f3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:23 +0000 Subject: sanitize ifdefs in binfmt_aout They are actually alpha vs. i386/arm/m68k i.e. ecoff vs. aout. In the only place where we actually tried to handle arm and i386/m68k in different ways (START_DATA() in coredump handling), the arm variant works for all of them (i386 and m68k have u.start_code set to 0). Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/binfmt_aout.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 8a3b32f5b78..b639dcf7c77 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -95,12 +95,10 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u int has_dumped = 0; unsigned long dump_start, dump_size; struct user dump; -#if defined(__alpha__) +#ifdef __alpha__ # define START_DATA(u) (u.start_data) -#elif defined(__arm__) +#else # define START_DATA(u) ((u.u_tsize << PAGE_SHIFT) + u.start_code) -#elif defined(__i386__) || defined(__mc68000__) || defined(__arch_um__) -# define START_DATA(u) (u.u_tsize << PAGE_SHIFT) #endif # define START_STACK(u) (u.start_stack) @@ -176,18 +174,18 @@ static unsigned long __user *create_aout_tables(char __user *p, struct linux_bin put_user(0, --sp); if (bprm->loader) { put_user(0, --sp); - put_user(0x3eb, --sp); + put_user(1003, --sp); put_user(bprm->loader, --sp); - put_user(0x3ea, --sp); + put_user(1002, --sp); } put_user(bprm->exec, --sp); - put_user(0x3e9, --sp); + put_user(1001, --sp); #endif sp -= envc+1; envp = (char __user * __user *) sp; sp -= argc+1; argv = (char __user * __user *) sp; -#if defined(__i386__) || defined(__mc68000__) || defined(__arm__) || defined(__arch_um__) +#ifndef __alpha__ put_user((unsigned long) envp,--sp); put_user((unsigned long) argv,--sp); #endif @@ -260,7 +258,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) return retval; /* OK, This is the point of no return */ -#if defined(__alpha__) +#ifdef __alpha__ SET_AOUT_PERSONALITY(bprm, ex); #else set_personality(PER_LINUX); -- cgit v1.2.3-70-g09d2 From 3bfacef412b4bc993a8992217e50f1245f2fd3a6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 3 Jan 2009 07:16:33 +0000 Subject: get rid of special-casing the /sbin/loader on alpha ... just make it a binfmt handler like #! one. 
Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- arch/alpha/kernel/Makefile | 2 +- arch/alpha/kernel/binfmt_loader.c | 51 +++++++++++++++++++++++++++++++++++++++ fs/exec.c | 39 ------------------------------ 3 files changed, 52 insertions(+), 40 deletions(-) create mode 100644 arch/alpha/kernel/binfmt_loader.c (limited to 'fs') diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile index ac706c1d7ad..b4697759a12 100644 --- a/arch/alpha/kernel/Makefile +++ b/arch/alpha/kernel/Makefile @@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror -Wno-sign-compare obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \ irq_alpha.o signal.o setup.o ptrace.o time.o \ - alpha_ksyms.o systbls.o err_common.o io.o + alpha_ksyms.o systbls.o err_common.o io.o binfmt_loader.o obj-$(CONFIG_VGA_HOSE) += console.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/alpha/kernel/binfmt_loader.c b/arch/alpha/kernel/binfmt_loader.c new file mode 100644 index 00000000000..4a0af906b00 --- /dev/null +++ b/arch/alpha/kernel/binfmt_loader.c @@ -0,0 +1,51 @@ +#include +#include +#include +#include +#include +#include + +static int load_binary(struct linux_binprm *bprm, struct pt_regs *regs) +{ + struct exec *eh = (struct exec *)bprm->buf; + unsigned long loader; + struct file *file; + int retval; + + if (eh->fh.f_magic != 0x183 || (eh->fh.f_flags & 0x3000) != 0x3000) + return -ENOEXEC; + + if (bprm->loader) + return -ENOEXEC; + + allow_write_access(bprm->file); + fput(bprm->file); + bprm->file = NULL; + + loader = bprm->vma->vm_end - sizeof(void *); + + file = open_exec("/sbin/loader"); + retval = PTR_ERR(file); + if (IS_ERR(file)) + return retval; + + /* Remember if the application is TASO. */ + bprm->taso = eh->ah.entry < 0x100000000UL; + + bprm->file = file; + bprm->loader = loader; + retval = prepare_binprm(bprm); + if (retval < 0) + return retval; + return search_binary_handler(bprm,regs); +} + +static struct linux_binfmt loader_format = { + .load_binary = load_binary, +}; + +static int __init init_loader_binfmt(void) +{ + return register_binfmt(&loader_format); +} +arch_initcall(init_loader_binfmt); diff --git a/fs/exec.c b/fs/exec.c index dfbf7009fbe..3ef9cf9b187 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -57,11 +57,6 @@ #include #include "internal.h" -#ifdef __alpha__ -/* for /sbin/loader handling in search_binary_handler() */ -#include -#endif - int core_uses_pid; char core_pattern[CORENAME_MAX_SIZE] = "core"; int suid_dumpable = 0; @@ -1172,41 +1167,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; -#ifdef __alpha__ - /* handle /sbin/loader.. */ - { - struct exec * eh = (struct exec *) bprm->buf; - if (!bprm->loader && eh->fh.f_magic == 0x183 && - (eh->fh.f_flags & 0x3000) == 0x3000) - { - struct file * file; - unsigned long loader; - - allow_write_access(bprm->file); - fput(bprm->file); - bprm->file = NULL; - - loader = bprm->vma->vm_end - sizeof(void *); - - file = open_exec("/sbin/loader"); - retval = PTR_ERR(file); - if (IS_ERR(file)) - return retval; - - /* Remember if the application is TASO. 
*/ - bprm->taso = eh->ah.entry < 0x100000000UL; - - bprm->file = file; - bprm->loader = loader; - retval = prepare_binprm(bprm); - if (retval<0) - return retval; - /* should call search_binary_handler recursively here, - but it does not matter */ - } - } -#endif retval = security_bprm_check(bprm); if (retval) return retval; -- cgit v1.2.3-70-g09d2 From 157cf649a735a2f7e8dba0ed08e6e38b6c30d886 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 14 Dec 2008 04:57:47 -0500 Subject: sanitize audit_fd_pair() * no allocations * return void Signed-off-by: Al Viro --- fs/pipe.c | 7 +------ include/linux/audit.h | 9 ++++----- kernel/auditsc.c | 44 ++++++++++++++------------------------------ net/socket.c | 9 +-------- 4 files changed, 20 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/pipe.c b/fs/pipe.c index aaf797bd57b..891697112f6 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1016,10 +1016,7 @@ int do_pipe_flags(int *fd, int flags) goto err_fdr; fdw = error; - error = audit_fd_pair(fdr, fdw); - if (error < 0) - goto err_fdw; - + audit_fd_pair(fdr, fdw); fd_install(fdr, fr); fd_install(fdw, fw); fd[0] = fdr; @@ -1027,8 +1024,6 @@ int do_pipe_flags(int *fd, int flags) return 0; - err_fdw: - put_unused_fd(fdw); err_fdr: put_unused_fd(fdr); err_read_pipe: diff --git a/include/linux/audit.h b/include/linux/audit.h index 54978bdd2bd..bd59cd1e321 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -448,7 +448,7 @@ extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mod extern int audit_bprm(struct linux_binprm *bprm); extern void audit_socketcall(int nargs, unsigned long *args); extern int audit_sockaddr(int len, void *addr); -extern int __audit_fd_pair(int fd1, int fd2); +extern void __audit_fd_pair(int fd1, int fd2); extern int audit_set_macxattr(const char *name); extern void __audit_mq_open(int oflag, mode_t mode, struct mq_attr *attr); extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); @@ -464,11 +464,10 @@ static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) if (unlikely(!audit_dummy_context())) __audit_ipc_obj(ipcp); } -static inline int audit_fd_pair(int fd1, int fd2) +static inline void audit_fd_pair(int fd1, int fd2) { if (unlikely(!audit_dummy_context())) - return __audit_fd_pair(fd1, fd2); - return 0; + __audit_fd_pair(fd1, fd2); } static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, mode_t mode) { @@ -537,7 +536,7 @@ extern int audit_signals; #define audit_ipc_set_perm(q,u,g,m) ((void)0) #define audit_bprm(p) ({ 0; }) #define audit_socketcall(n,a) ((void)0) -#define audit_fd_pair(n,a) ({ 0; }) +#define audit_fd_pair(n,a) ((void)0) #define audit_sockaddr(len, addr) ({ 0; }) #define audit_set_macxattr(n) do { ; } while (0) #define audit_mq_open(o,m,a) ((void)0) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 83e946f1cdd..327e65d5067 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -131,11 +131,6 @@ struct audit_aux_data_execve { struct mm_struct *mm; }; -struct audit_aux_data_fd_pair { - struct audit_aux_data d; - int fd[2]; -}; - struct audit_aux_data_pids { struct audit_aux_data d; pid_t target_pid[AUDIT_AUX_PIDS]; @@ -241,6 +236,7 @@ struct audit_context { struct mq_attr attr; } mq_open; }; + int fds[2]; #if AUDIT_DEBUG int put_count; @@ -1382,11 +1378,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts audit_log_execve_info(context, &ab, axi); break; } - case AUDIT_FD_PAIR: { - struct 
audit_aux_data_fd_pair *axs = (void *)aux; - audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]); - break; } - case AUDIT_BPRM_FCAPS: { struct audit_aux_data_bprm_fcaps *axs = (void *)aux; audit_log_format(ab, "fver=%x", axs->fcap_ver); @@ -1416,6 +1407,15 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts if (context->type) show_special(context, &call_panic); + if (context->fds[0] >= 0) { + ab = audit_log_start(context, GFP_KERNEL, AUDIT_FD_PAIR); + if (ab) { + audit_log_format(ab, "fd0=%d fd1=%d", + context->fds[0], context->fds[1]); + audit_log_end(ab); + } + } + if (context->sockaddr_len) { ab = audit_log_start(context, GFP_KERNEL, AUDIT_SOCKADDR); if (ab) { @@ -1696,6 +1696,7 @@ void audit_syscall_exit(int valid, long return_code) context->target_sid = 0; context->sockaddr_len = 0; context->type = 0; + context->fds[0] = -1; kfree(context->filterkey); context->filterkey = NULL; tsk->audit_context = context; @@ -2291,29 +2292,12 @@ void audit_socketcall(int nargs, unsigned long *args) * @fd1: the first file descriptor * @fd2: the second file descriptor * - * Returns 0 for success or NULL context or < 0 on error. */ -int __audit_fd_pair(int fd1, int fd2) +void __audit_fd_pair(int fd1, int fd2) { struct audit_context *context = current->audit_context; - struct audit_aux_data_fd_pair *ax; - - if (likely(!context)) { - return 0; - } - - ax = kmalloc(sizeof(*ax), GFP_KERNEL); - if (!ax) { - return -ENOMEM; - } - - ax->fd[0] = fd1; - ax->fd[1] = fd2; - - ax->d.type = AUDIT_FD_PAIR; - ax->d.next = context->aux; - context->aux = (void *)ax; - return 0; + context->fds[0] = fd1; + context->fds[1] = fd2; } /** diff --git a/net/socket.c b/net/socket.c index b41a92093e4..06603d73c41 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1313,13 +1313,7 @@ asmlinkage long sys_socketpair(int family, int type, int protocol, goto out_fd1; } - err = audit_fd_pair(fd1, fd2); - if (err < 0) { - fput(newfile1); - fput(newfile2); - goto out_fd; - } - + audit_fd_pair(fd1, fd2); fd_install(fd1, newfile1); fd_install(fd2, newfile2); /* fd1 and fd2 may be already another descriptors. @@ -1349,7 +1343,6 @@ out_fd2: out_fd1: put_filp(newfile2); sock_release(sock2); -out_fd: put_unused_fd(fd1); put_unused_fd(fd2); goto out; -- cgit v1.2.3-70-g09d2 From c644f0e4b56f9a2fc066cd0d75a18074d130e4a3 Mon Sep 17 00:00:00 2001 From: Pekka Enberg Date: Sun, 4 Jan 2009 12:00:48 -0800 Subject: fs: introduce bgl_lock_ptr() As suggested by Andreas Dilger, introduce a bgl_lock_ptr() helper in and add separate sb_bgl_lock() helpers to filesystem specific header files to break the hidden dependency to struct ext[234]_sb_info. Also, while at it, convert the macros to static inlines to try make up for all the times I broke Andrew Morton's tree. 
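The hunks below only add the helpers; on the caller side the locking pattern stays the familiar one, e.g. for ext3 (illustrative sketch, caller variable names assumed):

        spinlock_t *lock = sb_bgl_lock(EXT3_SB(sb), block_group);

        spin_lock(lock);
        /* update the group descriptor / free counts for this block group */
        spin_unlock(lock);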
Acked-by: Andreas Dilger Signed-off-by: Pekka Enberg Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext4/ext4_sb.h | 6 ++++++ include/linux/blockgroup_lock.h | 7 +++++-- include/linux/ext2_fs_sb.h | 6 ++++++ include/linux/ext3_fs_sb.h | 6 ++++++ 4 files changed, 23 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h index 445fde603df..b21f16713db 100644 --- a/fs/ext4/ext4_sb.h +++ b/fs/ext4/ext4_sb.h @@ -146,4 +146,10 @@ struct ext4_sb_info { struct flex_groups *s_flex_groups; }; +static inline spinlock_t * +sb_bgl_lock(struct ext4_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _EXT4_SB */ diff --git a/include/linux/blockgroup_lock.h b/include/linux/blockgroup_lock.h index 8607312983b..e44b88ba552 100644 --- a/include/linux/blockgroup_lock.h +++ b/include/linux/blockgroup_lock.h @@ -53,7 +53,10 @@ static inline void bgl_lock_init(struct blockgroup_lock *bgl) * The accessor is a macro so we can embed a blockgroup_lock into different * superblock types */ -#define sb_bgl_lock(sb, block_group) \ - (&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock) +static inline spinlock_t * +bgl_lock_ptr(struct blockgroup_lock *bgl, unsigned int block_group) +{ + return &bgl->locks[(block_group) & (NR_BG_LOCKS-1)].lock; +} #endif diff --git a/include/linux/ext2_fs_sb.h b/include/linux/ext2_fs_sb.h index f273415ab6f..dc541f3653d 100644 --- a/include/linux/ext2_fs_sb.h +++ b/include/linux/ext2_fs_sb.h @@ -108,4 +108,10 @@ struct ext2_sb_info { struct ext2_reserve_window_node s_rsv_window_head; }; +static inline spinlock_t * +sb_bgl_lock(struct ext2_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT2_FS_SB */ diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index b65f0288b84..e024e38248f 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h @@ -83,4 +83,10 @@ struct ext3_sb_info { #endif }; +static inline spinlock_t * +sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group) +{ + return bgl_lock_ptr(&sbi->s_blockgroup_lock, block_group); +} + #endif /* _LINUX_EXT3_FS_SB */ -- cgit v1.2.3-70-g09d2 From 54566b2c1594c2326a645a3551f9d989f7ba3c5e Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 4 Jan 2009 12:00:53 -0800 Subject: fs: symlink write_begin allocation context fix With the write_begin/write_end aops, page_symlink was broken because it could no longer pass a GFP_NOFS type mask into the point where the allocations happened. They are done in write_begin, which would always assume that the filesystem can be entered from reclaim. This bug could cause filesystem deadlocks. The funny thing with having a gfp_t mask there is that it doesn't really allow the caller to arbitrarily tinker with the context in which it can be called. It couldn't ever be GFP_ATOMIC, for example, because it needs to take the page lock. The only thing any callers care about is __GFP_FS anyway, so turn that into a single flag. Add a new flag for write_begin, AOP_FLAG_NOFS. Filesystems can now act on this flag in their write_begin function. Change __grab_cache_page to accept a nofs argument as well, to honour that flag (while we're there, change the name to grab_cache_page_write_begin which is more instructive and does away with random leading underscores). 
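As a sketch of how a filesystem can act on the new flag in its write_begin (illustration only: example_write_begin is a made-up name, and unconditionally setting AOP_FLAG_NOFS stands in for a caller that already has a transaction open at this point):

        static int example_write_begin(struct file *file,
                                       struct address_space *mapping,
                                       loff_t pos, unsigned len, unsigned flags,
                                       struct page **pagep, void **fsdata)
        {
                pgoff_t index = pos >> PAGE_CACHE_SHIFT;
                struct page *page;

                /* allocations below must not recurse into the filesystem */
                flags |= AOP_FLAG_NOFS;

                page = grab_cache_page_write_begin(mapping, index, flags);
                if (!page)
                        return -ENOMEM;
                *pagep = page;
                return 0;
        }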
This is really a more flexible way to go in the end anyway -- if a filesystem happens to want any extra allocations aside from the pagecache ones in its write_begin function, it may now use GFP_KERNEL (rather than GFP_NOFS) for common case allocations (e.g. ocfs2_alloc_write_ctxt, for a random example). [kosaki.motohiro@jp.fujitsu.com: fix ubifs] [kosaki.motohiro@jp.fujitsu.com: fix fuse] Signed-off-by: Nick Piggin Reviewed-by: KOSAKI Motohiro Cc: [2.6.28.x] Signed-off-by: KOSAKI Motohiro Signed-off-by: Andrew Morton [ Cleaned up the calling convention: just pass in the AOP flags untouched to the grab_cache_page_write_begin() function. That just simplifies everybody, and may even allow future expansion of the logic. - Linus ] Signed-off-by: Linus Torvalds --- fs/affs/file.c | 2 +- fs/afs/write.c | 2 +- fs/buffer.c | 4 ++-- fs/cifs/file.c | 2 +- fs/ecryptfs/mmap.c | 2 +- fs/ext3/inode.c | 2 +- fs/ext3/namei.c | 3 +-- fs/ext4/inode.c | 4 ++-- fs/ext4/namei.c | 3 +-- fs/fuse/file.c | 4 ++-- fs/gfs2/ops_address.c | 2 +- fs/hostfs/hostfs_kern.c | 2 +- fs/jffs2/file.c | 2 +- fs/libfs.c | 2 +- fs/namei.c | 13 +++++++++---- fs/nfs/file.c | 2 +- fs/reiserfs/inode.c | 2 +- fs/smbfs/file.c | 2 +- fs/ubifs/file.c | 9 +++++---- include/linux/fs.h | 5 ++++- include/linux/pagemap.h | 3 ++- mm/filemap.c | 13 +++++++++---- 22 files changed, 49 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/affs/file.c b/fs/affs/file.c index 1377b1240b6..9246cb4aa01 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -628,7 +628,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping } index = pos >> PAGE_CACHE_SHIFT; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/afs/write.c b/fs/afs/write.c index d6b85dab35f..3fb36d43362 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -144,7 +144,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, candidate->state = AFS_WBACK_PENDING; init_waitqueue_head(&candidate->waitq); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { kfree(candidate); return -ENOMEM; diff --git a/fs/buffer.c b/fs/buffer.c index 776ae091d3b..a13f09b696f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1996,7 +1996,7 @@ int block_write_begin(struct file *file, struct address_space *mapping, page = *pagep; if (page == NULL) { ownpage = 1; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { status = -ENOMEM; goto out; @@ -2502,7 +2502,7 @@ int nobh_write_begin(struct file *file, struct address_space *mapping, from = pos & (PAGE_CACHE_SIZE - 1); to = from + len; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index b1e1fc6a6e6..12bb656fbe7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2074,7 +2074,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, cFYI(1, ("write_begin from %lld len %d", (long long)pos, len)); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { rc = -ENOMEM; goto out; diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 04d7b3fa1ac..46cec2b6979 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -288,7 +288,7 @@ static int 
ecryptfs_write_begin(struct file *file, loff_t prev_page_end_size; int rc = 0; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index c4bdccf976b..5fa453b49a6 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1161,7 +1161,7 @@ static int ext3_write_begin(struct file *file, struct address_space *mapping, to = from + len; retry: - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 297ea8dfac7..1dd2abe6313 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2175,8 +2175,7 @@ retry: * We have a transaction open. All is sweetness. It also sets * i_size in generic_commit_write(). */ - err = __page_symlink(inode, symname, l, - mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + err = __page_symlink(inode, symname, l, 1); if (err) { drop_nlink(inode); unlock_new_inode(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7c3325e0b00..6702a49992a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1346,7 +1346,7 @@ retry: goto out; } - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; @@ -2550,7 +2550,7 @@ retry: goto out; } - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) { ext4_journal_stop(handle); ret = -ENOMEM; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index da98a9012fa..9fd2a5e1be4 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2212,8 +2212,7 @@ retry: * We have a transaction open. All is sweetness. It also sets * i_size in generic_commit_write(). 
*/ - err = __page_symlink(inode, symname, l, - mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS); + err = __page_symlink(inode, symname, l, 1); if (err) { clear_nlink(inode); unlock_new_inode(inode); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 34930a964b8..4c9ee701126 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -646,7 +646,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; @@ -779,7 +779,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, break; err = -ENOMEM; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, 0); if (!page) break; diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c index 27563816e1c..15f710f2d4d 100644 --- a/fs/gfs2/ops_address.c +++ b/fs/gfs2/ops_address.c @@ -675,7 +675,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, goto out_trans_fail; error = -ENOMEM; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); *pagep = page; if (unlikely(!page)) goto out_endtrans; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 3a31451ac17..5c538e0ec14 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -501,7 +501,7 @@ int hostfs_write_begin(struct file *file, struct address_space *mapping, { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 5a98aa87c85..5edc2bf2058 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -132,7 +132,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, uint32_t pageofs = index << PAGE_CACHE_SHIFT; int ret = 0; - pg = __grab_cache_page(mapping, index); + pg = grab_cache_page_write_begin(mapping, index, flags); if (!pg) return -ENOMEM; *pagep = pg; diff --git a/fs/libfs.c b/fs/libfs.c index e960a832190..bdaec17fa38 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -360,7 +360,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping, index = pos >> PAGE_CACHE_SHIFT; from = pos & (PAGE_CACHE_SIZE - 1); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; diff --git a/fs/namei.c b/fs/namei.c index dd5c9f0bf82..df2d3df4f04 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2817,18 +2817,23 @@ void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) } } -int __page_symlink(struct inode *inode, const char *symname, int len, - gfp_t gfp_mask) +/* + * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS + */ +int __page_symlink(struct inode *inode, const char *symname, int len, int nofs) { struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; int err; char *kaddr; + unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE; + if (nofs) + flags |= AOP_FLAG_NOFS; retry: err = pagecache_write_begin(NULL, mapping, 0, len-1, - AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); + flags, &page, &fsdata); if (err) goto fail; @@ -2852,7 +2857,7 @@ fail: int page_symlink(struct inode *inode, const char *symname, int len) { return __page_symlink(inode, symname, len, - mapping_gfp_mask(inode->i_mapping)); + 
!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS)); } const struct inode_operations page_symlink_inode_operations = { diff --git a/fs/nfs/file.c b/fs/nfs/file.c index d319b49f8f0..90f292b520d 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -354,7 +354,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, file->f_path.dentry->d_name.name, mapping->host->i_ino, len, (long long) pos); - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 145c2d3e5e0..ed04f47007f 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2561,7 +2561,7 @@ static int reiserfs_write_begin(struct file *file, } index = pos >> PAGE_CACHE_SHIFT; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; diff --git a/fs/smbfs/file.c b/fs/smbfs/file.c index e4f8d51a555..92d5e8ffb63 100644 --- a/fs/smbfs/file.c +++ b/fs/smbfs/file.c @@ -297,7 +297,7 @@ static int smb_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; - *pagep = __grab_cache_page(mapping, index); + *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) return -ENOMEM; return 0; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index fe82d2464d4..bf37374567f 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -219,7 +219,8 @@ static void release_existing_page_budget(struct ubifs_info *c) } static int write_begin_slow(struct address_space *mapping, - loff_t pos, unsigned len, struct page **pagep) + loff_t pos, unsigned len, struct page **pagep, + unsigned flags) { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; @@ -247,7 +248,7 @@ static int write_begin_slow(struct address_space *mapping, if (unlikely(err)) return err; - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) { ubifs_release_budget(c, &req); return -ENOMEM; @@ -438,7 +439,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, return -EROFS; /* Try out the fast-path part first */ - page = __grab_cache_page(mapping, index); + page = grab_cache_page_write_begin(mapping, index, flags); if (unlikely(!page)) return -ENOMEM; @@ -483,7 +484,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, unlock_page(page); page_cache_release(page); - return write_begin_slow(mapping, pos, len, pagep); + return write_begin_slow(mapping, pos, len, pagep, flags); } /* diff --git a/include/linux/fs.h b/include/linux/fs.h index e2170ee21e1..f2a3010140e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -423,6 +423,9 @@ enum positive_aop_returns { #define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */ #define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */ +#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct + * helper code (eg buffer layer) + * to clear GFP_FS from alloc */ /* * oh the beauties of C type declarations. 
@@ -2035,7 +2038,7 @@ extern int page_readlink(struct dentry *, char __user *, int); extern void *page_follow_link_light(struct dentry *, struct nameidata *); extern void page_put_link(struct dentry *, struct nameidata *, void *); extern int __page_symlink(struct inode *inode, const char *symname, int len, - gfp_t gfp_mask); + int nofs); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern int generic_readlink(struct dentry *, char __user *, int); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 709742be02f..01ca0856caf 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -241,7 +241,8 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages); -struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index); +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags); /* * Returns locked page at given index in given cache, creating it if needed. diff --git a/mm/filemap.c b/mm/filemap.c index f3e5f8944d1..f8c69273c37 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2140,19 +2140,24 @@ EXPORT_SYMBOL(generic_file_direct_write); * Find or create a page at the given pagecache position. Return the locked * page. This function is specifically for buffered writes. */ -struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index) +struct page *grab_cache_page_write_begin(struct address_space *mapping, + pgoff_t index, unsigned flags) { int status; struct page *page; + gfp_t gfp_notmask = 0; + if (flags & AOP_FLAG_NOFS) + gfp_notmask = __GFP_FS; repeat: page = find_lock_page(mapping, index); if (likely(page)) return page; - page = page_cache_alloc(mapping); + page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask); if (!page) return NULL; - status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); + status = add_to_page_cache_lru(page, mapping, index, + GFP_KERNEL & ~gfp_notmask); if (unlikely(status)) { page_cache_release(page); if (status == -EEXIST) @@ -2161,7 +2166,7 @@ repeat: } return page; } -EXPORT_SYMBOL(__grab_cache_page); +EXPORT_SYMBOL(grab_cache_page_write_begin); static ssize_t generic_perform_write(struct file *file, struct iov_iter *i, loff_t pos) -- cgit v1.2.3-70-g09d2