Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 4
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_core.c | 2
-rw-r--r--  net/8021q/vlan_dev.c | 9
-rw-r--r--  net/8021q/vlanproc.c | 2
-rw-r--r--  net/9p/client.c | 53
-rw-r--r--  net/9p/trans_fd.c | 10
-rw-r--r--  net/9p/trans_rdma.c | 9
-rw-r--r--  net/9p/trans_virtio.c | 4
-rw-r--r--  net/appletalk/atalk_proc.c | 30
-rw-r--r--  net/atm/addr.c | 2
-rw-r--r--  net/atm/atm_misc.c | 40
-rw-r--r--  net/atm/atm_sysfs.c | 27
-rw-r--r--  net/atm/br2684.c | 90
-rw-r--r--  net/atm/clip.c | 86
-rw-r--r--  net/atm/common.c | 386
-rw-r--r--  net/atm/ioctl.c | 196
-rw-r--r--  net/atm/lec.c | 599
-rw-r--r--  net/atm/mpc.c | 540
-rw-r--r--  net/atm/mpoa_caches.c | 190
-rw-r--r--  net/atm/mpoa_proc.c | 89
-rw-r--r--  net/atm/pppoatm.c | 28
-rw-r--r--  net/atm/proc.c | 83
-rw-r--r--  net/atm/pvc.c | 43
-rw-r--r--  net/atm/raw.c | 26
-rw-r--r--  net/atm/resources.c | 418
-rw-r--r--  net/atm/signaling.c | 219
-rw-r--r--  net/atm/svc.c | 258
-rw-r--r--  net/ax25/af_ax25.c | 18
-rw-r--r--  net/ax25/ax25_uid.c | 25
-rw-r--r--  net/bluetooth/bnep/netdev.c | 6
-rw-r--r--  net/bluetooth/cmtp/capi.c | 37
-rw-r--r--  net/bluetooth/hci_conn.c | 3
-rw-r--r--  net/bluetooth/hci_core.c | 12
-rw-r--r--  net/bluetooth/hci_event.c | 2
-rw-r--r--  net/bluetooth/hci_sock.c | 4
-rw-r--r--  net/bluetooth/hci_sysfs.c | 122
-rw-r--r--  net/bluetooth/hidp/core.c | 132
-rw-r--r--  net/bluetooth/hidp/hidp.h | 4
-rw-r--r--  net/bluetooth/l2cap.c | 14
-rw-r--r--  net/bluetooth/rfcomm/core.c | 8
-rw-r--r--  net/bridge/Kconfig | 13
-rw-r--r--  net/bridge/Makefile | 2
-rw-r--r--  net/bridge/br_device.c | 23
-rw-r--r--  net/bridge/br_forward.c | 150
-rw-r--r--  net/bridge/br_if.c | 8
-rw-r--r--  net/bridge/br_input.c | 39
-rw-r--r--  net/bridge/br_multicast.c | 1304
-rw-r--r--  net/bridge/br_private.h | 173
-rw-r--r--  net/bridge/br_stp.c | 2
-rw-r--r--  net/bridge/br_stp_if.c | 1
-rw-r--r--  net/bridge/br_sysfs_br.c | 281
-rw-r--r--  net/bridge/br_sysfs_if.c | 18
-rw-r--r--  net/bridge/netfilter/ebt_802_3.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_arp.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_arpreply.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_dnat.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_ip.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_ip6.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_limit.c | 18
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_mark.c | 33
-rw-r--r--  net/bridge/netfilter/ebt_mark_m.c | 39
-rw-r--r--  net/bridge/netfilter/ebt_nflog.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_pkttype.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_redirect.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_snat.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_stp.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_vlan.c | 2
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c | 2
-rw-r--r--  net/bridge/netfilter/ebtable_filter.c | 2
-rw-r--r--  net/bridge/netfilter/ebtable_nat.c | 2
-rw-r--r--  net/bridge/netfilter/ebtables.c | 1241
-rw-r--r--  net/can/af_can.c | 124
-rw-r--r--  net/can/af_can.h | 4
-rw-r--r--  net/can/proc.c | 93
-rw-r--r--  net/core/dev.c | 288
-rw-r--r--  net/core/dev_mcast.c | 2
-rw-r--r--  net/core/drop_monitor.c | 1
-rw-r--r--  net/core/dst.c | 2
-rw-r--r--  net/core/ethtool.c | 387
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/filter.c | 8
-rw-r--r--  net/core/neighbour.c | 18
-rw-r--r--  net/core/net-sysfs.c | 3
-rw-r--r--  net/core/netpoll.c | 169
-rw-r--r--  net/core/pktgen.c | 4
-rw-r--r--  net/core/rtnetlink.c | 135
-rw-r--r--  net/core/scm.c | 2
-rw-r--r--  net/core/sock.c | 19
-rw-r--r--  net/dcb/dcbnl.c | 16
-rw-r--r--  net/dccp/ccid.c | 11
-rw-r--r--  net/dccp/ccid.h | 8
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/dccp/ipv6.c | 4
-rw-r--r--  net/dccp/probe.c | 4
-rw-r--r--  net/dccp/proto.c | 7
-rw-r--r--  net/decnet/dn_route.c | 14
-rw-r--r--  net/ethernet/eth.c | 6
-rw-r--r--  net/ipv4/af_inet.c | 46
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/arp.c | 58
-rw-r--r--  net/ipv4/devinet.c | 37
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 4
-rw-r--r--  net/ipv4/fib_semantics.c | 80
-rw-r--r--  net/ipv4/icmp.c | 2
-rw-r--r--  net/ipv4/igmp.c | 89
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 39
-rw-r--r--  net/ipv4/ip_gre.c | 25
-rw-r--r--  net/ipv4/ip_sockglue.c | 14
-rw-r--r--  net/ipv4/ipcomp.c | 17
-rw-r--r--  net/ipv4/ipip.c | 20
-rw-r--r--  net/ipv4/ipmr.c | 3
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 390
-rw-r--r--  net/ipv4/netfilter/arptable_filter.c | 95
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 561
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 14
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 4
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 124
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 166
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 96
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 117
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 11
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 19
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 40
-rw-r--r--  net/ipv4/netfilter/nf_nat_ftp.c | 105
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 39
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 41
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 154
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 31
-rw-r--r--  net/ipv4/proc.c | 32
-rw-r--r--  net/ipv4/route.c | 23
-rw-r--r--  net/ipv4/syncookies.c | 3
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 14
-rw-r--r--  net/ipv4/tcp.c | 65
-rw-r--r--  net/ipv4/tcp_input.c | 18
-rw-r--r--  net/ipv4/tcp_ipv4.c | 25
-rw-r--r--  net/ipv4/tcp_output.c | 22
-rw-r--r--  net/ipv4/tcp_timer.c | 27
-rw-r--r--  net/ipv4/udp.c | 19
-rw-r--r--  net/ipv4/udplite.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 110
-rw-r--r--  net/ipv6/addrconf_core.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 32
-rw-r--r--  net/ipv6/ah6.c | 2
-rw-r--r--  net/ipv6/anycast.c | 2
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipv6/fib6_rules.c | 4
-rw-r--r--  net/ipv6/icmp.c | 12
-rw-r--r--  net/ipv6/ip6_fib.c | 52
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 9
-rw-r--r--  net/ipv6/ip6_input.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 21
-rw-r--r--  net/ipv6/ip6_tunnel.c | 43
-rw-r--r--  net/ipv6/ipcomp6.c | 21
-rw-r--r--  net/ipv6/mcast.c | 32
-rw-r--r--  net/ipv6/mip6.c | 2
-rw-r--r--  net/ipv6/ndisc.c | 7
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 563
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 113
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 141
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 86
-rw-r--r--  net/ipv6/netfilter/ip6table_security.c | 109
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 11
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 16
-rw-r--r--  net/ipv6/proc.c | 39
-rw-r--r--  net/ipv6/raw.c | 4
-rw-r--r--  net/ipv6/reassembly.c | 27
-rw-r--r--  net/ipv6/route.c | 12
-rw-r--r--  net/ipv6/sit.c | 25
-rw-r--r--  net/ipv6/syncookies.c | 3
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 4
-rw-r--r--  net/ipv6/tcp_ipv6.c | 22
-rw-r--r--  net/ipv6/tunnel6.c | 4
-rw-r--r--  net/ipv6/udp.c | 22
-rw-r--r--  net/ipv6/udplite.c | 4
-rw-r--r--  net/ipv6/xfrm6_input.c | 2
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 194
-rw-r--r--  net/ipx/ipx_proc.c | 90
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 6
-rw-r--r--  net/irda/irlan/irlan_common.c | 28
-rw-r--r--  net/irda/irlan/irlan_eth.c | 5
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 5
-rw-r--r--  net/irda/irnetlink.c | 2
-rw-r--r--  net/key/af_key.c | 175
-rw-r--r--  net/llc/af_llc.c | 64
-rw-r--r--  net/llc/llc_conn.c | 143
-rw-r--r--  net/llc/llc_core.c | 53
-rw-r--r--  net/llc/llc_output.c | 45
-rw-r--r--  net/llc/llc_proc.c | 69
-rw-r--r--  net/llc/llc_sap.c | 111
-rw-r--r--  net/mac80211/Kconfig | 12
-rw-r--r--  net/mac80211/Makefile | 4
-rw-r--r--  net/mac80211/agg-rx.c | 15
-rw-r--r--  net/mac80211/agg-tx.c | 38
-rw-r--r--  net/mac80211/cfg.c | 198
-rw-r--r--  net/mac80211/debugfs.c | 127
-rw-r--r--  net/mac80211/debugfs_key.c | 2
-rw-r--r--  net/mac80211/debugfs_netdev.c | 212
-rw-r--r--  net/mac80211/debugfs_netdev.h | 9
-rw-r--r--  net/mac80211/debugfs_sta.c | 88
-rw-r--r--  net/mac80211/driver-ops.h | 169
-rw-r--r--  net/mac80211/driver-trace.h | 176
-rw-r--r--  net/mac80211/ht.c | 53
-rw-r--r--  net/mac80211/ibss.c | 127
-rw-r--r--  net/mac80211/ieee80211_i.h | 215
-rw-r--r--  net/mac80211/iface.c | 108
-rw-r--r--  net/mac80211/key.c | 10
-rw-r--r--  net/mac80211/key.h | 8
-rw-r--r--  net/mac80211/main.c | 74
-rw-r--r--  net/mac80211/mesh.c | 6
-rw-r--r--  net/mac80211/mesh_hwmp.c | 20
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 6
-rw-r--r--  net/mac80211/mesh_plink.c | 23
-rw-r--r--  net/mac80211/mlme.c | 1317
-rw-r--r--  net/mac80211/offchannel.c | 170
-rw-r--r--  net/mac80211/pm.c | 18
-rw-r--r--  net/mac80211/rate.c | 93
-rw-r--r--  net/mac80211/rate.h | 14
-rw-r--r--  net/mac80211/rc80211_pid_algo.c | 8
-rw-r--r--  net/mac80211/rx.c | 473
-rw-r--r--  net/mac80211/scan.c | 257
-rw-r--r--  net/mac80211/spectmgmt.c | 4
-rw-r--r--  net/mac80211/sta_info.c | 777
-rw-r--r--  net/mac80211/sta_info.h | 68
-rw-r--r--  net/mac80211/status.c | 107
-rw-r--r--  net/mac80211/tkip.c | 47
-rw-r--r--  net/mac80211/tx.c | 376
-rw-r--r--  net/mac80211/util.c | 319
-rw-r--r--  net/mac80211/wep.c | 17
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/mac80211/work.c | 1100
-rw-r--r--  net/mac80211/wpa.c | 57
-rw-r--r--  net/netfilter/Kconfig | 25
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipvs/Kconfig | 11
-rw-r--r--  net/netfilter/ipvs/Makefile | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 42
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 67
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 44
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 1183
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 14
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 260
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 35
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 46
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 233
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 14
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_proto_udplite.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 332
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 13
-rw-r--r--  net/netfilter/nf_queue.c | 2
-rw-r--r--  net/netfilter/nfnetlink.c | 65
-rw-r--r--  net/netfilter/nfnetlink_log.c | 5
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 7
-rw-r--r--  net/netfilter/x_tables.c | 80
-rw-r--r--  net/netfilter/xt_CT.c | 164
-rw-r--r--  net/netfilter/xt_NFQUEUE.c | 6
-rw-r--r--  net/netfilter/xt_RATEEST.c | 7
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 30
-rw-r--r--  net/netfilter/xt_connlimit.c | 27
-rw-r--r--  net/netfilter/xt_hashlimit.c | 215
-rw-r--r--  net/netfilter/xt_limit.c | 4
-rw-r--r--  net/netfilter/xt_osf.c | 4
-rw-r--r--  net/netfilter/xt_recent.c | 168
-rw-r--r--  net/netfilter/xt_repldata.h | 35
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 1
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 3
-rw-r--r--  net/netlink/af_netlink.c | 12
-rw-r--r--  net/netlink/genetlink.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 21
-rw-r--r--  net/netrom/nr_route.c | 53
-rw-r--r--  net/packet/Kconfig | 10
-rw-r--r--  net/packet/af_packet.c | 302
-rw-r--r--  net/phonet/datagram.c | 6
-rw-r--r--  net/phonet/pep-gprs.c | 4
-rw-r--r--  net/phonet/pep.c | 29
-rw-r--r--  net/phonet/pn_dev.c | 4
-rw-r--r--  net/rds/tcp_connect.c | 7
-rw-r--r--  net/rds/tcp_listen.c | 6
-rw-r--r--  net/rds/tcp_send.c | 4
-rw-r--r--  net/rose/af_rose.c | 22
-rw-r--r--  net/sched/Kconfig | 16
-rw-r--r--  net/sched/sch_api.c | 1
-rw-r--r--  net/sched/sch_fifo.c | 34
-rw-r--r--  net/sctp/bind_addr.c | 1
-rw-r--r--  net/sctp/ipv6.c | 1
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sctp/protocol.c | 6
-rw-r--r--  net/sctp/socket.c | 2
-rw-r--r--  net/sunrpc/rpc_pipe.c | 11
-rw-r--r--  net/sysctl_net.c | 4
-rw-r--r--  net/tipc/Kconfig | 75
-rw-r--r--  net/tipc/core.c | 10
-rw-r--r--  net/unix/af_unix.c | 6
-rw-r--r--  net/unix/sysctl_net_unix.c | 2
-rw-r--r--  net/wimax/op-msg.c | 3
-rw-r--r--  net/wimax/op-reset.c | 3
-rw-r--r--  net/wimax/op-rfkill.c | 3
-rw-r--r--  net/wimax/op-state-get.c | 3
-rw-r--r--  net/wimax/stack.c | 3
-rw-r--r--  net/wireless/.gitignore | 1
-rw-r--r--  net/wireless/Kconfig | 13
-rw-r--r--  net/wireless/Makefile | 6
-rw-r--r--  net/wireless/chan.c | 41
-rw-r--r--  net/wireless/core.c | 59
-rw-r--r--  net/wireless/core.h | 20
-rw-r--r--  net/wireless/db.txt | 17
-rw-r--r--  net/wireless/genregdb.awk | 118
-rw-r--r--  net/wireless/lib80211_crypt_ccmp.c | 2
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c | 23
-rw-r--r--  net/wireless/mlme.c | 214
-rw-r--r--  net/wireless/nl80211.c | 866
-rw-r--r--  net/wireless/nl80211.h | 23
-rw-r--r--  net/wireless/radiotap.c | 305
-rw-r--r--  net/wireless/reg.c | 687
-rw-r--r--  net/wireless/reg.h | 29
-rw-r--r--  net/wireless/regdb.h | 7
-rw-r--r--  net/wireless/scan.c | 158
-rw-r--r--  net/wireless/sme.c | 41
-rw-r--r--  net/wireless/sysfs.c | 20
-rw-r--r--  net/wireless/util.c | 137
-rw-r--r--  net/wireless/wext-compat.c | 49
-rw-r--r--  net/wireless/wext-proc.c | 4
-rw-r--r--  net/x25/af_x25.c | 24
-rw-r--r--  net/x25/x25_proc.c | 114
-rw-r--r--  net/xfrm/xfrm_algo.c | 16
-rw-r--r--  net/xfrm/xfrm_input.c | 2
-rw-r--r--  net/xfrm/xfrm_ipcomp.c | 16
-rw-r--r--  net/xfrm/xfrm_policy.c | 43
-rw-r--r--  net/xfrm/xfrm_proc.c | 6
-rw-r--r--  net/xfrm/xfrm_state.c | 100
-rw-r--r--  net/xfrm/xfrm_sysctl.c | 4
-rw-r--r--  net/xfrm/xfrm_user.c | 111
354 files changed, 18698 insertions(+), 9203 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 33f90e7362c..453512266ea 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -663,7 +663,7 @@ out:
return err;
}
-static int vlan_init_net(struct net *net)
+static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
int err;
@@ -675,7 +675,7 @@ static int vlan_init_net(struct net *net)
return err;
}
-static void vlan_exit_net(struct net *net)
+static void __net_exit vlan_exit_net(struct net *net)
{
vlan_proc_cleanup(net);
}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5685296017e..6abdcac1b2e 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -61,7 +61,7 @@ struct vlan_dev_info {
struct proc_dir_entry *dent;
unsigned long cnt_inc_headroom_on_tx;
unsigned long cnt_encap_on_xmit;
- struct vlan_rx_stats *vlan_rx_stats;
+ struct vlan_rx_stats __percpu *vlan_rx_stats;
};
static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10a..c0316e0ca6e 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,6 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
if (skb_bond_should_drop(skb))
goto drop;
+ skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
@@ -85,6 +86,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
if (skb_bond_should_drop(skb))
goto drop;
+ skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c1b92cab46c..9e83272fc5b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -263,11 +263,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
vhdr->h_vlan_TCI = htons(vlan_tci);
/*
- * Set the protocol type. For a packet of type ETH_P_802_3 we
- * put the length in here instead. It is up to the 802.2
- * layer to carry protocol information.
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+ * put the length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
vhdr->h_vlan_encapsulated_proto = htons(type);
else
vhdr->h_vlan_encapsulated_proto = htons(len);
@@ -323,7 +322,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
}
- skb->dev = vlan_dev_info(dev)->real_dev;
+ skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
len = skb->len;
ret = dev_queue_xmit(skb);
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 9ec1f057c03..afead353e21 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -140,7 +140,7 @@ void vlan_proc_cleanup(struct net *net)
* Create /proc/net/vlan entries
*/
-int vlan_proc_init(struct net *net)
+int __net_init vlan_proc_init(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd..09d4f1e2e4a 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
static int parse_opts(char *opts, struct p9_client *clnt)
{
- char *options;
+ char *options, *tmp_options;
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
if (!opts)
return 0;
- options = kstrdup(opts, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(opts, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
break;
case Opt_trans:
clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+ if(clnt->trans_mod == NULL) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "Could not find request transport: %s\n",
+ (char *) &args[0]);
+ ret = -EINVAL;
+ goto free_and_return;
+ }
break;
case Opt_legacy:
clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
}
}
- kfree(options);
+free_and_return:
+ kfree(tmp_options);
return ret;
}
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
clnt->trans = NULL;
spin_lock_init(&clnt->lock);
INIT_LIST_HEAD(&clnt->fidlist);
- clnt->fidpool = p9_idpool_create();
- if (IS_ERR(clnt->fidpool)) {
- err = PTR_ERR(clnt->fidpool);
- clnt->fidpool = NULL;
- goto error;
- }
p9_tag_init(clnt);
err = parse_opts(options, clnt);
if (err < 0)
- goto error;
+ goto free_client;
if (!clnt->trans_mod)
clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
err = -EPROTONOSUPPORT;
P9_DPRINTK(P9_DEBUG_ERROR,
"No transport defined or default transport\n");
- goto error;
+ goto free_client;
+ }
+
+ clnt->fidpool = p9_idpool_create();
+ if (IS_ERR(clnt->fidpool)) {
+ err = PTR_ERR(clnt->fidpool);
+ clnt->fidpool = NULL;
+ goto put_trans;
}
P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
err = clnt->trans_mod->create(clnt, dev_name, options);
if (err)
- goto error;
+ goto destroy_fidpool;
if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
err = p9_client_version(clnt);
if (err)
- goto error;
+ goto close_trans;
return clnt;
-error:
- p9_client_destroy(clnt);
+close_trans:
+ clnt->trans_mod->close(clnt);
+destroy_fidpool:
+ p9_idpool_destroy(clnt->fidpool);
+put_trans:
+ v9fs_put_trans(clnt->trans_mod);
+free_client:
+ kfree(clnt);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_create);
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
{
int ret;
+ /* NOTE: size shouldn't include its own length */
/* size[2] type[2] dev[4] qid[13] */
/* mode[4] atime[4] mtime[4] length[8]*/
/* name[s] uid[s] gid[s] muid[s] */
- ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+ ret = 2+4+13+4+4+4+8+2+2+2+2;
if (wst->name)
ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
wst->n_uid, wst->n_gid, wst->n_muid);
- req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+ req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c..31d0b05582a 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
- char *options;
+ char *options, *tmp_options;
int ret;
opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
if (!params)
return 0;
- options = kstrdup(params, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(params, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
continue;
}
}
- kfree(options);
+
+ kfree(tmp_options);
return 0;
}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f..2c95a89c0f4 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
- char *options;
+ char *options, *tmp_options;
int ret;
opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
if (!params)
return 0;
- options = kstrdup(params, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(params, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
}
/* RQ must be at least as large as the SQ */
opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
- kfree(options);
+ kfree(tmp_options);
return 0;
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabef..cb50f4ae5ee 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
struct virtio_chan *chan = client->trans;
mutex_lock(&virtio_9p_lock);
- chan->inuse = false;
+ if (chan)
+ chan->inuse = false;
mutex_unlock(&virtio_9p_lock);
}
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
}
client->trans = (void *)chan;
+ client->status = Connected;
chan->client = client;
return 0;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 80caad1a31a..6ef0e761e5d 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,40 +144,16 @@ out:
return 0;
}
-static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &atalk_sockets)
- if (!pos--)
- goto found;
- s = NULL;
-found:
- return s;
-}
-
static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(atalk_sockets_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&atalk_sockets_lock);
- return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN;
+ return seq_hlist_start_head(&atalk_sockets, *pos);
}
static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct sock *i;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- i = sk_head(&atalk_sockets);
- goto out;
- }
- i = sk_next(v);
-out:
- return i;
+ return seq_hlist_next(v, &atalk_sockets, pos);
}
static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
@@ -197,7 +173,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
goto out;
}
- s = v;
+ s = sk_entry(v);
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
diff --git a/net/atm/addr.c b/net/atm/addr.c
index 82e85abc303..cf3ae8b4757 100644
--- a/net/atm/addr.c
+++ b/net/atm/addr.c
@@ -4,7 +4,7 @@
#include <linux/atm.h>
#include <linux/atmdev.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "signaling.h"
#include "addr.h"
diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
index 02cc7e71efe..fc63526d869 100644
--- a/net/atm/atm_misc.c
+++ b/net/atm/atm_misc.c
@@ -2,37 +2,35 @@
/* Written 1995-2000 by Werner Almesberger, EPFL ICA */
-
#include <linux/module.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/skbuff.h>
#include <linux/sonet.h>
#include <linux/bitops.h>
+#include <linux/errno.h>
#include <asm/atomic.h>
-#include <asm/errno.h>
-
-int atm_charge(struct atm_vcc *vcc,int truesize)
+int atm_charge(struct atm_vcc *vcc, int truesize)
{
- atm_force_charge(vcc,truesize);
+ atm_force_charge(vcc, truesize);
if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
return 1;
- atm_return(vcc,truesize);
+ atm_return(vcc, truesize);
atomic_inc(&vcc->stats->rx_drop);
return 0;
}
+EXPORT_SYMBOL(atm_charge);
-
-struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
- gfp_t gfp_flags)
+struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
+ gfp_t gfp_flags)
{
struct sock *sk = sk_atm(vcc);
int guess = atm_guess_pdu2truesize(pdu_size);
- atm_force_charge(vcc,guess);
+ atm_force_charge(vcc, guess);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
- struct sk_buff *skb = alloc_skb(pdu_size,gfp_flags);
+ struct sk_buff *skb = alloc_skb(pdu_size, gfp_flags);
if (skb) {
atomic_add(skb->truesize-guess,
@@ -40,10 +38,11 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
return skb;
}
}
- atm_return(vcc,guess);
+ atm_return(vcc, guess);
atomic_inc(&vcc->stats->rx_drop);
return NULL;
}
+EXPORT_SYMBOL(atm_alloc_charge);
/*
@@ -73,7 +72,6 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
* else *
*/
-
int atm_pcr_goal(const struct atm_trafprm *tp)
{
if (tp->pcr && tp->pcr != ATM_MAX_PCR)
@@ -84,26 +82,20 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
return -tp->max_pcr;
return 0;
}
+EXPORT_SYMBOL(atm_pcr_goal);
-
-void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
+void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
+EXPORT_SYMBOL(sonet_copy_stats);
-
-void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
+void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
{
-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
+#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
__SONET_ITEMS
#undef __HANDLE_ITEM
}
-
-
-EXPORT_SYMBOL(atm_charge);
-EXPORT_SYMBOL(atm_alloc_charge);
-EXPORT_SYMBOL(atm_pcr_goal);
-EXPORT_SYMBOL(sonet_copy_stats);
EXPORT_SYMBOL(sonet_subtract_stats);
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index b5674dc2083..f693b78eb46 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -42,13 +42,14 @@ static ssize_t show_atmaddress(struct device *cdev,
spin_lock_irqsave(&adev->lock, flags);
list_for_each_entry(aaddr, &adev->local, entry) {
- for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
+ for (i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) {
if (j == *fmt) {
pos += sprintf(pos, ".");
++fmt;
j = 0;
}
- pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]);
+ pos += sprintf(pos, "%02x",
+ aaddr->addr.sas_addr.prv[i]);
}
pos += sprintf(pos, "\n");
}
@@ -78,17 +79,17 @@ static ssize_t show_link_rate(struct device *cdev,
/* show the link rate, not the data rate */
switch (adev->link_rate) {
- case ATM_OC3_PCR:
- link_rate = 155520000;
- break;
- case ATM_OC12_PCR:
- link_rate = 622080000;
- break;
- case ATM_25_PCR:
- link_rate = 25600000;
- break;
- default:
- link_rate = adev->link_rate * 8 * 53;
+ case ATM_OC3_PCR:
+ link_rate = 155520000;
+ break;
+ case ATM_OC12_PCR:
+ link_rate = 622080000;
+ break;
+ case ATM_25_PCR:
+ link_rate = 25600000;
+ break;
+ default:
+ link_rate = adev->link_rate * 8 * 53;
}
pos += sprintf(pos, "%d\n", link_rate);
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index c9230c39869..4d64d87e757 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -6,6 +6,8 @@
* Eric Kinzie, 2006-2007, US Naval Research Laboratory
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -15,7 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/arp.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
@@ -26,20 +28,14 @@
#include "common.h"
-#ifdef SKB_DEBUG
static void skb_debug(const struct sk_buff *skb)
{
+#ifdef SKB_DEBUG
#define NUM2PRINT 50
- char buf[NUM2PRINT * 3 + 1]; /* 3 chars per byte */
- int i = 0;
- for (i = 0; i < skb->len && i < NUM2PRINT; i++) {
- sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
- }
- printk(KERN_DEBUG "br2684: skb: %s\n", buf);
-}
-#else
-#define skb_debug(skb) do {} while (0)
+ print_hex_dump(KERN_DEBUG, "br2684: skb: ", DUMP_OFFSET,
+ 16, 1, skb->data, min(NUM2PRINT, skb->len), true);
#endif
+}
#define BR2684_ETHERTYPE_LEN 2
#define BR2684_PAD_LEN 2
@@ -68,7 +64,7 @@ struct br2684_vcc {
struct atm_vcc *atmvcc;
struct net_device *device;
/* keep old push, pop functions for chaining */
- void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb);
+ void (*old_push)(struct atm_vcc *vcc, struct sk_buff *skb);
void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
enum br2684_encaps encaps;
struct list_head brvccs;
@@ -148,7 +144,7 @@ static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
struct br2684_vcc *brvcc = BR2684_VCC(vcc);
struct net_device *net_dev = skb->dev;
- pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev);
+ pr_debug("(vcc %p ; net_dev %p )\n", vcc, net_dev);
brvcc->old_pop(vcc, skb);
if (!net_dev)
@@ -244,7 +240,7 @@ static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
struct br2684_dev *brdev = BRPRIV(dev);
struct br2684_vcc *brvcc;
- pr_debug("br2684_start_xmit, skb_dst(skb)=%p\n", skb_dst(skb));
+ pr_debug("skb_dst(skb)=%p\n", skb_dst(skb));
read_lock(&devs_lock);
brvcc = pick_outgoing_vcc(skb, brdev);
if (brvcc == NULL) {
@@ -300,7 +296,8 @@ static int br2684_setfilt(struct atm_vcc *atmvcc, void __user * arg)
struct br2684_dev *brdev;
read_lock(&devs_lock);
brdev = BRPRIV(br2684_find_dev(&fs.ifspec));
- if (brdev == NULL || list_empty(&brdev->brvccs) || brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
+ if (brdev == NULL || list_empty(&brdev->brvccs) ||
+ brdev->brvccs.next != brdev->brvccs.prev) /* >1 VCC */
brvcc = NULL;
else
brvcc = list_entry_brvcc(brdev->brvccs.next);
@@ -352,7 +349,7 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
struct net_device *net_dev = brvcc->device;
struct br2684_dev *brdev = BRPRIV(net_dev);
- pr_debug("br2684_push\n");
+ pr_debug("\n");
if (unlikely(skb == NULL)) {
/* skb==NULL means VCC is being destroyed */
@@ -376,29 +373,25 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
__skb_trim(skb, skb->len - 4);
/* accept packets that have "ipv[46]" in the snap header */
- if ((skb->len >= (sizeof(llc_oui_ipv4)))
- &&
- (memcmp
- (skb->data, llc_oui_ipv4,
- sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
- if (memcmp
- (skb->data + 6, ethertype_ipv6,
- sizeof(ethertype_ipv6)) == 0)
+ if ((skb->len >= (sizeof(llc_oui_ipv4))) &&
+ (memcmp(skb->data, llc_oui_ipv4,
+ sizeof(llc_oui_ipv4) - BR2684_ETHERTYPE_LEN) == 0)) {
+ if (memcmp(skb->data + 6, ethertype_ipv6,
+ sizeof(ethertype_ipv6)) == 0)
skb->protocol = htons(ETH_P_IPV6);
- else if (memcmp
- (skb->data + 6, ethertype_ipv4,
- sizeof(ethertype_ipv4)) == 0)
+ else if (memcmp(skb->data + 6, ethertype_ipv4,
+ sizeof(ethertype_ipv4)) == 0)
skb->protocol = htons(ETH_P_IP);
else
goto error;
skb_pull(skb, sizeof(llc_oui_ipv4));
skb_reset_network_header(skb);
skb->pkt_type = PACKET_HOST;
- /*
- * Let us waste some time for checking the encapsulation.
- * Note, that only 7 char is checked so frames with a valid FCS
- * are also accepted (but FCS is not checked of course).
- */
+ /*
+ * Let us waste some time for checking the encapsulation.
+ * Note, that only 7 char is checked so frames with a valid FCS
+ * are also accepted (but FCS is not checked of course).
+ */
} else if ((skb->len >= sizeof(llc_oui_pid_pad)) &&
(memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) {
skb_pull(skb, sizeof(llc_oui_pid_pad));
@@ -479,8 +472,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
write_lock_irq(&devs_lock);
net_dev = br2684_find_dev(&be.ifspec);
if (net_dev == NULL) {
- printk(KERN_ERR
- "br2684: tried to attach to non-existant device\n");
+ pr_err("tried to attach to non-existant device\n");
err = -ENXIO;
goto error;
}
@@ -494,17 +486,16 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
err = -EEXIST;
goto error;
}
- if (be.fcs_in != BR2684_FCSIN_NO || be.fcs_out != BR2684_FCSOUT_NO ||
- be.fcs_auto || be.has_vpiid || be.send_padding || (be.encaps !=
- BR2684_ENCAPS_VC
- && be.encaps !=
- BR2684_ENCAPS_LLC)
- || be.min_size != 0) {
+ if (be.fcs_in != BR2684_FCSIN_NO ||
+ be.fcs_out != BR2684_FCSOUT_NO ||
+ be.fcs_auto || be.has_vpiid || be.send_padding ||
+ (be.encaps != BR2684_ENCAPS_VC &&
+ be.encaps != BR2684_ENCAPS_LLC) ||
+ be.min_size != 0) {
err = -EINVAL;
goto error;
}
- pr_debug("br2684_regvcc vcc=%p, encaps=%d, brvcc=%p\n", atmvcc,
- be.encaps, brvcc);
+ pr_debug("vcc=%p, encaps=%d, brvcc=%p\n", atmvcc, be.encaps, brvcc);
if (list_empty(&brdev->brvccs) && !brdev->mac_was_set) {
unsigned char *esi = atmvcc->dev->esi;
if (esi[0] | esi[1] | esi[2] | esi[3] | esi[4] | esi[5])
@@ -541,7 +532,8 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
}
__module_get(THIS_MODULE);
return 0;
- error:
+
+error:
write_unlock_irq(&devs_lock);
kfree(brvcc);
return err;
@@ -587,7 +579,7 @@ static void br2684_setup_routed(struct net_device *netdev)
INIT_LIST_HEAD(&brdev->brvccs);
}
-static int br2684_create(void __user * arg)
+static int br2684_create(void __user *arg)
{
int err;
struct net_device *netdev;
@@ -595,11 +587,10 @@ static int br2684_create(void __user * arg)
struct atm_newif_br2684 ni;
enum br2684_payload payload;
- pr_debug("br2684_create\n");
+ pr_debug("\n");
- if (copy_from_user(&ni, arg, sizeof ni)) {
+ if (copy_from_user(&ni, arg, sizeof ni))
return -EFAULT;
- }
if (ni.media & BR2684_FLAG_ROUTED)
payload = p_routed;
@@ -607,9 +598,8 @@ static int br2684_create(void __user * arg)
payload = p_bridged;
ni.media &= 0xffff; /* strip flags */
- if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500) {
+ if (ni.media != BR2684_MEDIA_ETHERNET || ni.mtu != 1500)
return -EINVAL;
- }
netdev = alloc_netdev(sizeof(struct br2684_dev),
ni.ifname[0] ? ni.ifname : "nas%d",
@@ -624,7 +614,7 @@ static int br2684_create(void __user * arg)
/* open, stop, do_ioctl ? */
err = register_netdev(netdev);
if (err < 0) {
- printk(KERN_ERR "br2684_create: register_netdev failed\n");
+ pr_err("register_netdev failed\n");
free_netdev(netdev);
return err;
}
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 64629c35434..ebfa022008f 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -2,6 +2,8 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h> /* for UINT_MAX */
@@ -30,10 +32,10 @@
#include <linux/jhash.h>
#include <net/route.h> /* for struct rtable and routing */
#include <net/icmp.h> /* icmp_send */
-#include <asm/param.h> /* for HZ */
+#include <linux/param.h> /* for HZ */
+#include <linux/uaccess.h>
#include <asm/byteorder.h> /* for htons etc. */
#include <asm/system.h> /* save/restore_flags */
-#include <asm/uaccess.h>
#include <asm/atomic.h>
#include "common.h"
@@ -51,13 +53,13 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
struct atmarp_ctrl *ctrl;
struct sk_buff *skb;
- pr_debug("to_atmarpd(%d)\n", type);
+ pr_debug("(%d)\n", type);
if (!atmarpd)
return -EUNATCH;
- skb = alloc_skb(sizeof(struct atmarp_ctrl),GFP_ATOMIC);
+ skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
- ctrl = (struct atmarp_ctrl *) skb_put(skb,sizeof(struct atmarp_ctrl));
+ ctrl = (struct atmarp_ctrl *)skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
@@ -71,8 +73,7 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
{
- pr_debug("link_vcc %p to entry %p (neigh %p)\n", clip_vcc, entry,
- entry->neigh);
+ pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh);
clip_vcc->entry = entry;
clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */
clip_vcc->next = entry->vccs;
@@ -86,7 +87,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
struct clip_vcc **walk;
if (!entry) {
- printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+ pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
return;
}
netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */
@@ -106,13 +107,11 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
error = neigh_update(entry->neigh, NULL, NUD_NONE,
NEIGH_UPDATE_F_ADMIN);
if (error)
- printk(KERN_CRIT "unlink_clip_vcc: "
- "neigh_update failed with %d\n", error);
+ pr_crit("neigh_update failed with %d\n", error);
goto out;
}
- printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
- "0x%p)\n", entry, clip_vcc);
- out:
+ pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+out:
netif_tx_unlock_bh(entry->neigh->dev);
}
@@ -127,7 +126,7 @@ static int neigh_check_cb(struct neighbour *n)
if (cv->idle_timeout && time_after(jiffies, exp)) {
pr_debug("releasing vcc %p->%p of entry %p\n",
- cv, cv->vcc, entry);
+ cv, cv->vcc, entry);
vcc_release_async(cv->vcc, -ETIMEDOUT);
}
}
@@ -139,7 +138,7 @@ static int neigh_check_cb(struct neighbour *n)
struct sk_buff *skb;
pr_debug("destruction postponed with ref %d\n",
- atomic_read(&n->refcnt));
+ atomic_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb);
@@ -163,7 +162,7 @@ static int clip_arp_rcv(struct sk_buff *skb)
{
struct atm_vcc *vcc;
- pr_debug("clip_arp_rcv\n");
+ pr_debug("\n");
vcc = ATM_SKB(skb)->vcc;
if (!vcc || !atm_charge(vcc, skb->truesize)) {
dev_kfree_skb_any(skb);
@@ -188,7 +187,7 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
- pr_debug("clip push\n");
+ pr_debug("\n");
if (!skb) {
pr_debug("removing VCC %p\n", clip_vcc);
if (clip_vcc->entry)
@@ -206,12 +205,12 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
}
ATM_SKB(skb)->vcc = vcc;
skb_reset_mac_header(skb);
- if (!clip_vcc->encap
- || skb->len < RFC1483LLC_LEN
- || memcmp(skb->data, llc_oui, sizeof (llc_oui)))
+ if (!clip_vcc->encap ||
+ skb->len < RFC1483LLC_LEN ||
+ memcmp(skb->data, llc_oui, sizeof(llc_oui)))
skb->protocol = htons(ETH_P_IP);
else {
- skb->protocol = ((__be16 *) skb->data)[3];
+ skb->protocol = ((__be16 *)skb->data)[3];
skb_pull(skb, RFC1483LLC_LEN);
if (skb->protocol == htons(ETH_P_ARP)) {
skb->dev->stats.rx_packets++;
@@ -239,7 +238,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
int old;
unsigned long flags;
- pr_debug("clip_pop(vcc %p)\n", vcc);
+ pr_debug("(vcc %p)\n", vcc);
clip_vcc->old_pop(vcc, skb);
/* skb->dev == NULL in outbound ARP packets */
if (!dev)
@@ -255,7 +254,7 @@ static void clip_pop(struct atm_vcc *vcc, struct sk_buff *skb)
static void clip_neigh_solicit(struct neighbour *neigh, struct sk_buff *skb)
{
- pr_debug("clip_neigh_solicit (neigh %p, skb %p)\n", neigh, skb);
+ pr_debug("(neigh %p, skb %p)\n", neigh, skb);
to_atmarpd(act_need, PRIV(neigh->dev)->number, NEIGH2ENTRY(neigh)->ip);
}
@@ -284,7 +283,7 @@ static int clip_constructor(struct neighbour *neigh)
struct in_device *in_dev;
struct neigh_parms *parms;
- pr_debug("clip_constructor (neigh %p, entry %p)\n", neigh, entry);
+ pr_debug("(neigh %p, entry %p)\n", neigh, entry);
neigh->type = inet_addr_type(&init_net, entry->ip);
if (neigh->type != RTN_UNICAST)
return -EINVAL;
@@ -369,9 +368,9 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
int old;
unsigned long flags;
- pr_debug("clip_start_xmit (skb %p)\n", skb);
+ pr_debug("(skb %p)\n", skb);
if (!skb_dst(skb)) {
- printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n");
+ pr_err("skb_dst(skb) == NULL\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -385,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
return 0;
}
#endif
- printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
+ pr_err("NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -421,7 +420,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
- printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
+ pr_warning("XOFF->XOFF transition\n");
return NETDEV_TX_OK;
}
dev->stats.tx_packets++;
@@ -456,7 +455,7 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
if (!clip_vcc)
return -ENOMEM;
- pr_debug("mkip clip_vcc %p vcc %p\n", clip_vcc, vcc);
+ pr_debug("%p vcc %p\n", clip_vcc, vcc);
clip_vcc->vcc = vcc;
vcc->user_back = clip_vcc;
set_bit(ATM_VF_IS_CLIP, &vcc->flags);
@@ -506,16 +505,16 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
struct rtable *rt;
if (vcc->push != clip_push) {
- printk(KERN_WARNING "clip_setentry: non-CLIP VCC\n");
+ pr_warning("non-CLIP VCC\n");
return -EBADF;
}
clip_vcc = CLIP_VCC(vcc);
if (!ip) {
if (!clip_vcc->entry) {
- printk(KERN_ERR "hiding hidden ATMARP entry\n");
+ pr_err("hiding hidden ATMARP entry\n");
return 0;
}
- pr_debug("setentry: remove\n");
+ pr_debug("remove\n");
unlink_clip_vcc(clip_vcc);
return 0;
}
@@ -529,9 +528,9 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
entry = NEIGH2ENTRY(neigh);
if (entry != clip_vcc->entry) {
if (!clip_vcc->entry)
- pr_debug("setentry: add\n");
+ pr_debug("add\n");
else {
- pr_debug("setentry: update\n");
+ pr_debug("update\n");
unlink_clip_vcc(clip_vcc);
}
link_vcc(clip_vcc, entry);
@@ -614,16 +613,16 @@ static int clip_device_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_UP:
- pr_debug("clip_device_event NETDEV_UP\n");
+ pr_debug("NETDEV_UP\n");
to_atmarpd(act_up, PRIV(dev)->number, 0);
break;
case NETDEV_GOING_DOWN:
- pr_debug("clip_device_event NETDEV_DOWN\n");
+ pr_debug("NETDEV_DOWN\n");
to_atmarpd(act_down, PRIV(dev)->number, 0);
break;
case NETDEV_CHANGE:
case NETDEV_CHANGEMTU:
- pr_debug("clip_device_event NETDEV_CHANGE*\n");
+ pr_debug("NETDEV_CHANGE*\n");
to_atmarpd(act_change, PRIV(dev)->number, 0);
break;
}
@@ -645,7 +644,6 @@ static int clip_inet_event(struct notifier_block *this, unsigned long event,
return clip_device_event(this, NETDEV_CHANGE, in_dev->dev);
}
-
static struct notifier_block clip_dev_notifier = {
.notifier_call = clip_device_event,
};
@@ -660,7 +658,7 @@ static struct notifier_block clip_inet_notifier = {
static void atmarpd_close(struct atm_vcc *vcc)
{
- pr_debug("atmarpd_close\n");
+ pr_debug("\n");
rtnl_lock();
atmarpd = NULL;
@@ -671,7 +669,6 @@ static void atmarpd_close(struct atm_vcc *vcc)
module_put(THIS_MODULE);
}
-
static struct atmdev_ops atmarpd_dev_ops = {
.close = atmarpd_close
};
@@ -693,11 +690,11 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
return -EADDRINUSE;
}
- mod_timer(&idle_timer, jiffies+CLIP_CHECK_INTERVAL*HZ);
+ mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
atmarpd = vcc;
- set_bit(ATM_VF_META,&vcc->flags);
- set_bit(ATM_VF_READY,&vcc->flags);
+ set_bit(ATM_VF_META, &vcc->flags);
+ set_bit(ATM_VF_READY, &vcc->flags);
/* allow replies and avoid getting closed if signaling dies */
vcc->dev = &atmarpd_dev;
vcc_insert_socket(sk_atm(vcc));
@@ -950,8 +947,7 @@ static int __init atm_clip_init(void)
p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
if (!p) {
- printk(KERN_ERR "Unable to initialize "
- "/proc/net/atm/arp\n");
+ pr_err("Unable to initialize /proc/net/atm/arp\n");
atm_clip_exit_noproc();
return -ENOMEM;
}
diff --git a/net/atm/common.c b/net/atm/common.c
index d61e051e0a3..74d095a081e 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -2,6 +2,7 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
@@ -18,11 +19,10 @@
#include <linux/bitops.h>
#include <linux/init.h>
#include <net/sock.h> /* struct sock */
+#include <linux/uaccess.h>
+#include <linux/poll.h>
-#include <asm/uaccess.h>
#include <asm/atomic.h>
-#include <asm/poll.h>
-
#include "resources.h" /* atm_find_dev */
#include "common.h" /* prototypes */
@@ -31,13 +31,15 @@
#include "signaling.h" /* for WAITING and sigd_attach */
struct hlist_head vcc_hash[VCC_HTABLE_SIZE];
+EXPORT_SYMBOL(vcc_hash);
+
DEFINE_RWLOCK(vcc_sklist_lock);
+EXPORT_SYMBOL(vcc_sklist_lock);
static void __vcc_insert_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
- struct hlist_head *head = &vcc_hash[vcc->vci &
- (VCC_HTABLE_SIZE - 1)];
+ struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)];
sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk_add_node(sk, head);
}
@@ -48,6 +50,7 @@ void vcc_insert_socket(struct sock *sk)
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
}
+EXPORT_SYMBOL(vcc_insert_socket);
static void vcc_remove_socket(struct sock *sk)
{
@@ -56,37 +59,32 @@ static void vcc_remove_socket(struct sock *sk)
write_unlock_irq(&vcc_sklist_lock);
}
-
-static struct sk_buff *alloc_tx(struct atm_vcc *vcc,unsigned int size)
+static struct sk_buff *alloc_tx(struct atm_vcc *vcc, unsigned int size)
{
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
pr_debug("Sorry: wmem_alloc = %d, size = %d, sndbuf = %d\n",
- sk_wmem_alloc_get(sk), size,
- sk->sk_sndbuf);
+ sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
return NULL;
}
while (!(skb = alloc_skb(size, GFP_KERNEL)))
schedule();
- pr_debug("AlTx %d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
+ pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
return skb;
}
-
-EXPORT_SYMBOL(vcc_hash);
-EXPORT_SYMBOL(vcc_sklist_lock);
-EXPORT_SYMBOL(vcc_insert_socket);
-
static void vcc_sock_destruct(struct sock *sk)
{
if (atomic_read(&sk->sk_rmem_alloc))
- printk(KERN_DEBUG "vcc_sock_destruct: rmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_rmem_alloc));
+ printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
+ __func__, atomic_read(&sk->sk_rmem_alloc));
if (atomic_read(&sk->sk_wmem_alloc))
- printk(KERN_DEBUG "vcc_sock_destruct: wmem leakage (%d bytes) detected.\n", atomic_read(&sk->sk_wmem_alloc));
+ printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
+ __func__, atomic_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
@@ -142,8 +140,8 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
vcc = atm_sk(sk);
vcc->dev = NULL;
- memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc));
- memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc));
+ memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
+ memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
@@ -156,7 +154,6 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family)
return 0;
}
-
static void vcc_destroy_socket(struct sock *sk)
{
struct atm_vcc *vcc = atm_sk(sk);
@@ -171,7 +168,7 @@ static void vcc_destroy_socket(struct sock *sk)
vcc->push(vcc, NULL); /* atmarpd has no push */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- atm_return(vcc,skb->truesize);
+ atm_return(vcc, skb->truesize);
kfree_skb(skb);
}
@@ -182,7 +179,6 @@ static void vcc_destroy_socket(struct sock *sk)
vcc_remove_socket(sk);
}
-
int vcc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -197,7 +193,6 @@ int vcc_release(struct socket *sock)
return 0;
}
-
void vcc_release_async(struct atm_vcc *vcc, int reply)
{
struct sock *sk = sk_atm(vcc);
@@ -208,8 +203,6 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
clear_bit(ATM_VF_WAITING, &vcc->flags);
sk->sk_state_change(sk);
}
-
-
EXPORT_SYMBOL(vcc_release_async);
@@ -235,37 +228,37 @@ void atm_dev_release_vccs(struct atm_dev *dev)
write_unlock_irq(&vcc_sklist_lock);
}
-
-static int adjust_tp(struct atm_trafprm *tp,unsigned char aal)
+static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
{
int max_sdu;
- if (!tp->traffic_class) return 0;
+ if (!tp->traffic_class)
+ return 0;
switch (aal) {
- case ATM_AAL0:
- max_sdu = ATM_CELL_SIZE-1;
- break;
- case ATM_AAL34:
- max_sdu = ATM_MAX_AAL34_PDU;
- break;
- default:
- printk(KERN_WARNING "ATM: AAL problems ... "
- "(%d)\n",aal);
- /* fall through */
- case ATM_AAL5:
- max_sdu = ATM_MAX_AAL5_PDU;
+ case ATM_AAL0:
+ max_sdu = ATM_CELL_SIZE-1;
+ break;
+ case ATM_AAL34:
+ max_sdu = ATM_MAX_AAL34_PDU;
+ break;
+ default:
+ pr_warning("AAL problems ... (%d)\n", aal);
+ /* fall through */
+ case ATM_AAL5:
+ max_sdu = ATM_MAX_AAL5_PDU;
}
- if (!tp->max_sdu) tp->max_sdu = max_sdu;
- else if (tp->max_sdu > max_sdu) return -EINVAL;
- if (!tp->max_cdv) tp->max_cdv = ATM_MAX_CDV;
+ if (!tp->max_sdu)
+ tp->max_sdu = max_sdu;
+ else if (tp->max_sdu > max_sdu)
+ return -EINVAL;
+ if (!tp->max_cdv)
+ tp->max_cdv = ATM_MAX_CDV;
return 0;
}
-
static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
{
- struct hlist_head *head = &vcc_hash[vci &
- (VCC_HTABLE_SIZE - 1)];
+ struct hlist_head *head = &vcc_hash[vci & (VCC_HTABLE_SIZE - 1)];
struct hlist_node *node;
struct sock *s;
struct atm_vcc *walk;
@@ -289,7 +282,6 @@ static int check_ci(const struct atm_vcc *vcc, short vpi, int vci)
return 0;
}
-
static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
{
static short p; /* poor man's per-device cache */
@@ -327,14 +319,13 @@ static int find_ci(const struct atm_vcc *vcc, short *vpi, int *vci)
if ((c == ATM_NOT_RSV_VCI || *vci != ATM_VCI_ANY) &&
*vpi == ATM_VPI_ANY) {
p++;
- if (p >= 1 << vcc->dev->ci_range.vpi_bits) p = 0;
+ if (p >= 1 << vcc->dev->ci_range.vpi_bits)
+ p = 0;
}
- }
- while (old_p != p || old_c != c);
+ } while (old_p != p || old_c != c);
return -EADDRINUSE;
}
-
static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
int vci)
{
@@ -362,37 +353,46 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
__vcc_insert_socket(sk);
write_unlock_irq(&vcc_sklist_lock);
switch (vcc->qos.aal) {
- case ATM_AAL0:
- error = atm_init_aal0(vcc);
- vcc->stats = &dev->stats.aal0;
- break;
- case ATM_AAL34:
- error = atm_init_aal34(vcc);
- vcc->stats = &dev->stats.aal34;
- break;
- case ATM_NO_AAL:
- /* ATM_AAL5 is also used in the "0 for default" case */
- vcc->qos.aal = ATM_AAL5;
- /* fall through */
- case ATM_AAL5:
- error = atm_init_aal5(vcc);
- vcc->stats = &dev->stats.aal5;
- break;
- default:
- error = -EPROTOTYPE;
+ case ATM_AAL0:
+ error = atm_init_aal0(vcc);
+ vcc->stats = &dev->stats.aal0;
+ break;
+ case ATM_AAL34:
+ error = atm_init_aal34(vcc);
+ vcc->stats = &dev->stats.aal34;
+ break;
+ case ATM_NO_AAL:
+ /* ATM_AAL5 is also used in the "0 for default" case */
+ vcc->qos.aal = ATM_AAL5;
+ /* fall through */
+ case ATM_AAL5:
+ error = atm_init_aal5(vcc);
+ vcc->stats = &dev->stats.aal5;
+ break;
+ default:
+ error = -EPROTOTYPE;
}
- if (!error) error = adjust_tp(&vcc->qos.txtp,vcc->qos.aal);
- if (!error) error = adjust_tp(&vcc->qos.rxtp,vcc->qos.aal);
+ if (!error)
+ error = adjust_tp(&vcc->qos.txtp, vcc->qos.aal);
+ if (!error)
+ error = adjust_tp(&vcc->qos.rxtp, vcc->qos.aal);
if (error)
goto fail;
- pr_debug("VCC %d.%d, AAL %d\n",vpi,vci,vcc->qos.aal);
- pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",vcc->qos.txtp.traffic_class,
- vcc->qos.txtp.min_pcr,vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu);
- pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",vcc->qos.rxtp.traffic_class,
- vcc->qos.rxtp.min_pcr,vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu);
+ pr_debug("VCC %d.%d, AAL %d\n", vpi, vci, vcc->qos.aal);
+ pr_debug(" TX: %d, PCR %d..%d, SDU %d\n",
+ vcc->qos.txtp.traffic_class,
+ vcc->qos.txtp.min_pcr,
+ vcc->qos.txtp.max_pcr,
+ vcc->qos.txtp.max_sdu);
+ pr_debug(" RX: %d, PCR %d..%d, SDU %d\n",
+ vcc->qos.rxtp.traffic_class,
+ vcc->qos.rxtp.min_pcr,
+ vcc->qos.rxtp.max_pcr,
+ vcc->qos.rxtp.max_sdu);
if (dev->ops->open) {
- if ((error = dev->ops->open(vcc)))
+ error = dev->ops->open(vcc);
+ if (error)
goto fail;
}
return 0;
@@ -406,14 +406,13 @@ fail_module_put:
return error;
}
-
int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
{
struct atm_dev *dev;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
- pr_debug("vcc_connect (vpi %d, vci %d)\n",vpi,vci);
+ pr_debug("(vpi %d, vci %d)\n", vpi, vci);
if (sock->state == SS_CONNECTED)
return -EISCONN;
if (sock->state != SS_UNCONNECTED)
@@ -422,30 +421,33 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
return -EINVAL;
if (vpi != ATM_VPI_UNSPEC && vci != ATM_VCI_UNSPEC)
- clear_bit(ATM_VF_PARTIAL,&vcc->flags);
+ clear_bit(ATM_VF_PARTIAL, &vcc->flags);
else
- if (test_bit(ATM_VF_PARTIAL,&vcc->flags))
+ if (test_bit(ATM_VF_PARTIAL, &vcc->flags))
return -EINVAL;
- pr_debug("vcc_connect (TX: cl %d,bw %d-%d,sdu %d; "
- "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
- vcc->qos.txtp.traffic_class,vcc->qos.txtp.min_pcr,
- vcc->qos.txtp.max_pcr,vcc->qos.txtp.max_sdu,
- vcc->qos.rxtp.traffic_class,vcc->qos.rxtp.min_pcr,
- vcc->qos.rxtp.max_pcr,vcc->qos.rxtp.max_sdu,
- vcc->qos.aal == ATM_AAL5 ? "" : vcc->qos.aal == ATM_AAL0 ? "" :
- " ??? code ",vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
+ pr_debug("(TX: cl %d,bw %d-%d,sdu %d; "
+ "RX: cl %d,bw %d-%d,sdu %d,AAL %s%d)\n",
+ vcc->qos.txtp.traffic_class, vcc->qos.txtp.min_pcr,
+ vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_sdu,
+ vcc->qos.rxtp.traffic_class, vcc->qos.rxtp.min_pcr,
+ vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_sdu,
+ vcc->qos.aal == ATM_AAL5 ? "" :
+ vcc->qos.aal == ATM_AAL0 ? "" : " ??? code ",
+ vcc->qos.aal == ATM_AAL0 ? 0 : vcc->qos.aal);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
return -EBADFD;
if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS ||
vcc->qos.rxtp.traffic_class == ATM_ANYCLASS)
return -EINVAL;
if (likely(itf != ATM_ITF_ANY)) {
- dev = try_then_request_module(atm_dev_lookup(itf), "atm-device-%d", itf);
+ dev = try_then_request_module(atm_dev_lookup(itf),
+ "atm-device-%d", itf);
} else {
dev = NULL;
mutex_lock(&atm_dev_mutex);
if (!list_empty(&atm_devs)) {
- dev = list_entry(atm_devs.next, struct atm_dev, dev_list);
+ dev = list_entry(atm_devs.next,
+ struct atm_dev, dev_list);
atm_dev_hold(dev);
}
mutex_unlock(&atm_dev_mutex);
@@ -458,13 +460,12 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci)
return error;
}
if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC)
- set_bit(ATM_VF_PARTIAL,&vcc->flags);
- if (test_bit(ATM_VF_READY,&ATM_SD(sock)->flags))
+ set_bit(ATM_VF_PARTIAL, &vcc->flags);
+ if (test_bit(ATM_VF_READY, &ATM_SD(sock)->flags))
sock->state = SS_CONNECTED;
return 0;
}
-
int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
@@ -478,8 +479,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */
return -EOPNOTSUPP;
vcc = ATM_SD(sock);
- if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
- test_bit(ATM_VF_CLOSE,&vcc->flags) ||
+ if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
+ test_bit(ATM_VF_CLOSE, &vcc->flags) ||
!test_bit(ATM_VF_READY, &vcc->flags))
return 0;
@@ -497,13 +498,12 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (error)
return error;
sock_recv_ts_and_drops(msg, sk, skb);
- pr_debug("RcvM %d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
+ pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc), skb->truesize);
atm_return(vcc, skb->truesize);
skb_free_datagram(sk, skb);
return copied;
}
-
int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
size_t total_len)
{
@@ -511,7 +511,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
DEFINE_WAIT(wait);
struct atm_vcc *vcc;
struct sk_buff *skb;
- int eff,error;
+ int eff, error;
const void __user *buff;
int size;
@@ -550,7 +550,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
eff = (size+3) & ~3; /* align to word boundary */
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
error = 0;
- while (!(skb = alloc_tx(vcc,eff))) {
+ while (!(skb = alloc_tx(vcc, eff))) {
if (m->msg_flags & MSG_DONTWAIT) {
error = -EAGAIN;
break;
@@ -560,9 +560,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
error = -ERESTARTSYS;
break;
}
- if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
- test_bit(ATM_VF_CLOSE,&vcc->flags) ||
- !test_bit(ATM_VF_READY,&vcc->flags)) {
+ if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
+ test_bit(ATM_VF_CLOSE, &vcc->flags) ||
+ !test_bit(ATM_VF_READY, &vcc->flags)) {
error = -EPIPE;
send_sig(SIGPIPE, current, 0);
break;
@@ -574,20 +574,20 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
goto out;
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;
- if (copy_from_user(skb_put(skb,size),buff,size)) {
+ if (copy_from_user(skb_put(skb, size), buff, size)) {
kfree_skb(skb);
error = -EFAULT;
goto out;
}
- if (eff != size) memset(skb->data+size,0,eff-size);
- error = vcc->dev->ops->send(vcc,skb);
+ if (eff != size)
+ memset(skb->data + size, 0, eff-size);
+ error = vcc->dev->ops->send(vcc, skb);
error = error ? error : size;
out:
release_sock(sk);
return error;
}
-
unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
@@ -623,8 +623,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
return mask;
}
-
-static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
+static int atm_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
int error;
@@ -636,25 +635,31 @@ static int atm_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
qos->rxtp.traffic_class != vcc->qos.rxtp.traffic_class ||
qos->txtp.traffic_class != vcc->qos.txtp.traffic_class)
return -EINVAL;
- error = adjust_tp(&qos->txtp,qos->aal);
- if (!error) error = adjust_tp(&qos->rxtp,qos->aal);
- if (error) return error;
- if (!vcc->dev->ops->change_qos) return -EOPNOTSUPP;
+ error = adjust_tp(&qos->txtp, qos->aal);
+ if (!error)
+ error = adjust_tp(&qos->rxtp, qos->aal);
+ if (error)
+ return error;
+ if (!vcc->dev->ops->change_qos)
+ return -EOPNOTSUPP;
if (sk_atm(vcc)->sk_family == AF_ATMPVC)
- return vcc->dev->ops->change_qos(vcc,qos,ATM_MF_SET);
- return svc_change_qos(vcc,qos);
+ return vcc->dev->ops->change_qos(vcc, qos, ATM_MF_SET);
+ return svc_change_qos(vcc, qos);
}
-
static int check_tp(const struct atm_trafprm *tp)
{
/* @@@ Should be merged with adjust_tp */
- if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS) return 0;
+ if (!tp->traffic_class || tp->traffic_class == ATM_ANYCLASS)
+ return 0;
if (tp->traffic_class != ATM_UBR && !tp->min_pcr && !tp->pcr &&
- !tp->max_pcr) return -EINVAL;
- if (tp->min_pcr == ATM_MAX_PCR) return -EINVAL;
+ !tp->max_pcr)
+ return -EINVAL;
+ if (tp->min_pcr == ATM_MAX_PCR)
+ return -EINVAL;
if (tp->min_pcr && tp->max_pcr && tp->max_pcr != ATM_MAX_PCR &&
- tp->min_pcr > tp->max_pcr) return -EINVAL;
+ tp->min_pcr > tp->max_pcr)
+ return -EINVAL;
/*
* We allow pcr to be outside [min_pcr,max_pcr], because later
* adjustment may still push it in the valid range.
@@ -662,7 +667,6 @@ static int check_tp(const struct atm_trafprm *tp)
return 0;
}
-
static int check_qos(const struct atm_qos *qos)
{
int error;
@@ -672,9 +676,11 @@ static int check_qos(const struct atm_qos *qos)
if (qos->txtp.traffic_class != qos->rxtp.traffic_class &&
qos->txtp.traffic_class && qos->rxtp.traffic_class &&
qos->txtp.traffic_class != ATM_ANYCLASS &&
- qos->rxtp.traffic_class != ATM_ANYCLASS) return -EINVAL;
+ qos->rxtp.traffic_class != ATM_ANYCLASS)
+ return -EINVAL;
error = check_tp(&qos->txtp);
- if (error) return error;
+ if (error)
+ return error;
return check_tp(&qos->rxtp);
}
@@ -690,37 +696,41 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
vcc = ATM_SD(sock);
switch (optname) {
- case SO_ATMQOS:
- {
- struct atm_qos qos;
-
- if (copy_from_user(&qos,optval,sizeof(qos)))
- return -EFAULT;
- error = check_qos(&qos);
- if (error) return error;
- if (sock->state == SS_CONNECTED)
- return atm_change_qos(vcc,&qos);
- if (sock->state != SS_UNCONNECTED)
- return -EBADFD;
- vcc->qos = qos;
- set_bit(ATM_VF_HASQOS,&vcc->flags);
- return 0;
- }
- case SO_SETCLP:
- if (get_user(value,(unsigned long __user *)optval))
- return -EFAULT;
- if (value) vcc->atm_options |= ATM_ATMOPT_CLP;
- else vcc->atm_options &= ~ATM_ATMOPT_CLP;
- return 0;
- default:
- if (level == SOL_SOCKET) return -EINVAL;
- break;
+ case SO_ATMQOS:
+ {
+ struct atm_qos qos;
+
+ if (copy_from_user(&qos, optval, sizeof(qos)))
+ return -EFAULT;
+ error = check_qos(&qos);
+ if (error)
+ return error;
+ if (sock->state == SS_CONNECTED)
+ return atm_change_qos(vcc, &qos);
+ if (sock->state != SS_UNCONNECTED)
+ return -EBADFD;
+ vcc->qos = qos;
+ set_bit(ATM_VF_HASQOS, &vcc->flags);
+ return 0;
}
- if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL;
- return vcc->dev->ops->setsockopt(vcc,level,optname,optval,optlen);
+ case SO_SETCLP:
+ if (get_user(value, (unsigned long __user *)optval))
+ return -EFAULT;
+ if (value)
+ vcc->atm_options |= ATM_ATMOPT_CLP;
+ else
+ vcc->atm_options &= ~ATM_ATMOPT_CLP;
+ return 0;
+ default:
+ if (level == SOL_SOCKET)
+ return -EINVAL;
+ break;
+ }
+ if (!vcc->dev || !vcc->dev->ops->setsockopt)
+ return -EINVAL;
+ return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
}
-
int vcc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -734,33 +744,33 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
vcc = ATM_SD(sock);
switch (optname) {
- case SO_ATMQOS:
- if (!test_bit(ATM_VF_HASQOS,&vcc->flags))
- return -EINVAL;
- return copy_to_user(optval,&vcc->qos,sizeof(vcc->qos)) ?
- -EFAULT : 0;
- case SO_SETCLP:
- return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 :
- 0,(unsigned long __user *)optval) ? -EFAULT : 0;
- case SO_ATMPVC:
- {
- struct sockaddr_atmpvc pvc;
-
- if (!vcc->dev ||
- !test_bit(ATM_VF_ADDR,&vcc->flags))
- return -ENOTCONN;
- pvc.sap_family = AF_ATMPVC;
- pvc.sap_addr.itf = vcc->dev->number;
- pvc.sap_addr.vpi = vcc->vpi;
- pvc.sap_addr.vci = vcc->vci;
- return copy_to_user(optval,&pvc,sizeof(pvc)) ?
- -EFAULT : 0;
- }
- default:
- if (level == SOL_SOCKET) return -EINVAL;
+ case SO_ATMQOS:
+ if (!test_bit(ATM_VF_HASQOS, &vcc->flags))
+ return -EINVAL;
+ return copy_to_user(optval, &vcc->qos, sizeof(vcc->qos))
+ ? -EFAULT : 0;
+ case SO_SETCLP:
+ return put_user(vcc->atm_options & ATM_ATMOPT_CLP ? 1 : 0,
+ (unsigned long __user *)optval) ? -EFAULT : 0;
+ case SO_ATMPVC:
+ {
+ struct sockaddr_atmpvc pvc;
+
+ if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
+ return -ENOTCONN;
+ pvc.sap_family = AF_ATMPVC;
+ pvc.sap_addr.itf = vcc->dev->number;
+ pvc.sap_addr.vpi = vcc->vpi;
+ pvc.sap_addr.vci = vcc->vci;
+ return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
+ }
+ default:
+ if (level == SOL_SOCKET)
+ return -EINVAL;
break;
}
- if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL;
+ if (!vcc->dev || !vcc->dev->ops->getsockopt)
+ return -EINVAL;
return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
}
@@ -768,23 +778,27 @@ static int __init atm_init(void)
{
int error;
- if ((error = proto_register(&vcc_proto, 0)) < 0)
+ error = proto_register(&vcc_proto, 0);
+ if (error < 0)
goto out;
-
- if ((error = atmpvc_init()) < 0) {
- printk(KERN_ERR "atmpvc_init() failed with %d\n", error);
+ error = atmpvc_init();
+ if (error < 0) {
+ pr_err("atmpvc_init() failed with %d\n", error);
goto out_unregister_vcc_proto;
}
- if ((error = atmsvc_init()) < 0) {
- printk(KERN_ERR "atmsvc_init() failed with %d\n", error);
+ error = atmsvc_init();
+ if (error < 0) {
+ pr_err("atmsvc_init() failed with %d\n", error);
goto out_atmpvc_exit;
}
- if ((error = atm_proc_init()) < 0) {
- printk(KERN_ERR "atm_proc_init() failed with %d\n",error);
+ error = atm_proc_init();
+ if (error < 0) {
+ pr_err("atm_proc_init() failed with %d\n", error);
goto out_atmsvc_exit;
}
- if ((error = atm_sysfs_init()) < 0) {
- printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error);
+ error = atm_sysfs_init();
+ if (error < 0) {
+ pr_err("atm_sysfs_init() failed with %d\n", error);
goto out_atmproc_exit;
}
out:
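
The atm_init() hunk above splits the old assignment-inside-if calls into a separate call and error test, but the recovery logic stays the classic goto-based unwind: each stage that succeeds gets a label that later failures fall back through in reverse order. A stripped-down sketch of the shape, assuming the exit helpers are the matching counterparts of the init calls shown:

	error = proto_register(&vcc_proto, 0);
	if (error < 0)
		goto out;
	error = atmpvc_init();
	if (error < 0)
		goto out_unregister_vcc_proto;
	error = atmsvc_init();
	if (error < 0)
		goto out_atmpvc_exit;
	/* ... remaining stages follow the same pattern ... */
out:
	return error;
out_atmpvc_exit:
	atmpvc_exit();			/* undo atmpvc_init() */
out_unregister_vcc_proto:
	proto_unregister(&vcc_proto);	/* undo proto_register() */
	goto out;
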
diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c
index 2ea40995dce..62dc8bfe6fe 100644
--- a/net/atm/ioctl.c
+++ b/net/atm/ioctl.c
@@ -3,6 +3,7 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
/* 2003 John Levon <levon@movementarian.org> */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kmod.h>
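
The new pr_fmt() definition is what allows the rest of the file to drop hand-written prefixes from individual messages: pr_info(), pr_err(), pr_debug() and friends paste pr_fmt(fmt) in front of every format string at preprocessing time, so the module and function name are added once per file. A small sketch of the effect (the printed module name depends on KBUILD_MODNAME for this build, assumed to be "atm" here):

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/kernel.h>

static void example(void)
{
	/* expands to printk(KERN_INFO "atm" ":%s: " "no dev\n", __func__)
	 * and prints something like "atm:example: no dev"
	 */
	pr_info("no dev\n");
}
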
@@ -36,6 +37,7 @@ void register_atm_ioctl(struct atm_ioctl *ioctl)
list_add_tail(&ioctl->list, &ioctl_list);
mutex_unlock(&ioctl_mutex);
}
+EXPORT_SYMBOL(register_atm_ioctl);
void deregister_atm_ioctl(struct atm_ioctl *ioctl)
{
@@ -43,129 +45,128 @@ void deregister_atm_ioctl(struct atm_ioctl *ioctl)
list_del(&ioctl->list);
mutex_unlock(&ioctl_mutex);
}
-
-EXPORT_SYMBOL(register_atm_ioctl);
EXPORT_SYMBOL(deregister_atm_ioctl);
-static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg, int compat)
+static int do_vcc_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg, int compat)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
int error;
- struct list_head * pos;
+ struct list_head *pos;
void __user *argp = (void __user *)arg;
vcc = ATM_SD(sock);
switch (cmd) {
- case SIOCOUTQ:
- if (sock->state != SS_CONNECTED ||
- !test_bit(ATM_VF_READY, &vcc->flags)) {
- error = -EINVAL;
- goto done;
- }
- error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
- (int __user *) argp) ? -EFAULT : 0;
+ case SIOCOUTQ:
+ if (sock->state != SS_CONNECTED ||
+ !test_bit(ATM_VF_READY, &vcc->flags)) {
+ error = -EINVAL;
+ goto done;
+ }
+ error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
+ (int __user *)argp) ? -EFAULT : 0;
+ goto done;
+ case SIOCINQ:
+ {
+ struct sk_buff *skb;
+
+ if (sock->state != SS_CONNECTED) {
+ error = -EINVAL;
goto done;
- case SIOCINQ:
- {
- struct sk_buff *skb;
-
- if (sock->state != SS_CONNECTED) {
- error = -EINVAL;
- goto done;
- }
- skb = skb_peek(&sk->sk_receive_queue);
- error = put_user(skb ? skb->len : 0,
- (int __user *)argp) ? -EFAULT : 0;
- goto done;
- }
- case SIOCGSTAMP: /* borrowed from IP */
+ }
+ skb = skb_peek(&sk->sk_receive_queue);
+ error = put_user(skb ? skb->len : 0,
+ (int __user *)argp) ? -EFAULT : 0;
+ goto done;
+ }
+ case SIOCGSTAMP: /* borrowed from IP */
#ifdef CONFIG_COMPAT
- if (compat)
- error = compat_sock_get_timestamp(sk, argp);
- else
+ if (compat)
+ error = compat_sock_get_timestamp(sk, argp);
+ else
#endif
- error = sock_get_timestamp(sk, argp);
- goto done;
- case SIOCGSTAMPNS: /* borrowed from IP */
+ error = sock_get_timestamp(sk, argp);
+ goto done;
+ case SIOCGSTAMPNS: /* borrowed from IP */
#ifdef CONFIG_COMPAT
- if (compat)
- error = compat_sock_get_timestampns(sk, argp);
- else
+ if (compat)
+ error = compat_sock_get_timestampns(sk, argp);
+ else
#endif
- error = sock_get_timestampns(sk, argp);
+ error = sock_get_timestampns(sk, argp);
+ goto done;
+ case ATM_SETSC:
+ if (net_ratelimit())
+ pr_warning("ATM_SETSC is obsolete; used by %s:%d\n",
+ current->comm, task_pid_nr(current));
+ error = 0;
+ goto done;
+ case ATMSIGD_CTRL:
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
goto done;
- case ATM_SETSC:
- if (net_ratelimit())
- printk(KERN_WARNING "ATM_SETSC is obsolete; used by %s:%d\n",
- current->comm, task_pid_nr(current));
- error = 0;
+ }
+ /*
+ * The user/kernel protocol for exchanging signalling
+ * info uses kernel pointers as opaque references,
+ * so the holder of the file descriptor can scribble
+ * on the kernel... so we should make sure that we
+ * have the same privileges that /proc/kcore needs
+ */
+ if (!capable(CAP_SYS_RAWIO)) {
+ error = -EPERM;
goto done;
- case ATMSIGD_CTRL:
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- /*
- * The user/kernel protocol for exchanging signalling
- * info uses kernel pointers as opaque references,
- * so the holder of the file descriptor can scribble
- * on the kernel... so we should make sure that we
- * have the same privileges that /proc/kcore needs
- */
- if (!capable(CAP_SYS_RAWIO)) {
- error = -EPERM;
- goto done;
- }
+ }
#ifdef CONFIG_COMPAT
- /* WTF? I don't even want to _think_ about making this
- work for 32-bit userspace. TBH I don't really want
- to think about it at all. dwmw2. */
- if (compat) {
- if (net_ratelimit())
- printk(KERN_WARNING "32-bit task cannot be atmsigd\n");
- error = -EINVAL;
- goto done;
- }
+ /* WTF? I don't even want to _think_ about making this
+ work for 32-bit userspace. TBH I don't really want
+ to think about it at all. dwmw2. */
+ if (compat) {
+ if (net_ratelimit())
+ pr_warning("32-bit task cannot be atmsigd\n");
+ error = -EINVAL;
+ goto done;
+ }
#endif
- error = sigd_attach(vcc);
- if (!error)
- sock->state = SS_CONNECTED;
+ error = sigd_attach(vcc);
+ if (!error)
+ sock->state = SS_CONNECTED;
+ goto done;
+ case ATM_SETBACKEND:
+ case ATM_NEWBACKENDIF:
+ {
+ atm_backend_t backend;
+ error = get_user(backend, (atm_backend_t __user *)argp);
+ if (error)
goto done;
- case ATM_SETBACKEND:
- case ATM_NEWBACKENDIF:
- {
- atm_backend_t backend;
- error = get_user(backend, (atm_backend_t __user *) argp);
- if (error)
- goto done;
- switch (backend) {
- case ATM_BACKEND_PPP:
- request_module("pppoatm");
- break;
- case ATM_BACKEND_BR2684:
- request_module("br2684");
- break;
- }
- }
- break;
- case ATMMPC_CTRL:
- case ATMMPC_DATA:
- request_module("mpoa");
- break;
- case ATMARPD_CTRL:
- request_module("clip");
+ switch (backend) {
+ case ATM_BACKEND_PPP:
+ request_module("pppoatm");
break;
- case ATMLEC_CTRL:
- request_module("lec");
+ case ATM_BACKEND_BR2684:
+ request_module("br2684");
break;
+ }
+ break;
+ }
+ case ATMMPC_CTRL:
+ case ATMMPC_DATA:
+ request_module("mpoa");
+ break;
+ case ATMARPD_CTRL:
+ request_module("clip");
+ break;
+ case ATMLEC_CTRL:
+ request_module("lec");
+ break;
}
error = -ENOIOCTLCMD;
mutex_lock(&ioctl_mutex);
list_for_each(pos, &ioctl_list) {
- struct atm_ioctl * ic = list_entry(pos, struct atm_ioctl, list);
+ struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list);
if (try_module_get(ic->owner)) {
error = ic->ioctl(sock, cmd, arg);
module_put(ic->owner);
@@ -184,7 +185,6 @@ done:
return error;
}
-
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
return do_vcc_ioctl(sock, cmd, arg, 0);
@@ -287,8 +287,8 @@ static int do_atmif_sioc(struct socket *sock, unsigned int cmd,
sioc = compat_alloc_user_space(sizeof(*sioc));
sioc32 = compat_ptr(arg);
- if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int))
- || get_user(data, &sioc32->arg))
+ if (copy_in_user(&sioc->number, &sioc32->number, 2 * sizeof(int)) ||
+ get_user(data, &sioc32->arg))
return -EFAULT;
datap = compat_ptr(data);
if (put_user(datap, &sioc->arg))
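
For commands it does not handle itself, do_vcc_ioctl() walks ioctl_list and offers the command to each registered handler, which is what register_atm_ioctl() (whose EXPORT_SYMBOL the patch moves next to the function) exists for: protocol modules such as pppoatm or br2684 hook their own ioctl handler onto the list. A hedged sketch of a caller, using only the struct atm_ioctl fields visible in the list walk above (owner, ioctl, list); the command number is hypothetical:

static int my_proto_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	if (cmd != MY_PROTO_CTRL)	/* hypothetical command number */
		return -ENOIOCTLCMD;	/* let the next handler have a go */
	/* ... handle the command ... */
	return 0;
}

static struct atm_ioctl my_proto_ioctl_ops = {
	.owner	= THIS_MODULE,
	.ioctl	= my_proto_ioctl,
};

/* module init:  register_atm_ioctl(&my_proto_ioctl_ops);
 * module exit:  deregister_atm_ioctl(&my_proto_ioctl_ops);
 */
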
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 42749b7b917..5da5753157f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -4,6 +4,8 @@
* Marko Kiiskila <mkiiskila@yahoo.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/capability.h>
@@ -16,7 +18,7 @@
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/arp.h>
#include <net/dst.h>
#include <linux/proc_fs.h>
@@ -85,17 +87,19 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
int is_rdesc,
struct lec_arp_table **ret_entry);
static void lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
- const unsigned char *atm_addr, unsigned long remoteflag,
+ const unsigned char *atm_addr,
+ unsigned long remoteflag,
unsigned int targetless_le_arp);
static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id);
static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc);
static void lec_set_flush_tran_id(struct lec_priv *priv,
const unsigned char *atm_addr,
unsigned long tran_id);
-static void lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
+static void lec_vcc_added(struct lec_priv *priv,
+ const struct atmlec_ioc *ioc_data,
struct atm_vcc *vcc,
- void (*old_push) (struct atm_vcc *vcc,
- struct sk_buff *skb));
+ void (*old_push)(struct atm_vcc *vcc,
+ struct sk_buff *skb));
static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc);
/* must be done under lec_arp_lock */
@@ -110,7 +114,6 @@ static inline void lec_arp_put(struct lec_arp_table *entry)
kfree(entry);
}
-
static struct lane2_ops lane2_ops = {
lane2_resolve, /* resolve, spec 3.1.3 */
lane2_associate_req, /* associate_req, spec 3.1.4 */
@@ -148,7 +151,8 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
mesg = (struct atmlec_msg *)skb2->data;
mesg->type = l_topology_change;
buff += 4;
- mesg->content.normal.flag = *buff & 0x01; /* 0x01 is topology change */
+ mesg->content.normal.flag = *buff & 0x01;
+ /* 0x01 is topology change */
priv = netdev_priv(dev);
atm_force_charge(priv->lecd, skb2->truesize);
@@ -242,7 +246,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
static void lec_tx_timeout(struct net_device *dev)
{
- printk(KERN_INFO "%s: tx timeout\n", dev->name);
+ pr_info("%s\n", dev->name);
dev->trans_start = jiffies;
netif_wake_queue(dev);
}
@@ -261,14 +265,10 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
unsigned char rdesc[ETH_ALEN]; /* Token Ring route descriptor */
#endif
int is_rdesc;
-#if DUMP_PACKETS > 0
- char buf[300];
- int i = 0;
-#endif /* DUMP_PACKETS >0 */
- pr_debug("lec_start_xmit called\n");
+ pr_debug("called\n");
if (!priv->lecd) {
- printk("%s:No lecd attached\n", dev->name);
+ pr_info("%s:No lecd attached\n", dev->name);
dev->stats.tx_errors++;
netif_stop_queue(dev);
kfree_skb(skb);
@@ -276,8 +276,8 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
}
pr_debug("skbuff head:%lx data:%lx tail:%lx end:%lx\n",
- (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
- (long)skb_end_pointer(skb));
+ (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb),
+ (long)skb_end_pointer(skb));
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
lec_handle_bridge(skb, dev);
@@ -285,8 +285,7 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
/* Make sure we have room for lec_id */
if (skb_headroom(skb) < 2) {
-
- pr_debug("lec_start_xmit: reallocating skb\n");
+ pr_debug("reallocating skb\n");
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
kfree_skb(skb);
if (skb2 == NULL)
@@ -313,23 +312,17 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
}
#endif
-#if DUMP_PACKETS > 0
- printk("%s: send datalen:%ld lecid:%4.4x\n", dev->name,
- skb->len, priv->lecid);
#if DUMP_PACKETS >= 2
- for (i = 0; i < skb->len && i < 99; i++) {
- sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
- }
+#define MAX_DUMP_SKB 99
#elif DUMP_PACKETS >= 1
- for (i = 0; i < skb->len && i < 30; i++) {
- sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
- }
+#define MAX_DUMP_SKB 30
+#endif
+#if DUMP_PACKETS >= 1
+ printk(KERN_DEBUG "%s: send datalen:%ld lecid:%4.4x\n",
+ dev->name, skb->len, priv->lecid);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, min(skb->len, MAX_DUMP_SKB), true);
#endif /* DUMP_PACKETS >= 1 */
- if (i == skb->len)
- printk("%s\n", buf);
- else
- printk("%s...\n", buf);
-#endif /* DUMP_PACKETS > 0 */
/* Minimum ethernet-frame size */
#ifdef CONFIG_TR
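
Replacing the hand-rolled sprintf() loop with print_hex_dump() removes the 300-byte stack buffer and the 30/99-byte truncation bookkeeping; the helper takes the log level, a prefix string, a prefix type, row and group sizes, the buffer, the length and an ASCII-column flag. A minimal sketch, assuming the standard DUMP_PREFIX_OFFSET constant from <linux/printk.h>:

	/* Dump at most the first 64 bytes of an skb, 16 bytes per row,
	 * one byte per group, offsets as the row prefix, ASCII column on.
	 */
	print_hex_dump(KERN_DEBUG, "lec rx: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, min_t(unsigned int, skb->len, 64), true);
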
@@ -367,31 +360,28 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
#endif
entry = NULL;
vcc = lec_arp_resolve(priv, dst, is_rdesc, &entry);
- pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n", dev->name,
- vcc, vcc ? vcc->flags : 0, entry);
+ pr_debug("%s:vcc:%p vcc_flags:%lx, entry:%p\n",
+ dev->name, vcc, vcc ? vcc->flags : 0, entry);
if (!vcc || !test_bit(ATM_VF_READY, &vcc->flags)) {
if (entry && (entry->tx_wait.qlen < LEC_UNRES_QUE_LEN)) {
- pr_debug("%s:lec_start_xmit: queuing packet, ",
- dev->name);
- pr_debug("MAC address %pM\n", lec_h->h_dest);
+ pr_debug("%s:queuing packet, MAC address %pM\n",
+ dev->name, lec_h->h_dest);
skb_queue_tail(&entry->tx_wait, skb);
} else {
- pr_debug
- ("%s:lec_start_xmit: tx queue full or no arp entry, dropping, ",
- dev->name);
- pr_debug("MAC address %pM\n", lec_h->h_dest);
+ pr_debug("%s:tx queue full or no arp entry, dropping, MAC address: %pM\n",
+ dev->name, lec_h->h_dest);
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
}
goto out;
}
#if DUMP_PACKETS > 0
- printk("%s:sending to vpi:%d vci:%d\n", dev->name, vcc->vpi, vcc->vci);
+ printk(KERN_DEBUG "%s:sending to vpi:%d vci:%d\n",
+ dev->name, vcc->vpi, vcc->vci);
#endif /* DUMP_PACKETS > 0 */
while (entry && (skb2 = skb_dequeue(&entry->tx_wait))) {
- pr_debug("lec.c: emptying tx queue, ");
- pr_debug("MAC address %pM\n", lec_h->h_dest);
+ pr_debug("emptying tx queue, MAC address %pM\n", lec_h->h_dest);
lec_send(vcc, skb2);
}
@@ -444,14 +434,12 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
pr_debug("%s: msg from zeppelin:%d\n", dev->name, mesg->type);
switch (mesg->type) {
case l_set_mac_addr:
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 6; i++)
dev->dev_addr[i] = mesg->content.normal.mac_addr[i];
- }
break;
case l_del_mac_addr:
- for (i = 0; i < 6; i++) {
+ for (i = 0; i < 6; i++)
dev->dev_addr[i] = 0;
- }
break;
case l_addr_delete:
lec_addr_delete(priv, mesg->content.normal.atm_addr,
@@ -477,10 +465,10 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
mesg->content.normal.atm_addr,
mesg->content.normal.flag,
mesg->content.normal.targetless_le_arp);
- pr_debug("lec: in l_arp_update\n");
+ pr_debug("in l_arp_update\n");
if (mesg->sizeoftlvs != 0) { /* LANE2 3.1.5 */
- pr_debug("lec: LANE2 3.1.5, got tlvs, size %d\n",
- mesg->sizeoftlvs);
+ pr_debug("LANE2 3.1.5, got tlvs, size %d\n",
+ mesg->sizeoftlvs);
lane2_associate_ind(dev, mesg->content.normal.mac_addr,
tmp, mesg->sizeoftlvs);
}
@@ -499,13 +487,14 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
priv->flush_timeout = (mesg->content.config.flush_timeout * HZ);
priv->path_switching_delay =
(mesg->content.config.path_switching_delay * HZ);
- priv->lane_version = mesg->content.config.lane_version; /* LANE2 */
+ priv->lane_version = mesg->content.config.lane_version;
+ /* LANE2 */
priv->lane2_ops = NULL;
if (priv->lane_version > 1)
priv->lane2_ops = &lane2_ops;
if (dev_set_mtu(dev, mesg->content.config.mtu))
- printk("%s: change_mtu to %d failed\n", dev->name,
- mesg->content.config.mtu);
+ pr_info("%s: change_mtu to %d failed\n",
+ dev->name, mesg->content.config.mtu);
priv->is_proxy = mesg->content.config.is_proxy;
break;
case l_flush_tran_id:
@@ -518,40 +507,35 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
break;
case l_should_bridge:
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
- {
- pr_debug("%s: bridge zeppelin asks about %pM\n",
- dev->name, mesg->content.proxy.mac_addr);
+ {
+ pr_debug("%s: bridge zeppelin asks about %pM\n",
+ dev->name, mesg->content.proxy.mac_addr);
- if (br_fdb_test_addr_hook == NULL)
- break;
+ if (br_fdb_test_addr_hook == NULL)
+ break;
- if (br_fdb_test_addr_hook(dev,
- mesg->content.proxy.mac_addr)) {
- /* hit from bridge table, send LE_ARP_RESPONSE */
- struct sk_buff *skb2;
- struct sock *sk;
-
- pr_debug
- ("%s: entry found, responding to zeppelin\n",
- dev->name);
- skb2 =
- alloc_skb(sizeof(struct atmlec_msg),
- GFP_ATOMIC);
- if (skb2 == NULL)
- break;
- skb2->len = sizeof(struct atmlec_msg);
- skb_copy_to_linear_data(skb2, mesg,
- sizeof(*mesg));
- atm_force_charge(priv->lecd, skb2->truesize);
- sk = sk_atm(priv->lecd);
- skb_queue_tail(&sk->sk_receive_queue, skb2);
- sk->sk_data_ready(sk, skb2->len);
- }
+ if (br_fdb_test_addr_hook(dev, mesg->content.proxy.mac_addr)) {
+ /* hit from bridge table, send LE_ARP_RESPONSE */
+ struct sk_buff *skb2;
+ struct sock *sk;
+
+ pr_debug("%s: entry found, responding to zeppelin\n",
+ dev->name);
+ skb2 = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
+ if (skb2 == NULL)
+ break;
+ skb2->len = sizeof(struct atmlec_msg);
+ skb_copy_to_linear_data(skb2, mesg, sizeof(*mesg));
+ atm_force_charge(priv->lecd, skb2->truesize);
+ sk = sk_atm(priv->lecd);
+ skb_queue_tail(&sk->sk_receive_queue, skb2);
+ sk->sk_data_ready(sk, skb2->len);
}
+ }
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
break;
default:
- printk("%s: Unknown message type %d\n", dev->name, mesg->type);
+ pr_info("%s: Unknown message type %d\n", dev->name, mesg->type);
dev_kfree_skb(skb);
return -EINVAL;
}
@@ -572,14 +556,13 @@ static void lec_atm_close(struct atm_vcc *vcc)
lec_arp_destroy(priv);
if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
- printk("%s lec_atm_close: closing with messages pending\n",
- dev->name);
- while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue)) != NULL) {
+ pr_info("%s closing with messages pending\n", dev->name);
+ while ((skb = skb_dequeue(&sk_atm(vcc)->sk_receive_queue))) {
atm_return(vcc, skb->truesize);
dev_kfree_skb(skb);
}
- printk("%s: Shut down!\n", dev->name);
+ pr_info("%s: Shut down!\n", dev->name);
module_put(THIS_MODULE);
}
@@ -608,9 +591,8 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
struct sk_buff *skb;
struct atmlec_msg *mesg;
- if (!priv || !priv->lecd) {
+ if (!priv || !priv->lecd)
return -1;
- }
skb = alloc_skb(sizeof(struct atmlec_msg), GFP_ATOMIC);
if (!skb)
return -1;
@@ -633,7 +615,7 @@ send_to_lecd(struct lec_priv *priv, atmlec_msg_type type,
sk->sk_data_ready(sk, skb->len);
if (data != NULL) {
- pr_debug("lec: about to send %d bytes of data\n", data->len);
+ pr_debug("about to send %d bytes of data\n", data->len);
atm_force_charge(priv->lecd, data->truesize);
skb_queue_tail(&sk->sk_receive_queue, data);
sk->sk_data_ready(sk, skb->len);
@@ -691,36 +673,28 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
struct net_device *dev = (struct net_device *)vcc->proto_data;
struct lec_priv *priv = netdev_priv(dev);
-#if DUMP_PACKETS >0
- int i = 0;
- char buf[300];
-
- printk("%s: lec_push vcc vpi:%d vci:%d\n", dev->name,
- vcc->vpi, vcc->vci);
+#if DUMP_PACKETS > 0
+ printk(KERN_DEBUG "%s: vcc vpi:%d vci:%d\n",
+ dev->name, vcc->vpi, vcc->vci);
#endif
if (!skb) {
pr_debug("%s: null skb\n", dev->name);
lec_vcc_close(priv, vcc);
return;
}
-#if DUMP_PACKETS > 0
- printk("%s: rcv datalen:%ld lecid:%4.4x\n", dev->name,
- skb->len, priv->lecid);
#if DUMP_PACKETS >= 2
- for (i = 0; i < skb->len && i < 99; i++) {
- sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
- }
+#define MAX_SKB_DUMP 99
#elif DUMP_PACKETS >= 1
- for (i = 0; i < skb->len && i < 30; i++) {
- sprintf(buf + i * 3, "%2.2x ", 0xff & skb->data[i]);
- }
-#endif /* DUMP_PACKETS >= 1 */
- if (i == skb->len)
- printk("%s\n", buf);
- else
- printk("%s...\n", buf);
+#define MAX_SKB_DUMP 30
+#endif
+#if DUMP_PACKETS > 0
+ printk(KERN_DEBUG "%s: rcv datalen:%ld lecid:%4.4x\n",
+ dev->name, skb->len, priv->lecid);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, min(MAX_SKB_DUMP, skb->len), true);
#endif /* DUMP_PACKETS > 0 */
- if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) { /* Control frame, to daemon */
+ if (memcmp(skb->data, lec_ctrl_magic, 4) == 0) {
+ /* Control frame, to daemon */
struct sock *sk = sk_atm(vcc);
pr_debug("%s: To daemon\n", dev->name);
@@ -778,9 +752,8 @@ static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
- if (!hlist_empty(&priv->lec_arp_empty_ones)) {
+ if (!hlist_empty(&priv->lec_arp_empty_ones))
lec_arp_check_empties(priv, vcc, skb);
- }
skb_pull(skb, 2); /* skip lec_id */
#ifdef CONFIG_TR
if (priv->is_trdev)
@@ -801,7 +774,7 @@ static void lec_pop(struct atm_vcc *vcc, struct sk_buff *skb)
struct net_device *dev = skb->dev;
if (vpriv == NULL) {
- printk("lec_pop(): vpriv = NULL!?!?!?\n");
+ pr_info("vpriv = NULL!?!?!?\n");
return;
}
@@ -822,15 +795,13 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
/* Lecd must be up in this case */
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
- if (bytes_left != 0) {
- printk
- ("lec: lec_vcc_attach, copy from user failed for %d bytes\n",
- bytes_left);
- }
+ if (bytes_left != 0)
+ pr_info("copy from user failed for %d bytes\n", bytes_left);
if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
!dev_lec[ioc_data.dev_num])
return -EINVAL;
- if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL)))
+ vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+ if (!vpriv)
return -ENOMEM;
vpriv->xoff = 0;
vpriv->old_pop = vcc->pop;
@@ -921,9 +892,8 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
priv->flush_timeout = (4 * HZ);
priv->path_switching_delay = (6 * HZ);
- if (dev_lec[i]->flags & IFF_UP) {
+ if (dev_lec[i]->flags & IFF_UP)
netif_start_queue(dev_lec[i]);
- }
__module_get(THIS_MODULE);
return i;
}
@@ -1125,7 +1095,9 @@ static int lec_seq_show(struct seq_file *seq, void *v)
else {
struct lec_state *state = seq->private;
struct net_device *dev = state->dev;
- struct lec_arp_table *entry = hlist_entry(state->node, struct lec_arp_table, next);
+ struct lec_arp_table *entry = hlist_entry(state->node,
+ struct lec_arp_table,
+ next);
seq_printf(seq, "%s ", dev->name);
lec_info(seq, entry);
@@ -1199,13 +1171,13 @@ static int __init lane_module_init(void)
p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
if (!p) {
- printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+ pr_err("Unable to initialize /proc/net/atm/lec\n");
return -ENOMEM;
}
#endif
register_atm_ioctl(&lane_ioctl_ops);
- printk("lec.c: " __DATE__ " " __TIME__ " initialized\n");
+ pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
return 0;
}
@@ -1294,13 +1266,13 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
struct lec_priv *priv = netdev_priv(dev);
if (compare_ether_addr(lan_dst, dev->dev_addr))
- return (0); /* not our mac address */
+ return 0; /* not our mac address */
kfree(priv->tlvs); /* NULL if there was no previous association */
priv->tlvs = kmemdup(tlvs, sizeoftlvs, GFP_KERNEL);
if (priv->tlvs == NULL)
- return (0);
+ return 0;
priv->sizeoftlvs = sizeoftlvs;
skb = alloc_skb(sizeoftlvs, GFP_ATOMIC);
@@ -1310,12 +1282,12 @@ static int lane2_associate_req(struct net_device *dev, const u8 *lan_dst,
skb_copy_to_linear_data(skb, tlvs, sizeoftlvs);
retval = send_to_lecd(priv, l_associate_req, NULL, NULL, skb);
if (retval != 0)
- printk("lec.c: lane2_associate_req() failed\n");
+ pr_info("lec.c: lane2_associate_req() failed\n");
/*
* If the previous association has changed we must
* somehow notify other LANE entities about the change
*/
- return (1);
+ return 1;
}
/*
@@ -1348,12 +1320,12 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
entry->sizeoftlvs = sizeoftlvs;
#endif
#if 0
- printk("lec.c: lane2_associate_ind()\n");
- printk("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
+ pr_info("\n");
+ pr_info("dump of tlvs, sizeoftlvs=%d\n", sizeoftlvs);
while (i < sizeoftlvs)
- printk("%02x ", tlvs[i++]);
+ pr_cont("%02x ", tlvs[i++]);
- printk("\n");
+ pr_cont("\n");
#endif
/* tell MPOA about the TLVs we saw */
@@ -1373,15 +1345,15 @@ static void lane2_associate_ind(struct net_device *dev, const u8 *mac_addr,
#include <linux/types.h>
#include <linux/timer.h>
-#include <asm/param.h>
+#include <linux/param.h>
#include <asm/atomic.h>
#include <linux/inetdevice.h>
#include <net/route.h>
#if 0
-#define pr_debug(format,args...)
+#define pr_debug(format, args...)
/*
-#define pr_debug printk
+ #define pr_debug printk
*/
#endif
#define DEBUG_ARP_TABLE 0
@@ -1395,7 +1367,7 @@ static void lec_arp_expire_arp(unsigned long data);
* Arp table funcs
*/
-#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE -1))
+#define HASH(ch) (ch & (LEC_ARP_TABLE_SIZE - 1))
/*
* Initialization of arp-cache
@@ -1404,9 +1376,8 @@ static void lec_arp_init(struct lec_priv *priv)
{
unsigned short i;
- for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
+ for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
- }
INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
INIT_HLIST_HEAD(&priv->lec_no_forward);
INIT_HLIST_HEAD(&priv->mcast_fwds);
@@ -1450,10 +1421,7 @@ lec_arp_add(struct lec_priv *priv, struct lec_arp_table *entry)
tmp = &priv->lec_arp_tables[HASH(entry->mac_addr[ETH_ALEN - 1])];
hlist_add_head(&entry->next, tmp);
- pr_debug("LEC_ARP: Added entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
- 0xff & entry->mac_addr[0], 0xff & entry->mac_addr[1],
- 0xff & entry->mac_addr[2], 0xff & entry->mac_addr[3],
- 0xff & entry->mac_addr[4], 0xff & entry->mac_addr[5]);
+ pr_debug("Added entry:%pM\n", entry->mac_addr);
}
/*
@@ -1466,20 +1434,23 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
struct lec_arp_table *entry;
int i, remove_vcc = 1;
- if (!to_remove) {
+ if (!to_remove)
return -1;
- }
hlist_del(&to_remove->next);
del_timer(&to_remove->timer);
- /* If this is the only MAC connected to this VCC, also tear down the VCC */
+ /*
+ * If this is the only MAC connected to this VCC,
+ * also tear down the VCC
+ */
if (to_remove->status >= ESI_FLUSH_PENDING) {
/*
* ESI_FLUSH_PENDING, ESI_FORWARD_DIRECT
*/
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry(entry, node,
+ &priv->lec_arp_tables[i], next) {
if (memcmp(to_remove->atm_addr,
entry->atm_addr, ATM_ESA_LEN) == 0) {
remove_vcc = 0;
@@ -1492,10 +1463,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
}
skb_queue_purge(&to_remove->tx_wait); /* FIXME: good place for this? */
- pr_debug("LEC_ARP: Removed entry:%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
- 0xff & to_remove->mac_addr[0], 0xff & to_remove->mac_addr[1],
- 0xff & to_remove->mac_addr[2], 0xff & to_remove->mac_addr[3],
- 0xff & to_remove->mac_addr[4], 0xff & to_remove->mac_addr[5]);
+ pr_debug("Removed entry:%pM\n", to_remove->mac_addr);
return 0;
}
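
Several hunks in this file collapse runs of six "%2.2x" conversions into the %pM printk extension, which takes a pointer to a 6-byte MAC address and prints it in the usual colon-separated form. A tiny before/after sketch (the address is made up):

	u8 mac[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };

	/* old style: six arguments per address, easy to get out of step */
	pr_debug("mac %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* new style: one pointer, prints "00:16:3e:00:00:01" */
	pr_debug("mac %pM\n", mac);
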
@@ -1513,9 +1481,8 @@ static const char *get_status_string(unsigned char st)
return "ESI_FLUSH_PENDING";
case ESI_FORWARD_DIRECT:
return "ESI_FORWARD_DIRECT";
- default:
- return "<UNKNOWN>";
}
+ return "<UNKNOWN>";
}
static void dump_arp_table(struct lec_priv *priv)
@@ -1525,18 +1492,15 @@ static void dump_arp_table(struct lec_priv *priv)
char buf[256];
int i, j, offset;
- printk("Dump %p:\n", priv);
+ pr_info("Dump %p:\n", priv);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(rulla, node, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry(rulla, node,
+ &priv->lec_arp_tables[i], next) {
offset = 0;
offset += sprintf(buf, "%d: %p\n", i, rulla);
- offset += sprintf(buf + offset, "Mac:");
- for (j = 0; j < ETH_ALEN; j++) {
- offset += sprintf(buf + offset,
- "%2.2x ",
- rulla->mac_addr[j] & 0xff);
- }
- offset += sprintf(buf + offset, "Atm:");
+ offset += sprintf(buf + offset, "Mac: %pM",
+ rulla->mac_addr);
+ offset += sprintf(buf + offset, " Atm:");
for (j = 0; j < ATM_ESA_LEN; j++) {
offset += sprintf(buf + offset,
"%2.2x ",
@@ -1556,20 +1520,16 @@ static void dump_arp_table(struct lec_priv *priv)
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
- printk("%s\n", buf);
+ pr_info("%s\n", buf);
}
}
if (!hlist_empty(&priv->lec_no_forward))
- printk("No forward\n");
+ pr_info("No forward\n");
hlist_for_each_entry(rulla, node, &priv->lec_no_forward, next) {
offset = 0;
- offset += sprintf(buf + offset, "Mac:");
- for (j = 0; j < ETH_ALEN; j++) {
- offset += sprintf(buf + offset, "%2.2x ",
- rulla->mac_addr[j] & 0xff);
- }
- offset += sprintf(buf + offset, "Atm:");
+ offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
+ offset += sprintf(buf + offset, " Atm:");
for (j = 0; j < ATM_ESA_LEN; j++) {
offset += sprintf(buf + offset, "%2.2x ",
rulla->atm_addr[j] & 0xff);
@@ -1586,19 +1546,15 @@ static void dump_arp_table(struct lec_priv *priv)
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
- printk("%s\n", buf);
+ pr_info("%s\n", buf);
}
if (!hlist_empty(&priv->lec_arp_empty_ones))
- printk("Empty ones\n");
+ pr_info("Empty ones\n");
hlist_for_each_entry(rulla, node, &priv->lec_arp_empty_ones, next) {
offset = 0;
- offset += sprintf(buf + offset, "Mac:");
- for (j = 0; j < ETH_ALEN; j++) {
- offset += sprintf(buf + offset, "%2.2x ",
- rulla->mac_addr[j] & 0xff);
- }
- offset += sprintf(buf + offset, "Atm:");
+ offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
+ offset += sprintf(buf + offset, " Atm:");
for (j = 0; j < ATM_ESA_LEN; j++) {
offset += sprintf(buf + offset, "%2.2x ",
rulla->atm_addr[j] & 0xff);
@@ -1615,19 +1571,15 @@ static void dump_arp_table(struct lec_priv *priv)
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
- printk("%s", buf);
+ pr_info("%s", buf);
}
if (!hlist_empty(&priv->mcast_fwds))
- printk("Multicast Forward VCCs\n");
+ pr_info("Multicast Forward VCCs\n");
hlist_for_each_entry(rulla, node, &priv->mcast_fwds, next) {
offset = 0;
- offset += sprintf(buf + offset, "Mac:");
- for (j = 0; j < ETH_ALEN; j++) {
- offset += sprintf(buf + offset, "%2.2x ",
- rulla->mac_addr[j] & 0xff);
- }
- offset += sprintf(buf + offset, "Atm:");
+ offset += sprintf(buf + offset, "Mac: %pM", rulla->mac_addr);
+ offset += sprintf(buf + offset, " Atm:");
for (j = 0; j < ATM_ESA_LEN; j++) {
offset += sprintf(buf + offset, "%2.2x ",
rulla->atm_addr[j] & 0xff);
@@ -1644,7 +1596,7 @@ static void dump_arp_table(struct lec_priv *priv)
"Flags:%x, Packets_flooded:%x, Status: %s ",
rulla->flags, rulla->packets_flooded,
get_status_string(rulla->status));
- printk("%s\n", buf);
+ pr_info("%s\n", buf);
}
}
@@ -1670,14 +1622,16 @@ static void lec_arp_destroy(struct lec_priv *priv)
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_tables[i], next) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
}
INIT_HLIST_HEAD(&priv->lec_arp_tables[i]);
}
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_empty_ones, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
@@ -1685,7 +1639,8 @@ static void lec_arp_destroy(struct lec_priv *priv)
}
INIT_HLIST_HEAD(&priv->lec_arp_empty_ones);
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_no_forward, next) {
del_timer_sync(&entry->timer);
lec_arp_clear_vccs(entry);
hlist_del(&entry->next);
@@ -1714,15 +1669,12 @@ static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
struct hlist_head *head;
struct lec_arp_table *entry;
- pr_debug("LEC_ARP: lec_arp_find :%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
- mac_addr[0] & 0xff, mac_addr[1] & 0xff, mac_addr[2] & 0xff,
- mac_addr[3] & 0xff, mac_addr[4] & 0xff, mac_addr[5] & 0xff);
+ pr_debug("%pM\n", mac_addr);
head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
hlist_for_each_entry(entry, node, head, next) {
- if (!compare_ether_addr(mac_addr, entry->mac_addr)) {
+ if (!compare_ether_addr(mac_addr, entry->mac_addr))
return entry;
- }
}
return NULL;
}
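
lec_arp_find() above is a plain hash lookup: HASH() masks the last byte of the MAC address to choose a bucket (the mask form assumes LEC_ARP_TABLE_SIZE is a power of two), and compare_ether_addr() returns 0 on a match, hence the negation. In outline, with the same names the file uses:

	head = &priv->lec_arp_tables[HASH(mac_addr[ETH_ALEN - 1])];
	hlist_for_each_entry(entry, node, head, next)
		if (!compare_ether_addr(mac_addr, entry->mac_addr))
			return entry;	/* 0 from compare_ether_addr == equal */
	return NULL;
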
@@ -1734,7 +1686,7 @@ static struct lec_arp_table *make_entry(struct lec_priv *priv,
to_return = kzalloc(sizeof(struct lec_arp_table), GFP_ATOMIC);
if (!to_return) {
- printk("LEC: Arp entry kmalloc failed\n");
+ pr_info("LEC: Arp entry kmalloc failed\n");
return NULL;
}
memcpy(to_return->mac_addr, mac_addr, ETH_ALEN);
@@ -1755,7 +1707,7 @@ static void lec_arp_expire_arp(unsigned long data)
entry = (struct lec_arp_table *)data;
- pr_debug("lec_arp_expire_arp\n");
+ pr_debug("\n");
if (entry->status == ESI_ARP_PENDING) {
if (entry->no_tries <= entry->priv->max_retry_count) {
if (entry->is_rdesc)
@@ -1779,10 +1731,10 @@ static void lec_arp_expire_vcc(unsigned long data)
del_timer(&to_remove->timer);
- pr_debug("LEC_ARP %p %p: lec_arp_expire_vcc vpi:%d vci:%d\n",
- to_remove, priv,
- to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
- to_remove->vcc ? to_remove->recv_vcc->vci : 0);
+ pr_debug("%p %p: vpi:%d vci:%d\n",
+ to_remove, priv,
+ to_remove->vcc ? to_remove->recv_vcc->vpi : 0,
+ to_remove->vcc ? to_remove->recv_vcc->vci : 0);
spin_lock_irqsave(&priv->lec_arp_lock, flags);
hlist_del(&to_remove->next);
@@ -1792,6 +1744,50 @@ static void lec_arp_expire_vcc(unsigned long data)
lec_arp_put(to_remove);
}
+static bool __lec_arp_check_expire(struct lec_arp_table *entry,
+ unsigned long now,
+ struct lec_priv *priv)
+{
+ unsigned long time_to_check;
+
+ if ((entry->flags) & LEC_REMOTE_FLAG && priv->topology_change)
+ time_to_check = priv->forward_delay_time;
+ else
+ time_to_check = priv->aging_time;
+
+ pr_debug("About to expire: %lx - %lx > %lx\n",
+ now, entry->last_used, time_to_check);
+ if (time_after(now, entry->last_used + time_to_check) &&
+ !(entry->flags & LEC_PERMANENT_FLAG) &&
+ !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
+ /* Remove entry */
+ pr_debug("Entry timed out\n");
+ lec_arp_remove(priv, entry);
+ lec_arp_put(entry);
+ } else {
+ /* Something else */
+ if ((entry->status == ESI_VC_PENDING ||
+ entry->status == ESI_ARP_PENDING) &&
+ time_after_eq(now, entry->timestamp +
+ priv->max_unknown_frame_time)) {
+ entry->timestamp = jiffies;
+ entry->packets_flooded = 0;
+ if (entry->status == ESI_VC_PENDING)
+ send_to_lecd(priv, l_svc_setup,
+ entry->mac_addr,
+ entry->atm_addr,
+ NULL);
+ }
+ if (entry->status == ESI_FLUSH_PENDING &&
+ time_after_eq(now, entry->timestamp +
+ priv->path_switching_delay)) {
+ lec_arp_hold(entry);
+ return true;
+ }
+ }
+
+ return false;
+}
/*
* Expire entries.
* 1. Re-set timer
@@ -1816,62 +1812,28 @@ static void lec_arp_check_expire(struct work_struct *work)
struct hlist_node *node, *next;
struct lec_arp_table *entry;
unsigned long now;
- unsigned long time_to_check;
int i;
- pr_debug("lec_arp_check_expire %p\n", priv);
+ pr_debug("%p\n", priv);
now = jiffies;
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
- if ((entry->flags) & LEC_REMOTE_FLAG &&
- priv->topology_change)
- time_to_check = priv->forward_delay_time;
- else
- time_to_check = priv->aging_time;
-
- pr_debug("About to expire: %lx - %lx > %lx\n",
- now, entry->last_used, time_to_check);
- if (time_after(now, entry->last_used + time_to_check)
- && !(entry->flags & LEC_PERMANENT_FLAG)
- && !(entry->mac_addr[0] & 0x01)) { /* LANE2: 7.1.20 */
- /* Remove entry */
- pr_debug("LEC:Entry timed out\n");
- lec_arp_remove(priv, entry);
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_tables[i], next) {
+ if (__lec_arp_check_expire(entry, now, priv)) {
+ struct sk_buff *skb;
+ struct atm_vcc *vcc = entry->vcc;
+
+ spin_unlock_irqrestore(&priv->lec_arp_lock,
+ flags);
+ while ((skb = skb_dequeue(&entry->tx_wait)))
+ lec_send(vcc, skb);
+ entry->last_used = jiffies;
+ entry->status = ESI_FORWARD_DIRECT;
lec_arp_put(entry);
- } else {
- /* Something else */
- if ((entry->status == ESI_VC_PENDING ||
- entry->status == ESI_ARP_PENDING)
- && time_after_eq(now,
- entry->timestamp +
- priv->
- max_unknown_frame_time)) {
- entry->timestamp = jiffies;
- entry->packets_flooded = 0;
- if (entry->status == ESI_VC_PENDING)
- send_to_lecd(priv, l_svc_setup,
- entry->mac_addr,
- entry->atm_addr,
- NULL);
- }
- if (entry->status == ESI_FLUSH_PENDING
- &&
- time_after_eq(now, entry->timestamp +
- priv->path_switching_delay)) {
- struct sk_buff *skb;
- struct atm_vcc *vcc = entry->vcc;
-
- lec_arp_hold(entry);
- spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
- while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
- lec_send(vcc, skb);
- entry->last_used = jiffies;
- entry->status = ESI_FORWARD_DIRECT;
- lec_arp_put(entry);
- goto restart;
- }
+
+ goto restart;
}
}
}
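
The rewrite above moves the per-entry ageing decisions into __lec_arp_check_expire() and leaves the caller with just the awkward case: when a flush-pending entry becomes ready, its queued frames have to be transmitted without the arp lock held, so the helper grabs a reference and returns true, the caller drops the spinlock, drains entry->tx_wait, and then restarts the whole table walk because the lists may have changed while the lock was released. Reduced to its skeleton (a sketch of the control flow, not the complete function):

restart:
	spin_lock_irqsave(&priv->lec_arp_lock, flags);
	for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
		hlist_for_each_entry_safe(entry, node, next,
					  &priv->lec_arp_tables[i], next) {
			if (!__lec_arp_check_expire(entry, now, priv))
				continue;	/* aged out or nothing to do */
			/* helper returned true holding a reference */
			spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
			while ((skb = skb_dequeue(&entry->tx_wait)))
				lec_send(entry->vcc, skb);
			entry->status = ESI_FORWARD_DIRECT;
			lec_arp_put(entry);
			goto restart;	/* table may have changed unlocked */
		}
	}
	spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
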
@@ -1885,7 +1847,8 @@ restart:
*
*/
static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
- const unsigned char *mac_to_find, int is_rdesc,
+ const unsigned char *mac_to_find,
+ int is_rdesc,
struct lec_arp_table **ret_entry)
{
unsigned long flags;
@@ -1921,9 +1884,8 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
* If the LE_ARP cache entry is still pending, reset count to 0
* so another LE_ARP request can be made for this frame.
*/
- if (entry->status == ESI_ARP_PENDING) {
+ if (entry->status == ESI_ARP_PENDING)
entry->no_tries = 0;
- }
/*
* Data direct VC not yet set up, check to see if the unknown
* frame count is greater than the limit. If the limit has
@@ -1934,7 +1896,7 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
entry->packets_flooded <
priv->maximum_unknown_frame_count) {
entry->packets_flooded++;
- pr_debug("LEC_ARP: Flooding..\n");
+ pr_debug("Flooding..\n");
found = priv->mcast_vcc;
goto out;
}
@@ -1945,13 +1907,13 @@ static struct atm_vcc *lec_arp_resolve(struct lec_priv *priv,
*/
lec_arp_hold(entry);
*ret_entry = entry;
- pr_debug("lec: entry->status %d entry->vcc %p\n", entry->status,
- entry->vcc);
+ pr_debug("entry->status %d entry->vcc %p\n", entry->status,
+ entry->vcc);
found = NULL;
} else {
/* No matching entry was found */
entry = make_entry(priv, mac_to_find);
- pr_debug("LEC_ARP: Making entry\n");
+ pr_debug("Making entry\n");
if (!entry) {
found = priv->mcast_vcc;
goto out;
@@ -1988,13 +1950,14 @@ lec_addr_delete(struct lec_priv *priv, const unsigned char *atm_addr,
struct lec_arp_table *entry;
int i;
- pr_debug("lec_addr_delete\n");
+ pr_debug("\n");
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
- if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)
- && (permanent ||
- !(entry->flags & LEC_PERMANENT_FLAG))) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_tables[i], next) {
+ if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN) &&
+ (permanent ||
+ !(entry->flags & LEC_PERMANENT_FLAG))) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
}
@@ -2019,10 +1982,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
struct lec_arp_table *entry, *tmp;
int i;
- pr_debug("lec:%s", (targetless_le_arp) ? "targetless " : " ");
- pr_debug("lec_arp_update mac:%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
- mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
- mac_addr[4], mac_addr[5]);
+ pr_debug("%smac:%pM\n",
+ (targetless_le_arp) ? "targetless " : "", mac_addr);
spin_lock_irqsave(&priv->lec_arp_lock, flags);
entry = lec_arp_find(priv, mac_addr);
@@ -2032,7 +1993,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
* we have no entry in the cache. 7.1.30
*/
if (!hlist_empty(&priv->lec_arp_empty_ones)) {
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_empty_ones, next) {
if (memcmp(entry->atm_addr, atm_addr, ATM_ESA_LEN) == 0) {
hlist_del(&entry->next);
del_timer(&entry->timer);
@@ -2076,7 +2038,8 @@ lec_arp_update(struct lec_priv *priv, const unsigned char *mac_addr,
memcpy(entry->atm_addr, atm_addr, ATM_ESA_LEN);
del_timer(&entry->timer);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(tmp, node, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry(tmp, node,
+ &priv->lec_arp_tables[i], next) {
if (entry != tmp &&
!memcmp(tmp->atm_addr, atm_addr, ATM_ESA_LEN)) {
/* Vcc to this host exists */
@@ -2121,14 +2084,13 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
int i, found_entry = 0;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
+ /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
if (ioc_data->receive == 2) {
- /* Vcc for Multicast Forward. No timer, LANEv2 7.1.20 and 2.3.5.3 */
-
pr_debug("LEC_ARP: Attaching mcast forward\n");
#if 0
entry = lec_arp_find(priv, bus_mac);
if (!entry) {
- printk("LEC_ARP: Multicast entry not found!\n");
+ pr_info("LEC_ARP: Multicast entry not found!\n");
goto out;
}
memcpy(entry->atm_addr, ioc_data->atm_addr, ATM_ESA_LEN);
@@ -2149,19 +2111,17 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
* Vcc which we don't want to make default vcc,
* attach it anyway.
*/
- pr_debug
- ("LEC_ARP:Attaching data direct, not default: "
- "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
- ioc_data->atm_addr[0], ioc_data->atm_addr[1],
- ioc_data->atm_addr[2], ioc_data->atm_addr[3],
- ioc_data->atm_addr[4], ioc_data->atm_addr[5],
- ioc_data->atm_addr[6], ioc_data->atm_addr[7],
- ioc_data->atm_addr[8], ioc_data->atm_addr[9],
- ioc_data->atm_addr[10], ioc_data->atm_addr[11],
- ioc_data->atm_addr[12], ioc_data->atm_addr[13],
- ioc_data->atm_addr[14], ioc_data->atm_addr[15],
- ioc_data->atm_addr[16], ioc_data->atm_addr[17],
- ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
+ pr_debug("LEC_ARP:Attaching data direct, not default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
+ ioc_data->atm_addr[0], ioc_data->atm_addr[1],
+ ioc_data->atm_addr[2], ioc_data->atm_addr[3],
+ ioc_data->atm_addr[4], ioc_data->atm_addr[5],
+ ioc_data->atm_addr[6], ioc_data->atm_addr[7],
+ ioc_data->atm_addr[8], ioc_data->atm_addr[9],
+ ioc_data->atm_addr[10], ioc_data->atm_addr[11],
+ ioc_data->atm_addr[12], ioc_data->atm_addr[13],
+ ioc_data->atm_addr[14], ioc_data->atm_addr[15],
+ ioc_data->atm_addr[16], ioc_data->atm_addr[17],
+ ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
entry = make_entry(priv, bus_mac);
if (entry == NULL)
goto out;
@@ -2177,29 +2137,28 @@ lec_vcc_added(struct lec_priv *priv, const struct atmlec_ioc *ioc_data,
dump_arp_table(priv);
goto out;
}
- pr_debug
- ("LEC_ARP:Attaching data direct, default: "
- "%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
- ioc_data->atm_addr[0], ioc_data->atm_addr[1],
- ioc_data->atm_addr[2], ioc_data->atm_addr[3],
- ioc_data->atm_addr[4], ioc_data->atm_addr[5],
- ioc_data->atm_addr[6], ioc_data->atm_addr[7],
- ioc_data->atm_addr[8], ioc_data->atm_addr[9],
- ioc_data->atm_addr[10], ioc_data->atm_addr[11],
- ioc_data->atm_addr[12], ioc_data->atm_addr[13],
- ioc_data->atm_addr[14], ioc_data->atm_addr[15],
- ioc_data->atm_addr[16], ioc_data->atm_addr[17],
- ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
+ pr_debug("LEC_ARP:Attaching data direct, default: %2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x\n",
+ ioc_data->atm_addr[0], ioc_data->atm_addr[1],
+ ioc_data->atm_addr[2], ioc_data->atm_addr[3],
+ ioc_data->atm_addr[4], ioc_data->atm_addr[5],
+ ioc_data->atm_addr[6], ioc_data->atm_addr[7],
+ ioc_data->atm_addr[8], ioc_data->atm_addr[9],
+ ioc_data->atm_addr[10], ioc_data->atm_addr[11],
+ ioc_data->atm_addr[12], ioc_data->atm_addr[13],
+ ioc_data->atm_addr[14], ioc_data->atm_addr[15],
+ ioc_data->atm_addr[16], ioc_data->atm_addr[17],
+ ioc_data->atm_addr[18], ioc_data->atm_addr[19]);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry(entry, node,
+ &priv->lec_arp_tables[i], next) {
if (memcmp
(ioc_data->atm_addr, entry->atm_addr,
ATM_ESA_LEN) == 0) {
pr_debug("LEC_ARP: Attaching data direct\n");
pr_debug("Currently -> Vcc: %d, Rvcc:%d\n",
- entry->vcc ? entry->vcc->vci : 0,
- entry->recv_vcc ? entry->recv_vcc->
- vci : 0);
+ entry->vcc ? entry->vcc->vci : 0,
+ entry->recv_vcc ? entry->recv_vcc->
+ vci : 0);
found_entry = 1;
del_timer(&entry->timer);
entry->vcc = vcc;
@@ -2271,19 +2230,21 @@ static void lec_flush_complete(struct lec_priv *priv, unsigned long tran_id)
struct lec_arp_table *entry;
int i;
- pr_debug("LEC:lec_flush_complete %lx\n", tran_id);
+ pr_debug("%lx\n", tran_id);
restart:
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
- if (entry->flush_tran_id == tran_id
- && entry->status == ESI_FLUSH_PENDING) {
+ hlist_for_each_entry(entry, node,
+ &priv->lec_arp_tables[i], next) {
+ if (entry->flush_tran_id == tran_id &&
+ entry->status == ESI_FLUSH_PENDING) {
struct sk_buff *skb;
struct atm_vcc *vcc = entry->vcc;
lec_arp_hold(entry);
- spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
- while ((skb = skb_dequeue(&entry->tx_wait)) != NULL)
+ spin_unlock_irqrestore(&priv->lec_arp_lock,
+ flags);
+ while ((skb = skb_dequeue(&entry->tx_wait)))
lec_send(vcc, skb);
entry->last_used = jiffies;
entry->status = ESI_FORWARD_DIRECT;
@@ -2308,11 +2269,12 @@ lec_set_flush_tran_id(struct lec_priv *priv,
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++)
- hlist_for_each_entry(entry, node, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry(entry, node,
+ &priv->lec_arp_tables[i], next) {
if (!memcmp(atm_addr, entry->atm_addr, ATM_ESA_LEN)) {
entry->flush_tran_id = tran_id;
pr_debug("Set flush transaction id to %lx for %p\n",
- tran_id, entry);
+ tran_id, entry);
}
}
spin_unlock_irqrestore(&priv->lec_arp_lock, flags);
@@ -2328,7 +2290,8 @@ static int lec_mcast_make(struct lec_priv *priv, struct atm_vcc *vcc)
struct lec_vcc_priv *vpriv;
int err = 0;
- if (!(vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL)))
+ vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
+ if (!vpriv)
return -ENOMEM;
vpriv->xoff = 0;
vpriv->old_pop = vcc->pop;
@@ -2368,18 +2331,19 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
spin_lock_irqsave(&priv->lec_arp_lock, flags);
for (i = 0; i < LEC_ARP_TABLE_SIZE; i++) {
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_tables[i], next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_tables[i], next) {
if (vcc == entry->vcc) {
lec_arp_remove(priv, entry);
lec_arp_put(entry);
- if (priv->mcast_vcc == vcc) {
+ if (priv->mcast_vcc == vcc)
priv->mcast_vcc = NULL;
- }
}
}
}
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_empty_ones, next) {
if (entry->vcc == vcc) {
lec_arp_clear_vccs(entry);
del_timer(&entry->timer);
@@ -2388,7 +2352,8 @@ static void lec_vcc_close(struct lec_priv *priv, struct atm_vcc *vcc)
}
}
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_no_forward, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_no_forward, next) {
if (entry->recv_vcc == vcc) {
lec_arp_clear_vccs(entry);
del_timer(&entry->timer);
@@ -2429,14 +2394,16 @@ lec_arp_check_empties(struct lec_priv *priv,
src = hdr->h_source;
spin_lock_irqsave(&priv->lec_arp_lock, flags);
- hlist_for_each_entry_safe(entry, node, next, &priv->lec_arp_empty_ones, next) {
+ hlist_for_each_entry_safe(entry, node, next,
+ &priv->lec_arp_empty_ones, next) {
if (vcc == entry->vcc) {
del_timer(&entry->timer);
memcpy(entry->mac_addr, src, ETH_ALEN);
entry->status = ESI_FORWARD_DIRECT;
entry->last_used = jiffies;
/* We might have got an entry */
- if ((tmp = lec_arp_find(priv, src))) {
+ tmp = lec_arp_find(priv, src);
+ if (tmp) {
lec_arp_remove(priv, tmp);
lec_arp_put(tmp);
}
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 38a6cb0863f..a6521c8aa88 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -13,8 +15,8 @@
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
+#include <linux/uaccess.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
#include <net/checksum.h> /* for ip_fast_csum() */
#include <net/arp.h>
#include <net/dst.h>
@@ -36,31 +38,47 @@
*/
#if 0
-#define dprintk printk /* debug */
+#define dprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
+#define dprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
-#define dprintk(format,args...)
+#define dprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
+ } while (0)
+#define dprintk_cont(format, args...) \
+ do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
#if 0
-#define ddprintk printk /* more debug */
+#define ddprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args)
+#define ddprintk_cont(format, args...) printk(KERN_CONT format, ##args)
#else
-#define ddprintk(format,args...)
+#define ddprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __func__, ##args);\
+ } while (0)
+#define ddprintk_cont(format, args...) \
+ do { if (0) printk(KERN_CONT format, ##args); } while (0)
#endif
-
-
#define MPOA_TAG_LEN 4
/* mpc_daemon -> kernel */
-static void MPOA_trigger_rcvd (struct k_message *msg, struct mpoa_client *mpc);
+static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc);
static void mps_death(struct k_message *msg, struct mpoa_client *mpc);
-static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action);
-static void MPOA_cache_impos_rcvd(struct k_message *msg, struct mpoa_client *mpc);
-static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc);
-static void set_mps_mac_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc);
+static void clean_up(struct k_message *msg, struct mpoa_client *mpc,
+ int action);
+static void MPOA_cache_impos_rcvd(struct k_message *msg,
+ struct mpoa_client *mpc);
+static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
+ struct mpoa_client *mpc);
+static void set_mps_mac_addr_rcvd(struct k_message *mesg,
+ struct mpoa_client *mpc);
static const uint8_t *copy_macs(struct mpoa_client *mpc,
const uint8_t *router_mac,
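
The macro hunk above replaces the empty "#define dprintk(format,args...)" with a "do { if (0) printk(...); } while (0)" body. The disabled variant still type-checks its format string and arguments and still behaves as a single statement after an if, yet the compiler eliminates it completely. A user-space rendering of the same idea, using GNU-style variadic macros as the kernel does (DEBUG and the demo strings are illustrative):

#include <stdio.h>

#define DEBUG 0

#if DEBUG
#define dprintk(fmt, args...) \
        printf("demo:%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...) \
        do { if (0) printf("demo:%s: " fmt, __func__, ##args); } while (0)
#endif

int main(void)
{
        int packets = 3;

        dprintk("saw %d packets\n", packets);   /* type-checked, but emits no code */
        if (packets)
                dprintk("busy\n");              /* expands safely as one statement */
        else
                printf("idle\n");
        return 0;
}
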
@@ -74,10 +92,11 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev);
+ struct net_device *dev);
+static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
+ unsigned long event, void *dev);
static void mpc_timer_refresh(void);
-static void mpc_cache_check( unsigned long checking_time );
+static void mpc_cache_check(unsigned long checking_time);
static struct llc_snap_hdr llc_snap_mpoa_ctrl = {
0xaa, 0xaa, 0x03,
@@ -167,7 +186,7 @@ struct atm_mpoa_qos *atm_mpoa_add_qos(__be32 dst_ip, struct atm_qos *qos)
entry = kmalloc(sizeof(struct atm_mpoa_qos), GFP_KERNEL);
if (entry == NULL) {
- printk("mpoa: atm_mpoa_add_qos: out of memory\n");
+ pr_info("mpoa: out of memory\n");
return entry;
}
@@ -185,10 +204,9 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
struct atm_mpoa_qos *qos;
qos = qos_head;
- while( qos != NULL ){
- if(qos->ipaddr == dst_ip) {
+ while (qos) {
+ if (qos->ipaddr == dst_ip)
break;
- }
qos = qos->next;
}
@@ -200,10 +218,10 @@ struct atm_mpoa_qos *atm_mpoa_search_qos(__be32 dst_ip)
*/
int atm_mpoa_delete_qos(struct atm_mpoa_qos *entry)
{
-
struct atm_mpoa_qos *curr;
- if (entry == NULL) return 0;
+ if (entry == NULL)
+ return 0;
if (entry == qos_head) {
qos_head = qos_head->next;
kfree(entry);
@@ -234,9 +252,17 @@ void atm_mpoa_disp_qos(struct seq_file *m)
while (qos != NULL) {
seq_printf(m, "%pI4\n %-7d %-7d %-7d %-7d %-7d\n %-7d %-7d %-7d %-7d %-7d\n",
- &qos->ipaddr,
- qos->qos.txtp.max_pcr, qos->qos.txtp.pcr, qos->qos.txtp.min_pcr, qos->qos.txtp.max_cdv, qos->qos.txtp.max_sdu,
- qos->qos.rxtp.max_pcr, qos->qos.rxtp.pcr, qos->qos.rxtp.min_pcr, qos->qos.rxtp.max_cdv, qos->qos.rxtp.max_sdu);
+ &qos->ipaddr,
+ qos->qos.txtp.max_pcr,
+ qos->qos.txtp.pcr,
+ qos->qos.txtp.min_pcr,
+ qos->qos.txtp.max_cdv,
+ qos->qos.txtp.max_sdu,
+ qos->qos.rxtp.max_pcr,
+ qos->qos.rxtp.pcr,
+ qos->qos.rxtp.min_pcr,
+ qos->qos.rxtp.max_cdv,
+ qos->qos.rxtp.max_sdu);
qos = qos->next;
}
}
@@ -256,7 +282,7 @@ static struct mpoa_client *alloc_mpc(void)
{
struct mpoa_client *mpc;
- mpc = kzalloc(sizeof (struct mpoa_client), GFP_KERNEL);
+ mpc = kzalloc(sizeof(struct mpoa_client), GFP_KERNEL);
if (mpc == NULL)
return NULL;
rwlock_init(&mpc->ingress_lock);
@@ -266,7 +292,7 @@ static struct mpoa_client *alloc_mpc(void)
mpc->parameters.mpc_p1 = MPC_P1;
mpc->parameters.mpc_p2 = MPC_P2;
- memset(mpc->parameters.mpc_p3,0,sizeof(mpc->parameters.mpc_p3));
+ memset(mpc->parameters.mpc_p3, 0, sizeof(mpc->parameters.mpc_p3));
mpc->parameters.mpc_p4 = MPC_P4;
mpc->parameters.mpc_p5 = MPC_P5;
mpc->parameters.mpc_p6 = MPC_P6;
@@ -286,9 +312,9 @@ static struct mpoa_client *alloc_mpc(void)
static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
{
- dprintk("mpoa: (%s) start_mpc:\n", mpc->dev->name);
+ dprintk("(%s)\n", mpc->dev->name);
if (!dev->netdev_ops)
- printk("mpoa: (%s) start_mpc not starting\n", dev->name);
+ pr_info("(%s) not starting\n", dev->name);
else {
mpc->old_ops = dev->netdev_ops;
mpc->new_ops = *mpc->old_ops;
@@ -300,14 +326,14 @@ static void start_mpc(struct mpoa_client *mpc, struct net_device *dev)
static void stop_mpc(struct mpoa_client *mpc)
{
struct net_device *dev = mpc->dev;
- dprintk("mpoa: (%s) stop_mpc:", mpc->dev->name);
+ dprintk("(%s)", mpc->dev->name);
/* Lets not nullify lec device's dev->hard_start_xmit */
if (dev->netdev_ops != &mpc->new_ops) {
- dprintk(" mpc already stopped, not fatal\n");
+ dprintk_cont(" mpc already stopped, not fatal\n");
return;
}
- dprintk("\n");
+ dprintk_cont("\n");
dev->netdev_ops = mpc->old_ops;
mpc->old_ops = NULL;
@@ -319,25 +345,18 @@ static const char *mpoa_device_type_string(char type) __attribute__ ((unused));
static const char *mpoa_device_type_string(char type)
{
- switch(type) {
+ switch (type) {
case NON_MPOA:
return "non-MPOA device";
- break;
case MPS:
return "MPS";
- break;
case MPC:
return "MPC";
- break;
case MPS_AND_MPC:
return "both MPS and MPC";
- break;
- default:
- return "unspecified (non-MPOA) device";
- break;
}
- return ""; /* not reached */
+ return "unspecified (non-MPOA) device";
}
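
The mpoa_device_type_string() change above is pure style: a break placed after a return is unreachable and can be dropped, and the catch-all string reads better as the function's final return than as a default label. The shape in isolation:

static const char *state_name(int state)
{
        switch (state) {
        case 0:
                return "idle";
        case 1:
                return "running";
        }

        return "unknown";       /* every value not matched above */
}
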
/*
@@ -362,26 +381,28 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
struct mpoa_client *mpc;
mpoa_device_type = number_of_mps_macs = 0; /* silence gcc */
- dprintk("mpoa: (%s) lane2_assoc_ind: received TLV(s), ", dev->name);
+ dprintk("(%s) received TLV(s), ", dev->name);
dprintk("total length of all TLVs %d\n", sizeoftlvs);
mpc = find_mpc_by_lec(dev); /* Sampo-Fix: moved here from below */
if (mpc == NULL) {
- printk("mpoa: (%s) lane2_assoc_ind: no mpc\n", dev->name);
+ pr_info("(%s) no mpc\n", dev->name);
return;
}
end_of_tlvs = tlvs + sizeoftlvs;
while (end_of_tlvs - tlvs >= 5) {
- type = (tlvs[0] << 24) | (tlvs[1] << 16) | (tlvs[2] << 8) | tlvs[3];
+ type = ((tlvs[0] << 24) | (tlvs[1] << 16) |
+ (tlvs[2] << 8) | tlvs[3]);
length = tlvs[4];
tlvs += 5;
dprintk(" type 0x%x length %02x\n", type, length);
if (tlvs + length > end_of_tlvs) {
- printk("TLV value extends past its buffer, aborting parse\n");
+ pr_info("TLV value extends past its buffer, aborting parse\n");
return;
}
if (type == 0) {
- printk("mpoa: (%s) lane2_assoc_ind: TLV type was 0, returning\n", dev->name);
+ pr_info("mpoa: (%s) TLV type was 0, returning\n",
+ dev->name);
return;
}
@@ -391,39 +412,48 @@ static void lane2_assoc_ind(struct net_device *dev, const u8 *mac_addr,
}
mpoa_device_type = *tlvs++;
number_of_mps_macs = *tlvs++;
- dprintk("mpoa: (%s) MPOA device type '%s', ", dev->name, mpoa_device_type_string(mpoa_device_type));
+ dprintk("(%s) MPOA device type '%s', ",
+ dev->name, mpoa_device_type_string(mpoa_device_type));
if (mpoa_device_type == MPS_AND_MPC &&
length < (42 + number_of_mps_macs*ETH_ALEN)) { /* :) */
- printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n",
- dev->name);
+ pr_info("(%s) short MPOA Device Type TLV\n",
+ dev->name);
continue;
}
- if ((mpoa_device_type == MPS || mpoa_device_type == MPC)
- && length < 22 + number_of_mps_macs*ETH_ALEN) {
- printk("\nmpoa: (%s) lane2_assoc_ind: short MPOA Device Type TLV\n",
- dev->name);
+ if ((mpoa_device_type == MPS || mpoa_device_type == MPC) &&
+ length < 22 + number_of_mps_macs*ETH_ALEN) {
+ pr_info("(%s) short MPOA Device Type TLV\n", dev->name);
continue;
}
- if (mpoa_device_type != MPS && mpoa_device_type != MPS_AND_MPC) {
- dprintk("ignoring non-MPS device\n");
- if (mpoa_device_type == MPC) tlvs += 20;
+ if (mpoa_device_type != MPS &&
+ mpoa_device_type != MPS_AND_MPC) {
+ dprintk("ignoring non-MPS device ");
+ if (mpoa_device_type == MPC)
+ tlvs += 20;
continue; /* we are only interested in MPSs */
}
- if (number_of_mps_macs == 0 && mpoa_device_type == MPS_AND_MPC) {
- printk("\nmpoa: (%s) lane2_assoc_ind: MPS_AND_MPC has zero MACs\n", dev->name);
+ if (number_of_mps_macs == 0 &&
+ mpoa_device_type == MPS_AND_MPC) {
+ pr_info("(%s) MPS_AND_MPC has zero MACs\n", dev->name);
continue; /* someone should read the spec */
}
- dprintk("this MPS has %d MAC addresses\n", number_of_mps_macs);
+ dprintk_cont("this MPS has %d MAC addresses\n",
+ number_of_mps_macs);
- /* ok, now we can go and tell our daemon the control address of MPS */
+ /*
+ * ok, now we can go and tell our daemon
+ * the control address of MPS
+ */
send_set_mps_ctrl_addr(tlvs, mpc);
- tlvs = copy_macs(mpc, mac_addr, tlvs, number_of_mps_macs, mpoa_device_type);
- if (tlvs == NULL) return;
+ tlvs = copy_macs(mpc, mac_addr, tlvs,
+ number_of_mps_macs, mpoa_device_type);
+ if (tlvs == NULL)
+ return;
}
if (end_of_tlvs - tlvs != 0)
- printk("mpoa: (%s) lane2_assoc_ind: ignoring %Zd bytes of trailing TLV carbage\n",
- dev->name, end_of_tlvs - tlvs);
+ pr_info("(%s) ignoring %Zd bytes of trailing TLV garbage\n",
+ dev->name, end_of_tlvs - tlvs);
return;
}
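
lane2_assoc_ind() above is at heart a bounds-checked TLV walk: four bytes of big-endian type, one byte of length, then the value, with a guard so a bad length cannot run past the buffer. A stand-alone sketch under those assumptions (parse_tlvs is made up, and the plain skip over the value stands in for the MPOA device-type parsing the real code does):

#include <stdint.h>
#include <stdio.h>

static void parse_tlvs(const uint8_t *tlvs, size_t sizeoftlvs)
{
        const uint8_t *end = tlvs + sizeoftlvs;

        while (end - tlvs >= 5) {               /* room left for type + length? */
                unsigned int type = ((uint32_t)tlvs[0] << 24) | (tlvs[1] << 16) |
                                    (tlvs[2] << 8) | tlvs[3];
                unsigned int length = tlvs[4];

                tlvs += 5;
                if (tlvs + length > end) {      /* value must fit in the buffer */
                        fprintf(stderr, "TLV value extends past its buffer\n");
                        return;
                }
                printf("type 0x%x length %u\n", type, length);
                tlvs += length;                 /* the real parser consumes the value here */
        }
        if (end - tlvs != 0)
                fprintf(stderr, "%ld bytes of trailing TLV garbage\n",
                        (long)(end - tlvs));
}

int main(void)
{
        /* one TLV: an arbitrary example type, length 3, value { 1, 2, 3 } */
        const uint8_t buf[] = { 0x00, 0xa0, 0x3e, 0x2a, 3, 1, 2, 3 };

        parse_tlvs(buf, sizeof(buf));
        return 0;
}
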
@@ -441,11 +471,12 @@ static const uint8_t *copy_macs(struct mpoa_client *mpc,
num_macs = (mps_macs > 1) ? mps_macs : 1;
if (mpc->number_of_mps_macs != num_macs) { /* need to reallocate? */
- if (mpc->number_of_mps_macs != 0) kfree(mpc->mps_macs);
+ if (mpc->number_of_mps_macs != 0)
+ kfree(mpc->mps_macs);
mpc->number_of_mps_macs = 0;
- mpc->mps_macs = kmalloc(num_macs*ETH_ALEN, GFP_KERNEL);
+ mpc->mps_macs = kmalloc(num_macs * ETH_ALEN, GFP_KERNEL);
if (mpc->mps_macs == NULL) {
- printk("mpoa: (%s) copy_macs: out of mem\n", mpc->dev->name);
+ pr_info("(%s) out of mem\n", mpc->dev->name);
return NULL;
}
}
@@ -478,24 +509,30 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
iph = (struct iphdr *)buff;
ipaddr = iph->daddr;
- ddprintk("mpoa: (%s) send_via_shortcut: ipaddr 0x%x\n", mpc->dev->name, ipaddr);
+ ddprintk("(%s) ipaddr 0x%x\n",
+ mpc->dev->name, ipaddr);
entry = mpc->in_ops->get(ipaddr, mpc);
if (entry == NULL) {
entry = mpc->in_ops->add_entry(ipaddr, mpc);
- if (entry != NULL) mpc->in_ops->put(entry);
+ if (entry != NULL)
+ mpc->in_ops->put(entry);
return 1;
}
- if (mpc->in_ops->cache_hit(entry, mpc) != OPEN){ /* threshold not exceeded or VCC not ready */
- ddprintk("mpoa: (%s) send_via_shortcut: cache_hit: returns != OPEN\n", mpc->dev->name);
+ /* threshold not exceeded or VCC not ready */
+ if (mpc->in_ops->cache_hit(entry, mpc) != OPEN) {
+ ddprintk("(%s) cache_hit: returns != OPEN\n",
+ mpc->dev->name);
mpc->in_ops->put(entry);
return 1;
}
- ddprintk("mpoa: (%s) send_via_shortcut: using shortcut\n", mpc->dev->name);
+ ddprintk("(%s) using shortcut\n",
+ mpc->dev->name);
/* MPOA spec A.1.4, MPOA client must decrement IP ttl at least by one */
if (iph->ttl <= 1) {
- ddprintk("mpoa: (%s) send_via_shortcut: IP ttl = %u, using LANE\n", mpc->dev->name, iph->ttl);
+ ddprintk("(%s) IP ttl = %u, using LANE\n",
+ mpc->dev->name, iph->ttl);
mpc->in_ops->put(entry);
return 1;
}
@@ -504,15 +541,18 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
if (entry->ctrl_info.tag != 0) {
- ddprintk("mpoa: (%s) send_via_shortcut: adding tag 0x%x\n", mpc->dev->name, entry->ctrl_info.tag);
+ ddprintk("(%s) adding tag 0x%x\n",
+ mpc->dev->name, entry->ctrl_info.tag);
tagged_llc_snap_hdr.tag = entry->ctrl_info.tag;
- skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
- skb_push(skb, sizeof(tagged_llc_snap_hdr)); /* add LLC/SNAP header */
+ skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
+ skb_push(skb, sizeof(tagged_llc_snap_hdr));
+ /* add LLC/SNAP header */
skb_copy_to_linear_data(skb, &tagged_llc_snap_hdr,
sizeof(tagged_llc_snap_hdr));
} else {
- skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
- skb_push(skb, sizeof(struct llc_snap_hdr)); /* add LLC/SNAP header + tag */
+ skb_pull(skb, ETH_HLEN); /* get rid of Eth header */
+ skb_push(skb, sizeof(struct llc_snap_hdr));
+ /* add LLC/SNAP header + tag */
skb_copy_to_linear_data(skb, &llc_snap_mpoa_data,
sizeof(struct llc_snap_hdr));
}
@@ -537,8 +577,8 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
int i = 0;
mpc = find_mpc_by_lec(dev); /* this should NEVER fail */
- if(mpc == NULL) {
- printk("mpoa: (%s) mpc_send_packet: no MPC found\n", dev->name);
+ if (mpc == NULL) {
+ pr_info("(%s) no MPC found\n", dev->name);
goto non_ip;
}
@@ -554,14 +594,15 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
goto non_ip;
while (i < mpc->number_of_mps_macs) {
- if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
- if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
- return NETDEV_TX_OK; /* success! */
+ if (!compare_ether_addr(eth->h_dest,
+ (mpc->mps_macs + i*ETH_ALEN)))
+ if (send_via_shortcut(skb, mpc) == 0) /* try shortcut */
+ return NETDEV_TX_OK;
i++;
}
- non_ip:
- return mpc->old_ops->ndo_start_xmit(skb,dev);
+non_ip:
+ return mpc->old_ops->ndo_start_xmit(skb, dev);
}
static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
@@ -574,7 +615,8 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmmpc_ioc));
if (bytes_left != 0) {
- printk("mpoa: mpc_vcc_attach: Short read (missed %d bytes) from userland\n", bytes_left);
+ pr_info("mpoa:Short read (missed %d bytes) from userland\n",
+ bytes_left);
return -EFAULT;
}
ipaddr = ioc_data.ipaddr;
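
The bytes_left test above relies on the copy_from_user() contract: the return value is the number of bytes that could not be copied, so any non-zero result means the user buffer was partly unreadable and the ioctl should fail with -EFAULT. A minimal sketch (demo_ioc and demo_ioctl_copy are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_ioc {
        __be32 ipaddr;
        int type;
};

static int demo_ioctl_copy(void __user *arg, struct demo_ioc *out)
{
        /* copy_from_user() returns bytes NOT copied; 0 means full success */
        if (copy_from_user(out, arg, sizeof(*out)))
                return -EFAULT;
        return 0;
}
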
@@ -587,18 +629,20 @@ static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
if (ioc_data.type == MPC_SOCKET_INGRESS) {
in_entry = mpc->in_ops->get(ipaddr, mpc);
- if (in_entry == NULL || in_entry->entry_state < INGRESS_RESOLVED) {
- printk("mpoa: (%s) mpc_vcc_attach: did not find RESOLVED entry from ingress cache\n",
+ if (in_entry == NULL ||
+ in_entry->entry_state < INGRESS_RESOLVED) {
+ pr_info("(%s) did not find RESOLVED entry from ingress cache\n",
mpc->dev->name);
- if (in_entry != NULL) mpc->in_ops->put(in_entry);
+ if (in_entry != NULL)
+ mpc->in_ops->put(in_entry);
return -EINVAL;
}
- printk("mpoa: (%s) mpc_vcc_attach: attaching ingress SVC, entry = %pI4\n",
- mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
+ pr_info("(%s) attaching ingress SVC, entry = %pI4\n",
+ mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
in_entry->shortcut = vcc;
mpc->in_ops->put(in_entry);
} else {
- printk("mpoa: (%s) mpc_vcc_attach: attaching egress SVC\n", mpc->dev->name);
+ pr_info("(%s) attaching egress SVC\n", mpc->dev->name);
}
vcc->proto_data = mpc->dev;
@@ -618,27 +662,27 @@ static void mpc_vcc_close(struct atm_vcc *vcc, struct net_device *dev)
mpc = find_mpc_by_lec(dev);
if (mpc == NULL) {
- printk("mpoa: (%s) mpc_vcc_close: close for unknown MPC\n", dev->name);
+ pr_info("(%s) close for unknown MPC\n", dev->name);
return;
}
- dprintk("mpoa: (%s) mpc_vcc_close:\n", dev->name);
+ dprintk("(%s)\n", dev->name);
in_entry = mpc->in_ops->get_by_vcc(vcc, mpc);
if (in_entry) {
- dprintk("mpoa: (%s) mpc_vcc_close: ingress SVC closed ip = %pI4\n",
- mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
+ dprintk("(%s) ingress SVC closed ip = %pI4\n",
+ mpc->dev->name, &in_entry->ctrl_info.in_dst_ip);
in_entry->shortcut = NULL;
mpc->in_ops->put(in_entry);
}
eg_entry = mpc->eg_ops->get_by_vcc(vcc, mpc);
if (eg_entry) {
- dprintk("mpoa: (%s) mpc_vcc_close: egress SVC closed\n", mpc->dev->name);
+ dprintk("(%s) egress SVC closed\n", mpc->dev->name);
eg_entry->shortcut = NULL;
mpc->eg_ops->put(eg_entry);
}
if (in_entry == NULL && eg_entry == NULL)
- dprintk("mpoa: (%s) mpc_vcc_close: unused vcc closed\n", dev->name);
+ dprintk("(%s) unused vcc closed\n", dev->name);
return;
}
@@ -652,18 +696,19 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
__be32 tag;
char *tmp;
- ddprintk("mpoa: (%s) mpc_push:\n", dev->name);
+ ddprintk("(%s)\n", dev->name);
if (skb == NULL) {
- dprintk("mpoa: (%s) mpc_push: null skb, closing VCC\n", dev->name);
+ dprintk("(%s) null skb, closing VCC\n", dev->name);
mpc_vcc_close(vcc, dev);
return;
}
skb->dev = dev;
- if (memcmp(skb->data, &llc_snap_mpoa_ctrl, sizeof(struct llc_snap_hdr)) == 0) {
+ if (memcmp(skb->data, &llc_snap_mpoa_ctrl,
+ sizeof(struct llc_snap_hdr)) == 0) {
struct sock *sk = sk_atm(vcc);
- dprintk("mpoa: (%s) mpc_push: control packet arrived\n", dev->name);
+ dprintk("(%s) control packet arrived\n", dev->name);
/* Pass control packets to daemon */
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
@@ -675,20 +720,22 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
mpc = find_mpc_by_lec(dev);
if (mpc == NULL) {
- printk("mpoa: (%s) mpc_push: unknown MPC\n", dev->name);
+ pr_info("(%s) unknown MPC\n", dev->name);
return;
}
- if (memcmp(skb->data, &llc_snap_mpoa_data_tagged, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
- ddprintk("mpoa: (%s) mpc_push: tagged data packet arrived\n", dev->name);
+ if (memcmp(skb->data, &llc_snap_mpoa_data_tagged,
+ sizeof(struct llc_snap_hdr)) == 0) { /* MPOA tagged data */
+ ddprintk("(%s) tagged data packet arrived\n", dev->name);
- } else if (memcmp(skb->data, &llc_snap_mpoa_data, sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
- printk("mpoa: (%s) mpc_push: non-tagged data packet arrived\n", dev->name);
- printk(" mpc_push: non-tagged data unsupported, purging\n");
+ } else if (memcmp(skb->data, &llc_snap_mpoa_data,
+ sizeof(struct llc_snap_hdr)) == 0) { /* MPOA data */
+ pr_info("(%s) Unsupported non-tagged data packet arrived. Purging\n",
+ dev->name);
dev_kfree_skb_any(skb);
return;
} else {
- printk("mpoa: (%s) mpc_push: garbage arrived, purging\n", dev->name);
+ pr_info("(%s) garbage arrived, purging\n", dev->name);
dev_kfree_skb_any(skb);
return;
}
@@ -698,8 +745,8 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
eg = mpc->eg_ops->get_by_tag(tag, mpc);
if (eg == NULL) {
- printk("mpoa: (%s) mpc_push: Didn't find egress cache entry, tag = %u\n",
- dev->name,tag);
+ pr_info("mpoa: (%s) Didn't find egress cache entry, tag = %u\n",
+ dev->name, tag);
purge_egress_shortcut(vcc, NULL);
dev_kfree_skb_any(skb);
return;
@@ -711,13 +758,15 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb)
*/
if (eg->shortcut == NULL) {
eg->shortcut = vcc;
- printk("mpoa: (%s) mpc_push: egress SVC in use\n", dev->name);
+ pr_info("(%s) egress SVC in use\n", dev->name);
}
- skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag)); /* get rid of LLC/SNAP header */
- new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length); /* LLC/SNAP is shorter than MAC header :( */
+ skb_pull(skb, sizeof(struct llc_snap_hdr) + sizeof(tag));
+ /* get rid of LLC/SNAP header */
+ new_skb = skb_realloc_headroom(skb, eg->ctrl_info.DH_length);
+ /* LLC/SNAP is shorter than MAC header :( */
dev_kfree_skb_any(skb);
- if (new_skb == NULL){
+ if (new_skb == NULL) {
mpc->eg_ops->put(eg);
return;
}
@@ -750,7 +799,7 @@ static struct atm_dev mpc_dev = {
/* members not explicitly initialised will be 0 */
};
-static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
+static int atm_mpoa_mpoad_attach(struct atm_vcc *vcc, int arg)
{
struct mpoa_client *mpc;
struct lec_priv *priv;
@@ -770,15 +819,16 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
mpc = find_mpc_by_itfnum(arg);
if (mpc == NULL) {
- dprintk("mpoa: mpoad_attach: allocating new mpc for itf %d\n", arg);
+ dprintk("allocating new mpc for itf %d\n", arg);
mpc = alloc_mpc();
if (mpc == NULL)
return -ENOMEM;
mpc->dev_num = arg;
- mpc->dev = find_lec_by_itfnum(arg); /* NULL if there was no lec */
+ mpc->dev = find_lec_by_itfnum(arg);
+ /* NULL if there was no lec */
}
if (mpc->mpoad_vcc) {
- printk("mpoa: mpoad_attach: mpoad is already present for itf %d\n", arg);
+ pr_info("mpoad is already present for itf %d\n", arg);
return -EADDRINUSE;
}
@@ -794,8 +844,8 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
mpc->mpoad_vcc = vcc;
vcc->dev = &mpc_dev;
vcc_insert_socket(sk_atm(vcc));
- set_bit(ATM_VF_META,&vcc->flags);
- set_bit(ATM_VF_READY,&vcc->flags);
+ set_bit(ATM_VF_META, &vcc->flags);
+ set_bit(ATM_VF_READY, &vcc->flags);
if (mpc->dev) {
char empty[ATM_ESA_LEN];
@@ -805,7 +855,7 @@ static int atm_mpoa_mpoad_attach (struct atm_vcc *vcc, int arg)
/* set address if mpcd e.g. gets killed and restarted.
* If we do not do it now we have to wait for the next LE_ARP
*/
- if ( memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0 )
+ if (memcmp(mpc->mps_ctrl_addr, empty, ATM_ESA_LEN) != 0)
send_set_mps_ctrl_addr(mpc->mps_ctrl_addr, mpc);
}
@@ -817,7 +867,7 @@ static void send_set_mps_ctrl_addr(const char *addr, struct mpoa_client *mpc)
{
struct k_message mesg;
- memcpy (mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
+ memcpy(mpc->mps_ctrl_addr, addr, ATM_ESA_LEN);
mesg.type = SET_MPS_CTRL_ADDR;
memcpy(mesg.MPS_ctrl, addr, ATM_ESA_LEN);
@@ -833,11 +883,11 @@ static void mpoad_close(struct atm_vcc *vcc)
mpc = find_mpc_by_vcc(vcc);
if (mpc == NULL) {
- printk("mpoa: mpoad_close: did not find MPC\n");
+ pr_info("did not find MPC\n");
return;
}
if (!mpc->mpoad_vcc) {
- printk("mpoa: mpoad_close: close for non-present mpoad\n");
+ pr_info("close for non-present mpoad\n");
return;
}
@@ -857,7 +907,7 @@ static void mpoad_close(struct atm_vcc *vcc)
kfree_skb(skb);
}
- printk("mpoa: (%s) going down\n",
+ pr_info("(%s) going down\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
module_put(THIS_MODULE);
@@ -871,61 +921,61 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
- struct k_message *mesg = (struct k_message*)skb->data;
+ struct k_message *mesg = (struct k_message *)skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
if (mpc == NULL) {
- printk("mpoa: msg_from_mpoad: no mpc found\n");
+ pr_info("no mpc found\n");
return 0;
}
- dprintk("mpoa: (%s) msg_from_mpoad:", (mpc->dev) ? mpc->dev->name : "<unknown>");
- switch(mesg->type) {
+ dprintk("(%s)", mpc->dev ? mpc->dev->name : "<unknown>");
+ switch (mesg->type) {
case MPOA_RES_REPLY_RCVD:
- dprintk(" mpoa_res_reply_rcvd\n");
+ dprintk_cont("mpoa_res_reply_rcvd\n");
MPOA_res_reply_rcvd(mesg, mpc);
break;
case MPOA_TRIGGER_RCVD:
- dprintk(" mpoa_trigger_rcvd\n");
+ dprintk_cont("mpoa_trigger_rcvd\n");
MPOA_trigger_rcvd(mesg, mpc);
break;
case INGRESS_PURGE_RCVD:
- dprintk(" nhrp_purge_rcvd\n");
+ dprintk_cont("nhrp_purge_rcvd\n");
ingress_purge_rcvd(mesg, mpc);
break;
case EGRESS_PURGE_RCVD:
- dprintk(" egress_purge_reply_rcvd\n");
+ dprintk_cont("egress_purge_reply_rcvd\n");
egress_purge_rcvd(mesg, mpc);
break;
case MPS_DEATH:
- dprintk(" mps_death\n");
+ dprintk_cont("mps_death\n");
mps_death(mesg, mpc);
break;
case CACHE_IMPOS_RCVD:
- dprintk(" cache_impos_rcvd\n");
+ dprintk_cont("cache_impos_rcvd\n");
MPOA_cache_impos_rcvd(mesg, mpc);
break;
case SET_MPC_CTRL_ADDR:
- dprintk(" set_mpc_ctrl_addr\n");
+ dprintk_cont("set_mpc_ctrl_addr\n");
set_mpc_ctrl_addr_rcvd(mesg, mpc);
break;
case SET_MPS_MAC_ADDR:
- dprintk(" set_mps_mac_addr\n");
+ dprintk_cont("set_mps_mac_addr\n");
set_mps_mac_addr_rcvd(mesg, mpc);
break;
case CLEAN_UP_AND_EXIT:
- dprintk(" clean_up_and_exit\n");
+ dprintk_cont("clean_up_and_exit\n");
clean_up(mesg, mpc, DIE);
break;
case RELOAD:
- dprintk(" reload\n");
+ dprintk_cont("reload\n");
clean_up(mesg, mpc, RELOAD);
break;
case SET_MPC_PARAMS:
- dprintk(" set_mpc_params\n");
+ dprintk_cont("set_mpc_params\n");
mpc->parameters = mesg->content.params;
break;
default:
- dprintk(" unknown message %d\n", mesg->type);
+ dprintk_cont("unknown message %d\n", mesg->type);
break;
}
kfree_skb(skb);
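
The switch above is why dprintk_cont() exists in this patch: each dprintk() now opens a fresh KERN_DEBUG line carrying the "mpoa:<function>: " prefix, so building one log line out of several consecutive calls needs an explicit continuation. In raw printk terms the pattern looks roughly like this (demo_log and name are placeholders):

#include <linux/kernel.h>

static void demo_log(const char *name)
{
        printk(KERN_DEBUG "mpoa:%s: (%s)", __func__, name);    /* sets level + prefix */
        printk(KERN_CONT " mpoa_res_reply_rcvd\n");             /* appended, not a new line */
}
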
@@ -940,7 +990,7 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
struct sock *sk;
if (mpc == NULL || !mpc->mpoad_vcc) {
- printk("mpoa: msg_to_mpoad: mesg %d to a non-existent mpoad\n", mesg->type);
+ pr_info("mesg %d to a non-existent mpoad\n", mesg->type);
return -ENXIO;
}
@@ -958,7 +1008,8 @@ int msg_to_mpoad(struct k_message *mesg, struct mpoa_client *mpc)
return 0;
}
-static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev_ptr)
+static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
+ unsigned long event, void *dev_ptr)
{
struct net_device *dev;
struct mpoa_client *mpc;
@@ -980,25 +1031,24 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
priv->lane2_ops->associate_indicator = lane2_assoc_ind;
mpc = find_mpc_by_itfnum(priv->itfnum);
if (mpc == NULL) {
- dprintk("mpoa: mpoa_event_listener: allocating new mpc for %s\n",
- dev->name);
+ dprintk("allocating new mpc for %s\n", dev->name);
mpc = alloc_mpc();
if (mpc == NULL) {
- printk("mpoa: mpoa_event_listener: no new mpc");
+ pr_info("no new mpc");
break;
}
}
mpc->dev_num = priv->itfnum;
mpc->dev = dev;
dev_hold(dev);
- dprintk("mpoa: (%s) was initialized\n", dev->name);
+ dprintk("(%s) was initialized\n", dev->name);
break;
case NETDEV_UNREGISTER:
/* the lec device was deallocated */
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
- dprintk("mpoa: device (%s) was deallocated\n", dev->name);
+ dprintk("device (%s) was deallocated\n", dev->name);
stop_mpc(mpc);
dev_put(mpc->dev);
mpc->dev = NULL;
@@ -1008,9 +1058,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
- if (mpc->mpoad_vcc != NULL) {
+ if (mpc->mpoad_vcc != NULL)
start_mpc(mpc, dev);
- }
break;
case NETDEV_DOWN:
/* the dev was ifconfig'ed down */
@@ -1020,9 +1069,8 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned lo
mpc = find_mpc_by_lec(dev);
if (mpc == NULL)
break;
- if (mpc->mpoad_vcc != NULL) {
+ if (mpc->mpoad_vcc != NULL)
stop_mpc(mpc);
- }
break;
case NETDEV_REBOOT:
case NETDEV_CHANGE:
@@ -1049,7 +1097,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
in_cache_entry *entry;
entry = mpc->in_ops->get(dst_ip, mpc);
- if(entry == NULL){
+ if (entry == NULL) {
entry = mpc->in_ops->add_entry(dst_ip, mpc);
entry->entry_state = INGRESS_RESOLVING;
msg->type = SND_MPOA_RES_RQST;
@@ -1060,7 +1108,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
return;
}
- if(entry->entry_state == INGRESS_INVALID){
+ if (entry->entry_state == INGRESS_INVALID) {
entry->entry_state = INGRESS_RESOLVING;
msg->type = SND_MPOA_RES_RQST;
msg->content.in_info = entry->ctrl_info;
@@ -1070,7 +1118,7 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
return;
}
- printk("mpoa: (%s) MPOA_trigger_rcvd: entry already in resolving state\n",
+ pr_info("(%s) entry already in resolving state\n",
(mpc->dev) ? mpc->dev->name : "<unknown>");
mpc->in_ops->put(entry);
return;
@@ -1080,23 +1128,25 @@ static void MPOA_trigger_rcvd(struct k_message *msg, struct mpoa_client *mpc)
* Things get complicated because we have to check if there's an egress
* shortcut with suitable traffic parameters we could use.
*/
-static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_client *client, in_cache_entry *entry)
+static void check_qos_and_open_shortcut(struct k_message *msg,
+ struct mpoa_client *client,
+ in_cache_entry *entry)
{
__be32 dst_ip = msg->content.in_info.in_dst_ip;
struct atm_mpoa_qos *qos = atm_mpoa_search_qos(dst_ip);
eg_cache_entry *eg_entry = client->eg_ops->get_by_src_ip(dst_ip, client);
- if(eg_entry && eg_entry->shortcut){
- if(eg_entry->shortcut->qos.txtp.traffic_class &
- msg->qos.txtp.traffic_class &
- (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)){
- if(eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
- entry->shortcut = eg_entry->shortcut;
- else if(eg_entry->shortcut->qos.txtp.max_pcr > 0)
- entry->shortcut = eg_entry->shortcut;
+ if (eg_entry && eg_entry->shortcut) {
+ if (eg_entry->shortcut->qos.txtp.traffic_class &
+ msg->qos.txtp.traffic_class &
+ (qos ? qos->qos.txtp.traffic_class : ATM_UBR | ATM_CBR)) {
+ if (eg_entry->shortcut->qos.txtp.traffic_class == ATM_UBR)
+ entry->shortcut = eg_entry->shortcut;
+ else if (eg_entry->shortcut->qos.txtp.max_pcr > 0)
+ entry->shortcut = eg_entry->shortcut;
}
- if(entry->shortcut){
- dprintk("mpoa: (%s) using egress SVC to reach %pI4\n",
+ if (entry->shortcut) {
+ dprintk("(%s) using egress SVC to reach %pI4\n",
client->dev->name, &dst_ip);
client->eg_ops->put(eg_entry);
return;
@@ -1107,12 +1157,13 @@ static void check_qos_and_open_shortcut(struct k_message *msg, struct mpoa_clien
/* No luck in the egress cache we must open an ingress SVC */
msg->type = OPEN_INGRESS_SVC;
- if (qos && (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class))
- {
+ if (qos &&
+ (qos->qos.txtp.traffic_class == msg->qos.txtp.traffic_class)) {
msg->qos = qos->qos;
- printk("mpoa: (%s) trying to get a CBR shortcut\n",client->dev->name);
- }
- else memset(&msg->qos,0,sizeof(struct atm_qos));
+ pr_info("(%s) trying to get a CBR shortcut\n",
+ client->dev->name);
+ } else
+ memset(&msg->qos, 0, sizeof(struct atm_qos));
msg_to_mpoad(msg, client);
return;
}
@@ -1122,17 +1173,19 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
__be32 dst_ip = msg->content.in_info.in_dst_ip;
in_cache_entry *entry = mpc->in_ops->get(dst_ip, mpc);
- dprintk("mpoa: (%s) MPOA_res_reply_rcvd: ip %pI4\n",
+ dprintk("(%s) ip %pI4\n",
mpc->dev->name, &dst_ip);
- ddprintk("mpoa: (%s) MPOA_res_reply_rcvd() entry = %p", mpc->dev->name, entry);
- if(entry == NULL){
- printk("\nmpoa: (%s) ARGH, received res. reply for an entry that doesn't exist.\n", mpc->dev->name);
+ ddprintk("(%s) entry = %p",
+ mpc->dev->name, entry);
+ if (entry == NULL) {
+ pr_info("(%s) ARGH, received res. reply for an entry that doesn't exist.\n",
+ mpc->dev->name);
return;
}
- ddprintk(" entry_state = %d ", entry->entry_state);
+ ddprintk_cont(" entry_state = %d ", entry->entry_state);
if (entry->entry_state == INGRESS_RESOLVED) {
- printk("\nmpoa: (%s) MPOA_res_reply_rcvd for RESOLVED entry!\n", mpc->dev->name);
+ pr_info("(%s) RESOLVED entry!\n", mpc->dev->name);
mpc->in_ops->put(entry);
return;
}
@@ -1141,17 +1194,18 @@ static void MPOA_res_reply_rcvd(struct k_message *msg, struct mpoa_client *mpc)
do_gettimeofday(&(entry->tv));
do_gettimeofday(&(entry->reply_wait)); /* Used in refreshing func from now on */
entry->refresh_time = 0;
- ddprintk("entry->shortcut = %p\n", entry->shortcut);
+ ddprintk_cont("entry->shortcut = %p\n", entry->shortcut);
- if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL){
+ if (entry->entry_state == INGRESS_RESOLVING &&
+ entry->shortcut != NULL) {
entry->entry_state = INGRESS_RESOLVED;
mpc->in_ops->put(entry);
return; /* Shortcut already open... */
}
if (entry->shortcut != NULL) {
- printk("mpoa: (%s) MPOA_res_reply_rcvd: entry->shortcut != NULL, impossible!\n",
- mpc->dev->name);
+ pr_info("(%s) entry->shortcut != NULL, impossible!\n",
+ mpc->dev->name);
mpc->in_ops->put(entry);
return;
}
@@ -1170,14 +1224,14 @@ static void ingress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
__be32 mask = msg->ip_mask;
in_cache_entry *entry = mpc->in_ops->get_with_mask(dst_ip, mpc, mask);
- if(entry == NULL){
- printk("mpoa: (%s) ingress_purge_rcvd: purge for a non-existing entry, ip = %pI4\n",
- mpc->dev->name, &dst_ip);
+ if (entry == NULL) {
+ pr_info("(%s) purge for a non-existing entry, ip = %pI4\n",
+ mpc->dev->name, &dst_ip);
return;
}
do {
- dprintk("mpoa: (%s) ingress_purge_rcvd: removing an ingress entry, ip = %pI4\n",
+ dprintk("(%s) removing an ingress entry, ip = %pI4\n",
mpc->dev->name, &dst_ip);
write_lock_bh(&mpc->ingress_lock);
mpc->in_ops->remove_entry(entry, mpc);
@@ -1195,7 +1249,8 @@ static void egress_purge_rcvd(struct k_message *msg, struct mpoa_client *mpc)
eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(cache_id, mpc);
if (entry == NULL) {
- dprintk("mpoa: (%s) egress_purge_rcvd: purge for a non-existing entry\n", mpc->dev->name);
+ dprintk("(%s) purge for a non-existing entry\n",
+ mpc->dev->name);
return;
}
@@ -1214,15 +1269,15 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
struct k_message *purge_msg;
struct sk_buff *skb;
- dprintk("mpoa: purge_egress_shortcut: entering\n");
+ dprintk("entering\n");
if (vcc == NULL) {
- printk("mpoa: purge_egress_shortcut: vcc == NULL\n");
+ pr_info("vcc == NULL\n");
return;
}
skb = alloc_skb(sizeof(struct k_message), GFP_ATOMIC);
if (skb == NULL) {
- printk("mpoa: purge_egress_shortcut: out of memory\n");
+ pr_info("out of memory\n");
return;
}
@@ -1238,7 +1293,7 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
- dprintk("mpoa: purge_egress_shortcut: exiting:\n");
+ dprintk("exiting\n");
return;
}
@@ -1247,14 +1302,14 @@ static void purge_egress_shortcut(struct atm_vcc *vcc, eg_cache_entry *entry)
* Our MPS died. Tell our daemon to send NHRP data plane purge to each
* of the egress shortcuts we have.
*/
-static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
+static void mps_death(struct k_message *msg, struct mpoa_client *mpc)
{
eg_cache_entry *entry;
- dprintk("mpoa: (%s) mps_death:\n", mpc->dev->name);
+ dprintk("(%s)\n", mpc->dev->name);
- if(memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)){
- printk("mpoa: (%s) mps_death: wrong MPS\n", mpc->dev->name);
+ if (memcmp(msg->MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN)) {
+ pr_info("(%s) wrong MPS\n", mpc->dev->name);
return;
}
@@ -1273,20 +1328,21 @@ static void mps_death( struct k_message * msg, struct mpoa_client * mpc )
return;
}
-static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client * mpc)
+static void MPOA_cache_impos_rcvd(struct k_message *msg,
+ struct mpoa_client *mpc)
{
uint16_t holding_time;
eg_cache_entry *entry = mpc->eg_ops->get_by_cache_id(msg->content.eg_info.cache_id, mpc);
holding_time = msg->content.eg_info.holding_time;
- dprintk("mpoa: (%s) MPOA_cache_impos_rcvd: entry = %p, holding_time = %u\n",
- mpc->dev->name, entry, holding_time);
- if(entry == NULL && holding_time) {
+ dprintk("(%s) entry = %p, holding_time = %u\n",
+ mpc->dev->name, entry, holding_time);
+ if (entry == NULL && holding_time) {
entry = mpc->eg_ops->add_entry(msg, mpc);
mpc->eg_ops->put(entry);
return;
}
- if(holding_time){
+ if (holding_time) {
mpc->eg_ops->update(entry, holding_time);
return;
}
@@ -1300,7 +1356,8 @@ static void MPOA_cache_impos_rcvd( struct k_message * msg, struct mpoa_client *
return;
}
-static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *mpc)
+static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg,
+ struct mpoa_client *mpc)
{
struct lec_priv *priv;
int i, retval ;
@@ -1315,34 +1372,39 @@ static void set_mpc_ctrl_addr_rcvd(struct k_message *mesg, struct mpoa_client *m
memcpy(&tlv[7], mesg->MPS_ctrl, ATM_ESA_LEN); /* MPC ctrl ATM addr */
memcpy(mpc->our_ctrl_addr, mesg->MPS_ctrl, ATM_ESA_LEN);
- dprintk("mpoa: (%s) setting MPC ctrl ATM address to ",
- (mpc->dev) ? mpc->dev->name : "<unknown>");
+ dprintk("(%s) setting MPC ctrl ATM address to",
+ mpc->dev ? mpc->dev->name : "<unknown>");
for (i = 7; i < sizeof(tlv); i++)
- dprintk("%02x ", tlv[i]);
- dprintk("\n");
+ dprintk_cont(" %02x", tlv[i]);
+ dprintk_cont("\n");
if (mpc->dev) {
priv = netdev_priv(mpc->dev);
- retval = priv->lane2_ops->associate_req(mpc->dev, mpc->dev->dev_addr, tlv, sizeof(tlv));
+ retval = priv->lane2_ops->associate_req(mpc->dev,
+ mpc->dev->dev_addr,
+ tlv, sizeof(tlv));
if (retval == 0)
- printk("mpoa: (%s) MPOA device type TLV association failed\n", mpc->dev->name);
+ pr_info("(%s) MPOA device type TLV association failed\n",
+ mpc->dev->name);
retval = priv->lane2_ops->resolve(mpc->dev, NULL, 1, NULL, NULL);
if (retval < 0)
- printk("mpoa: (%s) targetless LE_ARP request failed\n", mpc->dev->name);
+ pr_info("(%s) targetless LE_ARP request failed\n",
+ mpc->dev->name);
}
return;
}
-static void set_mps_mac_addr_rcvd(struct k_message *msg, struct mpoa_client *client)
+static void set_mps_mac_addr_rcvd(struct k_message *msg,
+ struct mpoa_client *client)
{
- if(client->number_of_mps_macs)
+ if (client->number_of_mps_macs)
kfree(client->mps_macs);
client->number_of_mps_macs = 0;
client->mps_macs = kmemdup(msg->MPS_ctrl, ETH_ALEN, GFP_KERNEL);
if (client->mps_macs == NULL) {
- printk("mpoa: set_mps_mac_addr_rcvd: out of memory\n");
+ pr_info("out of memory\n");
return;
}
client->number_of_mps_macs = 1;
@@ -1363,11 +1425,11 @@ static void clean_up(struct k_message *msg, struct mpoa_client *mpc, int action)
/* FIXME: This knows too much of the cache structure */
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
- while (entry != NULL){
- msg->content.eg_info = entry->ctrl_info;
- dprintk("mpoa: cache_id %u\n", entry->ctrl_info.cache_id);
- msg_to_mpoad(msg, mpc);
- entry = entry->next;
+ while (entry != NULL) {
+ msg->content.eg_info = entry->ctrl_info;
+ dprintk("cache_id %u\n", entry->ctrl_info.cache_id);
+ msg_to_mpoad(msg, mpc);
+ entry = entry->next;
}
read_unlock_irq(&mpc->egress_lock);
@@ -1386,20 +1448,22 @@ static void mpc_timer_refresh(void)
return;
}
-static void mpc_cache_check( unsigned long checking_time )
+static void mpc_cache_check(unsigned long checking_time)
{
struct mpoa_client *mpc = mpcs;
static unsigned long previous_resolving_check_time;
static unsigned long previous_refresh_time;
- while( mpc != NULL ){
+ while (mpc != NULL) {
mpc->in_ops->clear_count(mpc);
mpc->eg_ops->clear_expired(mpc);
- if(checking_time - previous_resolving_check_time > mpc->parameters.mpc_p4 * HZ ){
+ if (checking_time - previous_resolving_check_time >
+ mpc->parameters.mpc_p4 * HZ) {
mpc->in_ops->check_resolving(mpc);
previous_resolving_check_time = checking_time;
}
- if(checking_time - previous_refresh_time > mpc->parameters.mpc_p5 * HZ ){
+ if (checking_time - previous_refresh_time >
+ mpc->parameters.mpc_p5 * HZ) {
mpc->in_ops->refresh(mpc);
previous_refresh_time = checking_time;
}
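
The interval tests in mpc_cache_check() above compare jiffies-based timestamps by subtraction, which stays correct across a wrap of the unsigned counter; time_after() expresses the same check more explicitly. A small illustration (interval_elapsed is a made-up helper):

#include <linux/jiffies.h>
#include <linux/types.h>

/* true once at least 'secs' seconds have elapsed since 'then' */
static bool interval_elapsed(unsigned long now, unsigned long then,
                             unsigned long secs)
{
        return time_after(now, then + secs * HZ);
}
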
@@ -1410,7 +1474,8 @@ static void mpc_cache_check( unsigned long checking_time )
return;
}
-static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
int err = 0;
struct atm_vcc *vcc = ATM_SD(sock);
@@ -1422,21 +1487,20 @@ static int atm_mpoa_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
return -EPERM;
switch (cmd) {
- case ATMMPC_CTRL:
- err = atm_mpoa_mpoad_attach(vcc, (int)arg);
- if (err >= 0)
- sock->state = SS_CONNECTED;
- break;
- case ATMMPC_DATA:
- err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
- break;
- default:
- break;
+ case ATMMPC_CTRL:
+ err = atm_mpoa_mpoad_attach(vcc, (int)arg);
+ if (err >= 0)
+ sock->state = SS_CONNECTED;
+ break;
+ case ATMMPC_DATA:
+ err = atm_mpoa_vcc_attach(vcc, (void __user *)arg);
+ break;
+ default:
+ break;
}
return err;
}
-
static struct atm_ioctl atm_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = atm_mpoa_ioctl,
@@ -1447,9 +1511,9 @@ static __init int atm_mpoa_init(void)
register_atm_ioctl(&atm_ioctl_ops);
if (mpc_proc_init() != 0)
- printk(KERN_INFO "mpoa: failed to initialize /proc/mpoa\n");
+ pr_info("failed to initialize /proc/mpoa\n");
- printk("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
+ pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
return 0;
}
@@ -1476,15 +1540,15 @@ static void __exit atm_mpoa_cleanup(void)
if (priv->lane2_ops != NULL)
priv->lane2_ops->associate_indicator = NULL;
}
- ddprintk("mpoa: cleanup_module: about to clear caches\n");
+ ddprintk("about to clear caches\n");
mpc->in_ops->destroy_cache(mpc);
mpc->eg_ops->destroy_cache(mpc);
- ddprintk("mpoa: cleanup_module: caches cleared\n");
+ ddprintk("caches cleared\n");
kfree(mpc->mps_macs);
memset(mpc, 0, sizeof(struct mpoa_client));
- ddprintk("mpoa: cleanup_module: about to kfree %p\n", mpc);
+ ddprintk("about to kfree %p\n", mpc);
kfree(mpc);
- ddprintk("mpoa: cleanup_module: next mpc is at %p\n", tmp);
+ ddprintk("next mpc is at %p\n", tmp);
mpc = tmp;
}
@@ -1492,7 +1556,7 @@ static void __exit atm_mpoa_cleanup(void)
qos_head = NULL;
while (qos != NULL) {
nextqos = qos->next;
- dprintk("mpoa: cleanup_module: freeing qos entry %p\n", qos);
+ dprintk("freeing qos entry %p\n", qos);
kfree(qos);
qos = nextqos;
}
diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
index 4504a4b339b..4c141810eb6 100644
--- a/net/atm/mpoa_caches.c
+++ b/net/atm/mpoa_caches.c
@@ -11,15 +11,23 @@
*/
#if 0
-#define dprintk printk /* debug */
+#define dprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
-#define dprintk(format,args...)
+#define dprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
+ } while (0)
#endif
#if 0
-#define ddprintk printk /* more debug */
+#define ddprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
-#define ddprintk(format,args...)
+#define ddprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
+ } while (0)
#endif
static in_cache_entry *in_cache_get(__be32 dst_ip,
@@ -29,8 +37,8 @@ static in_cache_entry *in_cache_get(__be32 dst_ip,
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
- while(entry != NULL){
- if( entry->ctrl_info.in_dst_ip == dst_ip ){
+ while (entry != NULL) {
+ if (entry->ctrl_info.in_dst_ip == dst_ip) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
@@ -50,8 +58,8 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
- while(entry != NULL){
- if((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask )){
+ while (entry != NULL) {
+ if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
@@ -65,14 +73,14 @@ static in_cache_entry *in_cache_get_with_mask(__be32 dst_ip,
}
static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
- struct mpoa_client *client )
+ struct mpoa_client *client)
{
in_cache_entry *entry;
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
- while(entry != NULL){
- if(entry->shortcut == vcc) {
+ while (entry != NULL) {
+ if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
read_unlock_bh(&client->ingress_lock);
return entry;
@@ -90,14 +98,14 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
in_cache_entry *entry = kzalloc(sizeof(in_cache_entry), GFP_KERNEL);
if (entry == NULL) {
- printk("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
+ pr_info("mpoa: mpoa_caches.c: new_in_cache_entry: out of memory\n");
return NULL;
}
- dprintk("mpoa: mpoa_caches.c: adding an ingress entry, ip = %pI4\n", &dst_ip);
+ dprintk("adding an ingress entry, ip = %pI4\n", &dst_ip);
atomic_set(&entry->use, 1);
- dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: about to lock\n");
+ dprintk("new_in_cache_entry: about to lock\n");
write_lock_bh(&client->ingress_lock);
entry->next = client->in_cache;
entry->prev = NULL;
@@ -115,7 +123,7 @@ static in_cache_entry *in_cache_add_entry(__be32 dst_ip,
atomic_inc(&entry->use);
write_unlock_bh(&client->ingress_lock);
- dprintk("mpoa: mpoa_caches.c: new_in_cache_entry: unlocked\n");
+ dprintk("new_in_cache_entry: unlocked\n");
return entry;
}
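
The ingress and egress caches touched in this file use a plain atomic_t use counter as a reference count: in_cache_add_entry() above starts the entry with one reference for the list and takes a second one for the caller, every get increments, and the final put frees the entry. Reduced to a skeleton (the demo_* names are not from mpoa_caches.c):

#include <linux/slab.h>
#include <asm/atomic.h>

struct demo_entry {
        struct demo_entry *next;
        atomic_t use;
};

static struct demo_entry *demo_get(struct demo_entry *e)
{
        atomic_inc(&e->use);            /* caller now holds a reference */
        return e;
}

static void demo_put(struct demo_entry *e)
{
        if (atomic_dec_and_test(&e->use))
                kfree(e);               /* last reference gone */
}
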
@@ -126,39 +134,41 @@ static int cache_hit(in_cache_entry *entry, struct mpoa_client *mpc)
struct k_message msg;
entry->count++;
- if(entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
+ if (entry->entry_state == INGRESS_RESOLVED && entry->shortcut != NULL)
return OPEN;
- if(entry->entry_state == INGRESS_REFRESHING){
- if(entry->count > mpc->parameters.mpc_p1){
+ if (entry->entry_state == INGRESS_REFRESHING) {
+ if (entry->count > mpc->parameters.mpc_p1) {
msg.type = SND_MPOA_RES_RQST;
msg.content.in_info = entry->ctrl_info;
memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
- if (qos != NULL) msg.qos = qos->qos;
+ if (qos != NULL)
+ msg.qos = qos->qos;
msg_to_mpoad(&msg, mpc);
do_gettimeofday(&(entry->reply_wait));
entry->entry_state = INGRESS_RESOLVING;
}
- if(entry->shortcut != NULL)
+ if (entry->shortcut != NULL)
return OPEN;
return CLOSED;
}
- if(entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
+ if (entry->entry_state == INGRESS_RESOLVING && entry->shortcut != NULL)
return OPEN;
- if( entry->count > mpc->parameters.mpc_p1 &&
- entry->entry_state == INGRESS_INVALID){
- dprintk("mpoa: (%s) mpoa_caches.c: threshold exceeded for ip %pI4, sending MPOA res req\n",
+ if (entry->count > mpc->parameters.mpc_p1 &&
+ entry->entry_state == INGRESS_INVALID) {
+ dprintk("(%s) threshold exceeded for ip %pI4, sending MPOA res req\n",
mpc->dev->name, &entry->ctrl_info.in_dst_ip);
entry->entry_state = INGRESS_RESOLVING;
- msg.type = SND_MPOA_RES_RQST;
- memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN );
+ msg.type = SND_MPOA_RES_RQST;
+ memcpy(msg.MPS_ctrl, mpc->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
- if (qos != NULL) msg.qos = qos->qos;
- msg_to_mpoad( &msg, mpc);
+ if (qos != NULL)
+ msg.qos = qos->qos;
+ msg_to_mpoad(&msg, mpc);
do_gettimeofday(&(entry->reply_wait));
}
@@ -185,7 +195,7 @@ static void in_cache_remove_entry(in_cache_entry *entry,
struct k_message msg;
vcc = entry->shortcut;
- dprintk("mpoa: mpoa_caches.c: removing an ingress entry, ip = %pI4\n",
+ dprintk("removing an ingress entry, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
if (entry->prev != NULL)
@@ -195,14 +205,15 @@ static void in_cache_remove_entry(in_cache_entry *entry,
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->in_ops->put(entry);
- if(client->in_cache == NULL && client->eg_cache == NULL){
+ if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
- msg_to_mpoad(&msg,client);
+ msg_to_mpoad(&msg, client);
}
/* Check if the egress side still uses this VCC */
if (vcc != NULL) {
- eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc, client);
+ eg_cache_entry *eg_entry = client->eg_ops->get_by_vcc(vcc,
+ client);
if (eg_entry != NULL) {
client->eg_ops->put(eg_entry);
return;
@@ -213,7 +224,6 @@ static void in_cache_remove_entry(in_cache_entry *entry,
return;
}
-
/* Call this every MPC-p2 seconds... Not exactly correct solution,
but an easy one... */
static void clear_count_and_expired(struct mpoa_client *client)
@@ -225,12 +235,12 @@ static void clear_count_and_expired(struct mpoa_client *client)
write_lock_bh(&client->ingress_lock);
entry = client->in_cache;
- while(entry != NULL){
- entry->count=0;
+ while (entry != NULL) {
+ entry->count = 0;
next_entry = entry->next;
- if((now.tv_sec - entry->tv.tv_sec)
- > entry->ctrl_info.holding_time){
- dprintk("mpoa: mpoa_caches.c: holding time expired, ip = %pI4\n",
+ if ((now.tv_sec - entry->tv.tv_sec)
+ > entry->ctrl_info.holding_time) {
+ dprintk("holding time expired, ip = %pI4\n",
&entry->ctrl_info.in_dst_ip);
client->in_ops->remove_entry(entry, client);
}
@@ -250,33 +260,38 @@ static void check_resolving_entries(struct mpoa_client *client)
struct timeval now;
struct k_message msg;
- do_gettimeofday( &now );
+ do_gettimeofday(&now);
read_lock_bh(&client->ingress_lock);
entry = client->in_cache;
- while( entry != NULL ){
- if(entry->entry_state == INGRESS_RESOLVING){
- if(now.tv_sec - entry->hold_down.tv_sec < client->parameters.mpc_p6){
- entry = entry->next; /* Entry in hold down */
+ while (entry != NULL) {
+ if (entry->entry_state == INGRESS_RESOLVING) {
+ if ((now.tv_sec - entry->hold_down.tv_sec) <
+ client->parameters.mpc_p6) {
+ entry = entry->next; /* Entry in hold down */
continue;
}
- if( (now.tv_sec - entry->reply_wait.tv_sec) >
- entry->retry_time ){
- entry->retry_time = MPC_C1*( entry->retry_time );
- if(entry->retry_time > client->parameters.mpc_p5){
- /* Retry time maximum exceeded, put entry in hold down. */
+ if ((now.tv_sec - entry->reply_wait.tv_sec) >
+ entry->retry_time) {
+ entry->retry_time = MPC_C1 * (entry->retry_time);
+ /*
+ * Retry time maximum exceeded,
+ * put entry in hold down.
+ */
+ if (entry->retry_time > client->parameters.mpc_p5) {
do_gettimeofday(&(entry->hold_down));
entry->retry_time = client->parameters.mpc_p4;
entry = entry->next;
continue;
}
/* Ask daemon to send a resolution request. */
- memset(&(entry->hold_down),0,sizeof(struct timeval));
+ memset(&(entry->hold_down), 0, sizeof(struct timeval));
msg.type = SND_MPOA_RES_RTRY;
memcpy(msg.MPS_ctrl, client->mps_ctrl_addr, ATM_ESA_LEN);
msg.content.in_info = entry->ctrl_info;
qos = atm_mpoa_search_qos(entry->ctrl_info.in_dst_ip);
- if (qos != NULL) msg.qos = qos->qos;
+ if (qos != NULL)
+ msg.qos = qos->qos;
msg_to_mpoad(&msg, client);
do_gettimeofday(&(entry->reply_wait));
}
@@ -292,16 +307,17 @@ static void refresh_entries(struct mpoa_client *client)
struct timeval now;
struct in_cache_entry *entry = client->in_cache;
- ddprintk("mpoa: mpoa_caches.c: refresh_entries\n");
+ ddprintk("refresh_entries\n");
do_gettimeofday(&now);
read_lock_bh(&client->ingress_lock);
- while( entry != NULL ){
- if( entry->entry_state == INGRESS_RESOLVED ){
- if(!(entry->refresh_time))
- entry->refresh_time = (2*(entry->ctrl_info.holding_time))/3;
- if( (now.tv_sec - entry->reply_wait.tv_sec) > entry->refresh_time ){
- dprintk("mpoa: mpoa_caches.c: refreshing an entry.\n");
+ while (entry != NULL) {
+ if (entry->entry_state == INGRESS_RESOLVED) {
+ if (!(entry->refresh_time))
+ entry->refresh_time = (2 * (entry->ctrl_info.holding_time))/3;
+ if ((now.tv_sec - entry->reply_wait.tv_sec) >
+ entry->refresh_time) {
+ dprintk("refreshing an entry.\n");
entry->entry_state = INGRESS_REFRESHING;
}
@@ -314,21 +330,22 @@ static void refresh_entries(struct mpoa_client *client)
static void in_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->ingress_lock);
- while(mpc->in_cache != NULL)
+ while (mpc->in_cache != NULL)
mpc->in_ops->remove_entry(mpc->in_cache, mpc);
write_unlock_irq(&mpc->ingress_lock);
return;
}
-static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_cache_id(__be32 cache_id,
+ struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
- while(entry != NULL){
- if(entry->ctrl_info.cache_id == cache_id){
+ while (entry != NULL) {
+ if (entry->ctrl_info.cache_id == cache_id) {
atomic_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
@@ -348,7 +365,7 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
- while (entry != NULL){
+ while (entry != NULL) {
if (entry->ctrl_info.tag == tag) {
atomic_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -362,14 +379,15 @@ static eg_cache_entry *eg_cache_get_by_tag(__be32 tag, struct mpoa_client *mpc)
}
/* This can be called from any context since it saves CPU flags */
-static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc,
+ struct mpoa_client *mpc)
{
unsigned long flags;
eg_cache_entry *entry;
read_lock_irqsave(&mpc->egress_lock, flags);
entry = mpc->eg_cache;
- while (entry != NULL){
+ while (entry != NULL) {
if (entry->shortcut == vcc) {
atomic_inc(&entry->use);
read_unlock_irqrestore(&mpc->egress_lock, flags);
@@ -382,14 +400,15 @@ static eg_cache_entry *eg_cache_get_by_vcc(struct atm_vcc *vcc, struct mpoa_clie
return NULL;
}
-static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr, struct mpoa_client *mpc)
+static eg_cache_entry *eg_cache_get_by_src_ip(__be32 ipaddr,
+ struct mpoa_client *mpc)
{
eg_cache_entry *entry;
read_lock_irq(&mpc->egress_lock);
entry = mpc->eg_cache;
- while(entry != NULL){
- if(entry->latest_ip_addr == ipaddr) {
+ while (entry != NULL) {
+ if (entry->latest_ip_addr == ipaddr) {
atomic_inc(&entry->use);
read_unlock_irq(&mpc->egress_lock);
return entry;
@@ -421,7 +440,7 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
struct k_message msg;
vcc = entry->shortcut;
- dprintk("mpoa: mpoa_caches.c: removing an egress entry.\n");
+ dprintk("removing an egress entry.\n");
if (entry->prev != NULL)
entry->prev->next = entry->next;
else
@@ -429,9 +448,9 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
if (entry->next != NULL)
entry->next->prev = entry->prev;
client->eg_ops->put(entry);
- if(client->in_cache == NULL && client->eg_cache == NULL){
+ if (client->in_cache == NULL && client->eg_cache == NULL) {
msg.type = STOP_KEEP_ALIVE_SM;
- msg_to_mpoad(&msg,client);
+ msg_to_mpoad(&msg, client);
}
/* Check if the ingress side still uses this VCC */
@@ -447,20 +466,21 @@ static void eg_cache_remove_entry(eg_cache_entry *entry,
return;
}
-static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_client *client)
+static eg_cache_entry *eg_cache_add_entry(struct k_message *msg,
+ struct mpoa_client *client)
{
eg_cache_entry *entry = kzalloc(sizeof(eg_cache_entry), GFP_KERNEL);
if (entry == NULL) {
- printk("mpoa: mpoa_caches.c: new_eg_cache_entry: out of memory\n");
+ pr_info("out of memory\n");
return NULL;
}
- dprintk("mpoa: mpoa_caches.c: adding an egress entry, ip = %pI4, this should be our IP\n",
+ dprintk("adding an egress entry, ip = %pI4, this should be our IP\n",
&msg->content.eg_info.eg_dst_ip);
atomic_set(&entry->use, 1);
- dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: about to lock\n");
+ dprintk("new_eg_cache_entry: about to lock\n");
write_lock_irq(&client->egress_lock);
entry->next = client->eg_cache;
entry->prev = NULL;
@@ -472,18 +492,18 @@ static eg_cache_entry *eg_cache_add_entry(struct k_message *msg, struct mpoa_cli
entry->ctrl_info = msg->content.eg_info;
do_gettimeofday(&(entry->tv));
entry->entry_state = EGRESS_RESOLVED;
- dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry cache_id %lu\n", ntohl(entry->ctrl_info.cache_id));
- dprintk("mpoa: mpoa_caches.c: mps_ip = %pI4\n",
- &entry->ctrl_info.mps_ip);
+ dprintk("new_eg_cache_entry cache_id %u\n",
+ ntohl(entry->ctrl_info.cache_id));
+ dprintk("mps_ip = %pI4\n", &entry->ctrl_info.mps_ip);
atomic_inc(&entry->use);
write_unlock_irq(&client->egress_lock);
- dprintk("mpoa: mpoa_caches.c: new_eg_cache_entry: unlocked\n");
+ dprintk("new_eg_cache_entry: unlocked\n");
return entry;
}
-static void update_eg_cache_entry(eg_cache_entry * entry, uint16_t holding_time)
+static void update_eg_cache_entry(eg_cache_entry *entry, uint16_t holding_time)
{
do_gettimeofday(&(entry->tv));
entry->entry_state = EGRESS_RESOLVED;
@@ -502,13 +522,14 @@ static void clear_expired(struct mpoa_client *client)
write_lock_irq(&client->egress_lock);
entry = client->eg_cache;
- while(entry != NULL){
+ while (entry != NULL) {
next_entry = entry->next;
- if((now.tv_sec - entry->tv.tv_sec)
- > entry->ctrl_info.holding_time){
+ if ((now.tv_sec - entry->tv.tv_sec)
+ > entry->ctrl_info.holding_time) {
msg.type = SND_EGRESS_PURGE;
msg.content.eg_info = entry->ctrl_info;
- dprintk("mpoa: mpoa_caches.c: egress_cache: holding time expired, cache_id = %lu.\n",ntohl(entry->ctrl_info.cache_id));
+ dprintk("egress_cache: holding time expired, cache_id = %u.\n",
+ ntohl(entry->ctrl_info.cache_id));
msg_to_mpoad(&msg, client);
client->eg_ops->remove_entry(entry, client);
}
@@ -522,7 +543,7 @@ static void clear_expired(struct mpoa_client *client)
static void eg_destroy_cache(struct mpoa_client *mpc)
{
write_lock_irq(&mpc->egress_lock);
- while(mpc->eg_cache != NULL)
+ while (mpc->eg_cache != NULL)
mpc->eg_ops->remove_entry(mpc->eg_cache, mpc);
write_unlock_irq(&mpc->egress_lock);
@@ -530,7 +551,6 @@ static void eg_destroy_cache(struct mpoa_client *mpc)
}
-
static struct in_cache_ops ingress_ops = {
in_cache_add_entry, /* add_entry */
in_cache_get, /* get */
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 1a0f5ccea9c..b9bdb98427e 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#ifdef CONFIG_PROC_FS
#include <linux/errno.h>
@@ -8,7 +9,7 @@
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/seq_file.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/atmmpc.h>
#include <linux/atm.h>
#include "mpc.h"
@@ -20,9 +21,23 @@
*/
#if 1
-#define dprintk printk /* debug */
+#define dprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
#else
-#define dprintk(format,args...)
+#define dprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
+ } while (0)
+#endif
+
+#if 0
+#define ddprintk(format, args...) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args) /* debug */
+#else
+#define ddprintk(format, args...) \
+ do { if (0) \
+ printk(KERN_DEBUG "mpoa:%s: " format, __FILE__, ##args);\
+ } while (0)
#endif
#define STAT_FILE_NAME "mpc" /* Our statistic file's name */
@@ -51,42 +66,37 @@ static const struct file_operations mpc_file_operations = {
/*
* Returns the state of an ingress cache entry as a string
*/
-static const char *ingress_state_string(int state){
- switch(state) {
+static const char *ingress_state_string(int state)
+{
+ switch (state) {
case INGRESS_RESOLVING:
return "resolving ";
- break;
case INGRESS_RESOLVED:
return "resolved ";
- break;
case INGRESS_INVALID:
return "invalid ";
- break;
case INGRESS_REFRESHING:
return "refreshing ";
- break;
- default:
- return "";
}
+
+ return "";
}
/*
* Returns the state of an egress cache entry as a string
*/
-static const char *egress_state_string(int state){
- switch(state) {
+static const char *egress_state_string(int state)
+{
+ switch (state) {
case EGRESS_RESOLVED:
return "resolved ";
- break;
case EGRESS_PURGE:
return "purge ";
- break;
case EGRESS_INVALID:
return "invalid ";
- break;
- default:
- return "";
}
+
+ return "";
}
/*
@@ -123,7 +133,6 @@ static void mpc_stop(struct seq_file *m, void *v)
static int mpc_show(struct seq_file *m, void *v)
{
struct mpoa_client *mpc = v;
- unsigned char *temp;
int i;
in_cache_entry *in_entry;
eg_cache_entry *eg_entry;
@@ -140,15 +149,17 @@ static int mpc_show(struct seq_file *m, void *v)
do_gettimeofday(&now);
for (in_entry = mpc->in_cache; in_entry; in_entry = in_entry->next) {
- temp = (unsigned char *)&in_entry->ctrl_info.in_dst_ip;
- sprintf(ip_string,"%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ sprintf(ip_string, "%pI4", &in_entry->ctrl_info.in_dst_ip);
seq_printf(m, "%-16s%s%-14lu%-12u",
- ip_string,
- ingress_state_string(in_entry->entry_state),
- in_entry->ctrl_info.holding_time-(now.tv_sec-in_entry->tv.tv_sec),
- in_entry->packets_fwded);
+ ip_string,
+ ingress_state_string(in_entry->entry_state),
+ in_entry->ctrl_info.holding_time -
+ (now.tv_sec-in_entry->tv.tv_sec),
+ in_entry->packets_fwded);
if (in_entry->shortcut)
- seq_printf(m, " %-3d %-3d",in_entry->shortcut->vpi,in_entry->shortcut->vci);
+ seq_printf(m, " %-3d %-3d",
+ in_entry->shortcut->vpi,
+ in_entry->shortcut->vci);
seq_printf(m, "\n");
}
@@ -156,21 +167,23 @@ static int mpc_show(struct seq_file *m, void *v)
seq_printf(m, "Egress Entries:\nIngress MPC ATM addr\nCache-id State Holding time Packets recvd Latest IP addr VPI VCI\n");
for (eg_entry = mpc->eg_cache; eg_entry; eg_entry = eg_entry->next) {
unsigned char *p = eg_entry->ctrl_info.in_MPC_data_ATM_addr;
- for(i = 0; i < ATM_ESA_LEN; i++)
+ for (i = 0; i < ATM_ESA_LEN; i++)
seq_printf(m, "%02x", p[i]);
seq_printf(m, "\n%-16lu%s%-14lu%-15u",
(unsigned long)ntohl(eg_entry->ctrl_info.cache_id),
egress_state_string(eg_entry->entry_state),
- (eg_entry->ctrl_info.holding_time-(now.tv_sec-eg_entry->tv.tv_sec)),
+ (eg_entry->ctrl_info.holding_time -
+ (now.tv_sec-eg_entry->tv.tv_sec)),
eg_entry->packets_rcvd);
/* latest IP address */
- temp = (unsigned char *)&eg_entry->latest_ip_addr;
- sprintf(ip_string, "%d.%d.%d.%d", temp[0], temp[1], temp[2], temp[3]);
+ sprintf(ip_string, "%pI4", &eg_entry->latest_ip_addr);
seq_printf(m, "%-16s", ip_string);
if (eg_entry->shortcut)
- seq_printf(m, " %-3d %-3d",eg_entry->shortcut->vpi,eg_entry->shortcut->vci);
+ seq_printf(m, " %-3d %-3d",
+ eg_entry->shortcut->vpi,
+ eg_entry->shortcut->vci);
seq_printf(m, "\n");
}
seq_printf(m, "\n");
@@ -258,12 +271,9 @@ static int parse_qos(const char *buff)
qos.rxtp.max_pcr = rx_pcr;
qos.rxtp.max_sdu = rx_sdu;
qos.aal = ATM_AAL5;
- dprintk("mpoa: mpoa_proc.c: parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
- qos.txtp.max_pcr,
- qos.txtp.max_sdu,
- qos.rxtp.max_pcr,
- qos.rxtp.max_sdu
- );
+ dprintk("parse_qos(): setting qos paramameters to tx=%d,%d rx=%d,%d\n",
+ qos.txtp.max_pcr, qos.txtp.max_sdu,
+ qos.rxtp.max_pcr, qos.rxtp.max_sdu);
atm_mpoa_add_qos(ipaddr, &qos);
return 1;
@@ -278,7 +288,7 @@ int mpc_proc_init(void)
p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations);
if (!p) {
- printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
+ pr_err("Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME);
return -ENOMEM;
}
return 0;
@@ -289,10 +299,9 @@ int mpc_proc_init(void)
*/
void mpc_proc_clean(void)
{
- remove_proc_entry(STAT_FILE_NAME,atm_proc_root);
+ remove_proc_entry(STAT_FILE_NAME, atm_proc_root);
}
-
#endif /* CONFIG_PROC_FS */
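
The dprintk/ddprintk macros added to mpoa_proc.c above use the do { if (0) ... } while (0) form so that a disabled debug statement still has its format string and arguments type-checked by the compiler while generating no code. A generic sketch of the same pattern follows; MYDRV_DEBUG and my_dbg() are illustrative names, not from the patch, and __func__ stands in for the __FILE__ prefix used above.

#include <linux/kernel.h>

#ifdef MYDRV_DEBUG
#define my_dbg(format, args...) \
	printk(KERN_DEBUG "mydrv:%s: " format, __func__, ##args)
#else
#define my_dbg(format, args...) \
	do { \
		if (0) \
			printk(KERN_DEBUG "mydrv:%s: " format, \
			       __func__, ##args); \
	} while (0)
#endif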
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 0af84cd4f65..400839273c6 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -33,6 +33,8 @@
* These hooks are not yet available in ppp_generic
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
@@ -132,7 +134,7 @@ static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
- pr_debug("pppoatm push\n");
+ pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
pr_debug("removing ATMPPP VCC %p\n", pvcc);
pppoatm_unassign_vcc(atmvcc);
@@ -165,17 +167,17 @@ static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
pvcc->chan.mtu += LLC_LEN;
break;
}
- pr_debug("Couldn't autodetect yet "
- "(skb: %02X %02X %02X %02X %02X %02X)\n",
- skb->data[0], skb->data[1], skb->data[2],
- skb->data[3], skb->data[4], skb->data[5]);
+ pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
+ skb->data[0], skb->data[1], skb->data[2],
+ skb->data[3], skb->data[4], skb->data[5]);
goto error;
case e_vc:
break;
}
ppp_input(&pvcc->chan, skb);
return;
- error:
+
+error:
kfree_skb(skb);
ppp_input_error(&pvcc->chan, 0);
}
@@ -194,7 +196,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
ATM_SKB(skb)->vcc = pvcc->atmvcc;
- pr_debug("pppoatm_send (skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
+ pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
(void) skb_pull(skb, 1);
switch (pvcc->encaps) { /* LLC encapsulation needed */
@@ -208,7 +210,8 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
goto nospace;
}
kfree_skb(skb);
- if ((skb = n) == NULL)
+ skb = n;
+ if (skb == NULL)
return DROP_PACKET;
} else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
goto nospace;
@@ -226,11 +229,11 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
- pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, ATM_SKB(skb)->vcc,
- ATM_SKB(skb)->vcc->dev);
+ pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
+ skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
? DROP_PACKET : 1;
- nospace:
+nospace:
/*
* We don't have space to send this SKB now, but we might have
* already applied SC_COMP_PROT compression, so may need to undo
@@ -289,7 +292,8 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
(be.encaps == e_vc ? 0 : LLC_LEN);
pvcc->wakeup_tasklet = tasklet_proto;
pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
- if ((err = ppp_register_channel(&pvcc->chan)) != 0) {
+ err = ppp_register_channel(&pvcc->chan);
+ if (err != 0) {
kfree(pvcc);
return err;
}
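
pppoatm.c, like several other files in this series, gains a pr_fmt() definition before its first include, so every pr_debug()/pr_err() call is automatically prefixed with the module and function name; that is why call sites such as pppoatm_push() can shrink to pr_debug("\n"). A hedged sketch of the mechanism, with a hypothetical function name:

/* Sketch only: pr_fmt() must be defined before <linux/kernel.h> is pulled in. */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>

static void example_send(void)
{
	/* with debugging enabled this prints "<module>:example_send: queued 42 bytes" */
	pr_debug("queued %d bytes\n", 42);
}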
diff --git a/net/atm/proc.c b/net/atm/proc.c
index ab8419a324b..7a96b2376bd 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -24,15 +24,15 @@
#include <linux/init.h> /* for __init */
#include <net/net_namespace.h>
#include <net/atmclip.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
+#include <linux/param.h> /* for HZ */
#include <asm/atomic.h>
-#include <asm/param.h> /* for HZ */
#include "resources.h"
#include "common.h" /* atm_proc_init prototype */
#include "signaling.h" /* to get sigd - ugly too */
-static ssize_t proc_dev_atm_read(struct file *file,char __user *buf,size_t count,
- loff_t *pos);
+static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos);
static const struct file_operations proc_atm_dev_ops = {
.owner = THIS_MODULE,
@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
const struct k_atm_aal_stats *stats)
{
seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
- atomic_read(&stats->rx_drop));
+ atomic_read(&stats->tx), atomic_read(&stats->tx_err),
+ atomic_read(&stats->rx), atomic_read(&stats->rx_err),
+ atomic_read(&stats->rx_drop));
}
static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
@@ -151,8 +151,8 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
- static const char *const class_name[] =
- {"off","UBR","CBR","VBR","ABR"};
+ static const char *const class_name[] = {
+ "off", "UBR", "CBR", "VBR", "ABR"};
static const char *const aal_name[] = {
"---", "1", "2", "3/4", /* 0- 3 */
"???", "5", "???", "???", /* 4- 7 */
@@ -160,11 +160,12 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
"???", "0", "???", "???"}; /* 12-15 */
seq_printf(seq, "%3d %3d %5d %-3s %7d %-5s %7d %-6s",
- vcc->dev->number,vcc->vpi,vcc->vci,
- vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
- aal_name[vcc->qos.aal],vcc->qos.rxtp.min_pcr,
- class_name[vcc->qos.rxtp.traffic_class],vcc->qos.txtp.min_pcr,
- class_name[vcc->qos.txtp.traffic_class]);
+ vcc->dev->number, vcc->vpi, vcc->vci,
+ vcc->qos.aal >= ARRAY_SIZE(aal_name) ? "err" :
+ aal_name[vcc->qos.aal], vcc->qos.rxtp.min_pcr,
+ class_name[vcc->qos.rxtp.traffic_class],
+ vcc->qos.txtp.min_pcr,
+ class_name[vcc->qos.txtp.traffic_class]);
if (test_bit(ATM_VF_IS_CLIP, &vcc->flags)) {
struct clip_vcc *clip_vcc = CLIP_VCC(vcc);
struct net_device *dev;
@@ -195,19 +196,20 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
seq_printf(seq, "%3d %3d %5d ", vcc->dev->number, vcc->vpi,
vcc->vci);
switch (sk->sk_family) {
- case AF_ATMPVC:
- seq_printf(seq, "PVC");
- break;
- case AF_ATMSVC:
- seq_printf(seq, "SVC");
- break;
- default:
- seq_printf(seq, "%3d", sk->sk_family);
+ case AF_ATMPVC:
+ seq_printf(seq, "PVC");
+ break;
+ case AF_ATMSVC:
+ seq_printf(seq, "SVC");
+ break;
+ default:
+ seq_printf(seq, "%3d", sk->sk_family);
}
- seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n", vcc->flags, sk->sk_err,
- sk_wmem_alloc_get(sk), sk->sk_sndbuf,
- sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
- atomic_read(&sk->sk_refcnt));
+ seq_printf(seq, " %04lx %5d %7d/%7d %7d/%7d [%d]\n",
+ vcc->flags, sk->sk_err,
+ sk_wmem_alloc_get(sk), sk->sk_sndbuf,
+ sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
+ atomic_read(&sk->sk_refcnt));
}
static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
@@ -236,7 +238,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
"Itf Type ESI/\"MAC\"addr "
"AAL(TX,err,RX,err,drop) ... [refcnt]\n";
- if (v == SEQ_START_TOKEN)
+ if (v == &atm_devs)
seq_puts(seq, atm_dev_banner);
else {
struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
@@ -376,32 +378,35 @@ static ssize_t proc_dev_atm_read(struct file *file, char __user *buf,
unsigned long page;
int length;
- if (count == 0) return 0;
+ if (count == 0)
+ return 0;
page = get_zeroed_page(GFP_KERNEL);
- if (!page) return -ENOMEM;
+ if (!page)
+ return -ENOMEM;
dev = PDE(file->f_path.dentry->d_inode)->data;
if (!dev->ops->proc_read)
length = -EINVAL;
else {
- length = dev->ops->proc_read(dev,pos,(char *) page);
- if (length > count) length = -EINVAL;
+ length = dev->ops->proc_read(dev, pos, (char *)page);
+ if (length > count)
+ length = -EINVAL;
}
if (length >= 0) {
- if (copy_to_user(buf,(char *) page,length)) length = -EFAULT;
+ if (copy_to_user(buf, (char *)page, length))
+ length = -EFAULT;
(*pos)++;
}
free_page(page);
return length;
}
-
struct proc_dir_entry *atm_proc_root;
EXPORT_SYMBOL(atm_proc_root);
int atm_proc_dev_register(struct atm_dev *dev)
{
- int digits,num;
+ int digits, num;
int error;
/* No proc info */
@@ -410,26 +415,28 @@ int atm_proc_dev_register(struct atm_dev *dev)
error = -ENOMEM;
digits = 0;
- for (num = dev->number; num; num /= 10) digits++;
- if (!digits) digits++;
+ for (num = dev->number; num; num /= 10)
+ digits++;
+ if (!digits)
+ digits++;
dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
if (!dev->proc_name)
goto err_out;
- sprintf(dev->proc_name,"%s:%d",dev->type, dev->number);
+ sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
&proc_atm_dev_ops, dev);
if (!dev->proc_entry)
goto err_free_name;
return 0;
+
err_free_name:
kfree(dev->proc_name);
err_out:
return error;
}
-
void atm_proc_dev_deregister(struct atm_dev *dev)
{
if (!dev->ops->proc_read)
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 8d74e62b0d7..437ee70c5e6 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -17,32 +17,35 @@
#include "common.h" /* common for PVCs and SVCs */
-static int pvc_shutdown(struct socket *sock,int how)
+static int pvc_shutdown(struct socket *sock, int how)
{
return 0;
}
-
-static int pvc_bind(struct socket *sock,struct sockaddr *sockaddr,
- int sockaddr_len)
+static int pvc_bind(struct socket *sock, struct sockaddr *sockaddr,
+ int sockaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc;
int error;
- if (sockaddr_len != sizeof(struct sockaddr_atmpvc)) return -EINVAL;
- addr = (struct sockaddr_atmpvc *) sockaddr;
- if (addr->sap_family != AF_ATMPVC) return -EAFNOSUPPORT;
+ if (sockaddr_len != sizeof(struct sockaddr_atmpvc))
+ return -EINVAL;
+ addr = (struct sockaddr_atmpvc *)sockaddr;
+ if (addr->sap_family != AF_ATMPVC)
+ return -EAFNOSUPPORT;
lock_sock(sk);
vcc = ATM_SD(sock);
if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
- if (test_bit(ATM_VF_PARTIAL,&vcc->flags)) {
- if (vcc->vpi != ATM_VPI_UNSPEC) addr->sap_addr.vpi = vcc->vpi;
- if (vcc->vci != ATM_VCI_UNSPEC) addr->sap_addr.vci = vcc->vci;
+ if (test_bit(ATM_VF_PARTIAL, &vcc->flags)) {
+ if (vcc->vpi != ATM_VPI_UNSPEC)
+ addr->sap_addr.vpi = vcc->vpi;
+ if (vcc->vci != ATM_VCI_UNSPEC)
+ addr->sap_addr.vci = vcc->vci;
}
error = vcc_connect(sock, addr->sap_addr.itf, addr->sap_addr.vpi,
addr->sap_addr.vci);
@@ -51,11 +54,10 @@ out:
return error;
}
-
-static int pvc_connect(struct socket *sock,struct sockaddr *sockaddr,
- int sockaddr_len,int flags)
+static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
+ int sockaddr_len, int flags)
{
- return pvc_bind(sock,sockaddr,sockaddr_len);
+ return pvc_bind(sock, sockaddr, sockaddr_len);
}
static int pvc_setsockopt(struct socket *sock, int level, int optname,
@@ -70,7 +72,6 @@ static int pvc_setsockopt(struct socket *sock, int level, int optname,
return error;
}
-
static int pvc_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -83,16 +84,16 @@ static int pvc_getsockopt(struct socket *sock, int level, int optname,
return error;
}
-
-static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
- int *sockaddr_len,int peer)
+static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr,
+ int *sockaddr_len, int peer)
{
struct sockaddr_atmpvc *addr;
struct atm_vcc *vcc = ATM_SD(sock);
- if (!vcc->dev || !test_bit(ATM_VF_ADDR,&vcc->flags)) return -ENOTCONN;
+ if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags))
+ return -ENOTCONN;
*sockaddr_len = sizeof(struct sockaddr_atmpvc);
- addr = (struct sockaddr_atmpvc *) sockaddr;
+ addr = (struct sockaddr_atmpvc *)sockaddr;
addr->sap_family = AF_ATMPVC;
addr->sap_addr.itf = vcc->dev->number;
addr->sap_addr.vpi = vcc->vpi;
@@ -100,7 +101,6 @@ static int pvc_getname(struct socket *sock,struct sockaddr *sockaddr,
return 0;
}
-
static const struct proto_ops pvc_proto_ops = {
.family = PF_ATMPVC,
.owner = THIS_MODULE,
@@ -137,7 +137,6 @@ static int pvc_create(struct net *net, struct socket *sock, int protocol,
return vcc_create(net, sock, protocol, PF_ATMPVC);
}
-
static const struct net_proto_family pvc_family_ops = {
.family = PF_ATMPVC,
.create = pvc_create,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index cbfcc71a17b..d0c4bd047dc 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -2,6 +2,7 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/atmdev.h>
@@ -17,7 +18,7 @@
* SKB == NULL indicates that the link is being closed
*/
-static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
+static void atm_push_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
if (skb) {
struct sock *sk = sk_atm(vcc);
@@ -27,36 +28,33 @@ static void atm_push_raw(struct atm_vcc *vcc,struct sk_buff *skb)
}
}
-
-static void atm_pop_raw(struct atm_vcc *vcc,struct sk_buff *skb)
+static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct sock *sk = sk_atm(vcc);
- pr_debug("APopR (%d) %d -= %d\n", vcc->vci,
- sk_wmem_alloc_get(sk), skb->truesize);
+ pr_debug("(%d) %d -= %d\n",
+ vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
-
-static int atm_send_aal0(struct atm_vcc *vcc,struct sk_buff *skb)
+static int atm_send_aal0(struct atm_vcc *vcc, struct sk_buff *skb)
{
/*
* Note that if vpi/vci are _ANY or _UNSPEC the below will
* still work
*/
if (!capable(CAP_NET_ADMIN) &&
- (((u32 *) skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
- ((vcc->vpi << ATM_HDR_VPI_SHIFT) | (vcc->vci << ATM_HDR_VCI_SHIFT)))
- {
+ (((u32 *)skb->data)[0] & (ATM_HDR_VPI_MASK | ATM_HDR_VCI_MASK)) !=
+ ((vcc->vpi << ATM_HDR_VPI_SHIFT) |
+ (vcc->vci << ATM_HDR_VCI_SHIFT))) {
kfree_skb(skb);
return -EADDRNOTAVAIL;
}
- return vcc->dev->ops->send(vcc,skb);
+ return vcc->dev->ops->send(vcc, skb);
}
-
int atm_init_aal0(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
@@ -66,7 +64,6 @@ int atm_init_aal0(struct atm_vcc *vcc)
return 0;
}
-
int atm_init_aal34(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
@@ -76,7 +73,6 @@ int atm_init_aal34(struct atm_vcc *vcc)
return 0;
}
-
int atm_init_aal5(struct atm_vcc *vcc)
{
vcc->push = atm_push_raw;
@@ -85,6 +81,4 @@ int atm_init_aal5(struct atm_vcc *vcc)
vcc->send = vcc->dev->ops->send;
return 0;
}
-
-
EXPORT_SYMBOL(atm_init_aal5);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 56b7322ff46..90082904f20 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -7,6 +7,7 @@
* 2002/01 - don't free the whole struct sock on sk->destruct time,
* use the default destruct function initialized by sock_init_data */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/ctype.h>
#include <linux/string.h>
@@ -70,7 +71,7 @@ struct atm_dev *atm_dev_lookup(int number)
mutex_unlock(&atm_dev_mutex);
return dev;
}
-
+EXPORT_SYMBOL(atm_dev_lookup);
struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
int number, unsigned long *flags)
@@ -79,13 +80,13 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
dev = __alloc_atm_dev(type);
if (!dev) {
- printk(KERN_ERR "atm_dev_register: no space for dev %s\n",
- type);
+ pr_err("no space for dev %s\n", type);
return NULL;
}
mutex_lock(&atm_dev_mutex);
if (number != -1) {
- if ((inuse = __atm_dev_lookup(number))) {
+ inuse = __atm_dev_lookup(number);
+ if (inuse) {
atm_dev_put(inuse);
mutex_unlock(&atm_dev_mutex);
kfree(dev);
@@ -109,16 +110,12 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
atomic_set(&dev->refcnt, 1);
if (atm_proc_dev_register(dev) < 0) {
- printk(KERN_ERR "atm_dev_register: "
- "atm_proc_dev_register failed for dev %s\n",
- type);
+ pr_err("atm_proc_dev_register failed for dev %s\n", type);
goto out_fail;
}
if (atm_register_sysfs(dev) < 0) {
- printk(KERN_ERR "atm_dev_register: "
- "atm_register_sysfs failed for dev %s\n",
- type);
+ pr_err("atm_register_sysfs failed for dev %s\n", type);
atm_proc_dev_deregister(dev);
goto out_fail;
}
@@ -134,7 +131,7 @@ out_fail:
dev = NULL;
goto out;
}
-
+EXPORT_SYMBOL(atm_dev_register);
void atm_dev_deregister(struct atm_dev *dev)
{
@@ -156,7 +153,7 @@ void atm_dev_deregister(struct atm_dev *dev)
atm_dev_put(dev);
}
-
+EXPORT_SYMBOL(atm_dev_deregister);
static void copy_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
@@ -166,7 +163,6 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
#undef __HANDLE_ITEM
}
-
static void subtract_aal_stats(struct k_atm_aal_stats *from,
struct atm_aal_stats *to)
{
@@ -175,8 +171,8 @@ static void subtract_aal_stats(struct k_atm_aal_stats *from,
#undef __HANDLE_ITEM
}
-
-static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, int zero)
+static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg,
+ int zero)
{
struct atm_dev_stats tmp;
int error = 0;
@@ -194,7 +190,6 @@ static int fetch_stats(struct atm_dev *dev, struct atm_dev_stats __user *arg, in
return error ? -EFAULT : 0;
}
-
int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
{
void __user *buf;
@@ -210,50 +205,49 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
#endif
switch (cmd) {
- case ATM_GETNAMES:
-
- if (compat) {
+ case ATM_GETNAMES:
+ if (compat) {
#ifdef CONFIG_COMPAT
- struct compat_atm_iobuf __user *ciobuf = arg;
- compat_uptr_t cbuf;
- iobuf_len = &ciobuf->length;
- if (get_user(cbuf, &ciobuf->buffer))
- return -EFAULT;
- buf = compat_ptr(cbuf);
+ struct compat_atm_iobuf __user *ciobuf = arg;
+ compat_uptr_t cbuf;
+ iobuf_len = &ciobuf->length;
+ if (get_user(cbuf, &ciobuf->buffer))
+ return -EFAULT;
+ buf = compat_ptr(cbuf);
#endif
- } else {
- struct atm_iobuf __user *iobuf = arg;
- iobuf_len = &iobuf->length;
- if (get_user(buf, &iobuf->buffer))
- return -EFAULT;
- }
- if (get_user(len, iobuf_len))
+ } else {
+ struct atm_iobuf __user *iobuf = arg;
+ iobuf_len = &iobuf->length;
+ if (get_user(buf, &iobuf->buffer))
return -EFAULT;
- mutex_lock(&atm_dev_mutex);
- list_for_each(p, &atm_devs)
- size += sizeof(int);
- if (size > len) {
- mutex_unlock(&atm_dev_mutex);
- return -E2BIG;
- }
- tmp_buf = kmalloc(size, GFP_ATOMIC);
- if (!tmp_buf) {
- mutex_unlock(&atm_dev_mutex);
- return -ENOMEM;
- }
- tmp_p = tmp_buf;
- list_for_each(p, &atm_devs) {
- dev = list_entry(p, struct atm_dev, dev_list);
- *tmp_p++ = dev->number;
- }
+ }
+ if (get_user(len, iobuf_len))
+ return -EFAULT;
+ mutex_lock(&atm_dev_mutex);
+ list_for_each(p, &atm_devs)
+ size += sizeof(int);
+ if (size > len) {
+ mutex_unlock(&atm_dev_mutex);
+ return -E2BIG;
+ }
+ tmp_buf = kmalloc(size, GFP_ATOMIC);
+ if (!tmp_buf) {
mutex_unlock(&atm_dev_mutex);
- error = ((copy_to_user(buf, tmp_buf, size)) ||
- put_user(size, iobuf_len))
- ? -EFAULT : 0;
- kfree(tmp_buf);
- return error;
- default:
- break;
+ return -ENOMEM;
+ }
+ tmp_p = tmp_buf;
+ list_for_each(p, &atm_devs) {
+ dev = list_entry(p, struct atm_dev, dev_list);
+ *tmp_p++ = dev->number;
+ }
+ mutex_unlock(&atm_dev_mutex);
+ error = ((copy_to_user(buf, tmp_buf, size)) ||
+ put_user(size, iobuf_len))
+ ? -EFAULT : 0;
+ kfree(tmp_buf);
+ return error;
+ default:
+ break;
}
if (compat) {
@@ -282,166 +276,167 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg, int compat)
if (get_user(number, &sioc->number))
return -EFAULT;
}
- if (!(dev = try_then_request_module(atm_dev_lookup(number),
- "atm-device-%d", number)))
+
+ dev = try_then_request_module(atm_dev_lookup(number), "atm-device-%d",
+ number);
+ if (!dev)
return -ENODEV;
switch (cmd) {
- case ATM_GETTYPE:
- size = strlen(dev->type) + 1;
- if (copy_to_user(buf, dev->type, size)) {
- error = -EFAULT;
- goto done;
- }
- break;
- case ATM_GETESI:
- size = ESI_LEN;
- if (copy_to_user(buf, dev->esi, size)) {
- error = -EFAULT;
- goto done;
- }
- break;
- case ATM_SETESI:
- {
- int i;
-
- for (i = 0; i < ESI_LEN; i++)
- if (dev->esi[i]) {
- error = -EEXIST;
- goto done;
- }
- }
- /* fall through */
- case ATM_SETESIF:
- {
- unsigned char esi[ESI_LEN];
-
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- if (copy_from_user(esi, buf, ESI_LEN)) {
- error = -EFAULT;
- goto done;
- }
- memcpy(dev->esi, esi, ESI_LEN);
- error = ESI_LEN;
- goto done;
- }
- case ATM_GETSTATZ:
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- /* fall through */
- case ATM_GETSTAT:
- size = sizeof(struct atm_dev_stats);
- error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
- if (error)
- goto done;
- break;
- case ATM_GETCIRANGE:
- size = sizeof(struct atm_cirange);
- if (copy_to_user(buf, &dev->ci_range, size)) {
- error = -EFAULT;
- goto done;
- }
- break;
- case ATM_GETLINKRATE:
- size = sizeof(int);
- if (copy_to_user(buf, &dev->link_rate, size)) {
- error = -EFAULT;
- goto done;
- }
- break;
- case ATM_RSTADDR:
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- atm_reset_addr(dev, ATM_ADDR_LOCAL);
- break;
- case ATM_ADDADDR:
- case ATM_DELADDR:
- case ATM_ADDLECSADDR:
- case ATM_DELLECSADDR:
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- {
- struct sockaddr_atmsvc addr;
-
- if (copy_from_user(&addr, buf, sizeof(addr))) {
- error = -EFAULT;
- goto done;
- }
- if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
- error = atm_add_addr(dev, &addr,
- (cmd == ATM_ADDADDR ?
- ATM_ADDR_LOCAL : ATM_ADDR_LECS));
- else
- error = atm_del_addr(dev, &addr,
- (cmd == ATM_DELADDR ?
- ATM_ADDR_LOCAL : ATM_ADDR_LECS));
+ case ATM_GETTYPE:
+ size = strlen(dev->type) + 1;
+ if (copy_to_user(buf, dev->type, size)) {
+ error = -EFAULT;
+ goto done;
+ }
+ break;
+ case ATM_GETESI:
+ size = ESI_LEN;
+ if (copy_to_user(buf, dev->esi, size)) {
+ error = -EFAULT;
+ goto done;
+ }
+ break;
+ case ATM_SETESI:
+ {
+ int i;
+
+ for (i = 0; i < ESI_LEN; i++)
+ if (dev->esi[i]) {
+ error = -EEXIST;
goto done;
}
- case ATM_GETADDR:
- case ATM_GETLECSADDR:
- error = atm_get_addr(dev, buf, len,
- (cmd == ATM_GETADDR ?
+ }
+ /* fall through */
+ case ATM_SETESIF:
+ {
+ unsigned char esi[ESI_LEN];
+
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
+ goto done;
+ }
+ if (copy_from_user(esi, buf, ESI_LEN)) {
+ error = -EFAULT;
+ goto done;
+ }
+ memcpy(dev->esi, esi, ESI_LEN);
+ error = ESI_LEN;
+ goto done;
+ }
+ case ATM_GETSTATZ:
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
+ goto done;
+ }
+ /* fall through */
+ case ATM_GETSTAT:
+ size = sizeof(struct atm_dev_stats);
+ error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
+ if (error)
+ goto done;
+ break;
+ case ATM_GETCIRANGE:
+ size = sizeof(struct atm_cirange);
+ if (copy_to_user(buf, &dev->ci_range, size)) {
+ error = -EFAULT;
+ goto done;
+ }
+ break;
+ case ATM_GETLINKRATE:
+ size = sizeof(int);
+ if (copy_to_user(buf, &dev->link_rate, size)) {
+ error = -EFAULT;
+ goto done;
+ }
+ break;
+ case ATM_RSTADDR:
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
+ goto done;
+ }
+ atm_reset_addr(dev, ATM_ADDR_LOCAL);
+ break;
+ case ATM_ADDADDR:
+ case ATM_DELADDR:
+ case ATM_ADDLECSADDR:
+ case ATM_DELLECSADDR:
+ {
+ struct sockaddr_atmsvc addr;
+
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
+ goto done;
+ }
+
+ if (copy_from_user(&addr, buf, sizeof(addr))) {
+ error = -EFAULT;
+ goto done;
+ }
+ if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR)
+ error = atm_add_addr(dev, &addr,
+ (cmd == ATM_ADDADDR ?
ATM_ADDR_LOCAL : ATM_ADDR_LECS));
- if (error < 0)
- goto done;
- size = error;
- /* may return 0, but later on size == 0 means "don't
- write the length" */
- error = put_user(size, sioc_len)
- ? -EFAULT : 0;
+ else
+ error = atm_del_addr(dev, &addr,
+ (cmd == ATM_DELADDR ?
+ ATM_ADDR_LOCAL : ATM_ADDR_LECS));
+ goto done;
+ }
+ case ATM_GETADDR:
+ case ATM_GETLECSADDR:
+ error = atm_get_addr(dev, buf, len,
+ (cmd == ATM_GETADDR ?
+ ATM_ADDR_LOCAL : ATM_ADDR_LECS));
+ if (error < 0)
+ goto done;
+ size = error;
+ /* may return 0, but later on size == 0 means "don't
+ write the length" */
+ error = put_user(size, sioc_len) ? -EFAULT : 0;
+ goto done;
+ case ATM_SETLOOP:
+ if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
+ __ATM_LM_XTLOC((int) (unsigned long) buf) >
+ __ATM_LM_XTRMT((int) (unsigned long) buf)) {
+ error = -EINVAL;
+ goto done;
+ }
+ /* fall through */
+ case ATM_SETCIRANGE:
+ case SONET_GETSTATZ:
+ case SONET_SETDIAG:
+ case SONET_CLRDIAG:
+ case SONET_SETFRAMING:
+ if (!capable(CAP_NET_ADMIN)) {
+ error = -EPERM;
goto done;
- case ATM_SETLOOP:
- if (__ATM_LM_XTRMT((int) (unsigned long) buf) &&
- __ATM_LM_XTLOC((int) (unsigned long) buf) >
- __ATM_LM_XTRMT((int) (unsigned long) buf)) {
+ }
+ /* fall through */
+ default:
+ if (compat) {
+#ifdef CONFIG_COMPAT
+ if (!dev->ops->compat_ioctl) {
error = -EINVAL;
goto done;
}
- /* fall through */
- case ATM_SETCIRANGE:
- case SONET_GETSTATZ:
- case SONET_SETDIAG:
- case SONET_CLRDIAG:
- case SONET_SETFRAMING:
- if (!capable(CAP_NET_ADMIN)) {
- error = -EPERM;
- goto done;
- }
- /* fall through */
- default:
- if (compat) {
-#ifdef CONFIG_COMPAT
- if (!dev->ops->compat_ioctl) {
- error = -EINVAL;
- goto done;
- }
- size = dev->ops->compat_ioctl(dev, cmd, buf);
+ size = dev->ops->compat_ioctl(dev, cmd, buf);
#endif
- } else {
- if (!dev->ops->ioctl) {
- error = -EINVAL;
- goto done;
- }
- size = dev->ops->ioctl(dev, cmd, buf);
- }
- if (size < 0) {
- error = (size == -ENOIOCTLCMD ? -EINVAL : size);
+ } else {
+ if (!dev->ops->ioctl) {
+ error = -EINVAL;
goto done;
}
+ size = dev->ops->ioctl(dev, cmd, buf);
+ }
+ if (size < 0) {
+ error = (size == -ENOIOCTLCMD ? -EINVAL : size);
+ goto done;
+ }
}
if (size)
- error = put_user(size, sioc_len)
- ? -EFAULT : 0;
+ error = put_user(size, sioc_len) ? -EFAULT : 0;
else
error = 0;
done:
@@ -449,21 +444,10 @@ done:
return error;
}
-static __inline__ void *dev_get_idx(loff_t left)
-{
- struct list_head *p;
-
- list_for_each(p, &atm_devs) {
- if (!--left)
- break;
- }
- return (p != &atm_devs) ? p : NULL;
-}
-
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&atm_dev_mutex);
- return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN;
+ return seq_list_start_head(&atm_devs, *pos);
}
void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -473,13 +457,5 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
- v = (v == SEQ_START_TOKEN)
- ? atm_devs.next : ((struct list_head *)v)->next;
- return (v == &atm_devs) ? NULL : v;
+ return seq_list_next(v, &atm_devs, pos);
}
-
-
-EXPORT_SYMBOL(atm_dev_register);
-EXPORT_SYMBOL(atm_dev_deregister);
-EXPORT_SYMBOL(atm_dev_lookup);
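
The resources.c hunk above drops the hand-rolled dev_get_idx() cursor in favour of seq_list_start_head()/seq_list_next(); the matching show routine in proc.c then tests v == &atm_devs to detect the header row instead of SEQ_START_TOKEN. A condensed sketch of that idiom, using hypothetical names:

#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(my_devs);
static DEFINE_MUTEX(my_devs_mutex);

struct my_dev { struct list_head dev_list; int number; };

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	mutex_lock(&my_devs_mutex);
	/* returns &my_devs itself when *pos == 0, i.e. the header row */
	return seq_list_start_head(&my_devs, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &my_devs, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&my_devs_mutex);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	if (v == &my_devs) {		/* header, not a real entry */
		seq_puts(seq, "Itf\n");
		return 0;
	}
	seq_printf(seq, "%d\n",
		   list_entry(v, struct my_dev, dev_list)->number);
	return 0;
}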
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 22992140052..ad1d28ae512 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -2,6 +2,7 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/errno.h> /* error codes */
#include <linux/kernel.h> /* printk */
@@ -17,7 +18,6 @@
#include "resources.h"
#include "signaling.h"
-
#undef WAIT_FOR_DEMON /* #define this if system calls on SVC sockets
should block until the demon runs.
Danger: may cause nasty hangs if the demon
@@ -28,60 +28,59 @@ struct atm_vcc *sigd = NULL;
static DECLARE_WAIT_QUEUE_HEAD(sigd_sleep);
#endif
-
static void sigd_put_skb(struct sk_buff *skb)
{
#ifdef WAIT_FOR_DEMON
- DECLARE_WAITQUEUE(wait,current);
+ DECLARE_WAITQUEUE(wait, current);
- add_wait_queue(&sigd_sleep,&wait);
+ add_wait_queue(&sigd_sleep, &wait);
while (!sigd) {
set_current_state(TASK_UNINTERRUPTIBLE);
- pr_debug("atmsvc: waiting for signaling demon...\n");
+ pr_debug("atmsvc: waiting for signaling daemon...\n");
schedule();
}
current->state = TASK_RUNNING;
- remove_wait_queue(&sigd_sleep,&wait);
+ remove_wait_queue(&sigd_sleep, &wait);
#else
if (!sigd) {
- pr_debug("atmsvc: no signaling demon\n");
+ pr_debug("atmsvc: no signaling daemon\n");
kfree_skb(skb);
return;
}
#endif
- atm_force_charge(sigd,skb->truesize);
- skb_queue_tail(&sk_atm(sigd)->sk_receive_queue,skb);
+ atm_force_charge(sigd, skb->truesize);
+ skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
}
-
-static void modify_qos(struct atm_vcc *vcc,struct atmsvc_msg *msg)
+static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
{
struct sk_buff *skb;
- if (test_bit(ATM_VF_RELEASED,&vcc->flags) ||
- !test_bit(ATM_VF_READY,&vcc->flags))
+ if (test_bit(ATM_VF_RELEASED, &vcc->flags) ||
+ !test_bit(ATM_VF_READY, &vcc->flags))
return;
msg->type = as_error;
- if (!vcc->dev->ops->change_qos) msg->reply = -EOPNOTSUPP;
+ if (!vcc->dev->ops->change_qos)
+ msg->reply = -EOPNOTSUPP;
else {
/* should lock VCC */
- msg->reply = vcc->dev->ops->change_qos(vcc,&msg->qos,
- msg->reply);
- if (!msg->reply) msg->type = as_okay;
+ msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos,
+ msg->reply);
+ if (!msg->reply)
+ msg->type = as_okay;
}
/*
* Should probably just turn around the old skb. But the, the buffer
* space accounting needs to follow the change too. Maybe later.
*/
- while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL)))
+ while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
- *(struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg)) = *msg;
+ *(struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg)) = *msg;
sigd_put_skb(skb);
}
-
-static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
+static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atmsvc_msg *msg;
struct atm_vcc *session_vcc;
@@ -90,69 +89,68 @@ static int sigd_send(struct atm_vcc *vcc,struct sk_buff *skb)
msg = (struct atmsvc_msg *) skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
vcc = *(struct atm_vcc **) &msg->vcc;
- pr_debug("sigd_send %d (0x%lx)\n",(int) msg->type,
- (unsigned long) vcc);
+ pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);
switch (msg->type) {
- case as_okay:
- sk->sk_err = -msg->reply;
- clear_bit(ATM_VF_WAITING, &vcc->flags);
- if (!*vcc->local.sas_addr.prv &&
- !*vcc->local.sas_addr.pub) {
- vcc->local.sas_family = AF_ATMSVC;
- memcpy(vcc->local.sas_addr.prv,
- msg->local.sas_addr.prv,ATM_ESA_LEN);
- memcpy(vcc->local.sas_addr.pub,
- msg->local.sas_addr.pub,ATM_E164_LEN+1);
- }
- session_vcc = vcc->session ? vcc->session : vcc;
- if (session_vcc->vpi || session_vcc->vci) break;
- session_vcc->itf = msg->pvc.sap_addr.itf;
- session_vcc->vpi = msg->pvc.sap_addr.vpi;
- session_vcc->vci = msg->pvc.sap_addr.vci;
- if (session_vcc->vpi || session_vcc->vci)
- session_vcc->qos = msg->qos;
- break;
- case as_error:
- clear_bit(ATM_VF_REGIS,&vcc->flags);
- clear_bit(ATM_VF_READY,&vcc->flags);
- sk->sk_err = -msg->reply;
- clear_bit(ATM_VF_WAITING, &vcc->flags);
+ case as_okay:
+ sk->sk_err = -msg->reply;
+ clear_bit(ATM_VF_WAITING, &vcc->flags);
+ if (!*vcc->local.sas_addr.prv && !*vcc->local.sas_addr.pub) {
+ vcc->local.sas_family = AF_ATMSVC;
+ memcpy(vcc->local.sas_addr.prv,
+ msg->local.sas_addr.prv, ATM_ESA_LEN);
+ memcpy(vcc->local.sas_addr.pub,
+ msg->local.sas_addr.pub, ATM_E164_LEN + 1);
+ }
+ session_vcc = vcc->session ? vcc->session : vcc;
+ if (session_vcc->vpi || session_vcc->vci)
break;
- case as_indicate:
- vcc = *(struct atm_vcc **) &msg->listen_vcc;
- sk = sk_atm(vcc);
- pr_debug("as_indicate!!!\n");
- lock_sock(sk);
- if (sk_acceptq_is_full(sk)) {
- sigd_enq(NULL,as_reject,vcc,NULL,NULL);
- dev_kfree_skb(skb);
- goto as_indicate_complete;
- }
- sk->sk_ack_backlog++;
- skb_queue_tail(&sk->sk_receive_queue, skb);
- pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
- sk->sk_state_change(sk);
+ session_vcc->itf = msg->pvc.sap_addr.itf;
+ session_vcc->vpi = msg->pvc.sap_addr.vpi;
+ session_vcc->vci = msg->pvc.sap_addr.vci;
+ if (session_vcc->vpi || session_vcc->vci)
+ session_vcc->qos = msg->qos;
+ break;
+ case as_error:
+ clear_bit(ATM_VF_REGIS, &vcc->flags);
+ clear_bit(ATM_VF_READY, &vcc->flags);
+ sk->sk_err = -msg->reply;
+ clear_bit(ATM_VF_WAITING, &vcc->flags);
+ break;
+ case as_indicate:
+ vcc = *(struct atm_vcc **)&msg->listen_vcc;
+ sk = sk_atm(vcc);
+ pr_debug("as_indicate!!!\n");
+ lock_sock(sk);
+ if (sk_acceptq_is_full(sk)) {
+ sigd_enq(NULL, as_reject, vcc, NULL, NULL);
+ dev_kfree_skb(skb);
+ goto as_indicate_complete;
+ }
+ sk->sk_ack_backlog++;
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep);
+ sk->sk_state_change(sk);
as_indicate_complete:
- release_sock(sk);
- return 0;
- case as_close:
- set_bit(ATM_VF_RELEASED,&vcc->flags);
- vcc_release_async(vcc, msg->reply);
- goto out;
- case as_modify:
- modify_qos(vcc,msg);
- break;
- case as_addparty:
- case as_dropparty:
- sk->sk_err_soft = msg->reply; /* < 0 failure, otherwise ep_ref */
- clear_bit(ATM_VF_WAITING, &vcc->flags);
- break;
- default:
- printk(KERN_ALERT "sigd_send: bad message type %d\n",
- (int) msg->type);
- return -EINVAL;
+ release_sock(sk);
+ return 0;
+ case as_close:
+ set_bit(ATM_VF_RELEASED, &vcc->flags);
+ vcc_release_async(vcc, msg->reply);
+ goto out;
+ case as_modify:
+ modify_qos(vcc, msg);
+ break;
+ case as_addparty:
+ case as_dropparty:
+ sk->sk_err_soft = msg->reply;
+ /* < 0 failure, otherwise ep_ref */
+ clear_bit(ATM_VF_WAITING, &vcc->flags);
+ break;
+ default:
+ pr_alert("bad message type %d\n", (int)msg->type);
+ return -EINVAL;
}
sk->sk_state_change(sk);
out:
@@ -160,48 +158,52 @@ out:
return 0;
}
-
-void sigd_enq2(struct atm_vcc *vcc,enum atmsvc_msg_type type,
- struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc,
- const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply)
+void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
+ struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
+ const struct sockaddr_atmsvc *svc, const struct atm_qos *qos,
+ int reply)
{
struct sk_buff *skb;
struct atmsvc_msg *msg;
static unsigned session = 0;
- pr_debug("sigd_enq %d (0x%p)\n",(int) type,vcc);
- while (!(skb = alloc_skb(sizeof(struct atmsvc_msg),GFP_KERNEL)))
+ pr_debug("%d (0x%p)\n", (int)type, vcc);
+ while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
schedule();
- msg = (struct atmsvc_msg *) skb_put(skb,sizeof(struct atmsvc_msg));
- memset(msg,0,sizeof(*msg));
+ msg = (struct atmsvc_msg *)skb_put(skb, sizeof(struct atmsvc_msg));
+ memset(msg, 0, sizeof(*msg));
msg->type = type;
*(struct atm_vcc **) &msg->vcc = vcc;
*(struct atm_vcc **) &msg->listen_vcc = listen_vcc;
msg->reply = reply;
- if (qos) msg->qos = *qos;
- if (vcc) msg->sap = vcc->sap;
- if (svc) msg->svc = *svc;
- if (vcc) msg->local = vcc->local;
- if (pvc) msg->pvc = *pvc;
+ if (qos)
+ msg->qos = *qos;
+ if (vcc)
+ msg->sap = vcc->sap;
+ if (svc)
+ msg->svc = *svc;
+ if (vcc)
+ msg->local = vcc->local;
+ if (pvc)
+ msg->pvc = *pvc;
if (vcc) {
if (type == as_connect && test_bit(ATM_VF_SESSION, &vcc->flags))
msg->session = ++session;
/* every new pmp connect gets the next session number */
}
sigd_put_skb(skb);
- if (vcc) set_bit(ATM_VF_REGIS,&vcc->flags);
+ if (vcc)
+ set_bit(ATM_VF_REGIS, &vcc->flags);
}
-
-void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type,
- struct atm_vcc *listen_vcc,const struct sockaddr_atmpvc *pvc,
- const struct sockaddr_atmsvc *svc)
+void sigd_enq(struct atm_vcc *vcc, enum atmsvc_msg_type type,
+ struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc,
+ const struct sockaddr_atmsvc *svc)
{
- sigd_enq2(vcc,type,listen_vcc,pvc,svc,vcc ? &vcc->qos : NULL,0);
+ sigd_enq2(vcc, type, listen_vcc, pvc, svc, vcc ? &vcc->qos : NULL, 0);
/* other ISP applications may use "reply" */
}
-
static void purge_vcc(struct atm_vcc *vcc)
{
if (sk_atm(vcc)->sk_family == PF_ATMSVC &&
@@ -212,21 +214,20 @@ static void purge_vcc(struct atm_vcc *vcc)
}
}
-
static void sigd_close(struct atm_vcc *vcc)
{
struct hlist_node *node;
struct sock *s;
int i;
- pr_debug("sigd_close\n");
+ pr_debug("\n");
sigd = NULL;
if (skb_peek(&sk_atm(vcc)->sk_receive_queue))
- printk(KERN_ERR "sigd_close: closing with requests pending\n");
+ pr_err("closing with requests pending\n");
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
read_lock(&vcc_sklist_lock);
- for(i = 0; i < VCC_HTABLE_SIZE; ++i) {
+ for (i = 0; i < VCC_HTABLE_SIZE; ++i) {
struct hlist_head *head = &vcc_hash[i];
sk_for_each(s, node, head) {
@@ -238,13 +239,11 @@ static void sigd_close(struct atm_vcc *vcc)
read_unlock(&vcc_sklist_lock);
}
-
static struct atmdev_ops sigd_dev_ops = {
.close = sigd_close,
.send = sigd_send
};
-
static struct atm_dev sigd_dev = {
.ops = &sigd_dev_ops,
.type = "sig",
@@ -252,16 +251,16 @@ static struct atm_dev sigd_dev = {
.lock = __SPIN_LOCK_UNLOCKED(sigd_dev.lock)
};
-
int sigd_attach(struct atm_vcc *vcc)
{
- if (sigd) return -EADDRINUSE;
- pr_debug("sigd_attach\n");
+ if (sigd)
+ return -EADDRINUSE;
+ pr_debug("\n");
sigd = vcc;
vcc->dev = &sigd_dev;
vcc_insert_socket(sk_atm(vcc));
- set_bit(ATM_VF_META,&vcc->flags);
- set_bit(ATM_VF_READY,&vcc->flags);
+ set_bit(ATM_VF_META, &vcc->flags);
+ set_bit(ATM_VF_READY, &vcc->flags);
#ifdef WAIT_FOR_DEMON
wake_up(&sigd_sleep);
#endif
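
sigd_enq2() above builds each signalling message by reserving sizeof(struct atmsvc_msg) in a fresh skb with skb_put() and queueing the skb toward the daemon's socket. Below is a stripped-down sketch of that "struct in an skb" pattern; the message type and queue are hypothetical, and error handling is simplified.

#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/errno.h>

struct my_msg { int type; int reply; };

static int queue_msg(struct sk_buff_head *q, int type)
{
	struct sk_buff *skb;
	struct my_msg *msg;

	skb = alloc_skb(sizeof(*msg), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	/* reserve room in the skb and treat it as the message struct */
	msg = (struct my_msg *)skb_put(skb, sizeof(*msg));
	memset(msg, 0, sizeof(*msg));
	msg->type = type;
	skb_queue_tail(q, skb);
	return 0;
}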
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 66e1d9b3e5d..3ba9a45a51a 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -2,6 +2,7 @@
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/string.h>
#include <linux/net.h> /* struct socket, struct proto_ops */
@@ -18,14 +19,15 @@
#include <linux/atmdev.h>
#include <linux/bitops.h>
#include <net/sock.h> /* for sock_no_* */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "resources.h"
#include "common.h" /* common for PVCs and SVCs */
#include "signaling.h"
#include "addr.h"
-static int svc_create(struct net *net, struct socket *sock, int protocol, int kern);
+static int svc_create(struct net *net, struct socket *sock, int protocol,
+ int kern);
/*
* Note: since all this is still nicely synchronized with the signaling demon,
@@ -34,25 +36,25 @@ static int svc_create(struct net *net, struct socket *sock, int protocol, int ke
*/
-static int svc_shutdown(struct socket *sock,int how)
+static int svc_shutdown(struct socket *sock, int how)
{
return 0;
}
-
static void svc_disconnect(struct atm_vcc *vcc)
{
DEFINE_WAIT(wait);
struct sk_buff *skb;
struct sock *sk = sk_atm(vcc);
- pr_debug("svc_disconnect %p\n",vcc);
- if (test_bit(ATM_VF_REGIS,&vcc->flags)) {
+ pr_debug("%p\n", vcc);
+ if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
- sigd_enq(vcc,as_close,NULL,NULL,NULL);
- while (!test_bit(ATM_VF_RELEASED,&vcc->flags) && sigd) {
+ sigd_enq(vcc, as_close, NULL, NULL, NULL);
+ while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
- prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_UNINTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
}
@@ -61,35 +63,35 @@ static void svc_disconnect(struct atm_vcc *vcc)
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
atm_return(vcc, skb->truesize);
pr_debug("LISTEN REL\n");
- sigd_enq2(NULL,as_reject,vcc,NULL,NULL,&vcc->qos,0);
+ sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0);
dev_kfree_skb(skb);
}
clear_bit(ATM_VF_REGIS, &vcc->flags);
/* ... may retry later */
}
-
static int svc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct atm_vcc *vcc;
- if (sk) {
+ if (sk) {
vcc = ATM_SD(sock);
- pr_debug("svc_release %p\n", vcc);
+ pr_debug("%p\n", vcc);
clear_bit(ATM_VF_READY, &vcc->flags);
- /* VCC pointer is used as a reference, so we must not free it
- (thereby subjecting it to re-use) before all pending connections
- are closed */
+ /*
+ * VCC pointer is used as a reference,
+ * so we must not free it (thereby subjecting it to re-use)
+ * before all pending connections are closed
+ */
svc_disconnect(vcc);
vcc_release(sock);
}
return 0;
}
-
-static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
- int sockaddr_len)
+static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
+ int sockaddr_len)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
@@ -114,38 +116,37 @@ static int svc_bind(struct socket *sock,struct sockaddr *sockaddr,
error = -EAFNOSUPPORT;
goto out;
}
- clear_bit(ATM_VF_BOUND,&vcc->flags);
+ clear_bit(ATM_VF_BOUND, &vcc->flags);
/* failing rebind will kill old binding */
/* @@@ check memory (de)allocation on rebind */
- if (!test_bit(ATM_VF_HASQOS,&vcc->flags)) {
+ if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) {
error = -EBADFD;
goto out;
}
vcc->local = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
- sigd_enq(vcc,as_bind,NULL,NULL,&vcc->local);
+ sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
- clear_bit(ATM_VF_REGIS,&vcc->flags); /* doesn't count */
+ clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
if (!sigd) {
error = -EUNATCH;
goto out;
}
if (!sk->sk_err)
- set_bit(ATM_VF_BOUND,&vcc->flags);
+ set_bit(ATM_VF_BOUND, &vcc->flags);
error = -sk->sk_err;
out:
release_sock(sk);
return error;
}
-
-static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
- int sockaddr_len,int flags)
+static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
+ int sockaddr_len, int flags)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
@@ -153,7 +154,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
struct atm_vcc *vcc = ATM_SD(sock);
int error;
- pr_debug("svc_connect %p\n",vcc);
+ pr_debug("%p\n", vcc);
lock_sock(sk);
if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) {
error = -EINVAL;
@@ -201,7 +202,7 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
vcc->remote = *addr;
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
- sigd_enq(vcc,as_connect,NULL,NULL,&vcc->remote);
+ sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
if (flags & O_NONBLOCK) {
finish_wait(sk->sk_sleep, &wait);
sock->state = SS_CONNECTING;
@@ -212,7 +213,8 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
if (!signal_pending(current)) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
continue;
}
pr_debug("*ABORT*\n");
@@ -228,20 +230,22 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
* Kernel <--okay---- Demon
* Kernel <--close--- Demon
*/
- sigd_enq(vcc,as_close,NULL,NULL,NULL);
+ sigd_enq(vcc, as_close, NULL, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
schedule();
}
if (!sk->sk_err)
- while (!test_bit(ATM_VF_RELEASED,&vcc->flags)
- && sigd) {
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
+ sigd) {
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
schedule();
}
- clear_bit(ATM_VF_REGIS,&vcc->flags);
- clear_bit(ATM_VF_RELEASED,&vcc->flags);
- clear_bit(ATM_VF_CLOSE,&vcc->flags);
+ clear_bit(ATM_VF_REGIS, &vcc->flags);
+ clear_bit(ATM_VF_RELEASED, &vcc->flags);
+ clear_bit(ATM_VF_CLOSE, &vcc->flags);
/* we're gone now but may connect later */
error = -EINTR;
break;
@@ -269,37 +273,37 @@ static int svc_connect(struct socket *sock,struct sockaddr *sockaddr,
/*
* #endif
*/
- if (!(error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci)))
+ error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci);
+ if (!error)
sock->state = SS_CONNECTED;
else
- (void) svc_disconnect(vcc);
+ (void)svc_disconnect(vcc);
out:
release_sock(sk);
return error;
}
-
-static int svc_listen(struct socket *sock,int backlog)
+static int svc_listen(struct socket *sock, int backlog)
{
DEFINE_WAIT(wait);
struct sock *sk = sock->sk;
struct atm_vcc *vcc = ATM_SD(sock);
int error;
- pr_debug("svc_listen %p\n",vcc);
+ pr_debug("%p\n", vcc);
lock_sock(sk);
/* let server handle listen on unbound sockets */
- if (test_bit(ATM_VF_SESSION,&vcc->flags)) {
+ if (test_bit(ATM_VF_SESSION, &vcc->flags)) {
error = -EINVAL;
goto out;
}
if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
error = -EADDRINUSE;
goto out;
- }
+ }
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
- sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
+ sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
@@ -309,7 +313,7 @@ static int svc_listen(struct socket *sock,int backlog)
error = -EUNATCH;
goto out;
}
- set_bit(ATM_VF_LISTEN,&vcc->flags);
+ set_bit(ATM_VF_LISTEN, &vcc->flags);
vcc_insert_socket(sk);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err;
@@ -318,8 +322,7 @@ out:
return error;
}
-
-static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
+static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
@@ -336,15 +339,16 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
new_vcc = ATM_SD(newsock);
- pr_debug("svc_accept %p -> %p\n",old_vcc,new_vcc);
+ pr_debug("%p -> %p\n", old_vcc, new_vcc);
while (1) {
DEFINE_WAIT(wait);
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sigd) {
- if (test_bit(ATM_VF_RELEASED,&old_vcc->flags)) break;
- if (test_bit(ATM_VF_CLOSE,&old_vcc->flags)) {
+ if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
+ break;
+ if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) {
error = -sk->sk_err;
break;
}
@@ -359,7 +363,8 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
error = -ERESTARTSYS;
break;
}
- prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_wait(sk->sk_sleep, &wait,
+ TASK_INTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
if (error)
@@ -368,31 +373,34 @@ static int svc_accept(struct socket *sock,struct socket *newsock,int flags)
error = -EUNATCH;
goto out;
}
- msg = (struct atmsvc_msg *) skb->data;
+ msg = (struct atmsvc_msg *)skb->data;
new_vcc->qos = msg->qos;
- set_bit(ATM_VF_HASQOS,&new_vcc->flags);
+ set_bit(ATM_VF_HASQOS, &new_vcc->flags);
new_vcc->remote = msg->svc;
new_vcc->local = msg->local;
new_vcc->sap = msg->sap;
error = vcc_connect(newsock, msg->pvc.sap_addr.itf,
- msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci);
+ msg->pvc.sap_addr.vpi,
+ msg->pvc.sap_addr.vci);
dev_kfree_skb(skb);
sk->sk_ack_backlog--;
if (error) {
- sigd_enq2(NULL,as_reject,old_vcc,NULL,NULL,
- &old_vcc->qos,error);
+ sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL,
+ &old_vcc->qos, error);
error = error == -EAGAIN ? -EBUSY : error;
goto out;
}
/* wait should be short, so we ignore the non-blocking flag */
set_bit(ATM_VF_WAITING, &new_vcc->flags);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
- sigd_enq(new_vcc,as_accept,old_vcc,NULL,NULL);
+ prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ TASK_UNINTERRUPTIBLE);
+ sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
release_sock(sk);
schedule();
lock_sock(sk);
- prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait,
+ TASK_UNINTERRUPTIBLE);
}
finish_wait(sk_atm(new_vcc)->sk_sleep, &wait);
if (!sigd) {
@@ -412,39 +420,37 @@ out:
return error;
}
-
-static int svc_getname(struct socket *sock,struct sockaddr *sockaddr,
- int *sockaddr_len,int peer)
+static int svc_getname(struct socket *sock, struct sockaddr *sockaddr,
+ int *sockaddr_len, int peer)
{
struct sockaddr_atmsvc *addr;
*sockaddr_len = sizeof(struct sockaddr_atmsvc);
addr = (struct sockaddr_atmsvc *) sockaddr;
- memcpy(addr,peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
- sizeof(struct sockaddr_atmsvc));
+ memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
+ sizeof(struct sockaddr_atmsvc));
return 0;
}
-
-int svc_change_qos(struct atm_vcc *vcc,struct atm_qos *qos)
+int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
{
struct sock *sk = sk_atm(vcc);
DEFINE_WAIT(wait);
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
- sigd_enq2(vcc,as_modify,NULL,NULL,&vcc->local,qos,0);
+ sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
}
finish_wait(sk->sk_sleep, &wait);
- if (!sigd) return -EUNATCH;
+ if (!sigd)
+ return -EUNATCH;
return -sk->sk_err;
}
-
static int svc_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
@@ -454,37 +460,35 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
lock_sock(sk);
switch (optname) {
- case SO_ATMSAP:
- if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
- error = -EINVAL;
- goto out;
- }
- if (copy_from_user(&vcc->sap, optval, optlen)) {
- error = -EFAULT;
- goto out;
- }
- set_bit(ATM_VF_HASSAP, &vcc->flags);
- break;
- case SO_MULTIPOINT:
- if (level != SOL_ATM || optlen != sizeof(int)) {
- error = -EINVAL;
- goto out;
- }
- if (get_user(value, (int __user *) optval)) {
- error = -EFAULT;
- goto out;
- }
- if (value == 1) {
- set_bit(ATM_VF_SESSION, &vcc->flags);
- } else if (value == 0) {
- clear_bit(ATM_VF_SESSION, &vcc->flags);
- } else {
- error = -EINVAL;
- }
- break;
- default:
- error = vcc_setsockopt(sock, level, optname,
- optval, optlen);
+ case SO_ATMSAP:
+ if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) {
+ error = -EINVAL;
+ goto out;
+ }
+ if (copy_from_user(&vcc->sap, optval, optlen)) {
+ error = -EFAULT;
+ goto out;
+ }
+ set_bit(ATM_VF_HASSAP, &vcc->flags);
+ break;
+ case SO_MULTIPOINT:
+ if (level != SOL_ATM || optlen != sizeof(int)) {
+ error = -EINVAL;
+ goto out;
+ }
+ if (get_user(value, (int __user *)optval)) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (value == 1)
+ set_bit(ATM_VF_SESSION, &vcc->flags);
+ else if (value == 0)
+ clear_bit(ATM_VF_SESSION, &vcc->flags);
+ else
+ error = -EINVAL;
+ break;
+ default:
+ error = vcc_setsockopt(sock, level, optname, optval, optlen);
}
out:
@@ -492,9 +496,8 @@ out:
return error;
}
-
-static int svc_getsockopt(struct socket *sock,int level,int optname,
- char __user *optval,int __user *optlen)
+static int svc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
int error = 0, len;
@@ -521,7 +524,6 @@ out:
return error;
}
-
static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
int sockaddr_len, int flags)
{
@@ -540,7 +542,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
error = -EINPROGRESS;
goto out;
}
- pr_debug("svc_addparty added wait queue\n");
+ pr_debug("added wait queue\n");
while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
schedule();
prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
@@ -552,7 +554,6 @@ out:
return error;
}
-
static int svc_dropparty(struct socket *sock, int ep_ref)
{
DEFINE_WAIT(wait);
@@ -579,7 +580,6 @@ out:
return error;
}
-
static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
int error, ep_ref;
@@ -587,29 +587,31 @@ static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct atm_vcc *vcc = ATM_SD(sock);
switch (cmd) {
- case ATM_ADDPARTY:
- if (!test_bit(ATM_VF_SESSION, &vcc->flags))
- return -EINVAL;
- if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
- return -EFAULT;
- error = svc_addparty(sock, (struct sockaddr *) &sa, sizeof(sa), 0);
- break;
- case ATM_DROPPARTY:
- if (!test_bit(ATM_VF_SESSION, &vcc->flags))
- return -EINVAL;
- if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
- return -EFAULT;
- error = svc_dropparty(sock, ep_ref);
- break;
- default:
- error = vcc_ioctl(sock, cmd, arg);
+ case ATM_ADDPARTY:
+ if (!test_bit(ATM_VF_SESSION, &vcc->flags))
+ return -EINVAL;
+ if (copy_from_user(&sa, (void __user *) arg, sizeof(sa)))
+ return -EFAULT;
+ error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa),
+ 0);
+ break;
+ case ATM_DROPPARTY:
+ if (!test_bit(ATM_VF_SESSION, &vcc->flags))
+ return -EINVAL;
+ if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int)))
+ return -EFAULT;
+ error = svc_dropparty(sock, ep_ref);
+ break;
+ default:
+ error = vcc_ioctl(sock, cmd, arg);
}
return error;
}
#ifdef CONFIG_COMPAT
-static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int svc_compat_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
/* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf.
But actually it takes a struct sockaddr_atmsvc, which doesn't need
@@ -660,13 +662,13 @@ static int svc_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &svc_proto_ops;
error = vcc_create(net, sock, protocol, AF_ATMSVC);
- if (error) return error;
+ if (error)
+ return error;
ATM_SD(sock)->local.sas_family = AF_ATMSVC;
ATM_SD(sock)->remote.sas_family = AF_ATMSVC;
return 0;
}
-
static const struct net_proto_family svc_family_ops = {
.family = PF_ATMSVC,
.create = svc_create,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5588ba69c46..a5beedf43e2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1863,25 +1863,13 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_list_lock)
{
- struct ax25_cb *ax25;
- struct hlist_node *node;
- int i = 0;
-
spin_lock_bh(&ax25_list_lock);
- ax25_for_each(ax25, node, &ax25_list) {
- if (i == *pos)
- return ax25;
- ++i;
- }
- return NULL;
+ return seq_hlist_start(&ax25_list, *pos);
}
static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
- struct ax25_cb, ax25_node);
+ return seq_hlist_next(v, &ax25_list, pos);
}
static void ax25_info_stop(struct seq_file *seq, void *v)
@@ -1892,7 +1880,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
static int ax25_info_show(struct seq_file *seq, void *v)
{
- ax25_cb *ax25 = v;
+ ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
char buf[11];
int k;
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 832bcf092a0..9f13f6eefcb 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -146,31 +146,13 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_uid_lock)
{
- struct ax25_uid_assoc *pt;
- struct hlist_node *node;
- int i = 1;
-
read_lock(&ax25_uid_lock);
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- ax25_uid_for_each(pt, node, &ax25_uid_list) {
- if (i == *pos)
- return pt;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&ax25_uid_list, *pos);
}
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
- if (v == SEQ_START_TOKEN)
- return ax25_uid_list.first;
- else
- return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
- ax25_uid_assoc, uid_node);
+ return seq_hlist_next(v, &ax25_uid_list, pos);
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -186,8 +168,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
else {
- struct ax25_uid_assoc *pt = v;
+ struct ax25_uid_assoc *pt;
+ pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
}
return 0;
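
Both ax25 converters above drop their open-coded list walks in favour of the generic seq_hlist_start()/seq_hlist_start_head()/seq_hlist_next() helpers from <linux/seq_file.h>. A minimal sketch of the same pattern, with hypothetical my_* names (only the seq_hlist_*() and hlist_entry() calls mirror the real API):

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

/* Illustrative only: an hlist of my_entry protected by my_lock. */
struct my_entry {
	struct hlist_node node;
	int value;
};

static HLIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&my_lock);
	/* Returns SEQ_START_TOKEN for *pos == 0 so show() can print a
	 * header line; plain seq_hlist_start() skips the header token. */
	return seq_hlist_start_head(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&my_lock);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	struct my_entry *e;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "value\n");
		return 0;
	}
	e = hlist_entry(v, struct my_entry, node);
	seq_printf(seq, "%d\n", e->value);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = my_seq_next,
	.stop  = my_seq_stop,
	.show  = my_seq_show,
};
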
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e..b6234b73c4c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -64,7 +64,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
struct sk_buff *skb;
int size;
- BT_DBG("%s mc_count %d", dev->name, dev->mc_count);
+ BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
skb = alloc_skb(size, GFP_ATOMIC);
@@ -97,7 +97,9 @@ static void bnep_net_set_mc_list(struct net_device *dev)
/* FIXME: We should group addresses here. */
- for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) {
+ for (i = 0;
+ i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
+ i++) {
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
dmi = dmi->next;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 97f8d68d574..3487cfe74ae 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -21,7 +21,8 @@
*/
#include <linux/module.h>
-
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -516,33 +517,37 @@ static char *cmtp_procinfo(struct capi_ctr *ctrl)
return "CAPI Message Transport Protocol";
}
-static int cmtp_ctr_read_proc(char *page, char **start, off_t off, int count, int *eof, struct capi_ctr *ctrl)
+static int cmtp_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
struct cmtp_session *session = ctrl->driverdata;
struct cmtp_application *app;
struct list_head *p, *n;
- int len = 0;
- len += sprintf(page + len, "%s\n\n", cmtp_procinfo(ctrl));
- len += sprintf(page + len, "addr %s\n", session->name);
- len += sprintf(page + len, "ctrl %d\n", session->num);
+ seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
+ seq_printf(m, "addr %s\n", session->name);
+ seq_printf(m, "ctrl %d\n", session->num);
list_for_each_safe(p, n, &session->applications) {
app = list_entry(p, struct cmtp_application, list);
- len += sprintf(page + len, "appl %d -> %d\n", app->appl, app->mapping);
+ seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
}
- if (off + count >= len)
- *eof = 1;
-
- if (len < off)
- return 0;
-
- *start = page + off;
+ return 0;
+}
- return ((count < len - off) ? count : len - off);
+static int cmtp_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cmtp_proc_show, PDE(inode)->data);
}
+static const struct file_operations cmtp_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cmtp_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
int cmtp_attach_device(struct cmtp_session *session)
{
@@ -582,7 +587,7 @@ int cmtp_attach_device(struct cmtp_session *session)
session->ctrl.send_message = cmtp_send_message;
session->ctrl.procinfo = cmtp_procinfo;
- session->ctrl.ctr_read_proc = cmtp_ctr_read_proc;
+ session->ctrl.proc_fops = &cmtp_proc_fops;
if (attach_capi_ctr(&session->ctrl) < 0) {
BT_ERR("Can't attach new controller");
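
The cmtp change is the usual migration from the old ctr_read_proc() callback to a seq_file exposed through file_operations. Roughly the same shape in isolation, using hypothetical my_* names; PDE(inode)->data carries whatever pointer was registered with the proc entry:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Illustrative private state handed to the proc file when it is created. */
struct my_state {
	int counter;
};

static int my_proc_show(struct seq_file *m, void *v)
{
	struct my_state *st = m->private;

	seq_printf(m, "counter %d\n", st->counter);
	return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
	/* single_open() stores the third argument in m->private. */
	return single_open(file, my_proc_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = my_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* e.g. proc_create_data("my_file", 0444, NULL, &my_proc_fops, st); */
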
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7..b10e3cdb08f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ acl->power_save = 1;
+ hci_conn_enter_active_mode(acl);
+
if (lmp_esco_capable(hdev))
hci_setup_sync(sco, acl->handle);
else
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 94ba3498202..4ad23192c7a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -491,6 +491,10 @@ int hci_dev_open(__u16 dev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
set_bit(HCI_RAW, &hdev->flags);
+ /* Treat all non BR/EDR controllers as raw devices for now */
+ if (hdev->dev_type != HCI_BREDR)
+ set_bit(HCI_RAW, &hdev->flags);
+
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
@@ -797,7 +801,7 @@ int hci_get_dev_info(void __user *arg)
strcpy(di.name, hdev->name);
di.bdaddr = hdev->bdaddr;
- di.type = hdev->type;
+ di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
di.acl_mtu = hdev->acl_mtu;
@@ -869,8 +873,8 @@ int hci_register_dev(struct hci_dev *hdev)
struct list_head *head = &hci_dev_list, *p;
int i, id = 0;
- BT_DBG("%p name %s type %d owner %p", hdev, hdev->name,
- hdev->type, hdev->owner);
+ BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
+ hdev->bus, hdev->owner);
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
@@ -946,7 +950,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
{
int i;
- BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
write_lock_bh(&hci_dev_list_lock);
list_del(&hdev->list);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796..6c57fc71c7e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1698,7 +1698,9 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
hci_conn_add_sysfs(conn);
break;
+ case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
+ case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 688cfebfbee..38f08f6b86f 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -329,6 +329,9 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
}
if (mask & HCI_CMSG_TSTAMP) {
+#ifdef CONFIG_COMPAT
+ struct compat_timeval ctv;
+#endif
struct timeval tv;
void *data;
int len;
@@ -339,7 +342,6 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
len = sizeof(tv);
#ifdef CONFIG_COMPAT
if (msg->msg_flags & MSG_CMSG_COMPAT) {
- struct compat_timeval ctv;
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
data = &ctv;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 2bc6f6a8de6..1a79a6c7e30 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -2,6 +2,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -9,6 +10,9 @@
struct class *bt_class = NULL;
EXPORT_SYMBOL_GPL(bt_class);
+struct dentry *bt_debugfs = NULL;
+EXPORT_SYMBOL_GPL(bt_debugfs);
+
static struct workqueue_struct *bt_workq;
static inline char *link_typetostr(int type)
@@ -166,9 +170,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
queue_work(bt_workq, &conn->work_del);
}
-static inline char *host_typetostr(int type)
+static inline char *host_bustostr(int bus)
{
- switch (type) {
+ switch (bus) {
case HCI_VIRTUAL:
return "VIRTUAL";
case HCI_USB:
@@ -188,10 +192,28 @@ static inline char *host_typetostr(int type)
}
}
+static inline char *host_typetostr(int type)
+{
+ switch (type) {
+ case HCI_BREDR:
+ return "BR/EDR";
+ case HCI_80211:
+ return "802.11";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hci_dev *hdev = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
+}
+
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", host_typetostr(hdev->type));
+ return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -251,32 +273,6 @@ static ssize_t show_hci_revision(struct device *dev, struct device_attribute *at
return sprintf(buf, "%d\n", hdev->hci_rev);
}
-static ssize_t show_inquiry_cache(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_dev *hdev = dev_get_drvdata(dev);
- struct inquiry_cache *cache = &hdev->inq_cache;
- struct inquiry_entry *e;
- int n = 0;
-
- hci_dev_lock_bh(hdev);
-
- for (e = cache->list; e; e = e->next) {
- struct inquiry_data *data = &e->data;
- bdaddr_t bdaddr;
- baswap(&bdaddr, &data->bdaddr);
- n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
- batostr(&bdaddr),
- data->pscan_rep_mode, data->pscan_period_mode,
- data->pscan_mode, data->dev_class[2],
- data->dev_class[1], data->dev_class[0],
- __le16_to_cpu(data->clock_offset),
- data->rssi, data->ssp_mode, e->timestamp);
- }
-
- hci_dev_unlock_bh(hdev);
- return n;
-}
-
static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
@@ -355,6 +351,7 @@ static ssize_t store_sniff_min_interval(struct device *dev, struct device_attrib
return count;
}
+static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
@@ -363,7 +360,6 @@ static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
-static DEVICE_ATTR(inquiry_cache, S_IRUGO, show_inquiry_cache, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
show_idle_timeout, store_idle_timeout);
@@ -373,6 +369,7 @@ static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
+ &dev_attr_bus.attr,
&dev_attr_type.attr,
&dev_attr_name.attr,
&dev_attr_class.attr,
@@ -381,7 +378,6 @@ static struct attribute *bt_host_attrs[] = {
&dev_attr_manufacturer.attr,
&dev_attr_hci_version.attr,
&dev_attr_hci_revision.attr,
- &dev_attr_inquiry_cache.attr,
&dev_attr_idle_timeout.attr,
&dev_attr_sniff_max_interval.attr,
&dev_attr_sniff_min_interval.attr,
@@ -409,12 +405,52 @@ static struct device_type bt_host = {
.release = bt_host_release,
};
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t inquiry_cache_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ struct inquiry_cache *cache = &hdev->inq_cache;
+ struct inquiry_entry *e;
+ char buf[4096];
+ int n = 0;
+
+ hci_dev_lock_bh(hdev);
+
+ for (e = cache->list; e; e = e->next) {
+ struct inquiry_data *data = &e->data;
+ bdaddr_t bdaddr;
+ baswap(&bdaddr, &data->bdaddr);
+ n += sprintf(buf + n, "%s %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+ batostr(&bdaddr),
+ data->pscan_rep_mode, data->pscan_period_mode,
+ data->pscan_mode, data->dev_class[2],
+ data->dev_class[1], data->dev_class[0],
+ __le16_to_cpu(data->clock_offset),
+ data->rssi, data->ssp_mode, e->timestamp);
+ }
+
+ hci_dev_unlock_bh(hdev);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, n);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+ .open = inquiry_cache_open,
+ .read = inquiry_cache_read,
+};
+
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
int err;
- BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
dev->type = &bt_host;
dev->class = bt_class;
@@ -428,12 +464,24 @@ int hci_register_sysfs(struct hci_dev *hdev)
if (err < 0)
return err;
+ if (!bt_debugfs)
+ return 0;
+
+ hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+ if (!hdev->debugfs)
+ return 0;
+
+ debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
+ hdev, &inquiry_cache_fops);
+
return 0;
}
void hci_unregister_sysfs(struct hci_dev *hdev)
{
- BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
+ BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+
+ debugfs_remove_recursive(hdev->debugfs);
device_del(&hdev->dev);
}
@@ -444,6 +492,8 @@ int __init bt_sysfs_init(void)
if (!bt_workq)
return -ENOMEM;
+ bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+
bt_class = class_create(THIS_MODULE, "bluetooth");
if (IS_ERR(bt_class)) {
destroy_workqueue(bt_workq);
@@ -455,7 +505,9 @@ int __init bt_sysfs_init(void)
void bt_sysfs_cleanup(void)
{
- destroy_workqueue(bt_workq);
-
class_destroy(bt_class);
+
+ debugfs_remove_recursive(bt_debugfs);
+
+ destroy_workqueue(bt_workq);
}
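
Moving inquiry_cache out of sysfs keeps the multi-line dump but serves it from debugfs, where arbitrary-length text files are acceptable. A minimal sketch of that open/read pairing, assuming a hypothetical my_dev with one counter to print:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

struct my_dev {
	int packets;
};

static int my_debugfs_open(struct inode *inode, struct file *file)
{
	/* debugfs_create_file() stashed the my_dev pointer in i_private. */
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t my_debugfs_read(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct my_dev *dev = file->private_data;
	char buf[64];
	int n;

	n = scnprintf(buf, sizeof(buf), "packets %d\n", dev->packets);
	return simple_read_from_buffer(userbuf, count, ppos, buf, n);
}

static const struct file_operations my_debugfs_fops = {
	.open = my_debugfs_open,
	.read = my_debugfs_read,
};

/* e.g. debugfs_create_file("stats", 0444, parent_dir, dev, &my_debugfs_fops); */
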
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 18e7f5a43dc..280529ad927 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -243,6 +243,39 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
input_sync(dev);
}
+static int __hidp_send_ctrl_message(struct hidp_session *session,
+ unsigned char hdr, unsigned char *data, int size)
+{
+ struct sk_buff *skb;
+
+ BT_DBG("session %p data %p size %d", session, data, size);
+
+ if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
+ BT_ERR("Can't allocate memory for new frame");
+ return -ENOMEM;
+ }
+
+ *skb_put(skb, 1) = hdr;
+ if (data && size > 0)
+ memcpy(skb_put(skb, size), data, size);
+
+ skb_queue_tail(&session->ctrl_transmit, skb);
+
+ return 0;
+}
+
+static inline int hidp_send_ctrl_message(struct hidp_session *session,
+ unsigned char hdr, unsigned char *data, int size)
+{
+ int err;
+
+ err = __hidp_send_ctrl_message(session, hdr, data, size);
+
+ hidp_schedule(session);
+
+ return err;
+}
+
static int hidp_queue_report(struct hidp_session *session,
unsigned char *data, int size)
{
@@ -280,9 +313,22 @@ static int hidp_send_report(struct hidp_session *session, struct hid_report *rep
return hidp_queue_report(session, buf, rsize);
}
-static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count)
+static int hidp_output_raw_report(struct hid_device *hid, unsigned char *data, size_t count,
+ unsigned char report_type)
{
- if (hidp_queue_report(hid->driver_data, data, count))
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (hidp_send_ctrl_message(hid->driver_data, report_type,
+ data, count))
return -ENOMEM;
return count;
}
@@ -307,39 +353,6 @@ static inline void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
-static int __hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
-{
- struct sk_buff *skb;
-
- BT_DBG("session %p data %p size %d", session, data, size);
-
- if (!(skb = alloc_skb(size + 1, GFP_ATOMIC))) {
- BT_ERR("Can't allocate memory for new frame");
- return -ENOMEM;
- }
-
- *skb_put(skb, 1) = hdr;
- if (data && size > 0)
- memcpy(skb_put(skb, size), data, size);
-
- skb_queue_tail(&session->ctrl_transmit, skb);
-
- return 0;
-}
-
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
-{
- int err;
-
- err = __hidp_send_ctrl_message(session, hdr, data, size);
-
- hidp_schedule(session);
-
- return err;
-}
-
static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
@@ -701,29 +714,9 @@ static void hidp_close(struct hid_device *hid)
static int hidp_parse(struct hid_device *hid)
{
struct hidp_session *session = hid->driver_data;
- struct hidp_connadd_req *req = session->req;
- unsigned char *buf;
- int ret;
-
- buf = kmalloc(req->rd_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, req->rd_data, req->rd_size)) {
- kfree(buf);
- return -EFAULT;
- }
-
- ret = hid_parse_report(session->hid, buf, req->rd_size);
-
- kfree(buf);
-
- if (ret)
- return ret;
-
- session->req = NULL;
- return 0;
+ return hid_parse_report(session->hid, session->rd_data,
+ session->rd_size);
}
static int hidp_start(struct hid_device *hid)
@@ -768,12 +761,24 @@ static int hidp_setup_hid(struct hidp_session *session,
bdaddr_t src, dst;
int err;
+ session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+ if (!session->rd_data)
+ return -ENOMEM;
+
+ if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+ err = -EFAULT;
+ goto fault;
+ }
+ session->rd_size = req->rd_size;
+
hid = hid_allocate_device();
- if (IS_ERR(hid))
- return PTR_ERR(hid);
+ if (IS_ERR(hid)) {
+ err = PTR_ERR(hid);
+ goto fault;
+ }
session->hid = hid;
- session->req = req;
+
hid->driver_data = session;
baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -804,6 +809,10 @@ failed:
hid_destroy_device(hid);
session->hid = NULL;
+fault:
+ kfree(session->rd_data);
+ session->rd_data = NULL;
+
return err;
}
@@ -898,6 +907,9 @@ unlink:
session->hid = NULL;
}
+ kfree(session->rd_data);
+ session->rd_data = NULL;
+
purge:
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c358..a4e215d50c1 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
- struct hidp_connadd_req *req;
+ /* Report descriptor */
+ __u8 *rd_data;
+ uint rd_size;
};
static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1120cf14a54..400efa26ddb 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1368,7 +1368,6 @@ static int l2cap_ertm_send(struct sock *sk)
while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
- tx_skb = skb_clone(skb, GFP_ATOMIC);
if (pi->remote_max_tx &&
bt_cb(skb)->retries == pi->remote_max_tx) {
@@ -1376,6 +1375,8 @@ static int l2cap_ertm_send(struct sock *sk)
break;
}
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+
bt_cb(skb)->retries++;
control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
@@ -3518,7 +3519,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
struct l2cap_pinfo *pi;
u16 control, len;
u8 tx_seq;
- int err;
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
if (!sk) {
@@ -3570,13 +3570,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
goto drop;
if (__is_iframe(control))
- err = l2cap_data_channel_iframe(sk, control, skb);
+ l2cap_data_channel_iframe(sk, control, skb);
else
- err = l2cap_data_channel_sframe(sk, control, skb);
+ l2cap_data_channel_sframe(sk, control, skb);
- if (!err)
- goto done;
- break;
+ goto done;
case L2CAP_MODE_STREAMING:
control = get_unaligned_le16(skb->data);
@@ -3602,7 +3600,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
else
pi->expected_tx_seq = tx_seq + 1;
- err = l2cap_sar_reassembly_sdu(sk, skb, control);
+ l2cap_sar_reassembly_sdu(sk, skb, control);
goto done;
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e2..89f4a59eb82 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
BT_DBG("session %p state %ld", s, s->state);
set_bit(RFCOMM_TIMED_OUT, &s->flags);
- rfcomm_session_put(s);
rfcomm_schedule(RFCOMM_SCHED_TIMEO);
}
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
break;
case BT_DISCONN:
- rfcomm_session_put(s);
+ /* When the socket is closed and we are not the RFCOMM
+ * initiator, rfcomm_process_rx() already calls
+ * rfcomm_session_put() */
+ if (s->sock->sk->sk_state != BT_CLOSED)
+ rfcomm_session_put(s);
break;
}
}
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_put(s);
continue;
}
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index e143ca67888..19a6b9629c5 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -31,3 +31,16 @@ config BRIDGE
will be called bridge.
If unsure, say N.
+
+config BRIDGE_IGMP_SNOOPING
+ bool "IGMP snooping"
+ depends on BRIDGE
+ default y
+ ---help---
+ If you say Y here, then the Ethernet bridge will be able to
+ selectively forward multicast traffic based on IGMP traffic
+ received from each port.
+
+ Say N to exclude this support and reduce the binary size.
+
+ If unsure, say Y.
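
The new option only builds br_multicast.o when enabled (see the Makefile hunk that follows), so callers such as br_dev_xmit() and br_handle_frame_finish() rely on the header providing static inline stubs for the =n case; the br_private.h changes in this series follow essentially that pattern. A hedged sketch with a single hook:

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
extern int br_multicast_rcv(struct net_bridge *br,
			    struct net_bridge_port *port,
			    struct sk_buff *skb);
#else
static inline int br_multicast_rcv(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb)
{
	return 0;	/* snooping disabled: never consume the frame */
}
#endif
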
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index f444c12cde5..d0359ea8ee7 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,4 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
+bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
+
obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 1a99c4e04e8..eb7062d2e9e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_bridge *br = netdev_priv(dev);
const unsigned char *dest = skb->data;
struct net_bridge_fdb_entry *dst;
+ struct net_bridge_mdb_entry *mdst;
+
+ BR_INPUT_SKB_CB(skb)->brdev = dev;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
@@ -32,13 +35,21 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
- if (dest[0] & 1)
- br_flood_deliver(br, skb);
- else if ((dst = __br_fdb_get(br, dest)) != NULL)
+ if (dest[0] & 1) {
+ if (br_multicast_rcv(br, NULL, skb))
+ goto out;
+
+ mdst = br_mdb_get(br, skb);
+ if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only)
+ br_multicast_deliver(mdst, skb);
+ else
+ br_flood_deliver(br, skb);
+ } else if ((dst = __br_fdb_get(br, dest)) != NULL)
br_deliver(dst->dst, skb);
else
br_flood_deliver(br, skb);
+out:
return NETDEV_TX_OK;
}
@@ -49,6 +60,7 @@ static int br_dev_open(struct net_device *dev)
br_features_recompute(br);
netif_start_queue(dev);
br_stp_enable_bridge(br);
+ br_multicast_open(br);
return 0;
}
@@ -59,7 +71,10 @@ static void br_dev_set_multicast_list(struct net_device *dev)
static int br_dev_stop(struct net_device *dev)
{
- br_stp_disable_bridge(netdev_priv(dev));
+ struct net_bridge *br = netdev_priv(dev);
+
+ br_stp_disable_bridge(br);
+ br_multicast_stop(br);
netif_stop_queue(dev);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bc1704ac6cd..d61e6f74112 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -11,6 +11,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -103,51 +104,152 @@ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
kfree_skb(skb);
}
-/* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+static int deliver_clone(struct net_bridge_port *prev, struct sk_buff *skb,
+ void (*__packet_hook)(const struct net_bridge_port *p,
+ struct sk_buff *skb))
+{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb) {
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+
+ __packet_hook(prev, skb);
+ return 0;
+}
+
+static struct net_bridge_port *maybe_deliver(
+ struct net_bridge_port *prev, struct net_bridge_port *p,
+ struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
+ int err;
+
+ if (!should_deliver(p, skb))
+ return prev;
+
+ if (!prev)
+ goto out;
+
+ err = deliver_clone(prev, skb, __packet_hook);
+ if (err)
+ return ERR_PTR(err);
+
+out:
+ return p;
+}
+
+/* called under bridge lock */
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb0,
+ void (*__packet_hook)(const struct net_bridge_port *p,
+ struct sk_buff *skb))
+{
struct net_bridge_port *p;
struct net_bridge_port *prev;
prev = NULL;
list_for_each_entry_rcu(p, &br->port_list, list) {
- if (should_deliver(p, skb)) {
- if (prev != NULL) {
- struct sk_buff *skb2;
-
- if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
- br->dev->stats.tx_dropped++;
- kfree_skb(skb);
- return;
- }
-
- __packet_hook(prev, skb2);
- }
-
- prev = p;
- }
+ prev = maybe_deliver(prev, p, skb, __packet_hook);
+ if (IS_ERR(prev))
+ goto out;
}
- if (prev != NULL) {
+ if (!prev)
+ goto out;
+
+ if (skb0)
+ deliver_clone(prev, skb, __packet_hook);
+ else
__packet_hook(prev, skb);
- return;
- }
+ return;
- kfree_skb(skb);
+out:
+ if (!skb0)
+ kfree_skb(skb);
}
/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
- br_flood(br, skb, __br_deliver);
+ br_flood(br, skb, NULL, __br_deliver);
}
/* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb2)
+{
+ br_flood(br, skb, skb2, __br_forward);
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* called with rcu_read_lock */
+static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb, struct sk_buff *skb0,
+ void (*__packet_hook)(
+ const struct net_bridge_port *p,
+ struct sk_buff *skb))
+{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_port *port;
+ struct net_bridge_port *lport, *rport;
+ struct net_bridge_port *prev;
+ struct net_bridge_port_group *p;
+ struct hlist_node *rp;
+
+ prev = NULL;
+
+ rp = br->router_list.first;
+ p = mdst ? mdst->ports : NULL;
+ while (p || rp) {
+ lport = p ? p->port : NULL;
+ rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
+ NULL;
+
+ port = (unsigned long)lport > (unsigned long)rport ?
+ lport : rport;
+
+ prev = maybe_deliver(prev, port, skb, __packet_hook);
+ if (IS_ERR(prev))
+ goto out;
+
+ if ((unsigned long)lport >= (unsigned long)port)
+ p = p->next;
+ if ((unsigned long)rport >= (unsigned long)port)
+ rp = rp->next;
+ }
+
+ if (!prev)
+ goto out;
+
+ if (skb0)
+ deliver_clone(prev, skb, __packet_hook);
+ else
+ __packet_hook(prev, skb);
+ return;
+
+out:
+ if (!skb0)
+ kfree_skb(skb);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb)
+{
+ br_multicast_flood(mdst, skb, NULL, __br_deliver);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb, struct sk_buff *skb2)
{
- br_flood(br, skb, __br_forward);
+ br_multicast_flood(mdst, skb, skb2, __br_forward);
}
+#endif
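
br_flood() and the new br_multicast_flood() share one delivery idiom: clone the skb for every recipient except the last one found, so the original buffer is consumed exactly once (or cloned one final time when skb0 tells us the caller still owns it). Stripped of the bridge types, the idea reduces to the toy sketch below (user-space, illustrative names only):

#include <stdio.h>

/* Toy stand-ins for the skb and the per-port deliver hook. */
struct pkt { const char *payload; };

static void deliver(int port, struct pkt *p, int is_clone)
{
	printf("port %d gets %s (%s)\n", port, p->payload,
	       is_clone ? "clone" : "original");
}

/* Deliver to every port, cloning for all but the last one so the
 * original buffer is handed over exactly once. */
static void flood(struct pkt *p, const int *ports, int n)
{
	int prev = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (prev >= 0)
			deliver(prev, p, 1);	/* earlier port: clone */
		prev = ports[i];
	}

	if (prev >= 0)
		deliver(prev, p, 0);		/* last port: original */
	/* else: no recipients -- the real code frees the skb here */
}

int main(void)
{
	int ports[] = { 1, 2, 3 };
	struct pkt p = { "hello" };

	flood(&p, ports, 3);
	return 0;
}
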
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a2cbe61f6e6..b6a3872f568 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,6 +147,8 @@ static void del_nbp(struct net_bridge_port *p)
rcu_assign_pointer(dev->br_port, NULL);
+ br_multicast_del_port(p);
+
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
@@ -206,9 +208,8 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
br_netfilter_rtable_init(br);
- INIT_LIST_HEAD(&br->age_list);
-
br_stp_timer_init(br);
+ br_multicast_init(br);
return dev;
}
@@ -260,6 +261,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
br_init_port(p);
p->state = BR_STATE_DISABLED;
br_stp_port_timer_init(p);
+ br_multicast_add_port(p);
return p;
}
@@ -467,7 +469,7 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
return 0;
}
-void br_net_exit(struct net *net)
+void __net_exit br_net_exit(struct net *net)
{
struct net_device *dev;
LIST_HEAD(list);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ee1a3682bf..53b39851d87 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -20,9 +20,9 @@
/* Bridge group multicast address 802.1d (pg 51). */
const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb)
{
- struct net_device *indev, *brdev = br->dev;
+ struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
brdev->stats.rx_packets++;
brdev->stats.rx_bytes += skb->len;
@@ -30,8 +30,8 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
indev = skb->dev;
skb->dev = brdev;
- NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
- netif_receive_skb);
+ return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
+ netif_receive_skb);
}
/* note: already called with rcu_read_lock (preempt_disabled) */
@@ -41,6 +41,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
+ struct net_bridge_mdb_entry *mdst;
struct sk_buff *skb2;
if (!p || p->state == BR_STATE_DISABLED)
@@ -50,9 +51,15 @@ int br_handle_frame_finish(struct sk_buff *skb)
br = p->br;
br_fdb_update(br, p, eth_hdr(skb)->h_source);
+ if (is_multicast_ether_addr(dest) &&
+ br_multicast_rcv(br, p, skb))
+ goto drop;
+
if (p->state == BR_STATE_LEARNING)
goto drop;
+ BR_INPUT_SKB_CB(skb)->brdev = br->dev;
+
/* The packet skb2 goes to the local host (NULL to skip). */
skb2 = NULL;
@@ -62,27 +69,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
if (is_multicast_ether_addr(dest)) {
+ mdst = br_mdb_get(br, skb);
+ if (mdst || BR_INPUT_SKB_CB(skb)->mrouters_only) {
+ if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+ br_multicast_is_router(br))
+ skb2 = skb;
+ br_multicast_forward(mdst, skb, skb2);
+ skb = NULL;
+ if (!skb2)
+ goto out;
+ } else
+ skb2 = skb;
+
br->dev->stats.multicast++;
- skb2 = skb;
} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
skb2 = skb;
/* Do not forward the packet since it's local. */
skb = NULL;
}
- if (skb2 == skb)
- skb2 = skb_clone(skb, GFP_ATOMIC);
-
- if (skb2)
- br_pass_frame_up(br, skb2);
-
if (skb) {
if (dst)
br_forward(dst->dst, skb);
else
- br_flood_forward(br, skb);
+ br_flood_forward(br, skb, skb2);
}
+ if (skb2)
+ return br_pass_frame_up(skb2);
+
out:
return 0;
drop:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
new file mode 100644
index 00000000000..2559fb53983
--- /dev/null
+++ b/net/bridge/br_multicast.c
@@ -0,0 +1,1304 @@
+/*
+ * Bridge multicast support.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/igmp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter_bridge.h>
+#include <linux/random.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <net/ip.h>
+
+#include "br_private.h"
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+ return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+}
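
br_ip_hash() can reduce the jhash output with a mask rather than a modulo only because mdb->max stays a power of two (BR_HASH_SIZE initially, doubled on growth in br_multicast_get_group()). A quick user-space check of that equivalence, with arbitrary stand-in values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t max = 512;		/* must be a power of two */
	uint32_t hash = 0x9e3779b9;	/* stand-in for jhash_1word() output */

	/* For power-of-two table sizes, masking equals taking the modulo. */
	printf("%u %u\n", hash & (max - 1), hash % max);
	return 0;
}
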
+
+static struct net_bridge_mdb_entry *__br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+{
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *p;
+
+ hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ if (dst == mp->addr)
+ return mp;
+ }
+
+ return NULL;
+}
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+ struct net_bridge_mdb_htable *mdb, __be32 dst)
+{
+ return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+}
+
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+ struct sk_buff *skb)
+{
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+
+ if (!mdb || br->multicast_disabled)
+ return NULL;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (BR_INPUT_SKB_CB(skb)->igmp)
+ break;
+ return br_mdb_ip_get(mdb, ip_hdr(skb)->daddr);
+ }
+
+ return NULL;
+}
+
+static void br_mdb_free(struct rcu_head *head)
+{
+ struct net_bridge_mdb_htable *mdb =
+ container_of(head, struct net_bridge_mdb_htable, rcu);
+ struct net_bridge_mdb_htable *old = mdb->old;
+
+ mdb->old = NULL;
+ kfree(old->mhash);
+ kfree(old);
+}
+
+static int br_mdb_copy(struct net_bridge_mdb_htable *new,
+ struct net_bridge_mdb_htable *old,
+ int elasticity)
+{
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *p;
+ int maxlen;
+ int len;
+ int i;
+
+ for (i = 0; i < old->max; i++)
+ hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
+ hlist_add_head(&mp->hlist[new->ver],
+ &new->mhash[br_ip_hash(new, mp->addr)]);
+
+ if (!elasticity)
+ return 0;
+
+ maxlen = 0;
+ for (i = 0; i < new->max; i++) {
+ len = 0;
+ hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
+ len++;
+ if (len > maxlen)
+ maxlen = len;
+ }
+
+ return maxlen > elasticity ? -EINVAL : 0;
+}
+
+static void br_multicast_free_pg(struct rcu_head *head)
+{
+ struct net_bridge_port_group *p =
+ container_of(head, struct net_bridge_port_group, rcu);
+
+ kfree(p);
+}
+
+static void br_multicast_free_group(struct rcu_head *head)
+{
+ struct net_bridge_mdb_entry *mp =
+ container_of(head, struct net_bridge_mdb_entry, rcu);
+
+ kfree(mp);
+}
+
+static void br_multicast_group_expired(unsigned long data)
+{
+ struct net_bridge_mdb_entry *mp = (void *)data;
+ struct net_bridge *br = mp->br;
+ struct net_bridge_mdb_htable *mdb;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || timer_pending(&mp->timer))
+ goto out;
+
+ if (!hlist_unhashed(&mp->mglist))
+ hlist_del_init(&mp->mglist);
+
+ if (mp->ports)
+ goto out;
+
+ mdb = br->mdb;
+ hlist_del_rcu(&mp->hlist[mdb->ver]);
+ mdb->size--;
+
+ del_timer(&mp->query_timer);
+ call_rcu_bh(&mp->rcu, br_multicast_free_group);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_del_pg(struct net_bridge *br,
+ struct net_bridge_port_group *pg)
+{
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group **pp;
+
+ mp = br_mdb_ip_get(mdb, pg->addr);
+ if (WARN_ON(!mp))
+ return;
+
+ for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+ if (p != pg)
+ continue;
+
+ *pp = p->next;
+ hlist_del_init(&p->mglist);
+ del_timer(&p->timer);
+ del_timer(&p->query_timer);
+ call_rcu_bh(&p->rcu, br_multicast_free_pg);
+
+ if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+ netif_running(br->dev))
+ mod_timer(&mp->timer, jiffies);
+
+ return;
+ }
+
+ WARN_ON(1);
+}
+
+static void br_multicast_port_group_expired(unsigned long data)
+{
+ struct net_bridge_port_group *pg = (void *)data;
+ struct net_bridge *br = pg->port->br;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
+ hlist_unhashed(&pg->mglist))
+ goto out;
+
+ br_multicast_del_pg(br, pg);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
+ int elasticity)
+{
+ struct net_bridge_mdb_htable *old = *mdbp;
+ struct net_bridge_mdb_htable *mdb;
+ int err;
+
+ mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
+ if (!mdb)
+ return -ENOMEM;
+
+ mdb->max = max;
+ mdb->old = old;
+
+ mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
+ if (!mdb->mhash) {
+ kfree(mdb);
+ return -ENOMEM;
+ }
+
+ mdb->size = old ? old->size : 0;
+ mdb->ver = old ? old->ver ^ 1 : 0;
+
+ if (!old || elasticity)
+ get_random_bytes(&mdb->secret, sizeof(mdb->secret));
+ else
+ mdb->secret = old->secret;
+
+ if (!old)
+ goto out;
+
+ err = br_mdb_copy(mdb, old, elasticity);
+ if (err) {
+ kfree(mdb->mhash);
+ kfree(mdb);
+ return err;
+ }
+
+ call_rcu_bh(&mdb->rcu, br_mdb_free);
+
+out:
+ rcu_assign_pointer(*mdbp, mdb);
+
+ return 0;
+}
+
+static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+ __be32 group)
+{
+ struct sk_buff *skb;
+ struct igmphdr *ih;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
+ sizeof(*ih) + 4);
+ if (!skb)
+ goto out;
+
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ memcpy(eth->h_source, br->dev->dev_addr, 6);
+ eth->h_dest[0] = 1;
+ eth->h_dest[1] = 0;
+ eth->h_dest[2] = 0x5e;
+ eth->h_dest[3] = 0;
+ eth->h_dest[4] = 0;
+ eth->h_dest[5] = 1;
+ eth->h_proto = htons(ETH_P_IP);
+ skb_put(skb, sizeof(*eth));
+
+ skb_set_network_header(skb, skb->len);
+ iph = ip_hdr(skb);
+
+ iph->version = 4;
+ iph->ihl = 6;
+ iph->tos = 0xc0;
+ iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
+ iph->id = 0;
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = 1;
+ iph->protocol = IPPROTO_IGMP;
+ iph->saddr = 0;
+ iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+ ((u8 *)&iph[1])[0] = IPOPT_RA;
+ ((u8 *)&iph[1])[1] = 4;
+ ((u8 *)&iph[1])[2] = 0;
+ ((u8 *)&iph[1])[3] = 0;
+ ip_send_check(iph);
+ skb_put(skb, 24);
+
+ skb_set_transport_header(skb, skb->len);
+ ih = igmp_hdr(skb);
+ ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
+ ih->code = (group ? br->multicast_last_member_interval :
+ br->multicast_query_response_interval) /
+ (HZ / IGMP_TIMER_SCALE);
+ ih->group = group;
+ ih->csum = 0;
+ ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
+ skb_put(skb, sizeof(*ih));
+
+ __skb_pull(skb, sizeof(*eth));
+
+out:
+ return skb;
+}
+
+static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
+{
+ struct net_bridge *br = mp->br;
+ struct sk_buff *skb;
+
+ skb = br_multicast_alloc_query(br, mp->addr);
+ if (!skb)
+ goto timer;
+
+ netif_rx(skb);
+
+timer:
+ if (++mp->queries_sent < br->multicast_last_member_count)
+ mod_timer(&mp->query_timer,
+ jiffies + br->multicast_last_member_interval);
+}
+
+static void br_multicast_group_query_expired(unsigned long data)
+{
+ struct net_bridge_mdb_entry *mp = (void *)data;
+ struct net_bridge *br = mp->br;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+ mp->queries_sent >= br->multicast_last_member_count)
+ goto out;
+
+ br_multicast_send_group_query(mp);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
+{
+ struct net_bridge_port *port = pg->port;
+ struct net_bridge *br = port->br;
+ struct sk_buff *skb;
+
+ skb = br_multicast_alloc_query(br, pg->addr);
+ if (!skb)
+ goto timer;
+
+ br_deliver(port, skb);
+
+timer:
+ if (++pg->queries_sent < br->multicast_last_member_count)
+ mod_timer(&pg->query_timer,
+ jiffies + br->multicast_last_member_interval);
+}
+
+static void br_multicast_port_group_query_expired(unsigned long data)
+{
+ struct net_bridge_port_group *pg = (void *)data;
+ struct net_bridge_port *port = pg->port;
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
+ pg->queries_sent >= br->multicast_last_member_count)
+ goto out;
+
+ br_multicast_send_port_group_query(pg);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static struct net_bridge_mdb_entry *br_multicast_get_group(
+ struct net_bridge *br, struct net_bridge_port *port, __be32 group,
+ int hash)
+{
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *p;
+ unsigned count = 0;
+ unsigned max;
+ int elasticity;
+ int err;
+
+ hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
+ count++;
+ if (unlikely(group == mp->addr)) {
+ return mp;
+ }
+ }
+
+ elasticity = 0;
+ max = mdb->max;
+
+ if (unlikely(count > br->hash_elasticity && count)) {
+ if (net_ratelimit())
+ printk(KERN_INFO "%s: Multicast hash table "
+ "chain limit reached: %s\n",
+ br->dev->name, port ? port->dev->name :
+ br->dev->name);
+
+ elasticity = br->hash_elasticity;
+ }
+
+ if (mdb->size >= max) {
+ max *= 2;
+ if (unlikely(max >= br->hash_max)) {
+ printk(KERN_WARNING "%s: Multicast hash table maximum "
+ "reached, disabling snooping: %s, %d\n",
+ br->dev->name, port ? port->dev->name :
+ br->dev->name,
+ max);
+ err = -E2BIG;
+disable:
+ br->multicast_disabled = 1;
+ goto err;
+ }
+ }
+
+ if (max > mdb->max || elasticity) {
+ if (mdb->old) {
+ if (net_ratelimit())
+ printk(KERN_INFO "%s: Multicast hash table "
+ "on fire: %s\n",
+ br->dev->name, port ? port->dev->name :
+ br->dev->name);
+ err = -EEXIST;
+ goto err;
+ }
+
+ err = br_mdb_rehash(&br->mdb, max, elasticity);
+ if (err) {
+ printk(KERN_WARNING "%s: Cannot rehash multicast "
+ "hash table, disabling snooping: "
+ "%s, %d, %d\n",
+ br->dev->name, port ? port->dev->name :
+ br->dev->name,
+ mdb->size, err);
+ goto disable;
+ }
+
+ err = -EAGAIN;
+ goto err;
+ }
+
+ return NULL;
+
+err:
+ mp = ERR_PTR(err);
+ return mp;
+}
+
+static struct net_bridge_mdb_entry *br_multicast_new_group(
+ struct net_bridge *br, struct net_bridge_port *port, __be32 group)
+{
+ struct net_bridge_mdb_htable *mdb = br->mdb;
+ struct net_bridge_mdb_entry *mp;
+ int hash;
+
+ if (!mdb) {
+ if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
+ return NULL;
+ goto rehash;
+ }
+
+ hash = br_ip_hash(mdb, group);
+ mp = br_multicast_get_group(br, port, group, hash);
+ switch (PTR_ERR(mp)) {
+ case 0:
+ break;
+
+ case -EAGAIN:
+rehash:
+ mdb = br->mdb;
+ hash = br_ip_hash(mdb, group);
+ break;
+
+ default:
+ goto out;
+ }
+
+ mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
+ if (unlikely(!mp))
+ goto out;
+
+ mp->br = br;
+ mp->addr = group;
+ setup_timer(&mp->timer, br_multicast_group_expired,
+ (unsigned long)mp);
+ setup_timer(&mp->query_timer, br_multicast_group_query_expired,
+ (unsigned long)mp);
+
+ hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
+ mdb->size++;
+
+out:
+ return mp;
+}
+
+static int br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port, __be32 group)
+{
+ struct net_bridge_mdb_entry *mp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group **pp;
+ unsigned long now = jiffies;
+ int err;
+
+ if (ipv4_is_local_multicast(group))
+ return 0;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) ||
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
+ mp = br_multicast_new_group(br, port, group);
+ err = PTR_ERR(mp);
+ if (unlikely(IS_ERR(mp) || !mp))
+ goto err;
+
+ if (!port) {
+ hlist_add_head(&mp->mglist, &br->mglist);
+ mod_timer(&mp->timer, now + br->multicast_membership_interval);
+ goto out;
+ }
+
+ for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+ if (p->port == port)
+ goto found;
+ if ((unsigned long)p->port < (unsigned long)port)
+ break;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_ATOMIC);
+ err = -ENOMEM;
+ if (unlikely(!p))
+ goto err;
+
+ p->addr = group;
+ p->port = port;
+ p->next = *pp;
+ hlist_add_head(&p->mglist, &port->mglist);
+ setup_timer(&p->timer, br_multicast_port_group_expired,
+ (unsigned long)p);
+ setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
+ (unsigned long)p);
+
+ rcu_assign_pointer(*pp, p);
+
+found:
+ mod_timer(&p->timer, now + br->multicast_membership_interval);
+out:
+ err = 0;
+
+err:
+ spin_unlock(&br->multicast_lock);
+ return err;
+}
+
+static void br_multicast_router_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ if (port->multicast_router != 1 ||
+ timer_pending(&port->multicast_router_timer) ||
+ hlist_unhashed(&port->rlist))
+ goto out;
+
+ hlist_del_init_rcu(&port->rlist);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static void br_multicast_local_router_expired(unsigned long data)
+{
+}
+
+static void br_multicast_send_query(struct net_bridge *br,
+ struct net_bridge_port *port, u32 sent)
+{
+ unsigned long time;
+ struct sk_buff *skb;
+
+ if (!netif_running(br->dev) || br->multicast_disabled ||
+ timer_pending(&br->multicast_querier_timer))
+ return;
+
+ skb = br_multicast_alloc_query(br, 0);
+ if (!skb)
+ goto timer;
+
+ if (port) {
+ __skb_push(skb, sizeof(struct ethhdr));
+ skb->dev = port->dev;
+ NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+ dev_queue_xmit);
+ } else
+ netif_rx(skb);
+
+timer:
+ time = jiffies;
+ time += sent < br->multicast_startup_query_count ?
+ br->multicast_startup_query_interval :
+ br->multicast_query_interval;
+ mod_timer(port ? &port->multicast_query_timer :
+ &br->multicast_query_timer, time);
+}
+
+static void br_multicast_port_query_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ if (port && (port->state == BR_STATE_DISABLED ||
+ port->state == BR_STATE_BLOCKING))
+ goto out;
+
+ if (port->multicast_startup_queries_sent <
+ br->multicast_startup_query_count)
+ port->multicast_startup_queries_sent++;
+
+ br_multicast_send_query(port->br, port,
+ port->multicast_startup_queries_sent);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_add_port(struct net_bridge_port *port)
+{
+ port->multicast_router = 1;
+
+ setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
+ (unsigned long)port);
+ setup_timer(&port->multicast_query_timer,
+ br_multicast_port_query_expired, (unsigned long)port);
+}
+
+void br_multicast_del_port(struct net_bridge_port *port)
+{
+ del_timer_sync(&port->multicast_router_timer);
+}
+
+static void __br_multicast_enable_port(struct net_bridge_port *port)
+{
+ port->multicast_startup_queries_sent = 0;
+
+ if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
+ del_timer(&port->multicast_query_timer))
+ mod_timer(&port->multicast_query_timer, jiffies);
+}
+
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+ struct net_bridge *br = port->br;
+
+ spin_lock(&br->multicast_lock);
+ if (br->multicast_disabled || !netif_running(br->dev))
+ goto out;
+
+ __br_multicast_enable_port(port);
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_disable_port(struct net_bridge_port *port)
+{
+ struct net_bridge *br = port->br;
+ struct net_bridge_port_group *pg;
+ struct hlist_node *p, *n;
+
+ spin_lock(&br->multicast_lock);
+ hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
+ br_multicast_del_pg(br, pg);
+
+ if (!hlist_unhashed(&port->rlist))
+ hlist_del_init_rcu(&port->rlist);
+ del_timer(&port->multicast_router_timer);
+ del_timer(&port->multicast_query_timer);
+ spin_unlock(&br->multicast_lock);
+}
+
+static int br_multicast_igmp3_report(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct igmpv3_report *ih;
+ struct igmpv3_grec *grec;
+ int i;
+ int len;
+ int num;
+ int type;
+ int err = 0;
+ __be32 group;
+
+ if (!pskb_may_pull(skb, sizeof(*ih)))
+ return -EINVAL;
+
+ ih = igmpv3_report_hdr(skb);
+ num = ntohs(ih->ngrec);
+ len = sizeof(*ih);
+
+ for (i = 0; i < num; i++) {
+ len += sizeof(*grec);
+ if (!pskb_may_pull(skb, len))
+ return -EINVAL;
+
+ grec = (void *)(skb->data + len);
+ group = grec->grec_mca;
+ type = grec->grec_type;
+
+ len += grec->grec_nsrcs * 4;
+ if (!pskb_may_pull(skb, len))
+ return -EINVAL;
+
+ /* We treat this as an IGMPv2 report for now. */
+ switch (type) {
+ case IGMPV3_MODE_IS_INCLUDE:
+ case IGMPV3_MODE_IS_EXCLUDE:
+ case IGMPV3_CHANGE_TO_INCLUDE:
+ case IGMPV3_CHANGE_TO_EXCLUDE:
+ case IGMPV3_ALLOW_NEW_SOURCES:
+ case IGMPV3_BLOCK_OLD_SOURCES:
+ break;
+
+ default:
+ continue;
+ }
+
+ err = br_multicast_add_group(br, port, group);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static void br_multicast_add_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+ struct hlist_node *p;
+ struct hlist_node **h;
+
+ for (h = &br->router_list.first;
+ (p = *h) &&
+ (unsigned long)container_of(p, struct net_bridge_port, rlist) >
+ (unsigned long)port;
+ h = &p->next)
+ ;
+
+ port->rlist.pprev = h;
+ port->rlist.next = p;
+ rcu_assign_pointer(*h, &port->rlist);
+ if (p)
+ p->pprev = &port->rlist.next;
+}
+
+static void br_multicast_mark_router(struct net_bridge *br,
+ struct net_bridge_port *port)
+{
+ unsigned long now = jiffies;
+
+ if (!port) {
+ if (br->multicast_router == 1)
+ mod_timer(&br->multicast_router_timer,
+ now + br->multicast_querier_interval);
+ return;
+ }
+
+ if (port->multicast_router != 1)
+ return;
+
+ if (!hlist_unhashed(&port->rlist))
+ goto timer;
+
+ br_multicast_add_router(br, port);
+
+timer:
+ mod_timer(&port->multicast_router_timer,
+ now + br->multicast_querier_interval);
+}
+
+static void br_multicast_query_received(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 saddr)
+{
+ if (saddr)
+ mod_timer(&br->multicast_querier_timer,
+ jiffies + br->multicast_querier_interval);
+ else if (timer_pending(&br->multicast_querier_timer))
+ return;
+
+ br_multicast_mark_router(br, port);
+}
+
+static int br_multicast_query(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct net_bridge_mdb_entry *mp;
+ struct igmpv3_query *ih3;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group **pp;
+ unsigned long max_delay;
+ unsigned long now = jiffies;
+ __be32 group;
+ int err = 0;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) ||
+ (port && port->state == BR_STATE_DISABLED))
+ goto out;
+
+ br_multicast_query_received(br, port, iph->saddr);
+
+ group = ih->group;
+
+ if (skb->len == sizeof(*ih)) {
+ max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
+
+ if (!max_delay) {
+ max_delay = 10 * HZ;
+ group = 0;
+ }
+ } else {
+ if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ih3 = igmpv3_query_hdr(skb);
+ if (ih3->nsrcs)
+ goto out;
+
+ max_delay = ih3->code ?
+ IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
+ }
+
+ if (!group)
+ goto out;
+
+ mp = br_mdb_ip_get(br->mdb, group);
+ if (!mp)
+ goto out;
+
+ max_delay *= br->multicast_last_member_count;
+
+ if (!hlist_unhashed(&mp->mglist) &&
+ (timer_pending(&mp->timer) ?
+ time_after(mp->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&mp->timer) >= 0))
+ mod_timer(&mp->timer, now + max_delay);
+
+ for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+ if (timer_pending(&p->timer) ?
+ time_after(p->timer.expires, now + max_delay) :
+ try_to_del_timer_sync(&p->timer) >= 0)
+ mod_timer(&p->timer, now + max_delay);
+ }
+
+out:
+ spin_unlock(&br->multicast_lock);
+ return err;
+}
+
+static void br_multicast_leave_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ __be32 group)
+{
+ struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct net_bridge_port_group *p;
+ unsigned long now;
+ unsigned long time;
+
+ if (ipv4_is_local_multicast(group))
+ return;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) ||
+ (port && port->state == BR_STATE_DISABLED) ||
+ timer_pending(&br->multicast_querier_timer))
+ goto out;
+
+ mdb = br->mdb;
+ mp = br_mdb_ip_get(mdb, group);
+ if (!mp)
+ goto out;
+
+ now = jiffies;
+ time = now + br->multicast_last_member_count *
+ br->multicast_last_member_interval;
+
+ if (!port) {
+ if (!hlist_unhashed(&mp->mglist) &&
+ (timer_pending(&mp->timer) ?
+ time_after(mp->timer.expires, time) :
+ try_to_del_timer_sync(&mp->timer) >= 0)) {
+ mod_timer(&mp->timer, time);
+
+ mp->queries_sent = 0;
+ mod_timer(&mp->query_timer, now);
+ }
+
+ goto out;
+ }
+
+ for (p = mp->ports; p; p = p->next) {
+ if (p->port != port)
+ continue;
+
+ if (!hlist_unhashed(&p->mglist) &&
+ (timer_pending(&p->timer) ?
+ time_after(p->timer.expires, time) :
+ try_to_del_timer_sync(&p->timer) >= 0)) {
+ mod_timer(&p->timer, time);
+
+ p->queries_sent = 0;
+ mod_timer(&p->query_timer, now);
+ }
+
+ break;
+ }
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static int br_multicast_ipv4_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ struct sk_buff *skb2 = skb;
+ struct iphdr *iph;
+ struct igmphdr *ih;
+ unsigned len;
+ unsigned offset;
+ int err;
+
+ BR_INPUT_SKB_CB(skb)->igmp = 0;
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
+
+ /* We treat OOM as packet loss for now. */
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return -EINVAL;
+
+ iph = ip_hdr(skb);
+
+ if (iph->ihl < 5 || iph->version != 4)
+ return -EINVAL;
+
+ if (!pskb_may_pull(skb, ip_hdrlen(skb)))
+ return -EINVAL;
+
+ iph = ip_hdr(skb);
+
+ if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+ return -EINVAL;
+
+ if (iph->protocol != IPPROTO_IGMP)
+ return 0;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len || len < ip_hdrlen(skb))
+ return -EINVAL;
+
+ if (skb->len > len) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ return -ENOMEM;
+
+ err = pskb_trim_rcsum(skb2, len);
+ if (err) {
+ kfree_skb(skb2);
+ return err;
+ }
+ }
+
+ len -= ip_hdrlen(skb2);
+ offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
+ __skb_pull(skb2, offset);
+ skb_reset_transport_header(skb2);
+
+ err = -EINVAL;
+ if (!pskb_may_pull(skb2, sizeof(*ih)))
+ goto out;
+
+ iph = ip_hdr(skb2);
+
+ switch (skb2->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ if (!csum_fold(skb2->csum))
+ break;
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb2->csum = 0;
+ if (skb_checksum_complete(skb2))
+ goto out;
+ }
+
+ err = 0;
+
+ BR_INPUT_SKB_CB(skb)->igmp = 1;
+ ih = igmp_hdr(skb2);
+
+ switch (ih->type) {
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+ err = br_multicast_add_group(br, port, ih->group);
+ break;
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ err = br_multicast_igmp3_report(br, port, skb2);
+ break;
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ err = br_multicast_query(br, port, skb2);
+ break;
+ case IGMP_HOST_LEAVE_MESSAGE:
+ br_multicast_leave_group(br, port, ih->group);
+ break;
+ }
+
+out:
+ __skb_push(skb2, offset);
+ if (skb2 != skb)
+ kfree_skb(skb2);
+ return err;
+}
+
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ if (br->multicast_disabled)
+ return 0;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ return br_multicast_ipv4_rcv(br, port, skb);
+ }
+
+ return 0;
+}
+
+static void br_multicast_query_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ spin_lock(&br->multicast_lock);
+ if (br->multicast_startup_queries_sent <
+ br->multicast_startup_query_count)
+ br->multicast_startup_queries_sent++;
+
+ br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+
+ spin_unlock(&br->multicast_lock);
+}
+
+void br_multicast_init(struct net_bridge *br)
+{
+ br->hash_elasticity = 4;
+ br->hash_max = 512;
+
+ br->multicast_router = 1;
+ br->multicast_last_member_count = 2;
+ br->multicast_startup_query_count = 2;
+
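+ /*
+ * Interval defaults follow the IGMPv2/v3 querier recommendations:
+ * 125s query interval, 10s max response time, 260s membership
+ * interval.
+ */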
+ br->multicast_last_member_interval = HZ;
+ br->multicast_query_response_interval = 10 * HZ;
+ br->multicast_startup_query_interval = 125 * HZ / 4;
+ br->multicast_query_interval = 125 * HZ;
+ br->multicast_querier_interval = 255 * HZ;
+ br->multicast_membership_interval = 260 * HZ;
+
+ spin_lock_init(&br->multicast_lock);
+ setup_timer(&br->multicast_router_timer,
+ br_multicast_local_router_expired, 0);
+ setup_timer(&br->multicast_querier_timer,
+ br_multicast_local_router_expired, 0);
+ setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+ (unsigned long)br);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+ br->multicast_startup_queries_sent = 0;
+
+ if (br->multicast_disabled)
+ return;
+
+ mod_timer(&br->multicast_query_timer, jiffies);
+}
+
+void br_multicast_stop(struct net_bridge *br)
+{
+ struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct hlist_node *p, *n;
+ u32 ver;
+ int i;
+
+ del_timer_sync(&br->multicast_router_timer);
+ del_timer_sync(&br->multicast_querier_timer);
+ del_timer_sync(&br->multicast_query_timer);
+
+ spin_lock_bh(&br->multicast_lock);
+ mdb = br->mdb;
+ if (!mdb)
+ goto out;
+
+ br->mdb = NULL;
+
+ ver = mdb->ver;
+ for (i = 0; i < mdb->max; i++) {
+ hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
+ hlist[ver]) {
+ del_timer(&mp->timer);
+ del_timer(&mp->query_timer);
+ call_rcu_bh(&mp->rcu, br_multicast_free_group);
+ }
+ }
+
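+ /*
+ * If a rehash is still in flight, wait out its RCU grace period so
+ * the previous table has been freed before we tear this one down.
+ */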
+ if (mdb->old) {
+ spin_unlock_bh(&br->multicast_lock);
+ synchronize_rcu_bh();
+ spin_lock_bh(&br->multicast_lock);
+ WARN_ON(mdb->old);
+ }
+
+ mdb->old = mdb;
+ call_rcu_bh(&mdb->rcu, br_mdb_free);
+
+out:
+ spin_unlock_bh(&br->multicast_lock);
+}
+
+int br_multicast_set_router(struct net_bridge *br, unsigned long val)
+{
+ int err = -ENOENT;
+
+ spin_lock_bh(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+ switch (val) {
+ case 0:
+ case 2:
+ del_timer(&br->multicast_router_timer);
+ /* fall through */
+ case 1:
+ br->multicast_router = val;
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+unlock:
+ spin_unlock_bh(&br->multicast_lock);
+
+ return err;
+}
+
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
+{
+ struct net_bridge *br = p->br;
+ int err = -ENOENT;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
+ goto unlock;
+
+ switch (val) {
+ case 0:
+ case 1:
+ case 2:
+ p->multicast_router = val;
+ err = 0;
+
+ if (val < 2 && !hlist_unhashed(&p->rlist))
+ hlist_del_init_rcu(&p->rlist);
+
+ if (val == 1)
+ break;
+
+ del_timer(&p->multicast_router_timer);
+
+ if (val == 0)
+ break;
+
+ br_multicast_add_router(br, p);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+unlock:
+ spin_unlock(&br->multicast_lock);
+
+ return err;
+}
+
+int br_multicast_toggle(struct net_bridge *br, unsigned long val)
+{
+ struct net_bridge_port *port;
+ int err = -ENOENT;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+ err = 0;
+ if (br->multicast_disabled == !val)
+ goto unlock;
+
+ br->multicast_disabled = !val;
+ if (br->multicast_disabled)
+ goto unlock;
+
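+ /*
+ * Re-enabling snooping with an existing mdb: rebuild the hash table;
+ * if a rehash is already pending or the rebuild fails, fall back to
+ * leaving snooping disabled.
+ */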
+ if (br->mdb) {
+ if (br->mdb->old) {
+ err = -EEXIST;
+rollback:
+ br->multicast_disabled = !!val;
+ goto unlock;
+ }
+
+ err = br_mdb_rehash(&br->mdb, br->mdb->max,
+ br->hash_elasticity);
+ if (err)
+ goto rollback;
+ }
+
+ br_multicast_open(br);
+ list_for_each_entry(port, &br->port_list, list) {
+ if (port->state == BR_STATE_DISABLED ||
+ port->state == BR_STATE_BLOCKING)
+ continue;
+
+ __br_multicast_enable_port(port);
+ }
+
+unlock:
+ spin_unlock(&br->multicast_lock);
+
+ return err;
+}
+
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
+{
+ int err = -ENOENT;
+ u32 old;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev))
+ goto unlock;
+
+ err = -EINVAL;
+ if (!is_power_of_2(val))
+ goto unlock;
+ if (br->mdb && val < br->mdb->size)
+ goto unlock;
+
+ err = 0;
+
+ old = br->hash_max;
+ br->hash_max = val;
+
+ if (br->mdb) {
+ if (br->mdb->old) {
+ err = -EEXIST;
+rollback:
+ br->hash_max = old;
+ goto unlock;
+ }
+
+ err = br_mdb_rehash(&br->mdb, br->hash_max,
+ br->hash_elasticity);
+ if (err)
+ goto rollback;
+ }
+
+unlock:
+ spin_unlock(&br->multicast_lock);
+
+ return err;
+}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682e..1cf2cef7858 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -57,6 +57,41 @@ struct net_bridge_fdb_entry
unsigned char is_static;
};
+struct net_bridge_port_group {
+ struct net_bridge_port *port;
+ struct net_bridge_port_group *next;
+ struct hlist_node mglist;
+ struct rcu_head rcu;
+ struct timer_list timer;
+ struct timer_list query_timer;
+ __be32 addr;
+ u32 queries_sent;
+};
+
+struct net_bridge_mdb_entry
+{
+ struct hlist_node hlist[2];
+ struct hlist_node mglist;
+ struct net_bridge *br;
+ struct net_bridge_port_group *ports;
+ struct rcu_head rcu;
+ struct timer_list timer;
+ struct timer_list query_timer;
+ __be32 addr;
+ u32 queries_sent;
+};
+
+struct net_bridge_mdb_htable
+{
+ struct hlist_head *mhash;
+ struct rcu_head rcu;
+ struct net_bridge_mdb_htable *old;
+ u32 size;
+ u32 max;
+ u32 secret;
+ u32 ver;
+};
+
struct net_bridge_port
{
struct net_bridge *br;
@@ -84,6 +119,15 @@ struct net_bridge_port
unsigned long flags;
#define BR_HAIRPIN_MODE 0x00000001
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ u32 multicast_startup_queries_sent;
+ unsigned char multicast_router;
+ struct timer_list multicast_router_timer;
+ struct timer_list multicast_query_timer;
+ struct hlist_head mglist;
+ struct hlist_node rlist;
+#endif
};
struct net_bridge
@@ -93,7 +137,6 @@ struct net_bridge
struct net_device *dev;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
- struct list_head age_list;
unsigned long feature_mask;
#ifdef CONFIG_BRIDGE_NETFILTER
struct rtable fake_rtable;
@@ -125,6 +168,35 @@ struct net_bridge
unsigned char topology_change;
unsigned char topology_change_detected;
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ unsigned char multicast_router;
+
+ u8 multicast_disabled:1;
+
+ u32 hash_elasticity;
+ u32 hash_max;
+
+ u32 multicast_last_member_count;
+ u32 multicast_startup_queries_sent;
+ u32 multicast_startup_query_count;
+
+ unsigned long multicast_last_member_interval;
+ unsigned long multicast_membership_interval;
+ unsigned long multicast_querier_interval;
+ unsigned long multicast_query_interval;
+ unsigned long multicast_query_response_interval;
+ unsigned long multicast_startup_query_interval;
+
+ spinlock_t multicast_lock;
+ struct net_bridge_mdb_htable *mdb;
+ struct hlist_head router_list;
+ struct hlist_head mglist;
+
+ struct timer_list multicast_router_timer;
+ struct timer_list multicast_querier_timer;
+ struct timer_list multicast_query_timer;
+#endif
+
struct timer_list hello_timer;
struct timer_list tcn_timer;
struct timer_list topology_change_timer;
@@ -132,6 +204,14 @@ struct net_bridge
struct kobject *ifobj;
};
+struct br_input_skb_cb {
+ struct net_device *brdev;
+ int igmp;
+ int mrouters_only;
+};
+
+#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
+
extern struct notifier_block br_device_notifier;
extern const u8 br_group_address[ETH_ALEN];
@@ -175,7 +255,8 @@ extern void br_forward(const struct net_bridge_port *to,
struct sk_buff *skb);
extern int br_forward_finish(struct sk_buff *skb);
extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb);
+extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+ struct sk_buff *skb2);
/* br_if.c */
extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -198,6 +279,94 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+/* br_multicast.c */
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+extern int br_multicast_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb);
+extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+ struct sk_buff *skb);
+extern void br_multicast_add_port(struct net_bridge_port *port);
+extern void br_multicast_del_port(struct net_bridge_port *port);
+extern void br_multicast_enable_port(struct net_bridge_port *port);
+extern void br_multicast_disable_port(struct net_bridge_port *port);
+extern void br_multicast_init(struct net_bridge *br);
+extern void br_multicast_open(struct net_bridge *br);
+extern void br_multicast_stop(struct net_bridge *br);
+extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb);
+extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb, struct sk_buff *skb2);
+extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_port_router(struct net_bridge_port *p,
+ unsigned long val);
+extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+
+static inline bool br_multicast_is_router(struct net_bridge *br)
+{
+ return br->multicast_router == 2 ||
+ (br->multicast_router == 1 &&
+ timer_pending(&br->multicast_router_timer));
+}
+#else
+static inline int br_multicast_rcv(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+ struct sk_buff *skb)
+{
+ return NULL;
+}
+
+static inline void br_multicast_add_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_del_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_enable_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_disable_port(struct net_bridge_port *port)
+{
+}
+
+static inline void br_multicast_init(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_open(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_stop(struct net_bridge *br)
+{
+}
+
+static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb)
+{
+}
+
+static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+ struct sk_buff *skb,
+ struct sk_buff *skb2)
+{
+}
+
+static inline bool br_multicast_is_router(struct net_bridge *br)
+{
+ return false;
+}
+#endif
+
/* br_netfilter.c */
#ifdef CONFIG_BRIDGE_NETFILTER
extern int br_netfilter_init(void);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fd3f8d6c099..edcf14b560f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -386,6 +386,8 @@ static void br_make_forwarding(struct net_bridge_port *p)
else
p->state = BR_STATE_LEARNING;
+ br_multicast_enable_port(p);
+
br_log_state(p);
if (br->forward_delay != 0)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9a52ac5b452..d527119e9f5 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -108,6 +108,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
del_timer(&p->hold_timer);
br_fdb_delete_by_port(br, p, 0);
+ br_multicast_disable_port(p);
br_configuration_update(br);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index bee4f300d0c..dd321e39e62 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -345,6 +345,273 @@ static ssize_t store_flush(struct device *d,
}
static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static ssize_t show_multicast_router(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%d\n", br->multicast_router);
+}
+
+static ssize_t store_multicast_router(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_multicast_set_router);
+}
+static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
+ store_multicast_router);
+
+static ssize_t show_multicast_snooping(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%d\n", !br->multicast_disabled);
+}
+
+static ssize_t store_multicast_snooping(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_multicast_toggle);
+}
+static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
+ show_multicast_snooping, store_multicast_snooping);
+
+static ssize_t show_hash_elasticity(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->hash_elasticity);
+}
+
+static int set_elasticity(struct net_bridge *br, unsigned long val)
+{
+ br->hash_elasticity = val;
+ return 0;
+}
+
+static ssize_t store_hash_elasticity(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_elasticity);
+}
+static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
+ store_hash_elasticity);
+
+static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->hash_max);
+}
+
+static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
+}
+static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
+ store_hash_max);
+
+static ssize_t show_multicast_last_member_count(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->multicast_last_member_count);
+}
+
+static int set_last_member_count(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_last_member_count = val;
+ return 0;
+}
+
+static ssize_t store_multicast_last_member_count(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_last_member_count);
+}
+static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
+ show_multicast_last_member_count,
+ store_multicast_last_member_count);
+
+static ssize_t show_multicast_startup_query_count(
+ struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%u\n", br->multicast_startup_query_count);
+}
+
+static int set_startup_query_count(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_startup_query_count = val;
+ return 0;
+}
+
+static ssize_t store_multicast_startup_query_count(
+ struct device *d, struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_startup_query_count);
+}
+static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
+ show_multicast_startup_query_count,
+ store_multicast_startup_query_count);
+
+static ssize_t show_multicast_last_member_interval(
+ struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_last_member_interval));
+}
+
+static int set_last_member_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_last_member_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_last_member_interval(
+ struct device *d, struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_last_member_interval);
+}
+static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
+ show_multicast_last_member_interval,
+ store_multicast_last_member_interval);
+
+static ssize_t show_multicast_membership_interval(
+ struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_membership_interval));
+}
+
+static int set_membership_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_membership_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_membership_interval(
+ struct device *d, struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_membership_interval);
+}
+static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
+ show_multicast_membership_interval,
+ store_multicast_membership_interval);
+
+static ssize_t show_multicast_querier_interval(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_querier_interval));
+}
+
+static int set_querier_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_querier_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_querier_interval(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_querier_interval);
+}
+static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
+ show_multicast_querier_interval,
+ store_multicast_querier_interval);
+
+static ssize_t show_multicast_query_interval(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_query_interval));
+}
+
+static int set_query_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_query_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_query_interval(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_query_interval);
+}
+static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
+ show_multicast_query_interval,
+ store_multicast_query_interval);
+
+static ssize_t show_multicast_query_response_interval(
+ struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(
+ buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_query_response_interval));
+}
+
+static int set_query_response_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_query_response_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_query_response_interval(
+ struct device *d, struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_query_response_interval);
+}
+static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
+ show_multicast_query_response_interval,
+ store_multicast_query_response_interval);
+
+static ssize_t show_multicast_startup_query_interval(
+ struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(
+ buf, "%lu\n",
+ jiffies_to_clock_t(br->multicast_startup_query_interval));
+}
+
+static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
+{
+ br->multicast_startup_query_interval = clock_t_to_jiffies(val);
+ return 0;
+}
+
+static ssize_t store_multicast_startup_query_interval(
+ struct device *d, struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_startup_query_interval);
+}
+static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
+ show_multicast_startup_query_interval,
+ store_multicast_startup_query_interval);
+#endif
+
static struct attribute *bridge_attrs[] = {
&dev_attr_forward_delay.attr,
&dev_attr_hello_time.attr,
@@ -364,6 +631,20 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_gc_timer.attr,
&dev_attr_group_addr.attr,
&dev_attr_flush.attr,
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ &dev_attr_multicast_router.attr,
+ &dev_attr_multicast_snooping.attr,
+ &dev_attr_hash_elasticity.attr,
+ &dev_attr_hash_max.attr,
+ &dev_attr_multicast_last_member_count.attr,
+ &dev_attr_multicast_startup_query_count.attr,
+ &dev_attr_multicast_last_member_interval.attr,
+ &dev_attr_multicast_membership_interval.attr,
+ &dev_attr_multicast_querier_interval.attr,
+ &dev_attr_multicast_query_interval.attr,
+ &dev_attr_multicast_query_response_interval.attr,
+ &dev_attr_multicast_startup_query_interval.attr,
+#endif
NULL
};
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 820643a3ba9..696596cd338 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -159,6 +159,21 @@ static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
show_hairpin_mode, store_hairpin_mode);
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
+{
+ return sprintf(buf, "%d\n", p->multicast_router);
+}
+
+static ssize_t store_multicast_router(struct net_bridge_port *p,
+ unsigned long v)
+{
+ return br_multicast_set_port_router(p, v);
+}
+static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
+ store_multicast_router);
+#endif
+
static struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
@@ -176,6 +191,9 @@ static struct brport_attribute *brport_attrs[] = {
&brport_attr_hold_timer,
&brport_attr_flush,
&brport_attr_hairpin_mode,
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+ &brport_attr_multicast_router,
+#endif
NULL
};
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index bd91dc58d49..5d1176758ca 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_802_3_mt,
.checkentry = ebt_802_3_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)),
+ .matchsize = sizeof(struct ebt_802_3_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index b7ad60419f9..e727697c584 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_arp_mt,
.checkentry = ebt_arp_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)),
+ .matchsize = sizeof(struct ebt_arp_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 76584cd72e5..f392e9d93f5 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
.target = ebt_arpreply_tg,
.checkentry = ebt_arpreply_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)),
+ .targetsize = sizeof(struct ebt_arpreply_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 6b49ea9e31f..2bb40d728a3 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
(1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
.target = ebt_dnat_tg,
.checkentry = ebt_dnat_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)),
+ .targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index d771bbfbcbe..5de6df6f86b 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_ip_mt,
.checkentry = ebt_ip_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)),
+ .matchsize = sizeof(struct ebt_ip_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 784a6573876..bbf2534ef02 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_ip6_mt,
.checkentry = ebt_ip6_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)),
+ .matchsize = sizeof(struct ebt_ip6_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index f7bd9192ff0..7a8182710eb 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
return true;
}
+
+#ifdef CONFIG_COMPAT
+/*
+ * no conversion function needed --
+ * only avg/burst have meaningful values in userspace.
+ */
+struct ebt_compat_limit_info {
+ compat_uint_t avg, burst;
+ compat_ulong_t prev;
+ compat_uint_t credit, credit_cap, cost;
+};
+#endif
+
static struct xt_match ebt_limit_mt_reg __read_mostly = {
.name = "limit",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_limit_mt,
.checkentry = ebt_limit_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)),
+ .matchsize = sizeof(struct ebt_limit_info),
+#ifdef CONFIG_COMPAT
+ .compatsize = sizeof(struct ebt_compat_limit_info),
+#endif
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ea3fdd1d4..e873924ddb5 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.target = ebt_log_tg,
.checkentry = ebt_log_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)),
+ .targetsize = sizeof(struct ebt_log_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2fee7e8e2e9..2b5ce533d6b 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
return false;
return true;
}
+#ifdef CONFIG_COMPAT
+struct compat_ebt_mark_t_info {
+ compat_ulong_t mark;
+ compat_uint_t target;
+};
+
+static void mark_tg_compat_from_user(void *dst, const void *src)
+{
+ const struct compat_ebt_mark_t_info *user = src;
+ struct ebt_mark_t_info *kern = dst;
+
+ kern->mark = user->mark;
+ kern->target = user->target;
+}
+
+static int mark_tg_compat_to_user(void __user *dst, const void *src)
+{
+ struct compat_ebt_mark_t_info __user *user = dst;
+ const struct ebt_mark_t_info *kern = src;
+
+ if (put_user(kern->mark, &user->mark) ||
+ put_user(kern->target, &user->target))
+ return -EFAULT;
+ return 0;
+}
+#endif
static struct xt_target ebt_mark_tg_reg __read_mostly = {
.name = "mark",
@@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.target = ebt_mark_tg,
.checkentry = ebt_mark_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)),
+ .targetsize = sizeof(struct ebt_mark_t_info),
+#ifdef CONFIG_COMPAT
+ .compatsize = sizeof(struct compat_ebt_mark_t_info),
+ .compat_from_user = mark_tg_compat_from_user,
+ .compat_to_user = mark_tg_compat_to_user,
+#endif
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index ea570f214b1..8de8c396d91 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
return true;
}
+
+#ifdef CONFIG_COMPAT
+struct compat_ebt_mark_m_info {
+ compat_ulong_t mark, mask;
+ uint8_t invert, bitmask;
+};
+
+static void mark_mt_compat_from_user(void *dst, const void *src)
+{
+ const struct compat_ebt_mark_m_info *user = src;
+ struct ebt_mark_m_info *kern = dst;
+
+ kern->mark = user->mark;
+ kern->mask = user->mask;
+ kern->invert = user->invert;
+ kern->bitmask = user->bitmask;
+}
+
+static int mark_mt_compat_to_user(void __user *dst, const void *src)
+{
+ struct compat_ebt_mark_m_info __user *user = dst;
+ const struct ebt_mark_m_info *kern = src;
+
+ if (put_user(kern->mark, &user->mark) ||
+ put_user(kern->mask, &user->mask) ||
+ put_user(kern->invert, &user->invert) ||
+ put_user(kern->bitmask, &user->bitmask))
+ return -EFAULT;
+ return 0;
+}
+#endif
+
static struct xt_match ebt_mark_mt_reg __read_mostly = {
.name = "mark_m",
.revision = 0,
.family = NFPROTO_BRIDGE,
.match = ebt_mark_mt,
.checkentry = ebt_mark_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)),
+ .matchsize = sizeof(struct ebt_mark_m_info),
+#ifdef CONFIG_COMPAT
+ .compatsize = sizeof(struct compat_ebt_mark_m_info),
+ .compat_from_user = mark_mt_compat_from_user,
+ .compat_to_user = mark_mt_compat_to_user,
+#endif
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 2a63d996dd4..40dbd248b9a 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.target = ebt_nflog_tg,
.checkentry = ebt_nflog_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)),
+ .targetsize = sizeof(struct ebt_nflog_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 883e96e2a54..e2a07e6cbef 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_pkttype_mt,
.checkentry = ebt_pkttype_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)),
+ .matchsize = sizeof(struct ebt_pkttype_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index c8a49f7a57b..9be8fbcd370 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
(1 << NF_BR_BROUTING),
.target = ebt_redirect_tg,
.checkentry = ebt_redirect_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)),
+ .targetsize = sizeof(struct ebt_redirect_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 8d04d4c302b..9c7b520765a 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
.hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
.target = ebt_snat_tg,
.checkentry = ebt_snat_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)),
+ .targetsize = sizeof(struct ebt_nat_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 75e29a9cebd..92a93d36376 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_stp_mt,
.checkentry = ebt_stp_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)),
+ .matchsize = sizeof(struct ebt_stp_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce50688a643..c6ac657074a 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -275,7 +275,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.target = ebt_ulog_tg,
.checkentry = ebt_ulog_tg_check,
- .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)),
+ .targetsize = sizeof(struct ebt_ulog_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 3dddd489328..be1dd2e1f61 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
.family = NFPROTO_BRIDGE,
.match = ebt_vlan_mt,
.checkentry = ebt_vlan_mt_check,
- .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)),
+ .matchsize = sizeof(struct ebt_vlan_info),
.me = THIS_MODULE,
};
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d32ab13e728..ae3f106c390 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
static void __net_exit broute_net_exit(struct net *net)
{
- ebt_unregister_table(net->xt.broute_table);
+ ebt_unregister_table(net, net->xt.broute_table);
}
static struct pernet_operations broute_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 60b1a6ca718..42e6bd09457 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
static void __net_exit frame_filter_net_exit(struct net *net)
{
- ebt_unregister_table(net->xt.frame_filter);
+ ebt_unregister_table(net, net->xt.frame_filter);
}
static struct pernet_operations frame_filter_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4a98804203b..6dc2f878ae0 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
static void __net_exit frame_nat_net_exit(struct net *net)
{
- ebt_unregister_table(net->xt.frame_nat);
+ ebt_unregister_table(net, net->xt.frame_nat);
}
static struct pernet_operations frame_nat_net_ops = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0b7f262cd14..dfb58056a89 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -33,11 +33,6 @@
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
"report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */
-#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
- ": out of memory: "format, ## args)
-/* #define MEMPRINT(format, args...) */
-
-
/*
* Each cpu has its own set of counters, so there is no need for write_lock in
@@ -56,11 +51,37 @@
static DEFINE_MUTEX(ebt_mutex);
+#ifdef CONFIG_COMPAT
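+/* Standard verdicts >= 0 are jump offsets into the entry blob; adjust
+ * them for the layout difference between 32-bit userspace and the
+ * kernel, and leave the negative standard verdicts untouched.
+ */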
+static void ebt_standard_compat_from_user(void *dst, const void *src)
+{
+ int v = *(compat_int_t *)src;
+
+ if (v >= 0)
+ v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
+ memcpy(dst, &v, sizeof(v));
+}
+
+static int ebt_standard_compat_to_user(void __user *dst, const void *src)
+{
+ compat_int_t cv = *(int *)src;
+
+ if (cv >= 0)
+ cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
+ return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
+}
+#endif
+
+
static struct xt_target ebt_standard_target = {
.name = "standard",
.revision = 0,
.family = NFPROTO_BRIDGE,
.targetsize = sizeof(int),
+#ifdef CONFIG_COMPAT
+ .compatsize = sizeof(compat_int_t),
+ .compat_from_user = ebt_standard_compat_from_user,
+ .compat_to_user = ebt_standard_compat_to_user,
+#endif
};
static inline int
@@ -82,7 +103,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
-static inline int ebt_dev_check(char *entry, const struct net_device *device)
+static inline int
+ebt_dev_check(const char *entry, const struct net_device *device)
{
int i = 0;
const char *devname;
@@ -100,8 +122,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
-static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h,
- const struct net_device *in, const struct net_device *out)
+static inline int
+ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
+ const struct net_device *in, const struct net_device *out)
{
int verdict, i;
@@ -156,12 +179,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
int i, nentries;
struct ebt_entry *point;
struct ebt_counter *counter_base, *cb_base;
- struct ebt_entry_target *t;
+ const struct ebt_entry_target *t;
int verdict, sp = 0;
struct ebt_chainstack *cs;
struct ebt_entries *chaininfo;
- char *base;
- struct ebt_table_info *private;
+ const char *base;
+ const struct ebt_table_info *private;
bool hotdrop = false;
struct xt_match_param mtpar;
struct xt_target_param tgpar;
@@ -395,7 +418,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
return 0;
}
-static int ebt_verify_pointers(struct ebt_replace *repl,
+static int ebt_verify_pointers(const struct ebt_replace *repl,
struct ebt_table_info *newinfo)
{
unsigned int limit = repl->entries_size;
@@ -442,6 +465,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
break;
if (left < e->next_offset)
break;
+ if (e->next_offset < sizeof(struct ebt_entry))
+ return -EINVAL;
offset += e->next_offset;
}
}
@@ -466,8 +491,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
* to parse the userspace data
*/
static inline int
-ebt_check_entry_size_and_hooks(struct ebt_entry *e,
- struct ebt_table_info *newinfo,
+ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
+ const struct ebt_table_info *newinfo,
unsigned int *n, unsigned int *cnt,
unsigned int *totalcnt, unsigned int *udc_cnt)
{
@@ -561,13 +586,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
}
static inline int
-ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
+ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
{
struct xt_mtdtor_param par;
if (i && (*i)-- == 0)
return 1;
+ par.net = net;
par.match = m->u.match;
par.matchinfo = m->data;
par.family = NFPROTO_BRIDGE;
@@ -578,13 +604,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
}
static inline int
-ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
+ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
{
struct xt_tgdtor_param par;
if (i && (*i)-- == 0)
return 1;
+ par.net = net;
par.target = w->u.watcher;
par.targinfo = w->data;
par.family = NFPROTO_BRIDGE;
@@ -595,7 +622,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
}
static inline int
-ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
+ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
struct xt_tgdtor_param par;
struct ebt_entry_target *t;
@@ -605,10 +632,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
/* we're done */
if (cnt && (*cnt)-- == 0)
return 1;
- EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL);
- EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL);
+ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
+ EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
+ par.net = net;
par.target = t->u.target;
par.targinfo = t->data;
par.family = NFPROTO_BRIDGE;
@@ -619,7 +647,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
}
static inline int
-ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
+ebt_check_entry(struct ebt_entry *e, struct net *net,
+ const struct ebt_table_info *newinfo,
const char *name, unsigned int *cnt,
struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
@@ -671,6 +700,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
}
i = 0;
+ mtpar.net = tgpar.net = net;
mtpar.table = tgpar.table = name;
mtpar.entryinfo = tgpar.entryinfo = e;
mtpar.hook_mask = tgpar.hook_mask = hookmask;
@@ -726,9 +756,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
(*cnt)++;
return 0;
cleanup_watchers:
- EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j);
+ EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
- EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i);
+ EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
return ret;
}
@@ -737,12 +767,12 @@ cleanup_matches:
* the hook mask for udc tells us from which base chains the udc can be
* accessed. This mask is a parameter to the check() functions of the extensions
*/
-static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
+static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
unsigned int udc_cnt, unsigned int hooknr, char *base)
{
int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
- struct ebt_entry *e = (struct ebt_entry *)chain->data;
- struct ebt_entry_target *t;
+ const struct ebt_entry *e = (struct ebt_entry *)chain->data;
+ const struct ebt_entry_target *t;
while (pos < nentries || chain_nr != -1) {
/* end of udc, go back one 'recursion' step */
@@ -808,7 +838,8 @@ letscontinue:
}
/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
-static int translate_table(char *name, struct ebt_table_info *newinfo)
+static int translate_table(struct net *net, const char *name,
+ struct ebt_table_info *newinfo)
{
unsigned int i, j, k, udc_cnt;
int ret;
@@ -917,17 +948,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
/* used to know what we need to clean up if something goes wrong */
i = 0;
ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
- ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt);
+ ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
if (ret != 0) {
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
- ebt_cleanup_entry, &i);
+ ebt_cleanup_entry, net, &i);
}
vfree(cl_s);
return ret;
}
/* called under write_lock */
-static void get_counters(struct ebt_counter *oldcounters,
+static void get_counters(const struct ebt_counter *oldcounters,
struct ebt_counter *counters, unsigned int nentries)
{
int i, cpu;
@@ -949,90 +980,45 @@ static void get_counters(struct ebt_counter *oldcounters,
}
}
-/* replace the table */
-static int do_replace(struct net *net, void __user *user, unsigned int len)
+static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ struct ebt_table_info *newinfo)
{
- int ret, i, countersize;
- struct ebt_table_info *newinfo;
- struct ebt_replace tmp;
- struct ebt_table *t;
+ int ret, i;
struct ebt_counter *counterstmp = NULL;
/* used to be able to unlock earlier */
struct ebt_table_info *table;
-
- if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
- return -EFAULT;
-
- if (len != sizeof(tmp) + tmp.entries_size) {
- BUGPRINT("Wrong len argument\n");
- return -EINVAL;
- }
-
- if (tmp.entries_size == 0) {
- BUGPRINT("Entries_size never zero\n");
- return -EINVAL;
- }
- /* overflow check */
- if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
- SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
- return -ENOMEM;
- if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
- return -ENOMEM;
-
- countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
- newinfo = vmalloc(sizeof(*newinfo) + countersize);
- if (!newinfo)
- return -ENOMEM;
-
- if (countersize)
- memset(newinfo->counters, 0, countersize);
-
- newinfo->entries = vmalloc(tmp.entries_size);
- if (!newinfo->entries) {
- ret = -ENOMEM;
- goto free_newinfo;
- }
- if (copy_from_user(
- newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
- BUGPRINT("Couldn't copy entries from userspace\n");
- ret = -EFAULT;
- goto free_entries;
- }
+ struct ebt_table *t;
/* the user wants counters back
the check on the size is done later, when we have the lock */
- if (tmp.num_counters) {
- counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp));
- if (!counterstmp) {
- ret = -ENOMEM;
- goto free_entries;
- }
+ if (repl->num_counters) {
+ unsigned long size = repl->num_counters * sizeof(*counterstmp);
+ counterstmp = vmalloc(size);
+ if (!counterstmp)
+ return -ENOMEM;
}
- else
- counterstmp = NULL;
- /* this can get initialized by translate_table() */
newinfo->chainstack = NULL;
- ret = ebt_verify_pointers(&tmp, newinfo);
+ ret = ebt_verify_pointers(repl, newinfo);
if (ret != 0)
goto free_counterstmp;
- ret = translate_table(tmp.name, newinfo);
+ ret = translate_table(net, repl->name, newinfo);
if (ret != 0)
goto free_counterstmp;
- t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
+ t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
if (!t) {
ret = -ENOENT;
goto free_iterate;
}
/* the table doesn't like it */
- if (t->check && (ret = t->check(newinfo, tmp.valid_hooks)))
+ if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
goto free_unlock;
- if (tmp.num_counters && tmp.num_counters != t->private->nentries) {
+ if (repl->num_counters && repl->num_counters != t->private->nentries) {
BUGPRINT("Wrong nr. of counters requested\n");
ret = -EINVAL;
goto free_unlock;
@@ -1048,7 +1034,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
module_put(t->me);
/* we need an atomic snapshot of the counters */
write_lock_bh(&t->lock);
- if (tmp.num_counters)
+ if (repl->num_counters)
get_counters(t->private->counters, counterstmp,
t->private->nentries);
@@ -1059,10 +1045,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
allocation. Only reason why this is done is because this way the lock
is held only once, while this doesn't bring the kernel into a
dangerous state. */
- if (tmp.num_counters &&
- copy_to_user(tmp.counters, counterstmp,
- tmp.num_counters * sizeof(struct ebt_counter))) {
- BUGPRINT("Couldn't copy counters to userspace\n");
+ if (repl->num_counters &&
+ copy_to_user(repl->counters, counterstmp,
+ repl->num_counters * sizeof(struct ebt_counter))) {
ret = -EFAULT;
}
else
@@ -1070,7 +1055,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
/* decrease module count and free resources */
EBT_ENTRY_ITERATE(table->entries, table->entries_size,
- ebt_cleanup_entry, NULL);
+ ebt_cleanup_entry, net, NULL);
vfree(table->entries);
if (table->chainstack) {
@@ -1087,7 +1072,7 @@ free_unlock:
mutex_unlock(&ebt_mutex);
free_iterate:
EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
- ebt_cleanup_entry, NULL);
+ ebt_cleanup_entry, net, NULL);
free_counterstmp:
vfree(counterstmp);
/* can be initialized in translate_table() */
@@ -1096,6 +1081,59 @@ free_counterstmp:
vfree(newinfo->chainstack[i]);
vfree(newinfo->chainstack);
}
+ return ret;
+}
+
+/* replace the table */
+static int do_replace(struct net *net, const void __user *user,
+ unsigned int len)
+{
+ int ret, countersize;
+ struct ebt_table_info *newinfo;
+ struct ebt_replace tmp;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+ return -EFAULT;
+
+ if (len != sizeof(tmp) + tmp.entries_size) {
+ BUGPRINT("Wrong len argument\n");
+ return -EINVAL;
+ }
+
+ if (tmp.entries_size == 0) {
+ BUGPRINT("Entries_size never zero\n");
+ return -EINVAL;
+ }
+ /* overflow check */
+ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
+ NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+ return -ENOMEM;
+
+ countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
+ newinfo = vmalloc(sizeof(*newinfo) + countersize);
+ if (!newinfo)
+ return -ENOMEM;
+
+ if (countersize)
+ memset(newinfo->counters, 0, countersize);
+
+ newinfo->entries = vmalloc(tmp.entries_size);
+ if (!newinfo->entries) {
+ ret = -ENOMEM;
+ goto free_newinfo;
+ }
+ if (copy_from_user(
+ newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
+ BUGPRINT("Couldn't copy entries from userspace\n");
+ ret = -EFAULT;
+ goto free_entries;
+ }
+
+ ret = do_replace_finish(net, &tmp, newinfo);
+ if (ret == 0)
+ return ret;
free_entries:
vfree(newinfo->entries);
free_newinfo:
@@ -1154,7 +1192,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
newinfo->hook_entry[i] = p +
((char *)repl->hook_entry[i] - repl->entries);
}
- ret = translate_table(repl->name, newinfo);
+ ret = translate_table(net, repl->name, newinfo);
if (ret != 0) {
BUGPRINT("Translate_table failed\n");
goto free_chainstack;
@@ -1204,7 +1242,7 @@ out:
return ERR_PTR(ret);
}
-void ebt_unregister_table(struct ebt_table *table)
+void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
int i;
@@ -1216,7 +1254,7 @@ void ebt_unregister_table(struct ebt_table *table)
list_del(&table->list);
mutex_unlock(&ebt_mutex);
EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
- ebt_cleanup_entry, NULL);
+ ebt_cleanup_entry, net, NULL);
if (table->private->nentries)
module_put(table->me);
vfree(table->private->entries);
@@ -1230,39 +1268,33 @@ void ebt_unregister_table(struct ebt_table *table)
}
/* userspace just supplied us with counters */
-static int update_counters(struct net *net, void __user *user, unsigned int len)
+static int do_update_counters(struct net *net, const char *name,
+ struct ebt_counter __user *counters,
+ unsigned int num_counters,
+ const void __user *user, unsigned int len)
{
int i, ret;
struct ebt_counter *tmp;
- struct ebt_replace hlp;
struct ebt_table *t;
- if (copy_from_user(&hlp, user, sizeof(hlp)))
- return -EFAULT;
-
- if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
- return -EINVAL;
- if (hlp.num_counters == 0)
+ if (num_counters == 0)
return -EINVAL;
- if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) {
- MEMPRINT("Update_counters && nomemory\n");
+ tmp = vmalloc(num_counters * sizeof(*tmp));
+ if (!tmp)
return -ENOMEM;
- }
- t = find_table_lock(net, hlp.name, &ret, &ebt_mutex);
+ t = find_table_lock(net, name, &ret, &ebt_mutex);
if (!t)
goto free_tmp;
- if (hlp.num_counters != t->private->nentries) {
+ if (num_counters != t->private->nentries) {
BUGPRINT("Wrong nr of counters\n");
ret = -EINVAL;
goto unlock_mutex;
}
- if ( copy_from_user(tmp, hlp.counters,
- hlp.num_counters * sizeof(struct ebt_counter)) ) {
- BUGPRINT("Updata_counters && !cfu\n");
+ if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
ret = -EFAULT;
goto unlock_mutex;
}
@@ -1271,7 +1303,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len)
write_lock_bh(&t->lock);
/* we add to the counters of the first cpu */
- for (i = 0; i < hlp.num_counters; i++) {
+ for (i = 0; i < num_counters; i++) {
t->private->counters[i].pcnt += tmp[i].pcnt;
t->private->counters[i].bcnt += tmp[i].bcnt;
}
@@ -1285,8 +1317,23 @@ free_tmp:
return ret;
}
-static inline int ebt_make_matchname(struct ebt_entry_match *m,
- char *base, char __user *ubase)
+static int update_counters(struct net *net, const void __user *user,
+ unsigned int len)
+{
+ struct ebt_replace hlp;
+
+ if (copy_from_user(&hlp, user, sizeof(hlp)))
+ return -EFAULT;
+
+ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
+ return -EINVAL;
+
+ return do_update_counters(net, hlp.name, hlp.counters,
+ hlp.num_counters, user, len);
+}
+
+static inline int ebt_make_matchname(const struct ebt_entry_match *m,
+ const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)m - base);
if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1294,8 +1341,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
return 0;
}
-static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
- char *base, char __user *ubase)
+static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
+ const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)w - base);
if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1303,11 +1350,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
return 0;
}
-static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase)
+static inline int
+ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
{
int ret;
char __user *hlp;
- struct ebt_entry_target *t;
+ const struct ebt_entry_target *t;
if (e->bitmask == 0)
return 0;
@@ -1326,13 +1374,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
return 0;
}
+static int copy_counters_to_user(struct ebt_table *t,
+ const struct ebt_counter *oldcounters,
+ void __user *user, unsigned int num_counters,
+ unsigned int nentries)
+{
+ struct ebt_counter *counterstmp;
+ int ret = 0;
+
+ /* userspace might not need the counters */
+ if (num_counters == 0)
+ return 0;
+
+ if (num_counters != nentries) {
+ BUGPRINT("Num_counters wrong\n");
+ return -EINVAL;
+ }
+
+ counterstmp = vmalloc(nentries * sizeof(*counterstmp));
+ if (!counterstmp)
+ return -ENOMEM;
+
+ write_lock_bh(&t->lock);
+ get_counters(oldcounters, counterstmp, nentries);
+ write_unlock_bh(&t->lock);
+
+ if (copy_to_user(user, counterstmp,
+ nentries * sizeof(struct ebt_counter)))
+ ret = -EFAULT;
+ vfree(counterstmp);
+ return ret;
+}
+
/* called with ebt_mutex locked */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
- int *len, int cmd)
+ const int *len, int cmd)
{
struct ebt_replace tmp;
- struct ebt_counter *counterstmp, *oldcounters;
+ const struct ebt_counter *oldcounters;
unsigned int entries_size, nentries;
+ int ret;
char *entries;
if (cmd == EBT_SO_GET_ENTRIES) {
@@ -1347,16 +1428,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
oldcounters = t->table->counters;
}
- if (copy_from_user(&tmp, user, sizeof(tmp))) {
- BUGPRINT("Cfu didn't work\n");
+ if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
- }
if (*len != sizeof(struct ebt_replace) + entries_size +
- (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) {
- BUGPRINT("Wrong size\n");
+ (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
return -EINVAL;
- }
if (tmp.nentries != nentries) {
BUGPRINT("Nentries wrong\n");
@@ -1368,29 +1445,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
return -EINVAL;
}
- /* userspace might not need the counters */
- if (tmp.num_counters) {
- if (tmp.num_counters != nentries) {
- BUGPRINT("Num_counters wrong\n");
- return -EINVAL;
- }
- counterstmp = vmalloc(nentries * sizeof(*counterstmp));
- if (!counterstmp) {
- MEMPRINT("Couldn't copy counters, out of memory\n");
- return -ENOMEM;
- }
- write_lock_bh(&t->lock);
- get_counters(oldcounters, counterstmp, nentries);
- write_unlock_bh(&t->lock);
-
- if (copy_to_user(tmp.counters, counterstmp,
- nentries * sizeof(struct ebt_counter))) {
- BUGPRINT("Couldn't copy counters to userspace\n");
- vfree(counterstmp);
- return -EFAULT;
- }
- vfree(counterstmp);
- }
+ ret = copy_counters_to_user(t, oldcounters, tmp.counters,
+ tmp.num_counters, nentries);
+ if (ret)
+ return ret;
if (copy_to_user(tmp.entries, entries, entries_size)) {
BUGPRINT("Couldn't copy entries to userspace\n");
@@ -1418,7 +1476,7 @@ static int do_ebt_set_ctl(struct sock *sk,
break;
default:
ret = -EINVAL;
- }
+ }
return ret;
}
@@ -1478,15 +1536,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return ret;
}
+#ifdef CONFIG_COMPAT
+/* 32-bit userspace compatibility definitions. */
+struct compat_ebt_replace {
+ char name[EBT_TABLE_MAXNAMELEN];
+ compat_uint_t valid_hooks;
+ compat_uint_t nentries;
+ compat_uint_t entries_size;
+ /* start of the chains */
+ compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
+ /* nr of counters userspace expects back */
+ compat_uint_t num_counters;
+ /* where the kernel will put the old counters. */
+ compat_uptr_t counters;
+ compat_uptr_t entries;
+};
+
+/* struct ebt_entry_match, _target and _watcher have same layout */
+struct compat_ebt_entry_mwt {
+ union {
+ char name[EBT_FUNCTION_MAXNAMELEN];
+ compat_uptr_t ptr;
+ } u;
+ compat_uint_t match_size;
+ compat_uint_t data[0];
+};
+
+/* account for possible padding between match_size and ->data */
+static int ebt_compat_entry_padsize(void)
+{
+ BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
+ COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
+ return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
+ COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
+}
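The value computed above is simply the gap between the natively aligned match/watcher/target header and its packed 32-bit counterpart. A standalone sketch of the arithmetic, with assumed alignments and header sizes (the 8/4 alignments and 40/36 sizes are illustrative stand-ins, not the real sizeofs):

#include <stdio.h>

/* Simplified stand-ins for XT_ALIGN()/COMPAT_XT_ALIGN(); the real
 * alignments depend on the architecture. */
#define ALIGN_TO(x, a)		(((x) + (a) - 1) & ~((unsigned int)(a) - 1))
#define FAKE_XT_ALIGN(x)	ALIGN_TO((x), 8u)
#define FAKE_COMPAT_XT_ALIGN(x)	ALIGN_TO((x), 4u)

int main(void)
{
	/* assumed header sizes: a pointer-bearing union pads the native
	 * header to 40 bytes, while the compat header packs to 36 */
	unsigned int native_hdr = 40;
	unsigned int compat_hdr = 36;

	int pad = (int)FAKE_XT_ALIGN(native_hdr) -
		  (int)FAKE_COMPAT_XT_ALIGN(compat_hdr);

	printf("pad bytes inserted after the compat header: %d\n", pad);
	return 0;
}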
+
+static int ebt_compat_match_offset(const struct xt_match *match,
+ unsigned int userlen)
+{
+ /*
+ * ebt_among needs special handling. The kernel .matchsize is
+ * set to -1 at registration time; at runtime an EBT_ALIGN()ed
+ * value is expected.
+ * Example: userspace sends 4500, ebt_among.c wants 4504.
+ */
+ if (unlikely(match->matchsize == -1))
+ return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
+ return xt_compat_match_offset(match);
+}
+
+static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
+ unsigned int *size)
+{
+ const struct xt_match *match = m->u.match;
+ struct compat_ebt_entry_mwt __user *cm = *dstptr;
+ int off = ebt_compat_match_offset(match, m->match_size);
+ compat_uint_t msize = m->match_size - off;
+
+ BUG_ON(off >= m->match_size);
+
+ if (copy_to_user(cm->u.name, match->name,
+ strlen(match->name) + 1) || put_user(msize, &cm->match_size))
+ return -EFAULT;
+
+ if (match->compat_to_user) {
+ if (match->compat_to_user(cm->data, m->data))
+ return -EFAULT;
+ } else if (copy_to_user(cm->data, m->data, msize))
+ return -EFAULT;
+
+ *size -= ebt_compat_entry_padsize() + off;
+ *dstptr = cm->data;
+ *dstptr += msize;
+ return 0;
+}
+
+static int compat_target_to_user(struct ebt_entry_target *t,
+ void __user **dstptr,
+ unsigned int *size)
+{
+ const struct xt_target *target = t->u.target;
+ struct compat_ebt_entry_mwt __user *cm = *dstptr;
+ int off = xt_compat_target_offset(target);
+ compat_uint_t tsize = t->target_size - off;
+
+ BUG_ON(off >= t->target_size);
+
+ if (copy_to_user(cm->u.name, target->name,
+ strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
+ return -EFAULT;
+
+ if (target->compat_to_user) {
+ if (target->compat_to_user(cm->data, t->data))
+ return -EFAULT;
+ } else if (copy_to_user(cm->data, t->data, tsize))
+ return -EFAULT;
+
+ *size -= ebt_compat_entry_padsize() + off;
+ *dstptr = cm->data;
+ *dstptr += tsize;
+ return 0;
+}
+
+static int compat_watcher_to_user(struct ebt_entry_watcher *w,
+ void __user **dstptr,
+ unsigned int *size)
+{
+ return compat_target_to_user((struct ebt_entry_target *)w,
+ dstptr, size);
+}
+
+static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
+ unsigned int *size)
+{
+ struct ebt_entry_target *t;
+ struct ebt_entry __user *ce;
+ u32 watchers_offset, target_offset, next_offset;
+ compat_uint_t origsize;
+ int ret;
+
+ if (e->bitmask == 0) {
+ if (*size < sizeof(struct ebt_entries))
+ return -EINVAL;
+ if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
+ return -EFAULT;
+
+ *dstptr += sizeof(struct ebt_entries);
+ *size -= sizeof(struct ebt_entries);
+ return 0;
+ }
+
+ if (*size < sizeof(*ce))
+ return -EINVAL;
+
+ ce = (struct ebt_entry __user *)*dstptr;
+ if (copy_to_user(ce, e, sizeof(*ce)))
+ return -EFAULT;
+
+ origsize = *size;
+ *dstptr += sizeof(*ce);
+
+ ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
+ if (ret)
+ return ret;
+ watchers_offset = e->watchers_offset - (origsize - *size);
+
+ ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
+ if (ret)
+ return ret;
+ target_offset = e->target_offset - (origsize - *size);
+
+ t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
+
+ ret = compat_target_to_user(t, dstptr, size);
+ if (ret)
+ return ret;
+ next_offset = e->next_offset - (origsize - *size);
+
+ if (put_user(watchers_offset, &ce->watchers_offset) ||
+ put_user(target_offset, &ce->target_offset) ||
+ put_user(next_offset, &ce->next_offset))
+ return -EFAULT;
+
+ *size -= sizeof(*ce);
+ return 0;
+}
+
+static int compat_calc_match(struct ebt_entry_match *m, int *off)
+{
+ *off += ebt_compat_match_offset(m->u.match, m->match_size);
+ *off += ebt_compat_entry_padsize();
+ return 0;
+}
+
+static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
+{
+ *off += xt_compat_target_offset(w->u.watcher);
+ *off += ebt_compat_entry_padsize();
+ return 0;
+}
+
+static int compat_calc_entry(const struct ebt_entry *e,
+ const struct ebt_table_info *info,
+ const void *base,
+ struct compat_ebt_replace *newinfo)
+{
+ const struct ebt_entry_target *t;
+ unsigned int entry_offset;
+ int off, ret, i;
+
+ if (e->bitmask == 0)
+ return 0;
+
+ off = 0;
+ entry_offset = (void *)e - base;
+
+ EBT_MATCH_ITERATE(e, compat_calc_match, &off);
+ EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
+
+ t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
+
+ off += xt_compat_target_offset(t->u.target);
+ off += ebt_compat_entry_padsize();
+
+ newinfo->entries_size -= off;
+
+ ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ const void *hookptr = info->hook_entry[i];
+ if (info->hook_entry[i] &&
+ (e < (struct ebt_entry *)(base - hookptr))) {
+ newinfo->hook_entry[i] -= off;
+ pr_debug("0x%08X -> 0x%08X\n",
+ newinfo->hook_entry[i] + off,
+ newinfo->hook_entry[i]);
+ }
+ }
+
+ return 0;
+}
+
+
+static int compat_table_info(const struct ebt_table_info *info,
+ struct compat_ebt_replace *newinfo)
+{
+ unsigned int size = info->entries_size;
+ const void *entries = info->entries;
+
+ newinfo->entries_size = size;
+
+ return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
+ entries, newinfo);
+}
+
+static int compat_copy_everything_to_user(struct ebt_table *t,
+ void __user *user, int *len, int cmd)
+{
+ struct compat_ebt_replace repl, tmp;
+ struct ebt_counter *oldcounters;
+ struct ebt_table_info tinfo;
+ int ret;
+ void __user *pos;
+
+ memset(&tinfo, 0, sizeof(tinfo));
+
+ if (cmd == EBT_SO_GET_ENTRIES) {
+ tinfo.entries_size = t->private->entries_size;
+ tinfo.nentries = t->private->nentries;
+ tinfo.entries = t->private->entries;
+ oldcounters = t->private->counters;
+ } else {
+ tinfo.entries_size = t->table->entries_size;
+ tinfo.nentries = t->table->nentries;
+ tinfo.entries = t->table->entries;
+ oldcounters = t->table->counters;
+ }
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)))
+ return -EFAULT;
+
+ if (tmp.nentries != tinfo.nentries ||
+ (tmp.num_counters && tmp.num_counters != tinfo.nentries))
+ return -EINVAL;
+
+ memcpy(&repl, &tmp, sizeof(repl));
+ if (cmd == EBT_SO_GET_ENTRIES)
+ ret = compat_table_info(t->private, &repl);
+ else
+ ret = compat_table_info(&tinfo, &repl);
+ if (ret)
+ return ret;
+
+ if (*len != sizeof(tmp) + repl.entries_size +
+ (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
+ pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
+ *len, tinfo.entries_size, repl.entries_size);
+ return -EINVAL;
+ }
+
+ /* userspace might not need the counters */
+ ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
+ tmp.num_counters, tinfo.nentries);
+ if (ret)
+ return ret;
+
+ pos = compat_ptr(tmp.entries);
+ return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
+ compat_copy_entry_to_user, &pos, &tmp.entries_size);
+}
+
+struct ebt_entries_buf_state {
+ char *buf_kern_start; /* kernel buffer to copy (translated) data to */
+ u32 buf_kern_len; /* total size of kernel buffer */
+ u32 buf_kern_offset; /* amount of data copied so far */
+ u32 buf_user_offset; /* read position in userspace buffer */
+};
+
+static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
+{
+ state->buf_kern_offset += sz;
+ return state->buf_kern_offset >= sz ? 0 : -EINVAL;
+}
+
+static int ebt_buf_add(struct ebt_entries_buf_state *state,
+ void *data, unsigned int sz)
+{
+ if (state->buf_kern_start == NULL)
+ goto count_only;
+
+ BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
+
+ memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
+
+ count_only:
+ state->buf_user_offset += sz;
+ return ebt_buf_count(state, sz);
+}
+
+static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
+{
+ char *b = state->buf_kern_start;
+
+ BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
+
+ if (b != NULL && sz > 0)
+ memset(b + state->buf_kern_offset, 0, sz);
+ /* do not adjust ->buf_user_offset here, we added kernel-side padding */
+ return ebt_buf_count(state, sz);
+}
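These helpers support the two-pass translation used further down in compat_do_replace(): a first pass with buf_kern_start == NULL only accumulates sizes, and a second pass over the same data copies into the allocated buffer. A minimal userspace sketch of that count-then-copy pattern (simplified state, no alignment or padding handling):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf_state {
	char *kern_start;	/* NULL during the sizing pass */
	unsigned int kern_len;
	unsigned int kern_off;
};

/* Copy data when a destination exists, otherwise just count. */
static int buf_add(struct buf_state *s, const void *data, unsigned int sz)
{
	if (s->kern_start) {
		if (s->kern_off + sz > s->kern_len)
			return -1;
		memcpy(s->kern_start + s->kern_off, data, sz);
	}
	s->kern_off += sz;
	return 0;
}

int main(void)
{
	const char *chunks[] = { "match", "watcher", "target" };
	struct buf_state s = { NULL, 0, 0 };
	int i;

	/* pass 1: size only */
	for (i = 0; i < 3; i++)
		buf_add(&s, chunks[i], (unsigned int)strlen(chunks[i]));

	/* pass 2: allocate and copy the same data */
	struct buf_state s2 = { malloc(s.kern_off), s.kern_off, 0 };
	for (i = 0; i < 3; i++)
		buf_add(&s2, chunks[i], (unsigned int)strlen(chunks[i]));

	printf("sized %u bytes, copied %u bytes\n", s.kern_off, s2.kern_off);
	free(s2.kern_start);
	return 0;
}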
+
+enum compat_mwt {
+ EBT_COMPAT_MATCH,
+ EBT_COMPAT_WATCHER,
+ EBT_COMPAT_TARGET,
+};
+
+static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
+ enum compat_mwt compat_mwt,
+ struct ebt_entries_buf_state *state,
+ const unsigned char *base)
+{
+ char name[EBT_FUNCTION_MAXNAMELEN];
+ struct xt_match *match;
+ struct xt_target *wt;
+ void *dst = NULL;
+ int off, pad = 0, ret = 0;
+ unsigned int size_kern, entry_offset, match_size = mwt->match_size;
+
+ strlcpy(name, mwt->u.name, sizeof(name));
+
+ if (state->buf_kern_start)
+ dst = state->buf_kern_start + state->buf_kern_offset;
+
+ entry_offset = (unsigned char *) mwt - base;
+ switch (compat_mwt) {
+ case EBT_COMPAT_MATCH:
+ match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
+ name, 0), "ebt_%s", name);
+ if (match == NULL)
+ return -ENOENT;
+ if (IS_ERR(match))
+ return PTR_ERR(match);
+
+ off = ebt_compat_match_offset(match, match_size);
+ if (dst) {
+ if (match->compat_from_user)
+ match->compat_from_user(dst, mwt->data);
+ else
+ memcpy(dst, mwt->data, match_size);
+ }
+
+ size_kern = match->matchsize;
+ if (unlikely(size_kern == -1))
+ size_kern = match_size;
+ module_put(match->me);
+ break;
+ case EBT_COMPAT_WATCHER: /* fallthrough */
+ case EBT_COMPAT_TARGET:
+ wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
+ name, 0), "ebt_%s", name);
+ if (wt == NULL)
+ return -ENOENT;
+ if (IS_ERR(wt))
+ return PTR_ERR(wt);
+ off = xt_compat_target_offset(wt);
+
+ if (dst) {
+ if (wt->compat_from_user)
+ wt->compat_from_user(dst, mwt->data);
+ else
+ memcpy(dst, mwt->data, match_size);
+ }
+
+ size_kern = wt->targetsize;
+ module_put(wt->me);
+ break;
+ }
+
+ if (!dst) {
+ ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
+ off + ebt_compat_entry_padsize());
+ if (ret < 0)
+ return ret;
+ }
+
+ state->buf_kern_offset += match_size + off;
+ state->buf_user_offset += match_size;
+ pad = XT_ALIGN(size_kern) - size_kern;
+
+ if (pad > 0 && dst) {
+ BUG_ON(state->buf_kern_len <= pad);
+ BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
+ memset(dst + size_kern, 0, pad);
+ }
+ return off + match_size;
+}
+
+/*
+ * return size of all matches, watchers or target, including necessary
+ * alignment and padding.
+ */
+static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+ unsigned int size_left, enum compat_mwt type,
+ struct ebt_entries_buf_state *state, const void *base)
+{
+ int growth = 0;
+ char *buf;
+
+ if (size_left == 0)
+ return 0;
+
+ buf = (char *) match32;
+
+ while (size_left >= sizeof(*match32)) {
+ struct ebt_entry_match *match_kern;
+ int ret;
+
+ match_kern = (struct ebt_entry_match *) state->buf_kern_start;
+ if (match_kern) {
+ char *tmp;
+ tmp = state->buf_kern_start + state->buf_kern_offset;
+ match_kern = (struct ebt_entry_match *) tmp;
+ }
+ ret = ebt_buf_add(state, buf, sizeof(*match32));
+ if (ret < 0)
+ return ret;
+ size_left -= sizeof(*match32);
+
+ /* add padding before match->data (if any) */
+ ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
+ if (ret < 0)
+ return ret;
+
+ if (match32->match_size > size_left)
+ return -EINVAL;
+
+ size_left -= match32->match_size;
+
+ ret = compat_mtw_from_user(match32, type, state, base);
+ if (ret < 0)
+ return ret;
+
+ BUG_ON(ret < match32->match_size);
+ growth += ret - match32->match_size;
+ growth += ebt_compat_entry_padsize();
+
+ buf += sizeof(*match32);
+ buf += match32->match_size;
+
+ if (match_kern)
+ match_kern->match_size = ret;
+
+ WARN_ON(type == EBT_COMPAT_TARGET && size_left);
+ match32 = (struct compat_ebt_entry_mwt *) buf;
+ }
+
+ return growth;
+}
+
+#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
+({ \
+ unsigned int __i; \
+ int __ret = 0; \
+ struct compat_ebt_entry_mwt *__watcher; \
+ \
+ for (__i = e->watchers_offset; \
+ __i < (e)->target_offset; \
+ __i += __watcher->watcher_size + \
+ sizeof(struct compat_ebt_entry_mwt)) { \
+ __watcher = (void *)(e) + __i; \
+ __ret = fn(__watcher , ## args); \
+ if (__ret != 0) \
+ break; \
+ } \
+ if (__ret == 0) { \
+ if (__i != (e)->target_offset) \
+ __ret = -EINVAL; \
+ } \
+ __ret; \
+})
+
+#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
+({ \
+ unsigned int __i; \
+ int __ret = 0; \
+ struct compat_ebt_entry_mwt *__match; \
+ \
+ for (__i = sizeof(struct ebt_entry); \
+ __i < (e)->watchers_offset; \
+ __i += __match->match_size + \
+ sizeof(struct compat_ebt_entry_mwt)) { \
+ __match = (void *)(e) + __i; \
+ __ret = fn(__match , ## args); \
+ if (__ret != 0) \
+ break; \
+ } \
+ if (__ret == 0) { \
+ if (__i != (e)->watchers_offset) \
+ __ret = -EINVAL; \
+ } \
+ __ret; \
+})
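Both iterators follow the usual ebtables idiom: a statement-expression macro walks variable-length records by their size field, stops at the first non-zero callback result, and rejects a walk that does not land exactly on the expected end offset. A simplified standalone sketch of the same idiom with an invented record type:

#include <stdio.h>

struct rec {			/* stand-in for compat_ebt_entry_mwt */
	unsigned int size;	/* payload size, excluding the header */
};

/* Walk records packed back to back in [buf, buf+len); stop at the
 * first callback error, and reject a walk that overshoots the end. */
#define REC_ITERATE(buf, len, fn, arg)				\
({								\
	unsigned int __i = 0;					\
	int __ret = 0;						\
	struct rec *__r;					\
	for (; __i < (len); __i += sizeof(*__r) + __r->size) {	\
		__r = (struct rec *)((char *)(buf) + __i);	\
		__ret = fn(__r, arg);				\
		if (__ret != 0)					\
			break;					\
	}							\
	if (__ret == 0 && __i != (len))				\
		__ret = -22; /* -EINVAL */			\
	__ret;							\
})

static int print_rec(struct rec *r, int *count)
{
	printf("record %d, payload %u bytes\n", (*count)++, r->size);
	return 0;
}

int main(void)
{
	struct rec buf[2] = { { 0 }, { 0 } };	/* two empty-payload records */
	int count = 0;

	return REC_ITERATE(buf, (unsigned int)sizeof(buf),
			   print_rec, &count) ? 1 : 0;
}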
+
+/* called for all ebt_entry structures. */
+static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+ unsigned int *total,
+ struct ebt_entries_buf_state *state)
+{
+ unsigned int i, j, startoff, new_offset = 0;
+ /* stores match/watchers/targets & offset of next struct ebt_entry: */
+ unsigned int offsets[4];
+ unsigned int *offsets_update = NULL;
+ int ret;
+ char *buf_start;
+
+ if (*total < sizeof(struct ebt_entries))
+ return -EINVAL;
+
+ if (!entry->bitmask) {
+ *total -= sizeof(struct ebt_entries);
+ return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
+ }
+ if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
+ return -EINVAL;
+
+ startoff = state->buf_user_offset;
+ /* pull in most part of ebt_entry, it does not need to be changed. */
+ ret = ebt_buf_add(state, entry,
+ offsetof(struct ebt_entry, watchers_offset));
+ if (ret < 0)
+ return ret;
+
+ offsets[0] = sizeof(struct ebt_entry); /* matches come first */
+ memcpy(&offsets[1], &entry->watchers_offset,
+ sizeof(offsets) - sizeof(offsets[0]));
+
+ if (state->buf_kern_start) {
+ buf_start = state->buf_kern_start + state->buf_kern_offset;
+ offsets_update = (unsigned int *) buf_start;
+ }
+ ret = ebt_buf_add(state, &offsets[1],
+ sizeof(offsets) - sizeof(offsets[0]));
+ if (ret < 0)
+ return ret;
+ buf_start = (char *) entry;
+ /*
+ * 0: matches offset, always follows ebt_entry.
+ * 1: watchers offset, from ebt_entry structure
+ * 2: target offset, from ebt_entry structure
+ * 3: next ebt_entry offset, from ebt_entry structure
+ *
+ * offsets are relative to beginning of struct ebt_entry (i.e., 0).
+ */
+ for (i = 0, j = 1 ; j < 4 ; j++, i++) {
+ struct compat_ebt_entry_mwt *match32;
+ unsigned int size;
+ char *buf = buf_start;
+
+ buf = buf_start + offsets[i];
+ if (offsets[i] > offsets[j])
+ return -EINVAL;
+
+ match32 = (struct compat_ebt_entry_mwt *) buf;
+ size = offsets[j] - offsets[i];
+ ret = ebt_size_mwt(match32, size, i, state, base);
+ if (ret < 0)
+ return ret;
+ new_offset += ret;
+ if (offsets_update && new_offset) {
+ pr_debug("ebtables: change offset %d to %d\n",
+ offsets_update[i], offsets[j] + new_offset);
+ offsets_update[i] = offsets[j] + new_offset;
+ }
+ }
+
+ startoff = state->buf_user_offset - startoff;
+
+ BUG_ON(*total < startoff);
+ *total -= startoff;
+ return 0;
+}
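The offsets[] loop above treats each entry as four back-to-back regions, where region i occupies [offsets[i], offsets[i+1]) relative to the start of the entry: matches, watchers, target, then the next entry. A small sketch of that slicing, assuming a simplified entry header and a 16-byte fixed part (both made up for illustration):

#include <stdio.h>

/* Simplified stand-in for struct ebt_entry's offset fields. */
struct fake_entry {
	unsigned int watchers_offset;	/* end of the matches region  */
	unsigned int target_offset;	/* end of the watchers region */
	unsigned int next_offset;	/* end of the whole entry     */
};

int main(void)
{
	struct fake_entry e = { 40, 64, 96 };
	/* region 0 starts where the fixed header ends (assumed 16 here) */
	unsigned int offsets[4] = { 16, e.watchers_offset,
				    e.target_offset, e.next_offset };
	const char *name[3] = { "matches", "watchers", "target" };
	int i, j;

	for (i = 0, j = 1; j < 4; i++, j++) {
		if (offsets[i] > offsets[j])
			return 1;	/* malformed entry */
		printf("%-8s: bytes [%u, %u), size %u\n",
		       name[i], offsets[i], offsets[j],
		       offsets[j] - offsets[i]);
	}
	return 0;
}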
+
+/*
+ * repl->entries_size is the size of the ebt_entry blob in userspace.
+ * It might need more memory when copied to a 64 bit kernel in case
+ * userspace is 32-bit. So, first task: find out how much memory is needed.
+ *
+ * Called before validation is performed.
+ */
+static int compat_copy_entries(unsigned char *data, unsigned int size_user,
+ struct ebt_entries_buf_state *state)
+{
+ unsigned int size_remaining = size_user;
+ int ret;
+
+ ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
+ &size_remaining, state);
+ if (ret < 0)
+ return ret;
+
+ WARN_ON(size_remaining);
+ return state->buf_kern_offset;
+}
+
+
+static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
+ void __user *user, unsigned int len)
+{
+ struct compat_ebt_replace tmp;
+ int i;
+
+ if (len < sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)))
+ return -EFAULT;
+
+ if (len != sizeof(tmp) + tmp.entries_size)
+ return -EINVAL;
+
+ if (tmp.entries_size == 0)
+ return -EINVAL;
+
+ if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
+ NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
+ return -ENOMEM;
+ if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
+ return -ENOMEM;
+
+ memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
+
+ /* starting with hook_entry, 32 vs. 64 bit structures are different */
+ for (i = 0; i < NF_BR_NUMHOOKS; i++)
+ repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
+
+ repl->num_counters = tmp.num_counters;
+ repl->counters = compat_ptr(tmp.counters);
+ repl->entries = compat_ptr(tmp.entries);
+ return 0;
+}
+
+static int compat_do_replace(struct net *net, void __user *user,
+ unsigned int len)
+{
+ int ret, i, countersize, size64;
+ struct ebt_table_info *newinfo;
+ struct ebt_replace tmp;
+ struct ebt_entries_buf_state state;
+ void *entries_tmp;
+
+ ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
+ if (ret) {
+ /* try real handler in case userland supplied needed padding */
+ if (ret == -EINVAL && do_replace(net, user, len) == 0)
+ ret = 0;
+ return ret;
+ }
+
+ countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
+ newinfo = vmalloc(sizeof(*newinfo) + countersize);
+ if (!newinfo)
+ return -ENOMEM;
+
+ if (countersize)
+ memset(newinfo->counters, 0, countersize);
+
+ memset(&state, 0, sizeof(state));
+
+ newinfo->entries = vmalloc(tmp.entries_size);
+ if (!newinfo->entries) {
+ ret = -ENOMEM;
+ goto free_newinfo;
+ }
+ if (copy_from_user(
+ newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
+ ret = -EFAULT;
+ goto free_entries;
+ }
+
+ entries_tmp = newinfo->entries;
+
+ xt_compat_lock(NFPROTO_BRIDGE);
+
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ if (ret < 0)
+ goto out_unlock;
+
+ pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
+ tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
+ xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
+
+ size64 = ret;
+ newinfo->entries = vmalloc(size64);
+ if (!newinfo->entries) {
+ vfree(entries_tmp);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memset(&state, 0, sizeof(state));
+ state.buf_kern_start = newinfo->entries;
+ state.buf_kern_len = size64;
+
+ ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
+ BUG_ON(ret < 0); /* parses same data again */
+
+ vfree(entries_tmp);
+ tmp.entries_size = size64;
+
+ for (i = 0; i < NF_BR_NUMHOOKS; i++) {
+ char __user *usrptr;
+ if (tmp.hook_entry[i]) {
+ unsigned int delta;
+ usrptr = (char __user *) tmp.hook_entry[i];
+ delta = usrptr - tmp.entries;
+ usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
+ tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
+ }
+ }
+
+ xt_compat_flush_offsets(NFPROTO_BRIDGE);
+ xt_compat_unlock(NFPROTO_BRIDGE);
+
+ ret = do_replace_finish(net, &tmp, newinfo);
+ if (ret == 0)
+ return ret;
+free_entries:
+ vfree(newinfo->entries);
+free_newinfo:
+ vfree(newinfo);
+ return ret;
+out_unlock:
+ xt_compat_flush_offsets(NFPROTO_BRIDGE);
+ xt_compat_unlock(NFPROTO_BRIDGE);
+ goto free_entries;
+}
+
+static int compat_update_counters(struct net *net, void __user *user,
+ unsigned int len)
+{
+ struct compat_ebt_replace hlp;
+
+ if (copy_from_user(&hlp, user, sizeof(hlp)))
+ return -EFAULT;
+
+ /* try real handler in case userland supplied needed padding */
+ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
+ return update_counters(net, user, len);
+
+ return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
+ hlp.num_counters, user, len);
+}
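The length test above is the compat layer's dispatch rule: a request whose size does not match the 32-bit layout is handed back to the native handler, which may still accept it (e.g. a 32-bit binary built with 64-bit compatible padding). A hedged sketch of that dispatch idea, with invented sizes and handler names:

#include <stdio.h>

/* Invented layout sizes; the real ones come from the struct definitions. */
#define COMPAT_HDR_LEN	96u
#define COUNTER_SZ	16u

static int native_update_counters(unsigned int len)
{
	printf("native handler validates len=%u itself\n", len);
	return 0;
}

static int do_compat_update(unsigned int len)
{
	printf("compat translation for len=%u\n", len);
	return 0;
}

/* If the request length does not match the 32-bit layout, fall back
 * to the native handler instead of rejecting the call outright. */
static int compat_entry_point(unsigned int len, unsigned int num_counters)
{
	if (len != COMPAT_HDR_LEN + num_counters * COUNTER_SZ)
		return native_update_counters(len);
	return do_compat_update(len);
}

int main(void)
{
	compat_entry_point(96u + 4u * 16u, 4u);		/* genuine 32-bit layout */
	compat_entry_point(112u + 4u * 16u, 4u);	/* falls back to native  */
	return 0;
}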
+
+static int compat_do_ebt_set_ctl(struct sock *sk,
+ int cmd, void __user *user, unsigned int len)
+{
+ int ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case EBT_SO_SET_ENTRIES:
+ ret = compat_do_replace(sock_net(sk), user, len);
+ break;
+ case EBT_SO_SET_COUNTERS:
+ ret = compat_update_counters(sock_net(sk), user, len);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
+ void __user *user, int *len)
+{
+ int ret;
+ struct compat_ebt_replace tmp;
+ struct ebt_table *t;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* try real handler in case userland supplied needed padding */
+ if ((cmd == EBT_SO_GET_INFO ||
+ cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
+ return do_ebt_get_ctl(sk, cmd, user, len);
+
+ if (copy_from_user(&tmp, user, sizeof(tmp)))
+ return -EFAULT;
+
+ t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
+ if (!t)
+ return ret;
+
+ xt_compat_lock(NFPROTO_BRIDGE);
+ switch (cmd) {
+ case EBT_SO_GET_INFO:
+ tmp.nentries = t->private->nentries;
+ ret = compat_table_info(t->private, &tmp);
+ if (ret)
+ goto out;
+ tmp.valid_hooks = t->valid_hooks;
+
+ if (copy_to_user(user, &tmp, *len) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = 0;
+ break;
+ case EBT_SO_GET_INIT_INFO:
+ tmp.nentries = t->table->nentries;
+ tmp.entries_size = t->table->entries_size;
+ tmp.valid_hooks = t->table->valid_hooks;
+
+ if (copy_to_user(user, &tmp, *len) != 0) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = 0;
+ break;
+ case EBT_SO_GET_ENTRIES:
+ case EBT_SO_GET_INIT_ENTRIES:
+ /*
+ * try real handler first in case of userland-side padding.
+ * in case we are dealing with an 'ordinary' 32 bit binary
+ * without 64bit compatibility padding, this will fail right
+ * after copy_from_user when the *len argument is validated.
+ *
+ * the compat_ variant needs to do one pass over the kernel
+	 * data set to adjust for size differences before the check can be done.
+ */
+ if (copy_everything_to_user(t, user, len, cmd) == 0)
+ ret = 0;
+ else
+ ret = compat_copy_everything_to_user(t, user, len, cmd);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ out:
+ xt_compat_flush_offsets(NFPROTO_BRIDGE);
+ xt_compat_unlock(NFPROTO_BRIDGE);
+ mutex_unlock(&ebt_mutex);
+ return ret;
+}
+#endif
+
static struct nf_sockopt_ops ebt_sockopts =
{
.pf = PF_INET,
.set_optmin = EBT_BASE_CTL,
.set_optmax = EBT_SO_SET_MAX + 1,
.set = do_ebt_set_ctl,
+#ifdef CONFIG_COMPAT
+ .compat_set = compat_do_ebt_set_ctl,
+#endif
.get_optmin = EBT_BASE_CTL,
.get_optmax = EBT_SO_GET_MAX + 1,
.get = do_ebt_get_ctl,
+#ifdef CONFIG_COMPAT
+ .compat_get = compat_do_ebt_get_ctl,
+#endif
.owner = THIS_MODULE,
};
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b86..702be5a2c95 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
-HLIST_HEAD(can_rx_dev_list);
-static struct dev_rcv_lists can_rx_alldev_list;
+/* receive filters subscribed for 'all' CAN devices */
+struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);
static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
- struct dev_rcv_lists *d = NULL;
- struct hlist_node *n;
-
- /*
- * find receive list for this device
- *
- * The hlist_for_each_entry*() macros curse through the list
- * using the pointer variable n and set d to the containing
- * struct in each list iteration. Therefore, after list
- * iteration, d is unmodified when the list is empty, and it
- * points to last list element, when the list is non-empty
- * but no match in the loop body is found. I.e. d is *not*
- * NULL when no match is found. We can, however, use the
- * cursor variable n to decide if a match was found.
- */
-
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- if (d->dev == dev)
- break;
- }
-
- return n ? d : NULL;
+ if (!dev)
+ return &can_rx_alldev_list;
+ else
+ return (struct dev_rcv_lists *)dev->ml_priv;
}
/**
@@ -433,6 +415,9 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
/* insert new receiver (dev,canid,mask) -> (func,data) */
+ if (dev && dev->type != ARPHRD_CAN)
+ return -ENODEV;
+
r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
if (!r)
return -ENOMEM;
@@ -468,16 +453,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
EXPORT_SYMBOL(can_rx_register);
/*
- * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
- */
-static void can_rx_delete_device(struct rcu_head *rp)
-{
- struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
-
- kfree(d);
-}
-
-/*
* can_rx_delete_receiver - rcu callback for single receiver entry removal
*/
static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -506,6 +481,9 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
struct hlist_node *next;
struct dev_rcv_lists *d;
+ if (dev && dev->type != ARPHRD_CAN)
+ return;
+
spin_lock(&can_rcvlists_lock);
d = find_dev_rcv_lists(dev);
@@ -541,7 +519,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
"dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
r = NULL;
- d = NULL;
goto out;
}
@@ -552,10 +529,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
can_pstats.rcv_entries--;
/* remove device structure requested by NETDEV_UNREGISTER */
- if (d->remove_on_zero_entries && !d->entries)
- hlist_del_rcu(&d->list);
- else
- d = NULL;
+ if (d->remove_on_zero_entries && !d->entries) {
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
out:
spin_unlock(&can_rcvlists_lock);
@@ -563,10 +540,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
/* schedule the receiver item for deletion */
if (r)
call_rcu(&r->rcu, can_rx_delete_receiver);
-
- /* schedule the device structure for deletion */
- if (d)
- call_rcu(&d->rcu, can_rx_delete_device);
}
EXPORT_SYMBOL(can_rx_unregister);
@@ -780,48 +753,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
case NETDEV_REGISTER:
- /*
- * create new dev_rcv_lists for this device
- *
- * N.B. zeroing the struct is the correct initialization
- * for the embedded hlist_head structs.
- * Another list type, e.g. list_head, would require
- * explicit initialization.
- */
-
+ /* create new dev_rcv_lists for this device */
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
printk(KERN_ERR
"can: allocation of receive list failed\n");
return NOTIFY_DONE;
}
- d->dev = dev;
-
- spin_lock(&can_rcvlists_lock);
- hlist_add_head_rcu(&d->list, &can_rx_dev_list);
- spin_unlock(&can_rcvlists_lock);
+ BUG_ON(dev->ml_priv);
+ dev->ml_priv = d;
break;
case NETDEV_UNREGISTER:
spin_lock(&can_rcvlists_lock);
- d = find_dev_rcv_lists(dev);
+ d = dev->ml_priv;
if (d) {
- if (d->entries) {
+ if (d->entries)
d->remove_on_zero_entries = 1;
- d = NULL;
- } else
- hlist_del_rcu(&d->list);
+ else {
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
} else
printk(KERN_ERR "can: notifier: receive list not "
"found for dev %s\n", dev->name);
spin_unlock(&can_rcvlists_lock);
- if (d)
- call_rcu(&d->rcu, can_rx_delete_device);
-
break;
}
@@ -853,21 +813,13 @@ static __init int can_init(void)
{
printk(banner);
+ memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
+
rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
0, 0, NULL);
if (!rcv_cache)
return -ENOMEM;
- /*
- * Insert can_rx_alldev_list for reception on all devices.
- * This struct is zero initialized which is correct for the
- * embedded hlist heads, the dev pointer, and the entries counter.
- */
-
- spin_lock(&can_rcvlists_lock);
- hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
- spin_unlock(&can_rcvlists_lock);
-
if (stats_timer) {
/* the statistics are updated every second (timer triggered) */
setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +839,7 @@ static __init int can_init(void)
static __exit void can_exit(void)
{
- struct dev_rcv_lists *d;
- struct hlist_node *n, *next;
+ struct net_device *dev;
if (stats_timer)
del_timer(&can_stattimer);
@@ -900,14 +851,19 @@ static __exit void can_exit(void)
unregister_netdevice_notifier(&can_netdev_notifier);
sock_unregister(PF_CAN);
- /* remove can_rx_dev_list */
- spin_lock(&can_rcvlists_lock);
- hlist_del(&can_rx_alldev_list.list);
- hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
- hlist_del(&d->list);
- kfree(d);
+ /* remove created dev_rcv_lists from still registered CAN devices */
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+		if (dev->type == ARPHRD_CAN && dev->ml_priv) {
+ struct dev_rcv_lists *d = dev->ml_priv;
+
+ BUG_ON(d->entries);
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
}
- spin_unlock(&can_rcvlists_lock);
+ rcu_read_unlock();
rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc3..34253b84e30 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
+/* per device receive filters linked at dev->ml_priv */
struct dev_rcv_lists {
- struct hlist_node list;
- struct rcu_head rcu;
- struct net_device *dev;
struct hlist_head rx[RX_MAX];
struct hlist_head rx_sff[0x800];
int remove_on_zero_entries;
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be56..f4265cc9c3f 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
#include <linux/can/core.h>
#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
[RX_EFF] = "rx_eff",
};
+/* receive filters subscribed for 'all' CAN devices */
+extern struct dev_rcv_lists can_rx_alldev_list;
+
/*
* af_can statistics stuff
*/
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
/*
* proc read functions
- *
- * From known use-cases we expect about 10 entries in a receive list to be
- * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
- *
*/
static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
struct receiver *r;
struct hlist_node *n;
- rcu_read_lock();
hlist_for_each_entry_rcu(r, n, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
" %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
(unsigned long)r->func, (unsigned long)r->data,
r->matches, r->ident);
}
- rcu_read_unlock();
}
static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
.release = single_release,
};
+static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
+ struct net_device *dev,
+ struct dev_rcv_lists *d)
+{
+ if (!hlist_empty(&d->rx[idx])) {
+ can_print_recv_banner(m);
+ can_print_rcvlist(m, &d->rx[idx], dev);
+ } else
+ seq_printf(m, " (%s: no entry)\n", DNAME(dev));
+}
+
static int can_rcvlist_proc_show(struct seq_file *m, void *v)
{
/* double cast to prevent GCC warning */
int idx = (int)(long)m->private;
+ struct net_device *dev;
struct dev_rcv_lists *d;
- struct hlist_node *n;
seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- if (!hlist_empty(&d->rx[idx])) {
- can_print_recv_banner(m);
- can_print_rcvlist(m, &d->rx[idx], d->dev);
- } else
- seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
+ /* receive list for 'all' CAN devices (dev == NULL) */
+ d = &can_rx_alldev_list;
+ can_rcvlist_proc_show_one(m, idx, NULL, d);
+
+ /* receive list for registered CAN devices */
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->type == ARPHRD_CAN && dev->ml_priv)
+ can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
}
+
rcu_read_unlock();
seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
.release = single_release,
};
+static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
+ struct net_device *dev,
+ struct dev_rcv_lists *d)
+{
+ int i;
+ int all_empty = 1;
+
+	/* check whether at least one list is non-empty */
+ for (i = 0; i < 0x800; i++)
+ if (!hlist_empty(&d->rx_sff[i])) {
+ all_empty = 0;
+ break;
+ }
+
+ if (!all_empty) {
+ can_print_recv_banner(m);
+ for (i = 0; i < 0x800; i++) {
+ if (!hlist_empty(&d->rx_sff[i]))
+ can_print_rcvlist(m, &d->rx_sff[i], dev);
+ }
+ } else
+ seq_printf(m, " (%s: no entry)\n", DNAME(dev));
+}
+
static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
+ struct net_device *dev;
struct dev_rcv_lists *d;
- struct hlist_node *n;
/* RX_SFF */
seq_puts(m, "\nreceive list 'rx_sff':\n");
rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- int i, all_empty = 1;
- /* check wether at least one list is non-empty */
- for (i = 0; i < 0x800; i++)
- if (!hlist_empty(&d->rx_sff[i])) {
- all_empty = 0;
- break;
- }
-
- if (!all_empty) {
- can_print_recv_banner(m);
- for (i = 0; i < 0x800; i++) {
- if (!hlist_empty(&d->rx_sff[i]))
- can_print_rcvlist(m, &d->rx_sff[i],
- d->dev);
- }
- } else
- seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
+
+ /* sff receive list for 'all' CAN devices (dev == NULL) */
+ d = &can_rx_alldev_list;
+ can_rcvlist_sff_proc_show_one(m, NULL, d);
+
+ /* sff receive list for registered CAN devices */
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->type == ARPHRD_CAN && dev->ml_priv)
+ can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
}
+
rcu_read_unlock();
seq_putc(m, '\n');
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec..bcc490cc945 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1113,19 +1113,7 @@ void dev_load(struct net *net, const char *name)
}
EXPORT_SYMBOL(dev_load);
-/**
- * dev_open - prepare an interface for use.
- * @dev: device to open
- *
- * Takes a device from down to up state. The device's private open
- * function is invoked and then the multicast lists are loaded. Finally
- * the device is moved into the up state and a %NETDEV_UP message is
- * sent to the netdev notifier chain.
- *
- * Calling this function on an active interface is a nop. On a failure
- * a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
@@ -1133,13 +1121,6 @@ int dev_open(struct net_device *dev)
ASSERT_RTNL();
/*
- * Is it already up?
- */
-
- if (dev->flags & IFF_UP)
- return 0;
-
- /*
* Is it even present?
*/
if (!netif_device_present(dev))
@@ -1187,36 +1168,57 @@ int dev_open(struct net_device *dev)
* Wakeup transmit queue engine
*/
dev_activate(dev);
-
- /*
- * ... and announce new interface.
- */
- call_netdevice_notifiers(NETDEV_UP, dev);
}
return ret;
}
-EXPORT_SYMBOL(dev_open);
/**
- * dev_close - shutdown an interface.
- * @dev: device to shutdown
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
*
- * This function moves an active device into down state. A
- * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- * chain.
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
*/
-int dev_close(struct net_device *dev)
+int dev_open(struct net_device *dev)
+{
+ int ret;
+
+ /*
+ * Is it already up?
+ */
+ if (dev->flags & IFF_UP)
+ return 0;
+
+ /*
+ * Open device
+ */
+ ret = __dev_open(dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * ... and announce new interface.
+ */
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ call_netdevice_notifiers(NETDEV_UP, dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(dev_open);
+
+static int __dev_close(struct net_device *dev)
{
const struct net_device_ops *ops = dev->netdev_ops;
- ASSERT_RTNL();
+ ASSERT_RTNL();
might_sleep();
- if (!(dev->flags & IFF_UP))
- return 0;
-
/*
* Tell people we are going down, so that they can
* prepare to death, when device is still operating.
@@ -1252,14 +1254,34 @@ int dev_close(struct net_device *dev)
dev->flags &= ~IFF_UP;
/*
- * Tell people we are down
+ * Shutdown NET_DMA
*/
- call_netdevice_notifiers(NETDEV_DOWN, dev);
+ net_dmaengine_put();
+
+ return 0;
+}
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ __dev_close(dev);
/*
- * Shutdown NET_DMA
+ * Tell people we are down
*/
- net_dmaengine_put();
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ call_netdevice_notifiers(NETDEV_DOWN, dev);
return 0;
}
@@ -1448,13 +1470,10 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
if (skb->len > (dev->mtu + dev->hard_header_len))
return NET_RX_DROP;
- skb_dst_drop(skb);
+ skb_set_dev(skb, dev);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, dev);
- skb->mark = 0;
- secpath_reset(skb);
- nf_reset(skb);
return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
@@ -1614,6 +1633,36 @@ static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
return false;
}
+/**
+ * skb_dev_set -- assign a new device to a buffer
+ * @skb: buffer for the new device
+ * @dev: network device
+ *
+ * If an skb is owned by a device already, we have to reset
+ * all data private to the namespace a device belongs to
+ * before assigning it a new device.
+ */
+#ifdef CONFIG_NET_NS
+void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
+{
+ skb_dst_drop(skb);
+ if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
+ secpath_reset(skb);
+ nf_reset(skb);
+ skb_init_secmark(skb);
+ skb->mark = 0;
+ skb->priority = 0;
+ skb->nf_trace = 0;
+ skb->ipvs_property = 0;
+#ifdef CONFIG_NET_SCHED
+ skb->tc_index = 0;
+#endif
+ }
+ skb->dev = dev;
+}
+EXPORT_SYMBOL(skb_set_dev);
+#endif /* CONFIG_NET_NS */
+
/*
* Invalidate hardware checksum when packet is to be mangled, and
* complete checksum manually on outgoing path.
@@ -1853,6 +1902,14 @@ gso:
skb->next = nskb->next;
nskb->next = NULL;
+
+ /*
+	 * If the device doesn't need nskb->dst, release it right now while
+	 * it's hot in this CPU's cache
+ */
+ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+ skb_dst_drop(nskb);
+
rc = ops->ndo_start_xmit(nskb, dev);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
@@ -1974,6 +2031,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
+/*
+ * Returns true if either:
+ * 1. skb has frag_list and the device doesn't support FRAGLIST, or
+ * 2. skb is fragmented and the device does not support SG, or if
+ * at least one of fragments is in highmem and device does not
+ * support DMA from it.
+ */
+static inline int skb_needs_linearize(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
+ illegal_highdma(dev, skb)));
+}
+
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -2010,18 +2082,8 @@ int dev_queue_xmit(struct sk_buff *skb)
if (netif_needs_gso(dev, skb))
goto gso;
- if (skb_has_frags(skb) &&
- !(dev->features & NETIF_F_FRAGLIST) &&
- __skb_linearize(skb))
- goto out_kfree_skb;
-
- /* Fragmented skb is linearized if device does not support SG,
- * or if at least one of fragments is in highmem and device
- * does not support DMA from it.
- */
- if (skb_shinfo(skb)->nr_frags &&
- (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
- __skb_linearize(skb))
+ /* Convert a paged skb to linear, if required */
+ if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
goto out_kfree_skb;
/* If packet is not checksummed and device does not support
@@ -2041,7 +2103,7 @@ gso:
rcu_read_lock_bh();
txq = dev_pick_tx(dev, skb);
- q = rcu_dereference(txq->qdisc);
+ q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
@@ -2422,6 +2484,7 @@ int netif_receive_skb(struct sk_buff *skb)
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
struct net_device *null_or_orig;
+ struct net_device *null_or_bond;
int ret = NET_RX_DROP;
__be16 type;
@@ -2487,12 +2550,24 @@ ncls:
if (!skb)
goto out;
+ /*
+ * Make sure frames received on VLAN interfaces stacked on
+ * bonding interfaces still make their way to any base bonding
+ * device that may have registered for a specific ptype. The
+ * handler may have to adjust skb->dev and orig_dev.
+ */
+ null_or_bond = NULL;
+ if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+ (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+ null_or_bond = vlan_dev_real_dev(skb->dev);
+ }
+
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
- if (ptype->type == type &&
- (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
- ptype->dev == orig_dev)) {
+ if (ptype->type == type && (ptype->dev == null_or_orig ||
+ ptype->dev == skb->dev || ptype->dev == orig_dev ||
+ ptype->dev == null_or_bond)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -2561,7 +2636,7 @@ out:
return netif_receive_skb(skb);
}
-void napi_gro_flush(struct napi_struct *napi)
+static void napi_gro_flush(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
@@ -2574,7 +2649,6 @@ void napi_gro_flush(struct napi_struct *napi)
napi->gro_count = 0;
napi->gro_list = NULL;
}
-EXPORT_SYMBOL(napi_gro_flush);
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -2761,7 +2835,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
switch (ret) {
case GRO_NORMAL:
case GRO_HELD:
- skb->protocol = eth_type_trans(skb, napi->dev);
+ skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_HELD)
skb_gro_pull(skb, -ETH_HLEN);
@@ -2966,7 +3040,7 @@ static void net_rx_action(struct softirq_action *h)
* entries to the tail of this list, and only ->poll()
* calls can remove this head entry from the list.
*/
- n = list_entry(list->next, struct napi_struct, poll_list);
+ n = list_first_entry(list, struct napi_struct, poll_list);
have = netpoll_poll_lock(n);
@@ -3185,7 +3259,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
const struct net_device_stats *stats = dev_get_stats(dev);
- seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+ seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
@@ -3640,10 +3714,10 @@ void __dev_set_rx_mode(struct net_device *dev)
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
- if (dev->uc.count > 0 && !dev->uc_promisc) {
+ if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1);
dev->uc_promisc = 1;
- } else if (dev->uc.count == 0 && dev->uc_promisc) {
+ } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = 0;
}
@@ -4211,7 +4285,7 @@ static void dev_addr_discard(struct net_device *dev)
netif_addr_lock_bh(dev);
__dev_addr_discard(&dev->mc_list);
- dev->mc_count = 0;
+ netdev_mc_count(dev) = 0;
netif_addr_unlock_bh(dev);
}
@@ -4247,18 +4321,10 @@ unsigned dev_get_flags(const struct net_device *dev)
}
EXPORT_SYMBOL(dev_get_flags);
-/**
- * dev_change_flags - change device settings
- * @dev: device
- * @flags: device state flags
- *
- * Change settings on device based state flags. The flags are
- * in the userspace exported format.
- */
-int dev_change_flags(struct net_device *dev, unsigned flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
- int ret, changes;
int old_flags = dev->flags;
+ int ret;
ASSERT_RTNL();
@@ -4289,17 +4355,12 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
ret = 0;
if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
- ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+ ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
if (!ret)
dev_set_rx_mode(dev);
}
- if (dev->flags & IFF_UP &&
- ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
- IFF_VOLATILE)))
- call_netdevice_notifiers(NETDEV_CHANGE, dev);
-
if ((flags ^ dev->gflags) & IFF_PROMISC) {
int inc = (flags & IFF_PROMISC) ? 1 : -1;
@@ -4318,11 +4379,47 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
dev_set_allmulti(dev, inc);
}
- /* Exclude state transition flags, already notified */
- changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+ return ret;
+}
+
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+{
+ unsigned int changes = dev->flags ^ old_flags;
+
+ if (changes & IFF_UP) {
+ if (dev->flags & IFF_UP)
+ call_netdevice_notifiers(NETDEV_UP, dev);
+ else
+ call_netdevice_notifiers(NETDEV_DOWN, dev);
+ }
+
+ if (dev->flags & IFF_UP &&
+ (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+ int ret, changes;
+ int old_flags = dev->flags;
+
+ ret = __dev_change_flags(dev, flags);
+ if (ret < 0)
+ return ret;
+
+ changes = old_flags ^ dev->flags;
if (changes)
rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
+ __dev_notify_flags(dev, old_flags);
return ret;
}
EXPORT_SYMBOL(dev_change_flags);
@@ -4813,6 +4910,10 @@ static void rollback_registered_many(struct list_head *head)
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ if (!dev->rtnl_link_ops ||
+ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+
/*
* Flush the unicast and multicast chains
*/
@@ -4830,7 +4931,7 @@ static void rollback_registered_many(struct list_head *head)
}
/* Process any work delayed until the end of the batch */
- dev = list_entry(head->next, struct net_device, unreg_list);
+ dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
synchronize_net();
@@ -5039,7 +5140,9 @@ int register_netdevice(struct net_device *dev)
* Prevent userspace races by waiting until the network
* device is fully setup before sending notifications.
*/
- rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ if (!dev->rtnl_link_ops ||
+ dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
out:
return ret;
@@ -5216,7 +5319,7 @@ void netdev_run_todo(void)
while (!list_empty(&list)) {
struct net_device *dev
- = list_entry(list.next, struct net_device, todo_list);
+ = list_first_entry(&list, struct net_device, todo_list);
list_del(&dev->todo_list);
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5367,6 +5470,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
netdev_init_queues(dev);
+ INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+ dev->ethtool_ntuple_list.count = 0;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5403,6 +5508,9 @@ void free_netdev(struct net_device *dev)
/* Flush device addresses */
dev_addr_flush(dev);
+ /* Clear ethtool n-tuple list */
+ ethtool_ntuple_flush(dev);
+
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 9e2fa39f22a..fd91569e239 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -96,6 +96,8 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
int err;
+	if (alen != dev->addr_len)
+		return -EINVAL;
 	netif_addr_lock_bh(dev);
err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
if (!err)
__dev_set_rx_mode(dev);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b8e9d3a8688..f8c87497535 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -296,7 +296,6 @@ static int dropmon_net_event(struct notifier_block *ev_block,
new_stat->dev = dev;
new_stat->last_rx = jiffies;
- INIT_RCU_HEAD(&new_stat->rcu);
spin_lock(&trace_state_lock);
list_add_rcu(&new_stat->list, &hw_stats_list);
spin_unlock(&trace_state_lock);
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d0..cb1b3488b73 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
+#include <linux/sched.h>
#include <net/dst.h>
@@ -79,6 +80,7 @@ loop:
while ((dst = next) != NULL) {
next = dst->next;
prefetch(&next->next);
+ cond_resched();
if (likely(atomic_read(&dst->__refcnt))) {
last->next = dst;
last = dst;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d..0f2f82185ec 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -120,7 +120,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
* NETIF_F_xxx values in include/linux/netdevice.h
*/
static const u32 flags_dup_features =
- ETH_FLAG_LRO;
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
u32 ethtool_op_get_flags(struct net_device *dev)
{
@@ -134,19 +134,44 @@ u32 ethtool_op_get_flags(struct net_device *dev)
int ethtool_op_set_flags(struct net_device *dev, u32 data)
{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ unsigned long features = dev->features;
+
if (data & ETH_FLAG_LRO)
- dev->features |= NETIF_F_LRO;
+ features |= NETIF_F_LRO;
else
- dev->features &= ~NETIF_F_LRO;
+ features &= ~NETIF_F_LRO;
+
+ if (data & ETH_FLAG_NTUPLE) {
+ if (!ops->set_rx_ntuple)
+ return -EOPNOTSUPP;
+ features |= NETIF_F_NTUPLE;
+ } else {
+ /* safe to clear regardless */
+ features &= ~NETIF_F_NTUPLE;
+ }
+ dev->features = features;
return 0;
}
+void ethtool_ntuple_flush(struct net_device *dev)
+{
+ struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
+
+ list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
+ list_del(&fsc->list);
+ kfree(fsc);
+ }
+ dev->ethtool_ntuple_list.count = 0;
+}
+EXPORT_SYMBOL(ethtool_ntuple_flush);
+
/* Handlers for each ethtool command */
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
int err;
if (!dev->ethtool_ops->get_settings)
@@ -174,7 +199,10 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -209,7 +237,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
return 0;
}
-static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -222,7 +253,10 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +300,315 @@ err_out:
return ret;
}
+static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
+ struct ethtool_rx_ntuple_flow_spec *spec,
+ struct ethtool_rx_ntuple_flow_spec_container *fsc)
+{
+
+ /* don't add filters forever */
+ if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
+ /* free the container */
+ kfree(fsc);
+ return;
+ }
+
+ /* Copy the whole filter over */
+ fsc->fs.flow_type = spec->flow_type;
+ memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
+ memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
+
+ fsc->fs.vlan_tag = spec->vlan_tag;
+ fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
+ fsc->fs.data = spec->data;
+ fsc->fs.data_mask = spec->data_mask;
+ fsc->fs.action = spec->action;
+
+ /* add to the list */
+ list_add_tail_rcu(&fsc->list, &list->list);
+ list->count++;
+}
+
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rx_ntuple cmd;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
+ int ret;
+
+ if (!(dev->features & NETIF_F_NTUPLE))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ /*
+ * Cache filter in dev struct for GET operation only if
+ * the underlying driver doesn't have its own GET operation, and
+ * only if the filter was added successfully. First make sure we
+ * can allocate the filter, then continue if successful.
+ */
+ if (!ops->get_rx_ntuple) {
+ fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
+ if (!fsc)
+ return -ENOMEM;
+ }
+
+ ret = ops->set_rx_ntuple(dev, &cmd);
+ if (ret) {
+ kfree(fsc);
+ return ret;
+ }
+
+ if (!ops->get_rx_ntuple)
+ __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
+
+ return ret;
+}
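The allocate-before-commit ordering above avoids ending up with a filter the driver has accepted but the kernel cannot cache. A generic sketch of the same pattern with made-up names (nothing here is the real ethtool API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct filter { char desc[32]; struct filter *next; };
static struct filter *cache_head;

/* Stand-in for the driver callback that may reject the request. */
static int driver_set_filter(const char *desc)
{
	return strlen(desc) ? 0 : -1;
}

static int add_filter(const char *desc)
{
	/* 1. allocate the bookkeeping first, so a later failure is impossible */
	struct filter *f = malloc(sizeof(*f));
	if (!f)
		return -1;

	/* 2. let the driver accept or reject the filter */
	if (driver_set_filter(desc)) {
		free(f);	/* nothing committed yet */
		return -1;
	}

	/* 3. only now commit it to the cached list */
	snprintf(f->desc, sizeof(f->desc), "%s", desc);
	f->next = cache_head;
	cache_head = f;
	return 0;
}

int main(void)
{
	printf("%d %d\n", add_filter("tcp4 dst 80"), add_filter(""));
	return 0;
}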
+
+static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc;
+ u8 *data;
+ char *p;
+ int ret, i, num_strings = 0;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ ret = ops->get_sset_count(dev, gstrings.string_set);
+ if (ret < 0)
+ return ret;
+
+ gstrings.len = ret;
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ if (ops->get_rx_ntuple) {
+ /* driver-specific filter grab */
+ ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
+ goto copy;
+ }
+
+ /* default ethtool filter grab */
+ i = 0;
+ p = (char *)data;
+ list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
+ sprintf(p, "Filter %d:\n", i);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ sprintf(p, "\tFlow Type: TCP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case UDP_V4_FLOW:
+ sprintf(p, "\tFlow Type: UDP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tFlow Type: SCTP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: AH ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tFlow Type: Raw IP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tFlow Type: IPv4\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ default:
+ sprintf(p, "\tFlow Type: Unknown\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ goto unknown_filter;
+ };
+
+ /* now the rest of the filters */
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.psrc,
+ fsc->fs.m_u.tcp_ip4_spec.psrc);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.pdst,
+ fsc->fs.m_u.tcp_ip4_spec.pdst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.tos,
+ fsc->fs.m_u.tcp_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ case ESP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSPI: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.spi,
+ fsc->fs.m_u.ah_ip4_spec.spi);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.tos,
+ fsc->fs.m_u.ah_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+ fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.tos,
+ fsc->fs.m_u.usr_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip_ver,
+ fsc->fs.m_u.usr_ip4_spec.ip_ver);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.proto,
+ fsc->fs.m_u.usr_ip4_spec.proto);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ };
+ sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
+ fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ sprintf(p, "\tAction: Drop\n");
+ else
+ sprintf(p, "\tAction: Direct to queue %d\n",
+ fsc->fs.action);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+unknown_filter:
+ i++;
+ }
+copy:
+ /* indicate to userspace how many strings we actually have */
+ gstrings.len = num_strings;
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
@@ -324,7 +667,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
@@ -456,9 +799,12 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+ struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
if (!dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
@@ -470,7 +816,10 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
return 0;
}
-static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -485,7 +834,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+ struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
@@ -839,7 +1188,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
u32 cmd, u32 (*actor)(struct net_device *))
{
- struct ethtool_value edata = { cmd };
+ struct ethtool_value edata = { .cmd = cmd };
if (!actor)
return -EOPNOTSUPP;
@@ -880,7 +1229,10 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
{
struct ethtool_flash efl;
@@ -927,6 +1279,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPERMADDR:
case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
case ETHTOOL_GPFLAGS:
case ETHTOOL_GRXFH:
@@ -1112,6 +1465,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_RESET:
rc = ethtool_reset(dev, useraddr);
break;
+ case ETHTOOL_SRXNTUPLE:
+ rc = ethtool_set_rx_ntuple(dev, useraddr);
+ break;
+ case ETHTOOL_GRXNTUPLE:
+ rc = ethtool_get_rx_ntuple(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 02a3b2c69c1..9a24377146b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -708,7 +708,7 @@ static struct notifier_block fib_rules_notifier = {
.notifier_call = fib_rules_event,
};
-static int fib_rules_net_init(struct net *net)
+static int __net_init fib_rules_net_init(struct net *net)
{
INIT_LIST_HEAD(&net->rules_ops);
spin_lock_init(&net->rules_mod_lock);
diff --git a/net/core/filter.c b/net/core/filter.c
index 08db7b9143a..d38ef7fd50f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -86,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
return err;
rcu_read_lock_bh();
- filter = rcu_dereference(sk->sk_filter);
+ filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
unsigned int pkt_len = sk_run_filter(skb, filter->insns,
filter->len);
@@ -521,7 +521,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
}
rcu_read_lock_bh();
- old_fp = rcu_dereference(sk->sk_filter);
+ old_fp = rcu_dereference_bh(sk->sk_filter);
rcu_assign_pointer(sk->sk_filter, fp);
rcu_read_unlock_bh();
@@ -529,6 +529,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
sk_filter_delayed_uncharge(sk, old_fp);
return 0;
}
+EXPORT_SYMBOL_GPL(sk_attach_filter);
int sk_detach_filter(struct sock *sk)
{
@@ -536,7 +537,7 @@ int sk_detach_filter(struct sock *sk)
struct sk_filter *filter;
rcu_read_lock_bh();
- filter = rcu_dereference(sk->sk_filter);
+ filter = rcu_dereference_bh(sk->sk_filter);
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_delayed_uncharge(sk, filter);
@@ -545,3 +546,4 @@ int sk_detach_filter(struct sock *sk)
rcu_read_unlock_bh();
return ret;
}
+EXPORT_SYMBOL_GPL(sk_detach_filter);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f35377b643e..d102f6d9abd 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2417,8 +2417,7 @@ EXPORT_SYMBOL(neigh_seq_stop);
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct proc_dir_entry *pde = seq->private;
- struct neigh_table *tbl = pde->data;
+ struct neigh_table *tbl = seq->private;
int cpu;
if (*pos == 0)
@@ -2435,8 +2434,7 @@ static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct proc_dir_entry *pde = seq->private;
- struct neigh_table *tbl = pde->data;
+ struct neigh_table *tbl = seq->private;
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
@@ -2455,8 +2453,7 @@ static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
- struct proc_dir_entry *pde = seq->private;
- struct neigh_table *tbl = pde->data;
+ struct neigh_table *tbl = seq->private;
struct neigh_statistics *st = v;
if (v == SEQ_START_TOKEN) {
@@ -2501,7 +2498,7 @@ static int neigh_stat_seq_open(struct inode *inode, struct file *file)
if (!ret) {
struct seq_file *sf = file->private_data;
- sf->private = PDE(inode);
+ sf->private = PDE(inode)->data;
}
return ret;
};
@@ -2559,9 +2556,11 @@ EXPORT_SYMBOL(neigh_app_ns);
#ifdef CONFIG_SYSCTL
+#define NEIGH_VARS_MAX 19
+
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table neigh_vars[__NET_NEIGH_MAX];
+ struct ctl_table neigh_vars[NEIGH_VARS_MAX];
char *dev_name;
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
@@ -2678,8 +2677,7 @@ static struct neigh_sysctl_table {
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- int p_id, int pdev_id, char *p_name,
- proc_handler *handler)
+ char *p_name, proc_handler *handler)
{
struct neigh_sysctl_table *t;
const char *dev_name_source = NULL;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fbc1c7472c5..099c753c421 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
const struct iw_statistics *iw;
ssize_t ret = -EINVAL;
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
if (dev_isalive(dev)) {
iw = get_wireless_stats(dev);
if (iw)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 0b4d0d35ef4..7aa69725376 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -407,11 +407,24 @@ static void arp_reply(struct sk_buff *skb)
__be32 sip, tip;
unsigned char *sha;
struct sk_buff *send_skb;
- struct netpoll *np = NULL;
+ struct netpoll *np, *tmp;
+ unsigned long flags;
+ int hits = 0;
+
+ if (list_empty(&npinfo->rx_np))
+ return;
+
+ /* Before checking the packet, we do some early
+ inspection of whether this is interesting at all */
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (np->dev == skb->dev)
+ hits++;
+ }
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
- if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
- np = npinfo->rx_np;
- if (!np)
+ /* No netpoll struct is using this dev */
+ if (!hits)
return;
/* No arp on this interface */
@@ -437,77 +450,91 @@ static void arp_reply(struct sk_buff *skb)
arp_ptr += skb->dev->addr_len;
memcpy(&sip, arp_ptr, 4);
arp_ptr += 4;
- /* if we actually cared about dst hw addr, it would get copied here */
+ /* If we actually cared about dst hw addr,
+ it would get copied here */
arp_ptr += skb->dev->addr_len;
memcpy(&tip, arp_ptr, 4);
/* Should we ignore arp? */
- if (tip != np->local_ip ||
- ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
+ if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
return;
size = arp_hdr_len(skb->dev);
- send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
- LL_RESERVED_SPACE(np->dev));
- if (!send_skb)
- return;
-
- skb_reset_network_header(send_skb);
- arp = (struct arphdr *) skb_put(send_skb, size);
- send_skb->dev = skb->dev;
- send_skb->protocol = htons(ETH_P_ARP);
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (tip != np->local_ip)
+ continue;
- /* Fill the device header for the ARP frame */
- if (dev_hard_header(send_skb, skb->dev, ptype,
- sha, np->dev->dev_addr,
- send_skb->len) < 0) {
- kfree_skb(send_skb);
- return;
- }
+ send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
+ LL_RESERVED_SPACE(np->dev));
+ if (!send_skb)
+ continue;
- /*
- * Fill out the arp protocol part.
- *
- * we only support ethernet device type,
- * which (according to RFC 1390) should always equal 1 (Ethernet).
- */
+ skb_reset_network_header(send_skb);
+ arp = (struct arphdr *) skb_put(send_skb, size);
+ send_skb->dev = skb->dev;
+ send_skb->protocol = htons(ETH_P_ARP);
- arp->ar_hrd = htons(np->dev->type);
- arp->ar_pro = htons(ETH_P_IP);
- arp->ar_hln = np->dev->addr_len;
- arp->ar_pln = 4;
- arp->ar_op = htons(type);
+ /* Fill the device header for the ARP frame */
+ if (dev_hard_header(send_skb, skb->dev, ptype,
+ sha, np->dev->dev_addr,
+ send_skb->len) < 0) {
+ kfree_skb(send_skb);
+ continue;
+ }
- arp_ptr=(unsigned char *)(arp + 1);
- memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
- arp_ptr += np->dev->addr_len;
- memcpy(arp_ptr, &tip, 4);
- arp_ptr += 4;
- memcpy(arp_ptr, sha, np->dev->addr_len);
- arp_ptr += np->dev->addr_len;
- memcpy(arp_ptr, &sip, 4);
+ /*
+ * Fill out the arp protocol part.
+ *
+ * we only support ethernet device type,
+ * which (according to RFC 1390) should
+ * always equal 1 (Ethernet).
+ */
- netpoll_send_skb(np, send_skb);
+ arp->ar_hrd = htons(np->dev->type);
+ arp->ar_pro = htons(ETH_P_IP);
+ arp->ar_hln = np->dev->addr_len;
+ arp->ar_pln = 4;
+ arp->ar_op = htons(type);
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
+ arp_ptr += np->dev->addr_len;
+ memcpy(arp_ptr, &tip, 4);
+ arp_ptr += 4;
+ memcpy(arp_ptr, sha, np->dev->addr_len);
+ arp_ptr += np->dev->addr_len;
+ memcpy(arp_ptr, &sip, 4);
+
+ netpoll_send_skb(np, send_skb);
+
+ /* If there are several rx_hooks for the same address,
+ we're fine by sending a single reply */
+ break;
+ }
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
int __netpoll_rx(struct sk_buff *skb)
{
int proto, len, ulen;
+ int hits = 0;
struct iphdr *iph;
struct udphdr *uh;
- struct netpoll_info *npi = skb->dev->npinfo;
- struct netpoll *np = npi->rx_np;
+ struct netpoll_info *npinfo = skb->dev->npinfo;
+ struct netpoll *np, *tmp;
- if (!np)
+ if (list_empty(&npinfo->rx_np))
goto out;
+
if (skb->dev->type != ARPHRD_ETHER)
goto out;
/* check if netpoll clients need ARP */
if (skb->protocol == htons(ETH_P_ARP) &&
atomic_read(&trapped)) {
- skb_queue_tail(&npi->arp_tx, skb);
+ skb_queue_tail(&npinfo->arp_tx, skb);
return 1;
}
@@ -551,16 +578,23 @@ int __netpoll_rx(struct sk_buff *skb)
goto out;
if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
goto out;
- if (np->local_ip && np->local_ip != iph->daddr)
- goto out;
- if (np->remote_ip && np->remote_ip != iph->saddr)
- goto out;
- if (np->local_port && np->local_port != ntohs(uh->dest))
- goto out;
- np->rx_hook(np, ntohs(uh->source),
- (char *)(uh+1),
- ulen - sizeof(struct udphdr));
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (np->local_ip && np->local_ip != iph->daddr)
+ continue;
+ if (np->remote_ip && np->remote_ip != iph->saddr)
+ continue;
+ if (np->local_port && np->local_port != ntohs(uh->dest))
+ continue;
+
+ np->rx_hook(np, ntohs(uh->source),
+ (char *)(uh+1),
+ ulen - sizeof(struct udphdr));
+ hits++;
+ }
+
+ if (!hits)
+ goto out;
kfree_skb(skb);
return 1;
@@ -684,6 +718,7 @@ int netpoll_setup(struct netpoll *np)
struct net_device *ndev = NULL;
struct in_device *in_dev;
struct netpoll_info *npinfo;
+ struct netpoll *npe, *tmp;
unsigned long flags;
int err;
@@ -704,7 +739,7 @@ int netpoll_setup(struct netpoll *np)
}
npinfo->rx_flags = 0;
- npinfo->rx_np = NULL;
+ INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
skb_queue_head_init(&npinfo->arp_tx);
@@ -785,7 +820,7 @@ int netpoll_setup(struct netpoll *np)
if (np->rx_hook) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
npinfo->rx_flags |= NETPOLL_RX_ENABLED;
- npinfo->rx_np = np;
+ list_add_tail(&np->rx, &npinfo->rx_np);
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
@@ -801,9 +836,16 @@ int netpoll_setup(struct netpoll *np)
return 0;
release:
- if (!ndev->npinfo)
+ if (!ndev->npinfo) {
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
+ npe->dev = NULL;
+ }
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
kfree(npinfo);
- np->dev = NULL;
+ }
+
dev_put(ndev);
return err;
}
@@ -823,10 +865,11 @@ void netpoll_cleanup(struct netpoll *np)
if (np->dev) {
npinfo = np->dev->npinfo;
if (npinfo) {
- if (npinfo->rx_np == np) {
+ if (!list_empty(&npinfo->rx_np)) {
spin_lock_irqsave(&npinfo->rx_lock, flags);
- npinfo->rx_np = NULL;
- npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+ list_del(&np->rx);
+ if (list_empty(&npinfo->rx_np))
+ npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c72642..43923811bd6 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2188,12 +2188,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
/* If there was already an IPSEC SA, we keep it as is, else
* we go look for it ...
*/
+#define DUMMY_MARK 0
static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
{
struct xfrm_state *x = pkt_dev->flows[flow].x;
if (!x) {
/*slow path: we dont already have xfrm_state*/
- x = xfrm_stateonly_find(&init_net,
+ x = xfrm_stateonly_find(&init_net, DUMMY_MARK,
(xfrm_address_t *)&pkt_dev->cur_daddr,
(xfrm_address_t *)&pkt_dev->cur_saddr,
AF_INET,
@@ -3524,6 +3525,7 @@ static int pktgen_thread_worker(void *arg)
wait_event_interruptible_timeout(t->queue,
t->control != 0,
HZ/10);
+ try_to_freeze();
continue;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 794bcb897ff..4568120d853 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,6 +35,7 @@
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
+#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -89,6 +90,14 @@ int rtnl_is_locked(void)
}
EXPORT_SYMBOL(rtnl_is_locked);
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rtnl_is_held(void)
+{
+ return lockdep_is_held(&rtnl_mutex);
+}
+EXPORT_SYMBOL(lockdep_rtnl_is_held);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
static struct rtnl_link *rtnl_msg_handlers[NPROTO];
static inline int rtm_msgindex(int msgtype)
@@ -548,6 +557,19 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
}
}
+static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ const struct ifinfomsg *ifm)
+{
+ unsigned int flags = ifm->ifi_flags;
+
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+ (dev->flags & ~ifm->ifi_change);
+
+ return flags;
+}
+
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
const struct net_device_stats *b)
{
@@ -580,6 +602,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
a->tx_compressed = b->tx_compressed;
};
+static inline int rtnl_vfinfo_size(const struct net_device *dev)
+{
+ if (dev->dev.parent && dev_is_pci(dev->dev.parent))
+ return dev_num_vf(dev->dev.parent) *
+ sizeof(struct ifla_vf_info);
+ else
+ return 0;
+}
+
static inline size_t if_nlmsg_size(const struct net_device *dev)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -597,6 +628,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ + nla_total_size(4) /* IFLA_NUM_VF */
+ + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+ rtnl_link_get_size(dev); /* IFLA_LINKINFO */
}
@@ -665,6 +698,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
stats = dev_get_stats(dev);
copy_rtnl_link_stats(nla_data(attr), stats);
+ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+ int i;
+ struct ifla_vf_info ivi;
+
+ NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+ for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
+ break;
+ NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+ }
+ }
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
@@ -725,6 +769,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+ [IFLA_VF_MAC] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_mac) },
+ [IFLA_VF_VLAN] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_tx_rate) },
};
EXPORT_SYMBOL(ifla_policy);
@@ -875,13 +925,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
}
if (ifm->ifi_flags || ifm->ifi_change) {
- unsigned int flags = ifm->ifi_flags;
-
- /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
- if (ifm->ifi_change)
- flags = (flags & ifm->ifi_change) |
- (dev->flags & ~ifm->ifi_change);
- err = dev_change_flags(dev, flags);
+ err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
if (err < 0)
goto errout;
}
@@ -898,6 +942,41 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
write_unlock_bh(&dev_base_lock);
}
+ if (tb[IFLA_VF_MAC]) {
+ struct ifla_vf_mac *ivm;
+ ivm = nla_data(tb[IFLA_VF_MAC]);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_mac)
+ err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_VF_VLAN]) {
+ struct ifla_vf_vlan *ivv;
+ ivv = nla_data(tb[IFLA_VF_VLAN]);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_vlan)
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+ ivv->vlan,
+ ivv->qos);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+ err = 0;
+
+ if (tb[IFLA_VF_TX_RATE]) {
+ struct ifla_vf_tx_rate *ivt;
+ ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_tx_rate)
+ err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
err = 0;
errout:
@@ -989,6 +1068,26 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
return 0;
}
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
+{
+ unsigned int old_flags;
+ int err;
+
+ old_flags = dev->flags;
+ if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
+ err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
+ if (err < 0)
+ return err;
+ }
+
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+
+ __dev_notify_flags(dev, old_flags);
+ return 0;
+}
+EXPORT_SYMBOL(rtnl_configure_link);
+
struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
@@ -1010,6 +1109,7 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
dev->real_num_tx_queues = real_num_queues;
if (strchr(dev->name, '%')) {
@@ -1139,7 +1239,7 @@ replay:
if (!(nlh->nlmsg_flags & NLM_F_CREATE))
return -ENODEV;
- if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
+ if (ifm->ifi_index)
return -EOPNOTSUPP;
if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
return -EOPNOTSUPP;
@@ -1170,9 +1270,15 @@ replay:
err = ops->newlink(net, dev, tb, data);
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev))
+ if (err < 0 && !IS_ERR(dev)) {
free_netdev(dev);
+ goto out;
+ }
+ err = rtnl_configure_link(dev, ifm);
+ if (err < 0)
+ unregister_netdevice(dev);
+out:
put_net(dest_net);
return err;
}
@@ -1361,17 +1467,14 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
struct net_device *dev = ptr;
switch (event) {
- case NETDEV_UNREGISTER:
- rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
- break;
case NETDEV_UP:
case NETDEV_DOWN:
- rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
- break;
+ case NETDEV_PRE_UP:
case NETDEV_POST_INIT:
case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_GOING_DOWN:
+ case NETDEV_UNREGISTER:
case NETDEV_UNREGISTER_BATCH:
break;
default:
@@ -1386,7 +1489,7 @@ static struct notifier_block rtnetlink_dev_notifier = {
};
-static int rtnetlink_net_init(struct net *net)
+static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
@@ -1397,7 +1500,7 @@ static int rtnetlink_net_init(struct net *net)
return 0;
}
-static void rtnetlink_net_exit(struct net *net)
+static void __net_exit rtnetlink_net_exit(struct net *net)
{
netlink_kernel_release(net->rtnl);
net->rtnl = NULL;
diff --git a/net/core/scm.c b/net/core/scm.c
index b7ba91b074b..9b264634acf 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -156,6 +156,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
switch (cmsg->cmsg_type)
{
case SCM_RIGHTS:
+ if (!sock->ops || sock->ops->family != PF_UNIX)
+ goto error;
err=scm_fp_copy(cmsg, &p->fp);
if (err<0)
goto error;
diff --git a/net/core/sock.c b/net/core/sock.c
index e1f6f225f01..fcd397a762f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -741,7 +741,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct timeval tm;
} v;
- unsigned int lv = sizeof(int);
+ int lv = sizeof(int);
int len;
if (get_user(len, optlen))
@@ -1073,7 +1073,8 @@ static void __sk_free(struct sock *sk)
if (sk->sk_destruct)
sk->sk_destruct(sk);
- filter = rcu_dereference(sk->sk_filter);
+ filter = rcu_dereference_check(sk->sk_filter,
+ atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
rcu_assign_pointer(sk->sk_filter, NULL);
@@ -2140,13 +2141,13 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
-static int sock_inuse_init_net(struct net *net)
+static int __net_init sock_inuse_init_net(struct net *net)
{
net->core.inuse = alloc_percpu(struct prot_inuse);
return net->core.inuse ? 0 : -ENOMEM;
}
-static void sock_inuse_exit_net(struct net *net)
+static void __net_exit sock_inuse_exit_net(struct net *net)
{
free_percpu(net->core.inuse);
}
@@ -2228,13 +2229,10 @@ int proto_register(struct proto *prot, int alloc_slab)
}
if (prot->rsk_prot != NULL) {
- static const char mask[] = "request_sock_%s";
-
- prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
if (prot->rsk_prot->slab_name == NULL)
goto out_free_sock_slab;
- sprintf(prot->rsk_prot->slab_name, mask, prot->name);
prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
prot->rsk_prot->obj_size, 0,
SLAB_HWCACHE_ALIGN, NULL);
@@ -2247,14 +2245,11 @@ int proto_register(struct proto *prot, int alloc_slab)
}
if (prot->twsk_prot != NULL) {
- static const char mask[] = "tw_sock_%s";
-
- prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
if (prot->twsk_prot->twsk_slab_name == NULL)
goto out_free_request_sock_slab;
- sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
prot->twsk_prot->twsk_slab =
kmem_cache_create(prot->twsk_prot->twsk_slab_name,
prot->twsk_prot->twsk_obj_size,
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index db9f5b39388..813e399220a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("GPL");
/**************** DCB attribute policies *************************************/
/* DCB netlink attributes policy */
-static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
[DCB_ATTR_STATE] = {.type = NLA_U8},
[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
@@ -68,7 +68,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
};
/* DCB priority flow control to User Priority nested attributes */
-static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
@@ -81,7 +81,7 @@ static struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
};
/* DCB priority grouping nested attributes */
-static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
@@ -103,7 +103,7 @@ static struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
};
/* DCB traffic class nested attributes. */
-static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
+static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
@@ -112,7 +112,7 @@ static struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
};
/* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
@@ -124,14 +124,14 @@ static struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
};
/* DCB capabilities nested attributes. */
-static struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN nested attributes. */
-static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
@@ -160,7 +160,7 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
};
/* DCB APP nested attributes. */
-static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
+static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
[DCB_APP_ATTR_ID] = {.type = NLA_U16},
[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f..49d27c556be 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -63,14 +63,13 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
u8 *ccid_array, array_len;
int err = 0;
- if (len < ARRAY_SIZE(ccids))
- return -EINVAL;
-
if (ccid_get_builtin_ccids(&ccid_array, &array_len))
return -ENOBUFS;
- if (put_user(array_len, optlen) ||
- copy_to_user(optval, ccid_array, array_len))
+ if (put_user(array_len, optlen))
+ err = -EFAULT;
+ else if (len > 0 && copy_to_user(optval, ccid_array,
+ len > array_len ? array_len : len))
err = -EFAULT;
kfree(ccid_array);
@@ -83,7 +82,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
va_list args;
va_start(args, fmt);
- vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+ vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
va_end(args);
slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe..6df6f8ac963 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
#include <linux/list.h>
#include <linux/module.h>
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX 255
+#define CCID_SLAB_NAME_LENGTH 32
struct tcp_info;
@@ -49,8 +51,8 @@ struct ccid_operations {
const char *ccid_name;
struct kmem_cache *ccid_hc_rx_slab,
*ccid_hc_tx_slab;
- char ccid_hc_rx_slab_name[32];
- char ccid_hc_tx_slab_name[32];
+ char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+ char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
__u32 ccid_hc_rx_obj_size,
ccid_hc_tx_obj_size;
/* Interface Routines */
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index dad7bc4878e..b195c4feaa0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -996,7 +996,7 @@ static struct inet_protosw dccp_v4_protosw = {
.flags = INET_PROTOSW_ICSK,
};
-static int dccp_v4_init_net(struct net *net)
+static int __net_init dccp_v4_init_net(struct net *net)
{
int err;
@@ -1005,7 +1005,7 @@ static int dccp_v4_init_net(struct net *net)
return err;
}
-static void dccp_v4_exit_net(struct net *net)
+static void __net_exit dccp_v4_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index baf05cf43c2..1aec6349e85 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1189,7 +1189,7 @@ static struct inet_protosw dccp_v6_protosw = {
.flags = INET_PROTOSW_ICSK,
};
-static int dccp_v6_init_net(struct net *net)
+static int __net_init dccp_v6_init_net(struct net *net)
{
int err;
@@ -1198,7 +1198,7 @@ static int dccp_v6_init_net(struct net *net)
return err;
}
-static void dccp_v6_exit_net(struct net *net)
+static void __net_exit dccp_v6_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v6_ctl_sk);
}
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcf..f5b3464f124 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
- ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
- "dccp");
+ try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
+ "dccp");
if (ret)
goto err1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 671cd1413d5..0ef7061920c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -835,6 +835,8 @@ verify_sock_status:
len = -EFAULT;
break;
}
+ if (flags & MSG_TRUNC)
+ len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb, 0);
@@ -1003,12 +1005,13 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
static inline int dccp_mib_init(void)
{
- return snmp_mib_init((void**)dccp_statistics, sizeof(struct dccp_mib));
+ return snmp_mib_init((void __percpu **)dccp_statistics,
+ sizeof(struct dccp_mib));
}
static inline void dccp_mib_exit(void)
{
- snmp_mib_free((void**)dccp_statistics);
+ snmp_mib_free((void __percpu **)dccp_statistics);
}
static int thash_entries;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index a03284061a3..a7bf03ca0a3 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1155,8 +1155,8 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
if (!(flags & MSG_TRYHARD)) {
rcu_read_lock_bh();
- for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
- rt = rcu_dereference(rt->u.dst.dn_next)) {
+ for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
+ rt = rcu_dereference_bh(rt->u.dst.dn_next)) {
if ((flp->fld_dst == rt->fl.fld_dst) &&
(flp->fld_src == rt->fl.fld_src) &&
(flp->mark == rt->fl.mark) &&
@@ -1618,9 +1618,9 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (h > s_h)
s_idx = 0;
rcu_read_lock_bh();
- for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
+ for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
rt;
- rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
+ rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) {
if (idx < s_idx)
continue;
skb_dst_set(skb, dst_clone(&rt->u.dst));
@@ -1654,12 +1654,12 @@ static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
rcu_read_lock_bh();
- rt = dn_rt_hash_table[s->bucket].chain;
+ rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
if (rt)
break;
rcu_read_unlock_bh();
}
- return rcu_dereference(rt);
+ return rt;
}
static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
@@ -1674,7 +1674,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
rcu_read_lock_bh();
rt = dn_rt_hash_table[s->bucket].chain;
}
- return rcu_dereference(rt);
+ return rcu_dereference_bh(rt);
}
static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0..205a1c12f3c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
* @len: packet length (<= skb->len)
*
*
- * Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
+ * in here instead.
*/
int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
{
struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 7d12c6a9b19..33b7dffa773 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,7 +1385,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
-unsigned long snmp_fold_field(void *mib[], int offt)
+unsigned long snmp_fold_field(void __percpu *mib[], int offt)
{
unsigned long res = 0;
int i;
@@ -1398,7 +1398,7 @@ unsigned long snmp_fold_field(void *mib[], int offt)
}
EXPORT_SYMBOL_GPL(snmp_fold_field);
-int snmp_mib_init(void *ptr[2], size_t mibsize)
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
{
BUG_ON(ptr == NULL);
ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
@@ -1416,7 +1416,7 @@ err0:
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
-void snmp_mib_free(void *ptr[2])
+void snmp_mib_free(void __percpu *ptr[2])
{
BUG_ON(ptr == NULL);
free_percpu(ptr[0]);
@@ -1460,25 +1460,25 @@ static const struct net_protocol icmp_protocol = {
static __net_init int ipv4_mib_init_net(struct net *net)
{
- if (snmp_mib_init((void **)net->mib.tcp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
sizeof(struct tcp_mib)) < 0)
goto err_tcp_mib;
- if (snmp_mib_init((void **)net->mib.ip_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
sizeof(struct ipstats_mib)) < 0)
goto err_ip_mib;
- if (snmp_mib_init((void **)net->mib.net_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
sizeof(struct linux_mib)) < 0)
goto err_net_mib;
- if (snmp_mib_init((void **)net->mib.udp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
sizeof(struct udp_mib)) < 0)
goto err_udp_mib;
- if (snmp_mib_init((void **)net->mib.udplite_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
sizeof(struct udp_mib)) < 0)
goto err_udplite_mib;
- if (snmp_mib_init((void **)net->mib.icmp_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
sizeof(struct icmp_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void **)net->mib.icmpmsg_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
sizeof(struct icmpmsg_mib)) < 0)
goto err_icmpmsg_mib;
@@ -1486,30 +1486,30 @@ static __net_init int ipv4_mib_init_net(struct net *net)
return 0;
err_icmpmsg_mib:
- snmp_mib_free((void **)net->mib.icmp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
err_icmp_mib:
- snmp_mib_free((void **)net->mib.udplite_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
err_udplite_mib:
- snmp_mib_free((void **)net->mib.udp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udp_statistics);
err_udp_mib:
- snmp_mib_free((void **)net->mib.net_statistics);
+ snmp_mib_free((void __percpu **)net->mib.net_statistics);
err_net_mib:
- snmp_mib_free((void **)net->mib.ip_statistics);
+ snmp_mib_free((void __percpu **)net->mib.ip_statistics);
err_ip_mib:
- snmp_mib_free((void **)net->mib.tcp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
err_tcp_mib:
return -ENOMEM;
}
static __net_exit void ipv4_mib_exit_net(struct net *net)
{
- snmp_mib_free((void **)net->mib.icmpmsg_statistics);
- snmp_mib_free((void **)net->mib.icmp_statistics);
- snmp_mib_free((void **)net->mib.udplite_statistics);
- snmp_mib_free((void **)net->mib.udp_statistics);
- snmp_mib_free((void **)net->mib.net_statistics);
- snmp_mib_free((void **)net->mib.ip_statistics);
- snmp_mib_free((void **)net->mib.tcp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmpmsg_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udplite_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udp_statistics);
+ snmp_mib_free((void __percpu **)net->mib.net_statistics);
+ snmp_mib_free((void __percpu **)net->mib.ip_statistics);
+ snmp_mib_free((void __percpu **)net->mib.tcp_statistics);
}
static __net_initdata struct pernet_operations ipv4_mib_ops = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7ed3e4ae93a..987b47dc69a 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -393,7 +393,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return;
- x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf2..c4dd1354280 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
* bonding can change the skb before
* sending (e.g. insert 8021q tag).
* Harald Welte : convert to make use of jenkins hash
+ * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
*/
#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
/*
* Check if we can use proxy ARP for this path
*/
-
-static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
+static inline int arp_fwd_proxy(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt)
{
struct in_device *out_dev;
int imi, omi = -1;
+ if (rt->u.dst.dev == dev)
+ return 0;
+
if (!IN_DEV_PROXY_ARP(in_dev))
return 0;
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
}
/*
+ * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
+ *
+ * RFC3069 supports proxy arp replies back to the same interface. This
+ * is done to support (ethernet) switch features, like RFC 3069, where
+ * the individual ports are not allowed to communicate with each
+ * other, BUT they are allowed to talk to the upstream router. As
+ * described in RFC 3069, it is possible to allow these hosts to
+ * communicate through the upstream router, by proxy_arp'ing.
+ *
+ * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
+ *
+ * This technology is known by different names:
+ * In RFC 3069 it is called VLAN Aggregation.
+ * Cisco and Allied Telesyn call it Private VLAN.
+ * Hewlett-Packard call it Source-Port filtering or port-isolation.
+ * Ericsson call it MAC-Forced Forwarding (RFC Draft).
+ *
+ */
+static inline int arp_fwd_pvlan(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt,
+ __be32 sip, __be32 tip)
+{
+ /* Private VLAN is only concerned about the same ethernet segment */
+ if (rt->u.dst.dev != dev)
+ return 0;
+
+ /* Don't reply to self probes (often done by windowz boxes) */
+ if (sip == tip)
+ return 0;
+
+ if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
+ return 1;
+ else
+ return 0;
+}
+
+/*
* Interface to link layer: send routine and receive handler.
*/
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
}
goto out;
} else if (IN_DEV_FORWARD(in_dev)) {
- if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
- (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+ if (addr_type == RTN_UNICAST &&
+ (arp_fwd_proxy(in_dev, dev, rt) ||
+ arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
+ {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
@@ -863,7 +907,8 @@ static int arp_process(struct sk_buff *skb)
devices (strip is candidate)
*/
if (n == NULL &&
- arp->ar_op == htons(ARPOP_REPLY) &&
+ (arp->ar_op == htons(ARPOP_REPLY) ||
+ (arp->ar_op == htons(ARPOP_REQUEST) && tip == sip)) &&
inet_addr_type(net, sip) == RTN_UNICAST)
n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
}
@@ -1239,8 +1284,7 @@ void __init arp_init(void)
dev_add_pack(&arp_packet_type);
arp_proc_init();
#ifdef CONFIG_SYSCTL
- neigh_sysctl_register(NULL, &arp_tbl.parms, NET_IPV4,
- NET_IPV4_NEIGH, "ipv4", NULL);
+ neigh_sysctl_register(NULL, &arp_tbl.parms, "ipv4", NULL);
#endif
register_netdevice_notifier(&arp_netdev_notifier);
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b65..51ca946e339 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -64,20 +64,20 @@
static struct ipv4_devconf ipv4_devconf = {
.data = {
- [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1,
+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
},
};
static struct ipv4_devconf ipv4_devconf_dflt = {
.data = {
- [NET_IPV4_CONF_ACCEPT_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SEND_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SECURE_REDIRECTS - 1] = 1,
- [NET_IPV4_CONF_SHARED_MEDIA - 1] = 1,
- [NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
+ [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
+ [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
+ [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
},
};
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *valp != val) {
struct net *net = ctl->extra2;
if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *valp = val;
+ *ppos = pos;
return restart_syscall();
+ }
if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
inet_forward_change(net);
} else if (*valp) {
@@ -1360,7 +1365,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
{ \
.procname = name, \
.data = ipv4_devconf.data + \
- NET_IPV4_CONF_ ## attr - 1, \
+ IPV4_DEVCONF_ ## attr - 1, \
.maxlen = sizeof(int), \
.mode = mval, \
.proc_handler = proc, \
@@ -1381,7 +1386,7 @@ int ipv4_doint_and_flush(ctl_table *ctl, int write,
static struct devinet_sysctl_table {
struct ctl_table_header *sysctl_header;
- struct ctl_table devinet_vars[__NET_IPV4_CONF_MAX];
+ struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
char *dev_name;
} devinet_sysctl = {
.devinet_vars = {
@@ -1408,6 +1413,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
+ DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
@@ -1486,8 +1492,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
static void devinet_sysctl_register(struct in_device *idev)
{
- neigh_sysctl_register(idev->dev, idev->arp_parms, NET_IPV4,
- NET_IPV4_NEIGH, "ipv4", NULL);
+ neigh_sysctl_register(idev->dev, idev->arp_parms, "ipv4", NULL);
__devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
&idev->cnf);
}
@@ -1502,7 +1507,7 @@ static struct ctl_table ctl_forward_entry[] = {
{
.procname = "ip_forward",
.data = &ipv4_devconf.data[
- NET_IPV4_CONF_FORWARDING - 1],
+ IPV4_DEVCONF_FORWARDING - 1],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = devinet_sysctl_forward,
@@ -1546,7 +1551,7 @@ static __net_init int devinet_init_net(struct net *net)
if (tbl == NULL)
goto err_alloc_ctl;
- tbl[0].data = &all->data[NET_IPV4_CONF_FORWARDING - 1];
+ tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
tbl[0].extra1 = all;
tbl[0].extra2 = net;
#endif
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 1948895beb6..14ca1f1c3fb 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -422,7 +422,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
return;
- x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
return;
NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 82dbf711d6d..9b3e28ed524 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -883,7 +883,7 @@ static void nl_fib_input(struct sk_buff *skb)
netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}
-static int nl_fib_lookup_init(struct net *net)
+static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
@@ -1004,7 +1004,7 @@ fail:
return err;
}
-static void __net_exit ip_fib_net_exit(struct net *net)
+static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index ed19aa6919c..1af0ea0fb6a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -62,8 +62,8 @@ static DEFINE_SPINLOCK(fib_multipath_lock);
#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
-#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
-for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)
+#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
+for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -72,7 +72,7 @@ for (nhsel=0, nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++,
#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
for (nhsel=0; nhsel < 1; nhsel++)
-#define change_nexthops(fi) { int nhsel = 0; struct fib_nh * nh = (struct fib_nh *)((fi)->fib_nh); \
+#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
for (nhsel=0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
@@ -145,9 +145,9 @@ void free_fib_info(struct fib_info *fi)
return;
}
change_nexthops(fi) {
- if (nh->nh_dev)
- dev_put(nh->nh_dev);
- nh->nh_dev = NULL;
+ if (nexthop_nh->nh_dev)
+ dev_put(nexthop_nh->nh_dev);
+ nexthop_nh->nh_dev = NULL;
} endfor_nexthops(fi);
fib_info_cnt--;
release_net(fi->fib_net);
@@ -162,9 +162,9 @@ void fib_release_info(struct fib_info *fi)
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
change_nexthops(fi) {
- if (!nh->nh_dev)
+ if (!nexthop_nh->nh_dev)
continue;
- hlist_del(&nh->nh_hash);
+ hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
fi->fib_dead = 1;
fib_info_put(fi);
@@ -395,19 +395,20 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
- nh->nh_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
- nh->nh_oif = rtnh->rtnh_ifindex;
- nh->nh_weight = rtnh->rtnh_hops + 1;
+ nexthop_nh->nh_flags =
+ (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
+ nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
+ nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
- nh->nh_gw = nla ? nla_get_be32(nla) : 0;
+ nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
#ifdef CONFIG_NET_CLS_ROUTE
nla = nla_find(attrs, attrlen, RTA_FLOW);
- nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+ nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
#endif
}
@@ -527,10 +528,6 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
if (nh->nh_gw) {
struct fib_result res;
-#ifdef CONFIG_IP_ROUTE_PERVASIVE
- if (nh->nh_flags&RTNH_F_PERVASIVE)
- return 0;
-#endif
if (nh->nh_flags&RTNH_F_ONLINK) {
struct net_device *dev;
@@ -738,7 +735,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_nhs = nhs;
change_nexthops(fi) {
- nh->nh_parent = fi;
+ nexthop_nh->nh_parent = fi;
} endfor_nexthops(fi)
if (cfg->fc_mx) {
@@ -808,7 +805,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
} else {
change_nexthops(fi) {
- if ((err = fib_check_nh(cfg, fi, nh)) != 0)
+ if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
goto failure;
} endfor_nexthops(fi)
}
@@ -843,11 +840,11 @@ link_it:
struct hlist_head *head;
unsigned int hash;
- if (!nh->nh_dev)
+ if (!nexthop_nh->nh_dev)
continue;
- hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
+ hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
head = &fib_info_devhash[hash];
- hlist_add_head(&nh->nh_hash, head);
+ hlist_add_head(&nexthop_nh->nh_hash, head);
} endfor_nexthops(fi)
spin_unlock_bh(&fib_info_lock);
return fi;
@@ -1080,21 +1077,21 @@ int fib_sync_down_dev(struct net_device *dev, int force)
prev_fi = fi;
dead = 0;
change_nexthops(fi) {
- if (nh->nh_flags&RTNH_F_DEAD)
+ if (nexthop_nh->nh_flags&RTNH_F_DEAD)
dead++;
- else if (nh->nh_dev == dev &&
- nh->nh_scope != scope) {
- nh->nh_flags |= RTNH_F_DEAD;
+ else if (nexthop_nh->nh_dev == dev &&
+ nexthop_nh->nh_scope != scope) {
+ nexthop_nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
spin_lock_bh(&fib_multipath_lock);
- fi->fib_power -= nh->nh_power;
- nh->nh_power = 0;
+ fi->fib_power -= nexthop_nh->nh_power;
+ nexthop_nh->nh_power = 0;
spin_unlock_bh(&fib_multipath_lock);
#endif
dead++;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (force > 1 && nh->nh_dev == dev) {
+ if (force > 1 && nexthop_nh->nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
@@ -1144,18 +1141,20 @@ int fib_sync_up(struct net_device *dev)
prev_fi = fi;
alive = 0;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
alive++;
continue;
}
- if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
+ if (nexthop_nh->nh_dev == NULL ||
+ !(nexthop_nh->nh_dev->flags&IFF_UP))
continue;
- if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev))
+ if (nexthop_nh->nh_dev != dev ||
+ !__in_dev_get_rtnl(dev))
continue;
alive++;
spin_lock_bh(&fib_multipath_lock);
- nh->nh_power = 0;
- nh->nh_flags &= ~RTNH_F_DEAD;
+ nexthop_nh->nh_power = 0;
+ nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
spin_unlock_bh(&fib_multipath_lock);
} endfor_nexthops(fi)
@@ -1182,9 +1181,9 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
if (fi->fib_power <= 0) {
int power = 0;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD)) {
- power += nh->nh_weight;
- nh->nh_power = nh->nh_weight;
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+ power += nexthop_nh->nh_weight;
+ nexthop_nh->nh_power = nexthop_nh->nh_weight;
}
} endfor_nexthops(fi);
fi->fib_power = power;
@@ -1204,9 +1203,10 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
w = jiffies % fi->fib_power;
change_nexthops(fi) {
- if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
- if ((w -= nh->nh_power) <= 0) {
- nh->nh_power--;
+ if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
+ nexthop_nh->nh_power) {
+ if ((w -= nexthop_nh->nh_power) <= 0) {
+ nexthop_nh->nh_power--;
fi->fib_power--;
res->nh_sel = nhsel;
spin_unlock_bh(&fib_multipath_lock);
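
The fib_semantics.c hunks above are a mechanical rename: the change_nexthops()/endfor_nexthops() iterator macros now bind their cursor as nexthop_nh instead of nh, so the cursor can no longer shadow an unrelated local named nh. A minimal sketch of the pattern, using made-up demo types rather than the kernel's struct fib_info/struct fib_nh (the real macros, defined at the top of fib_semantics.c, differ in detail):

struct fib_nh_demo { int nh_weight; };
struct fib_info_demo { int fib_nhs; struct fib_nh_demo fib_nh[4]; };

#define change_nexthops_demo(fi) {				\
	int nhsel;						\
	struct fib_nh_demo *nexthop_nh;				\
	for (nhsel = 0, nexthop_nh = (fi)->fib_nh;		\
	     nhsel < (fi)->fib_nhs;				\
	     nexthop_nh++, nhsel++)

#define endfor_nexthops_demo(fi) }

static int total_weight_demo(struct fib_info_demo *fi)
{
	int w = 0;

	change_nexthops_demo(fi) {
		/* the cursor has its own distinctive name, so it cannot
		 * shadow a caller-level variable called "nh" */
		w += nexthop_nh->nh_weight;
	} endfor_nexthops_demo(fi);
	return w;
}
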
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index fe11f60ce41..4b4c2bcd15d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -114,7 +114,7 @@ struct icmp_bxm {
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
-struct icmp_err icmp_err_convert[] = {
+const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c93..63bf298ca10 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
break;
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- case IGMPV3_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
if (skb_rtable(skb)->fl.iif == 0)
break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
in_dev_put(in_dev);
return pim_rcv_v1(skb);
#endif
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
case IGMP_DVMRP:
case IGMP_TRACE:
case IGMP_HOST_LEAVE_MESSAGE:
@@ -1799,7 +1799,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml->next = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
- inet->mc_list = iml;
+ rcu_assign_pointer(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
@@ -1807,24 +1807,46 @@ done:
return err;
}
+static void ip_sf_socklist_reclaim(struct rcu_head *rp)
+{
+ struct ip_sf_socklist *psf;
+
+ psf = container_of(rp, struct ip_sf_socklist, rcu);
+ /* sk_omem_alloc should have been decreased by the caller */
+ kfree(psf);
+}
+
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
+ struct ip_sf_socklist *psf = iml->sflist;
int err;
- if (iml->sflist == NULL) {
+ if (psf == NULL) {
/* any-source empty exclude case */
return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, 0, NULL, 0);
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
- iml->sfmode, iml->sflist->sl_count,
- iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ iml->sfmode, psf->sl_count, psf->sl_addr, 0);
+ rcu_assign_pointer(iml->sflist, NULL);
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
return err;
}
+
+static void ip_mc_socklist_reclaim(struct rcu_head *rp)
+{
+ struct ip_mc_socklist *iml;
+
+ iml = container_of(rp, struct ip_mc_socklist, rcu);
+ /* sk_omem_alloc should have been decreased by the caller */
+ kfree(iml);
+}
+
+
/*
* Ask a socket to leave a group.
*/
@@ -1854,12 +1876,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
(void) ip_mc_leave_src(sk, iml, in_dev);
- *imlp = iml->next;
+ rcu_assign_pointer(*imlp, iml->next);
if (in_dev)
ip_mc_dec_group(in_dev, group);
rtnl_unlock();
- sock_kfree_s(sk, iml, sizeof(*iml));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
return 0;
}
if (!in_dev)
@@ -1974,9 +1998,12 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
if (psl) {
for (i=0; i<psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
- sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
}
- pmc->sflist = psl = newpsl;
+ rcu_assign_pointer(pmc->sflist, newpsl);
+ psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
for (i=0; i<psl->sl_count; i++) {
@@ -2072,11 +2099,13 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
if (psl) {
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
psl->sl_count, psl->sl_addr, 0);
- sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
+ call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
- pmc->sflist = newpsl;
+ rcu_assign_pointer(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
@@ -2209,30 +2238,40 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
struct ip_mc_socklist *pmc;
struct ip_sf_socklist *psl;
int i;
+ int ret;
+ ret = 1;
if (!ipv4_is_multicast(loc_addr))
- return 1;
+ goto out;
- for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+ rcu_read_lock();
+ for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc->multi.imr_ifindex == dif)
break;
}
+ ret = inet->mc_all;
if (!pmc)
- return inet->mc_all;
+ goto unlock;
psl = pmc->sflist;
+ ret = (pmc->sfmode == MCAST_EXCLUDE);
if (!psl)
- return pmc->sfmode == MCAST_EXCLUDE;
+ goto unlock;
for (i=0; i<psl->sl_count; i++) {
if (psl->sl_addr[i] == rmt_addr)
break;
}
+ ret = 0;
if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
- return 0;
+ goto unlock;
if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
- return 0;
- return 1;
+ goto unlock;
+ ret = 1;
+unlock:
+ rcu_read_unlock();
+out:
+ return ret;
}
/*
@@ -2251,7 +2290,7 @@ void ip_mc_drop_socket(struct sock *sk)
rtnl_lock();
while ((iml = inet->mc_list) != NULL) {
struct in_device *in_dev;
- inet->mc_list = iml->next;
+ rcu_assign_pointer(inet->mc_list, iml->next);
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
@@ -2259,7 +2298,9 @@ void ip_mc_drop_socket(struct sock *sk)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
in_dev_put(in_dev);
}
- sock_kfree_s(sk, iml, sizeof(*iml));
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
}
rtnl_unlock();
}
@@ -2603,7 +2644,7 @@ static const struct file_operations igmp_mcf_seq_fops = {
.release = seq_release_net,
};
-static int igmp_net_init(struct net *net)
+static int __net_init igmp_net_init(struct net *net)
{
struct proc_dir_entry *pde;
@@ -2621,7 +2662,7 @@ out_igmp:
return -ENOMEM;
}
-static void igmp_net_exit(struct net *net)
+static void __net_exit igmp_net_exit(struct net *net)
{
proc_net_remove(net, "mcfilter");
proc_net_remove(net, "igmp");
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index ee16475f8fc..8da6429269d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -529,6 +529,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
syn_ack_recalc(req, thresh, max_retries,
queue->rskq_defer_accept,
&expire, &resend);
+ if (req->rsk_ops->syn_ack_timeout)
+ req->rsk_ops->syn_ack_timeout(parent, req);
if (!expire &&
(!resend ||
!req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 86964b353c3..b59430bc041 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -32,6 +32,8 @@
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
+#include <net/route.h>
+#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -205,11 +207,34 @@ static void ip_expire(unsigned long arg)
if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
struct sk_buff *head = qp->q.fragments;
- /* Send an ICMP "Fragment Reassembly Timeout" message. */
rcu_read_lock();
head->dev = dev_get_by_index_rcu(net, qp->iif);
- if (head->dev)
- icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+ if (!head->dev)
+ goto out_rcu_unlock;
+
+ /*
+ * Only search the route table for the head fragment,
+ * when defragmentation times out at the PRE_ROUTING hook.
+ */
+ if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
+ const struct iphdr *iph = ip_hdr(head);
+ int err = ip_route_input(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (unlikely(err))
+ goto out_rcu_unlock;
+
+ /*
+ * Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (skb_rtable(head)->rt_type != RTN_LOCAL)
+ goto out_rcu_unlock;
+
+ }
+
+ /* Send an ICMP "Fragment Reassembly Timeout" message. */
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+out_rcu_unlock:
rcu_read_unlock();
}
out:
@@ -646,7 +671,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
{ }
};
-static int ip4_frags_ns_ctl_register(struct net *net)
+static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
@@ -676,7 +701,7 @@ err_alloc:
return -ENOMEM;
}
-static void ip4_frags_ns_ctl_unregister(struct net *net)
+static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
struct ctl_table *table;
@@ -704,7 +729,7 @@ static inline void ip4_frags_ctl_register(void)
}
#endif
-static int ipv4_frags_init_net(struct net *net)
+static int __net_init ipv4_frags_init_net(struct net *net)
{
/*
* Fragment cache limits. We will commit 256K at one time. Should we
@@ -726,7 +751,7 @@ static int ipv4_frags_init_net(struct net *net)
return ip4_frags_ns_ctl_register(net);
}
-static void ipv4_frags_exit_net(struct net *net)
+static void __net_exit ipv4_frags_exit_net(struct net *net)
{
ip4_frags_ns_ctl_unregister(net);
inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f36ce156cac..c0c5274d027 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -793,7 +793,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
}
@@ -1307,7 +1307,7 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
}
}
-static int ipgre_init_net(struct net *net)
+static int __net_init ipgre_init_net(struct net *net)
{
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int err;
@@ -1334,7 +1334,7 @@ err_alloc_dev:
return err;
}
-static void ipgre_exit_net(struct net *net)
+static void __net_exit ipgre_exit_net(struct net *net)
{
struct ipgre_net *ign;
LIST_HEAD(list);
@@ -1665,14 +1665,15 @@ static int __init ipgre_init(void)
printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
- if (inet_add_protocol(&ipgre_protocol, IPPROTO_GRE) < 0) {
- printk(KERN_INFO "ipgre init: can't add protocol\n");
- return -EAGAIN;
- }
-
err = register_pernet_device(&ipgre_net_ops);
if (err < 0)
- goto gen_device_failed;
+ return err;
+
+ err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
+ if (err < 0) {
+ printk(KERN_INFO "ipgre init: can't add protocol\n");
+ goto add_proto_failed;
+ }
err = rtnl_link_register(&ipgre_link_ops);
if (err < 0)
@@ -1688,9 +1689,9 @@ out:
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- unregister_pernet_device(&ipgre_net_ops);
-gen_device_failed:
inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
+add_proto_failed:
+ unregister_pernet_device(&ipgre_net_ops);
goto out;
}
@@ -1698,9 +1699,9 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- unregister_pernet_device(&ipgre_net_ops);
if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
printk(KERN_INFO "ipgre close: can't remove protocol\n");
+ unregister_pernet_device(&ipgre_net_ops);
}
module_init(ipgre_init);
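
The ipgre_init()/ipgre_fini() hunks (and the matching ipip.c ones below) reorder registration so the per-netns state exists before the protocol handler is hooked, and they unwind in strict reverse order on failure and at module exit. A sketch of that ordering with stub demo_* functions standing in for the real register/unregister calls (placeholders, not kernel symbols):

#include <linux/init.h>
#include <linux/module.h>

static int demo_register_pernet(void)      { return 0; }  /* stand-ins that  */
static int demo_register_protocol(void)    { return 0; }  /* only illustrate */
static int demo_register_netlink(void)     { return 0; }  /* the ordering    */
static void demo_unregister_pernet(void)   { }
static void demo_unregister_protocol(void) { }
static void demo_unregister_netlink(void)  { }

static int __init demo_init(void)
{
	int err;

	err = demo_register_pernet();		/* per-netns state first */
	if (err < 0)
		return err;

	err = demo_register_protocol();		/* packets may arrive now */
	if (err < 0)
		goto out_pernet;

	err = demo_register_netlink();
	if (err < 0)
		goto out_protocol;
	return 0;

out_protocol:
	demo_unregister_protocol();
out_pernet:
	demo_unregister_pernet();
	return err;
}

static void __exit demo_exit(void)
{
	demo_unregister_netlink();
	demo_unregister_protocol();	/* stop new packets first... */
	demo_unregister_pernet();	/* ...then tear down per-netns state */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
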
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff0..644dc43a55d 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
+ (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+ (1<<IP_MINTTL))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
inet->transparent = !!val;
break;
+ case IP_MINTTL:
+ if (optlen < 1)
+ goto e_inval;
+ if (val < 0 || val > 255)
+ goto e_inval;
+ inet->min_ttl = val;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_TRANSPARENT:
val = inet->transparent;
break;
+ case IP_MINTTL:
+ val = inet->min_ttl;
+ break;
default:
release_sock(sk);
return -ENOPROTOOPT;
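
From userspace, the IP_MINTTL option added above is a plain setsockopt() at the IP level; it stores a per-socket minimum TTL (inet->min_ttl in the hunk), and the classic use is GTSM-style filtering (RFC 5082), where a socket accepts only packets sent with TTL 255 by a directly connected peer. A small hypothetical example (demo_require_ttl_255 is an illustrative name):

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

#ifndef IP_MINTTL
#define IP_MINTTL 21	/* value from linux/in.h; older libc headers may lack it */
#endif

static int demo_require_ttl_255(int fd)
{
	int minttl = 255;

	/* drop packets on this socket whose TTL is below 255 */
	if (setsockopt(fd, IPPROTO_IP, IP_MINTTL, &minttl, sizeof(minttl)) < 0) {
		perror("setsockopt(IP_MINTTL)");
		return -1;
	}
	return 0;
}

The matching do_ip_getsockopt() case in the hunk simply reports the currently configured minimum back to the caller.
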
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150a..629067571f0 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -25,6 +25,7 @@
static void ipcomp4_err(struct sk_buff *skb, u32 info)
{
+ struct net *net = dev_net(skb->dev);
__be32 spi;
struct iphdr *iph = (struct iphdr *)skb->data;
struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
@@ -35,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
return;
spi = htonl(ntohs(ipch->cpi));
- x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr,
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
@@ -47,9 +48,10 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
/* We always hold one tunnel user reference to indicate a tunnel */
static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
struct xfrm_state *t;
- t = xfrm_state_alloc(&init_net);
+ t = xfrm_state_alloc(net);
if (t == NULL)
goto out;
@@ -61,6 +63,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x)
t->props.mode = x->props.mode;
t->props.saddr.a4 = x->props.saddr.a4;
t->props.flags = x->props.flags;
+ memcpy(&t->mark, &x->mark, sizeof(t->mark));
if (xfrm_init_state(t))
goto error;
@@ -82,10 +85,12 @@ error:
*/
static int ipcomp_tunnel_attach(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
int err = 0;
struct xfrm_state *t;
+ u32 mark = x->mark.v & x->mark.m;
- t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr.a4,
+ t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr.a4,
x->props.saddr.a4, IPPROTO_IPIP, AF_INET);
if (!t) {
t = ipcomp_tunnel_create(x);
@@ -124,16 +129,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp_tunnel_attach(x);
if (err)
- goto error_tunnel;
+ goto out;
}
err = 0;
out:
return err;
-
-error_tunnel:
- ipcomp_destroy(x);
- goto out;
}
static const struct xfrm_type ipcomp_type = {
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index eda04fed337..2f302d3ac9a 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -130,7 +130,6 @@ struct ipip_net {
struct net_device *fb_tunnel_dev;
};
-static void ipip_fb_tunnel_init(struct net_device *dev);
static void ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
@@ -730,7 +729,7 @@ static void ipip_tunnel_init(struct net_device *dev)
ipip_tunnel_bind_dev(dev);
}
-static void ipip_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -773,7 +772,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
}
}
-static int ipip_init_net(struct net *net)
+static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
int err;
@@ -806,7 +805,7 @@ err_alloc_dev:
return err;
}
-static void ipip_exit_net(struct net *net)
+static void __net_exit ipip_exit_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
LIST_HEAD(list);
@@ -831,15 +830,14 @@ static int __init ipip_init(void)
printk(banner);
- if (xfrm4_tunnel_register(&ipip_handler, AF_INET)) {
+ err = register_pernet_device(&ipip_net_ops);
+ if (err < 0)
+ return err;
+ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+ if (err < 0) {
+ unregister_pernet_device(&ipip_net_ops);
printk(KERN_INFO "ipip init: can't register tunnel\n");
- return -EAGAIN;
}
-
- err = register_pernet_device(&ipip_net_ops);
- if (err)
- xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
-
return err;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 54596f73eff..8582e12e4a6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1163,9 +1163,6 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
int ct;
LIST_HEAD(list);
- if (!net_eq(dev_net(dev), net))
- return NOTIFY_DONE;
-
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
v = &net->ipv4.vif_table[0];
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5..f07d77f6575 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -27,6 +27,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
+#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
@@ -58,6 +59,12 @@ do { \
#define ARP_NF_ASSERT(x)
#endif
+void *arpt_alloc_initial_table(const struct xt_table *info)
+{
+ return xt_alloc_initial_table(arpt, ARPT);
+}
+EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);
+
static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
const char *hdr_addr, int len)
{
@@ -226,7 +233,14 @@ arpt_error(struct sk_buff *skb, const struct xt_target_param *par)
return NF_DROP;
}
-static inline struct arpt_entry *get_entry(void *base, unsigned int offset)
+static inline const struct arpt_entry_target *
+arpt_get_target_c(const struct arpt_entry *e)
+{
+ return arpt_get_target((struct arpt_entry *)e);
+}
+
+static inline struct arpt_entry *
+get_entry(const void *base, unsigned int offset)
{
return (struct arpt_entry *)(base + offset);
}
@@ -273,7 +287,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
arp = arp_hdr(skb);
do {
- struct arpt_entry_target *t;
+ const struct arpt_entry_target *t;
int hdr_len;
if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
@@ -285,7 +299,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
(2 * skb->dev->addr_len);
ADD_COUNTER(e->counters, hdr_len, 1);
- t = arpt_get_target(e);
+ t = arpt_get_target_c(e);
/* Standard target? */
if (!t->u.kernel.target->target) {
@@ -351,7 +365,7 @@ static inline bool unconditional(const struct arpt_arp *arp)
/* Figures out from what hook each rule can be called: returns 0 if
* there are loops. Puts hook bitmask in comefrom.
*/
-static int mark_source_chains(struct xt_table_info *newinfo,
+static int mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -372,7 +386,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
for (;;) {
const struct arpt_standard_target *t
- = (void *)arpt_get_target(e);
+ = (void *)arpt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_ARP_NUMHOOKS)) {
@@ -456,7 +470,7 @@ static int mark_source_chains(struct xt_table_info *newinfo,
return 1;
}
-static inline int check_entry(struct arpt_entry *e, const char *name)
+static inline int check_entry(const struct arpt_entry *e, const char *name)
{
const struct arpt_entry_target *t;
@@ -468,7 +482,7 @@ static inline int check_entry(struct arpt_entry *e, const char *name)
if (e->target_offset + sizeof(struct arpt_entry_target) > e->next_offset)
return -EINVAL;
- t = arpt_get_target(e);
+ t = arpt_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;
@@ -498,8 +512,7 @@ static inline int check_target(struct arpt_entry *e, const char *name)
}
static inline int
-find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
- unsigned int *i)
+find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
{
struct arpt_entry_target *t;
struct xt_target *target;
@@ -524,8 +537,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size,
ret = check_target(e, name);
if (ret)
goto err;
-
- (*i)++;
return 0;
err:
module_put(t->u.kernel.target->me);
@@ -533,14 +544,14 @@ out:
return ret;
}
-static bool check_underflow(struct arpt_entry *e)
+static bool check_underflow(const struct arpt_entry *e)
{
const struct arpt_entry_target *t;
unsigned int verdict;
if (!unconditional(&e->arp))
return false;
- t = arpt_get_target(e);
+ t = arpt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct arpt_standard_target *)t)->verdict;
@@ -550,12 +561,11 @@ static bool check_underflow(struct arpt_entry *e)
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
- unsigned char *base,
- unsigned char *limit,
+ const unsigned char *base,
+ const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
- unsigned int valid_hooks,
- unsigned int *i)
+ unsigned int valid_hooks)
{
unsigned int h;
@@ -592,19 +602,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
-
- (*i)++;
return 0;
}
-static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
+static inline void cleanup_entry(struct arpt_entry *e)
{
struct xt_tgdtor_param par;
struct arpt_entry_target *t;
- if (i && (*i)-- == 0)
- return 1;
-
t = arpt_get_target(e);
par.target = t->u.kernel.target;
par.targinfo = t->data;
@@ -612,26 +617,20 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
- return 0;
}
/* Checks and translates the user-supplied table segment (held in
* newinfo).
*/
-static int translate_table(const char *name,
- unsigned int valid_hooks,
- struct xt_table_info *newinfo,
- void *entry0,
- unsigned int size,
- unsigned int number,
- const unsigned int *hook_entries,
- const unsigned int *underflows)
+static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ const struct arpt_replace *repl)
{
+ struct arpt_entry *iter;
unsigned int i;
- int ret;
+ int ret = 0;
- newinfo->size = size;
- newinfo->number = number;
+ newinfo->size = repl->size;
+ newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
@@ -643,52 +642,63 @@ static int translate_table(const char *name,
i = 0;
/* Walk through entries, checking offsets. */
- ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
- check_entry_size_and_hooks,
- newinfo,
- entry0,
- entry0 + size,
- hook_entries, underflows, valid_hooks, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = check_entry_size_and_hooks(iter, newinfo, entry0,
+ entry0 + repl->size,
+ repl->hook_entry,
+ repl->underflow,
+ repl->valid_hooks);
+ if (ret != 0)
+ break;
+ ++i;
+ }
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
return ret;
- if (i != number) {
+ if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
- i, number);
+ i, repl->num_entries);
return -EINVAL;
}
/* Check hooks all assigned */
for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
/* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
+ if (!(repl->valid_hooks & (1 << i)))
continue;
if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
+ i, repl->hook_entry[i]);
return -EINVAL;
}
if (newinfo->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
+ i, repl->underflow[i]);
return -EINVAL;
}
}
- if (!mark_source_chains(newinfo, valid_hooks, entry0)) {
+ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
duprintf("Looping hook\n");
return -ELOOP;
}
/* Finally, each sanity check must pass */
i = 0;
- ret = ARPT_ENTRY_ITERATE(entry0, newinfo->size,
- find_check_entry, name, size, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = find_check_entry(iter, repl->name, repl->size);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret != 0) {
- ARPT_ENTRY_ITERATE(entry0, newinfo->size,
- cleanup_entry, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter);
+ }
return ret;
}
@@ -701,30 +711,10 @@ static int translate_table(const char *name,
return ret;
}
-/* Gets counters. */
-static inline int add_entry_to_counter(const struct arpt_entry *e,
- struct xt_counters total[],
- unsigned int *i)
-{
- ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
-static inline int set_entry_to_counter(const struct arpt_entry *e,
- struct xt_counters total[],
- unsigned int *i)
-{
- SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
static void get_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
+ struct arpt_entry *iter;
unsigned int cpu;
unsigned int i;
unsigned int curcpu;
@@ -740,32 +730,32 @@ static void get_counters(const struct xt_table_info *t,
curcpu = smp_processor_id();
i = 0;
- ARPT_ENTRY_ITERATE(t->entries[curcpu],
- t->size,
- set_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[curcpu], t->size) {
+ SET_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i;
+ }
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
xt_info_wrlock(cpu);
- ARPT_ENTRY_ITERATE(t->entries[cpu],
- t->size,
- add_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[cpu], t->size) {
+ ADD_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i;
+ }
xt_info_wrunlock(cpu);
}
local_bh_enable();
}
-static struct xt_counters *alloc_counters(struct xt_table *table)
+static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
* (other than comefrom, which userspace doesn't care
@@ -783,11 +773,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
}
static int copy_entries_to_user(unsigned int total_size,
- struct xt_table *table,
+ const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
- struct arpt_entry *e;
+ const struct arpt_entry *e;
struct xt_counters *counters;
struct xt_table_info *private = table->private;
int ret = 0;
@@ -807,7 +797,7 @@ static int copy_entries_to_user(unsigned int total_size,
/* FIXME: use iterator macros --RR */
/* ... then go back and fix counters and names */
for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
- struct arpt_entry_target *t;
+ const struct arpt_entry_target *t;
e = (struct arpt_entry *)(loc_cpu_entry + off);
if (copy_to_user(userptr + off
@@ -818,7 +808,7 @@ static int copy_entries_to_user(unsigned int total_size,
goto free_counters;
}
- t = arpt_get_target(e);
+ t = arpt_get_target_c(e);
if (copy_to_user(userptr + off + e->target_offset
+ offsetof(struct arpt_entry_target,
u.user.name),
@@ -835,7 +825,7 @@ static int copy_entries_to_user(unsigned int total_size,
}
#ifdef CONFIG_COMPAT
-static void compat_standard_from_user(void *dst, void *src)
+static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
@@ -844,7 +834,7 @@ static void compat_standard_from_user(void *dst, void *src)
memcpy(dst, &v, sizeof(v));
}
-static int compat_standard_to_user(void __user *dst, void *src)
+static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
@@ -853,18 +843,18 @@ static int compat_standard_to_user(void __user *dst, void *src)
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
-static int compat_calc_entry(struct arpt_entry *e,
+static int compat_calc_entry(const struct arpt_entry *e,
const struct xt_table_info *info,
- void *base, struct xt_table_info *newinfo)
+ const void *base, struct xt_table_info *newinfo)
{
- struct arpt_entry_target *t;
+ const struct arpt_entry_target *t;
unsigned int entry_offset;
int off, i, ret;
off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
entry_offset = (void *)e - base;
- t = arpt_get_target(e);
+ t = arpt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
@@ -885,7 +875,9 @@ static int compat_calc_entry(struct arpt_entry *e,
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
+ struct arpt_entry *iter;
void *loc_cpu_entry;
+ int ret;
if (!newinfo || !info)
return -EINVAL;
@@ -894,13 +886,17 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
- return ARPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
- compat_calc_entry, info, loc_cpu_entry,
- newinfo);
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
}
#endif
-static int get_info(struct net *net, void __user *user, int *len, int compat)
+static int get_info(struct net *net, void __user *user,
+ const int *len, int compat)
{
char name[ARPT_TABLE_MAXNAMELEN];
struct xt_table *t;
@@ -925,10 +921,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(NFPROTO_ARP);
private = &tmp;
@@ -959,7 +955,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
}
static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
- int *len)
+ const int *len)
{
int ret;
struct arpt_get_entries get;
@@ -1010,6 +1006,7 @@ static int __do_replace(struct net *net, const char *name,
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
+ struct arpt_entry *iter;
ret = 0;
counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1053,8 +1050,8 @@ static int __do_replace(struct net *net, const char *name,
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
- ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
- NULL);
+ xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+ cleanup_entry(iter);
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
@@ -1073,12 +1070,14 @@ static int __do_replace(struct net *net, const char *name,
return ret;
}
-static int do_replace(struct net *net, void __user *user, unsigned int len)
+static int do_replace(struct net *net, const void __user *user,
+ unsigned int len)
{
int ret;
struct arpt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct arpt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1099,9 +1098,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_table(tmp.name, tmp.valid_hooks,
- newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
- tmp.hook_entry, tmp.underflow);
+ ret = translate_table(newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1114,27 +1111,15 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
return 0;
free_newinfo_untrans:
- ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct arpt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
-static int do_add_counters(struct net *net, void __user *user, unsigned int len,
- int compat)
+static int do_add_counters(struct net *net, const void __user *user,
+ unsigned int len, int compat)
{
unsigned int i, curcpu;
struct xt_counters_info tmp;
@@ -1147,6 +1132,7 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
+ struct arpt_entry *iter;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
@@ -1204,11 +1190,10 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
curcpu = smp_processor_id();
loc_cpu_entry = private->entries[curcpu];
xt_info_wrlock(curcpu);
- ARPT_ENTRY_ITERATE(loc_cpu_entry,
- private->size,
- add_counter_to_entry,
- paddc,
- &i);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+ ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+ ++i;
+ }
xt_info_wrunlock(curcpu);
unlock_up_free:
local_bh_enable();
@@ -1221,28 +1206,22 @@ static int do_add_counters(struct net *net, void __user *user, unsigned int len,
}
#ifdef CONFIG_COMPAT
-static inline int
-compat_release_entry(struct compat_arpt_entry *e, unsigned int *i)
+static inline void compat_release_entry(struct compat_arpt_entry *e)
{
struct arpt_entry_target *t;
- if (i && (*i)-- == 0)
- return 1;
-
t = compat_arpt_get_target(e);
module_put(t->u.kernel.target->me);
- return 0;
}
static inline int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
- unsigned char *base,
- unsigned char *limit,
- unsigned int *hook_entries,
- unsigned int *underflows,
- unsigned int *i,
+ const unsigned char *base,
+ const unsigned char *limit,
+ const unsigned int *hook_entries,
+ const unsigned int *underflows,
const char *name)
{
struct arpt_entry_target *t;
@@ -1302,8 +1281,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
-
- (*i)++;
return 0;
release_target:
@@ -1347,19 +1324,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
return ret;
}
-static inline int compat_check_entry(struct arpt_entry *e, const char *name,
- unsigned int *i)
-{
- int ret;
-
- ret = check_target(e, name);
- if (ret)
- return ret;
-
- (*i)++;
- return 0;
-}
-
static int translate_compat_table(const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
@@ -1372,8 +1336,10 @@ static int translate_compat_table(const char *name,
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
+ struct compat_arpt_entry *iter0;
+ struct arpt_entry *iter1;
unsigned int size;
- int ret;
+ int ret = 0;
info = *pinfo;
entry0 = *pentry0;
@@ -1390,13 +1356,17 @@ static int translate_compat_table(const char *name,
j = 0;
xt_compat_lock(NFPROTO_ARP);
/* Walk through entries, checking offsets. */
- ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
- check_compat_entry_size_and_hooks,
- info, &size, entry0,
- entry0 + total_size,
- hook_entries, underflows, &j, name);
- if (ret != 0)
- goto out_unlock;
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ entry0,
+ entry0 + total_size,
+ hook_entries,
+ underflows,
+ name);
+ if (ret != 0)
+ goto out_unlock;
+ ++j;
+ }
ret = -EINVAL;
if (j != number) {
@@ -1435,9 +1405,12 @@ static int translate_compat_table(const char *name,
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
size = total_size;
- ret = COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size,
- compat_copy_entry_from_user,
- &pos, &size, name, newinfo, entry1);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = compat_copy_entry_from_user(iter0, &pos, &size,
+ name, newinfo, entry1);
+ if (ret != 0)
+ break;
+ }
xt_compat_flush_offsets(NFPROTO_ARP);
xt_compat_unlock(NFPROTO_ARP);
if (ret)
@@ -1448,13 +1421,32 @@ static int translate_compat_table(const char *name,
goto free_newinfo;
i = 0;
- ret = ARPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
- name, &i);
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ ret = check_target(iter1, name);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret) {
+ /*
+ * The first i matches need cleanup_entry (calls ->destroy)
+ * because they had called ->check already. The other j-i
+ * entries need only release.
+ */
+ int skip = i;
j -= i;
- COMPAT_ARPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
- compat_release_entry, &j);
- ARPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
+ xt_entry_foreach(iter0, entry0, newinfo->size) {
+ if (skip-- > 0)
+ continue;
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter1);
+ }
xt_free_table_info(newinfo);
return ret;
}
@@ -1472,7 +1464,11 @@ static int translate_compat_table(const char *name,
free_newinfo:
xt_free_table_info(newinfo);
out:
- COMPAT_ARPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
return ret;
out_unlock:
xt_compat_flush_offsets(NFPROTO_ARP);
@@ -1499,6 +1495,7 @@ static int compat_do_replace(struct net *net, void __user *user,
struct compat_arpt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct arpt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1536,7 +1533,8 @@ static int compat_do_replace(struct net *net, void __user *user,
return 0;
free_newinfo_untrans:
- ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1570,7 +1568,7 @@ static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
compat_uint_t *size,
struct xt_counters *counters,
- unsigned int *i)
+ unsigned int i)
{
struct arpt_entry_target *t;
struct compat_arpt_entry __user *ce;
@@ -1578,14 +1576,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
compat_uint_t origsize;
int ret;
- ret = -EFAULT;
origsize = *size;
ce = (struct compat_arpt_entry __user *)*dstptr;
- if (copy_to_user(ce, e, sizeof(struct arpt_entry)))
- goto out;
-
- if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
- goto out;
+ if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+ sizeof(counters[i])) != 0)
+ return -EFAULT;
*dstptr += sizeof(struct compat_arpt_entry);
*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
@@ -1595,18 +1591,12 @@ static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
t = arpt_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
- goto out;
- ret = -EFAULT;
+ return ret;
next_offset = e->next_offset - (origsize - *size);
- if (put_user(target_offset, &ce->target_offset))
- goto out;
- if (put_user(next_offset, &ce->next_offset))
- goto out;
-
- (*i)++;
+ if (put_user(target_offset, &ce->target_offset) != 0 ||
+ put_user(next_offset, &ce->next_offset) != 0)
+ return -EFAULT;
return 0;
-out:
- return ret;
}
static int compat_copy_entries_to_user(unsigned int total_size,
@@ -1620,6 +1610,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
int ret = 0;
void *loc_cpu_entry;
unsigned int i = 0;
+ struct arpt_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -1629,9 +1620,12 @@ static int compat_copy_entries_to_user(unsigned int total_size,
loc_cpu_entry = private->entries[raw_smp_processor_id()];
pos = userptr;
size = total_size;
- ret = ARPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
- compat_copy_entry_to_user,
- &pos, &size, counters, &i);
+ xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+ ret = compat_copy_entry_to_user(iter, &pos,
+ &size, counters, i++);
+ if (ret != 0)
+ break;
+ }
vfree(counters);
return ret;
}
@@ -1799,12 +1793,7 @@ struct xt_table *arpt_register_table(struct net *net,
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(table->name, table->valid_hooks,
- newinfo, loc_cpu_entry, repl->size,
- repl->num_entries,
- repl->hook_entry,
- repl->underflow);
-
+ ret = translate_table(newinfo, loc_cpu_entry, repl);
duprintf("arpt_register_table: translate table gives %d\n", ret);
if (ret != 0)
goto out_free;
@@ -1827,13 +1816,14 @@ void arpt_unregister_table(struct xt_table *table)
struct xt_table_info *private;
void *loc_cpu_entry;
struct module *table_owner = table->me;
+ struct arpt_entry *iter;
private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
- ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
- cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size)
+ cleanup_entry(iter);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
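
These arp_tables.c hunks (and the ip_tables.c ones that follow) replace the callback-based ARPT_ENTRY_ITERATE()/COMPAT_ARPT_ENTRY_ITERATE() macros with open-coded xt_entry_foreach() loops, which also lets the little counter-callback helpers disappear. Rules are variable-length records packed back to back, so the iterator advances by each entry's next_offset rather than by sizeof(*entry). A rough sketch with demo types (the kernel's own xt_entry_foreach() lives in include/linux/netfilter/x_tables.h and differs only in detail):

struct demo_entry {
	unsigned int next_offset;	/* total size of this rule */
	/* matches and the target record follow in the same allocation */
};

#define demo_entry_foreach(pos, ehead, esize)					\
	for ((pos) = (struct demo_entry *)(ehead);				\
	     (pos) < (struct demo_entry *)((char *)(ehead) + (esize));		\
	     (pos) = (struct demo_entry *)((char *)(pos) + (pos)->next_offset))

static unsigned int demo_count_rules(void *blob, unsigned int size)
{
	struct demo_entry *iter;
	unsigned int n = 0;

	/* the loop body replaces what used to be a callback; note that
	 * next_offset must have been validated beforehand (as
	 * check_entry_size_and_hooks() does), or a zero offset would
	 * never terminate */
	demo_entry_foreach(iter, blob, size)
		++n;
	return n;
}
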
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 97337601827..bfe26f32b93 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
@@ -15,93 +16,37 @@ MODULE_DESCRIPTION("arptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
(1 << NF_ARP_FORWARD))
-static const struct
-{
- struct arpt_replace repl;
- struct arpt_standard entries[3];
- struct arpt_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "filter",
- .valid_hooks = FILTER_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct arpt_standard) * 3 + sizeof(struct arpt_error),
- .hook_entry = {
- [NF_ARP_IN] = 0,
- [NF_ARP_OUT] = sizeof(struct arpt_standard),
- [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
- },
- .underflow = {
- [NF_ARP_IN] = 0,
- [NF_ARP_OUT] = sizeof(struct arpt_standard),
- [NF_ARP_FORWARD] = 2 * sizeof(struct arpt_standard),
- },
- },
- .entries = {
- ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_IN */
- ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_OUT */
- ARPT_STANDARD_INIT(NF_ACCEPT), /* ARP_FORWARD */
- },
- .term = ARPT_ERROR_INIT,
-};
-
static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_ARP,
+ .priority = NF_IP_PRI_FILTER,
};
/* The work comes in here from netfilter.c */
-static unsigned int arpt_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int
+arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return arpt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.arptable_filter);
-}
+ const struct net *net = dev_net((in != NULL) ? in : out);
-static unsigned int arpt_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return arpt_do_table(skb, hook, in, out,
- dev_net(out)->ipv4.arptable_filter);
+ return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
}
-static struct nf_hook_ops arpt_ops[] __read_mostly = {
- {
- .hook = arpt_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_ARP,
- .hooknum = NF_ARP_IN,
- .priority = NF_IP_PRI_FILTER,
- },
- {
- .hook = arpt_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_ARP,
- .hooknum = NF_ARP_OUT,
- .priority = NF_IP_PRI_FILTER,
- },
- {
- .hook = arpt_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_ARP,
- .hooknum = NF_ARP_FORWARD,
- .priority = NF_IP_PRI_FILTER,
- },
-};
+static struct nf_hook_ops *arpfilter_ops __read_mostly;
static int __net_init arptable_filter_net_init(struct net *net)
{
- /* Register table */
+ struct arpt_replace *repl;
+
+ repl = arpt_alloc_initial_table(&packet_filter);
+ if (repl == NULL)
+ return -ENOMEM;
net->ipv4.arptable_filter =
- arpt_register_table(net, &packet_filter, &initial_table.repl);
+ arpt_register_table(net, &packet_filter, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.arptable_filter))
return PTR_ERR(net->ipv4.arptable_filter);
return 0;
@@ -125,9 +70,11 @@ static int __init arptable_filter_init(void)
if (ret < 0)
return ret;
- ret = nf_register_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
- if (ret < 0)
+ arpfilter_ops = xt_hook_link(&packet_filter, arptable_filter_hook);
+ if (IS_ERR(arpfilter_ops)) {
+ ret = PTR_ERR(arpfilter_ops);
goto cleanup_table;
+ }
return ret;
cleanup_table:
@@ -137,7 +84,7 @@ cleanup_table:
static void __exit arptable_filter_fini(void)
{
- nf_unregister_hooks(arpt_ops, ARRAY_SIZE(arpt_ops));
+ xt_hook_unlink(&packet_filter, arpfilter_ops);
unregister_pernet_subsys(&arptable_filter_net_ops);
}
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552e..b29c66df8d1 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -28,6 +28,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
+#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -66,6 +67,12 @@ do { \
#define inline
#endif
+void *ipt_alloc_initial_table(const struct xt_table *info)
+{
+ return xt_alloc_initial_table(ipt, IPT);
+}
+EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
+
/*
We keep a set of rules for each CPU, so we can avoid write-locking
them in the softirq when updating the counters and therefore
@@ -169,7 +176,7 @@ ipt_error(struct sk_buff *skb, const struct xt_target_param *par)
/* Performance critical - called for every packet */
static inline bool
-do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
+do_match(const struct ipt_entry_match *m, const struct sk_buff *skb,
struct xt_match_param *par)
{
par->match = m->u.kernel.match;
@@ -184,7 +191,7 @@ do_match(struct ipt_entry_match *m, const struct sk_buff *skb,
/* Performance critical */
static inline struct ipt_entry *
-get_entry(void *base, unsigned int offset)
+get_entry(const void *base, unsigned int offset)
{
return (struct ipt_entry *)(base + offset);
}
@@ -199,6 +206,13 @@ static inline bool unconditional(const struct ipt_ip *ip)
#undef FWINV
}
+/* for const-correctness */
+static inline const struct ipt_entry_target *
+ipt_get_target_c(const struct ipt_entry *e)
+{
+ return ipt_get_target((struct ipt_entry *)e);
+}
+
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
static const char *const hooknames[] = {
@@ -233,11 +247,11 @@ static struct nf_loginfo trace_loginfo = {
/* Mildly perf critical (only if packet tracing is on) */
static inline int
-get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
+get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
const char *hookname, const char **chainname,
const char **comment, unsigned int *rulenum)
{
- struct ipt_standard_target *t = (void *)ipt_get_target(s);
+ const struct ipt_standard_target *t = (void *)ipt_get_target_c(s);
if (strcmp(t->target.u.kernel.target->name, IPT_ERROR_TARGET) == 0) {
/* Head of user chain: ERROR target with chainname */
@@ -263,17 +277,18 @@ get_chainname_rulenum(struct ipt_entry *s, struct ipt_entry *e,
return 0;
}
-static void trace_packet(struct sk_buff *skb,
+static void trace_packet(const struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
const char *tablename,
- struct xt_table_info *private,
- struct ipt_entry *e)
+ const struct xt_table_info *private,
+ const struct ipt_entry *e)
{
- void *table_base;
+ const void *table_base;
const struct ipt_entry *root;
const char *hookname, *chainname, *comment;
+ const struct ipt_entry *iter;
unsigned int rulenum = 0;
table_base = private->entries[smp_processor_id()];
@@ -282,10 +297,10 @@ static void trace_packet(struct sk_buff *skb,
hookname = chainname = hooknames[hook];
comment = comments[NF_IP_TRACE_COMMENT_RULE];
- IPT_ENTRY_ITERATE(root,
- private->size - private->hook_entry[hook],
- get_chainname_rulenum,
- e, hookname, &chainname, &comment, &rulenum);
+ xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
+ if (get_chainname_rulenum(iter, e, hookname,
+ &chainname, &comment, &rulenum) != 0)
+ break;
nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo,
"TRACE: %s:%s:%s:%u ",
@@ -315,9 +330,9 @@ ipt_do_table(struct sk_buff *skb,
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
- void *table_base;
+ const void *table_base;
struct ipt_entry *e, *back;
- struct xt_table_info *private;
+ const struct xt_table_info *private;
struct xt_match_param mtpar;
struct xt_target_param tgpar;
@@ -350,17 +365,22 @@ ipt_do_table(struct sk_buff *skb,
back = get_entry(table_base, private->underflow[hook]);
do {
- struct ipt_entry_target *t;
+ const struct ipt_entry_target *t;
+ const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
IP_NF_ASSERT(back);
if (!ip_packet_match(ip, indev, outdev,
- &e->ip, mtpar.fragoff) ||
- IPT_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
+ &e->ip, mtpar.fragoff)) {
+ no_match:
e = ipt_next_entry(e);
continue;
}
+ xt_ematch_foreach(ematch, e)
+ if (do_match(ematch, skb, &mtpar) != 0)
+ goto no_match;
+
ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);
t = ipt_get_target(e);
@@ -443,7 +463,7 @@ ipt_do_table(struct sk_buff *skb,
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
-mark_source_chains(struct xt_table_info *newinfo,
+mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -461,8 +481,8 @@ mark_source_chains(struct xt_table_info *newinfo,
e->counters.pcnt = pos;
for (;;) {
- struct ipt_standard_target *t
- = (void *)ipt_get_target(e);
+ const struct ipt_standard_target *t
+ = (void *)ipt_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -552,27 +572,23 @@ mark_source_chains(struct xt_table_info *newinfo,
return 1;
}
-static int
-cleanup_match(struct ipt_entry_match *m, unsigned int *i)
+static void cleanup_match(struct ipt_entry_match *m, struct net *net)
{
struct xt_mtdtor_param par;
- if (i && (*i)-- == 0)
- return 1;
-
+ par.net = net;
par.match = m->u.kernel.match;
par.matchinfo = m->data;
par.family = NFPROTO_IPV4;
if (par.match->destroy != NULL)
par.match->destroy(&par);
module_put(par.match->me);
- return 0;
}
static int
-check_entry(struct ipt_entry *e, const char *name)
+check_entry(const struct ipt_entry *e, const char *name)
{
- struct ipt_entry_target *t;
+ const struct ipt_entry_target *t;
if (!ip_checkentry(&e->ip)) {
duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -583,7 +599,7 @@ check_entry(struct ipt_entry *e, const char *name)
e->next_offset)
return -EINVAL;
- t = ipt_get_target(e);
+ t = ipt_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;
@@ -591,8 +607,7 @@ check_entry(struct ipt_entry *e, const char *name)
}
static int
-check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
- unsigned int *i)
+check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
const struct ipt_ip *ip = par->entryinfo;
int ret;
@@ -607,13 +622,11 @@ check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
par.match->name);
return ret;
}
- ++*i;
return 0;
}
static int
-find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
- unsigned int *i)
+find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par)
{
struct xt_match *match;
int ret;
@@ -627,7 +640,7 @@ find_check_match(struct ipt_entry_match *m, struct xt_mtchk_param *par,
}
m->u.kernel.match = match;
- ret = check_match(m, par, i);
+ ret = check_match(m, par);
if (ret)
goto err;
@@ -637,10 +650,11 @@ err:
return ret;
}
-static int check_target(struct ipt_entry *e, const char *name)
+static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
struct ipt_entry_target *t = ipt_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
@@ -661,27 +675,32 @@ static int check_target(struct ipt_entry *e, const char *name)
}
static int
-find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
- unsigned int *i)
+find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
+ unsigned int size)
{
struct ipt_entry_target *t;
struct xt_target *target;
int ret;
unsigned int j;
struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
ret = check_entry(e, name);
if (ret)
return ret;
j = 0;
+ mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ip;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV4;
- ret = IPT_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
- if (ret != 0)
- goto cleanup_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = find_check_match(ematch, &mtpar);
+ if (ret != 0)
+ goto cleanup_matches;
+ ++j;
+ }
t = ipt_get_target(e);
target = try_then_request_module(xt_find_target(AF_INET,
@@ -695,27 +714,29 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
-
- (*i)++;
return 0;
err:
module_put(t->u.kernel.target->me);
cleanup_matches:
- IPT_MATCH_ITERATE(e, cleanup_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ cleanup_match(ematch, net);
+ }
return ret;
}
-static bool check_underflow(struct ipt_entry *e)
+static bool check_underflow(const struct ipt_entry *e)
{
const struct ipt_entry_target *t;
unsigned int verdict;
if (!unconditional(&e->ip))
return false;
- t = ipt_get_target(e);
+ t = ipt_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct ipt_standard_target *)t)->verdict;
@@ -726,12 +747,11 @@ static bool check_underflow(struct ipt_entry *e)
static int
check_entry_size_and_hooks(struct ipt_entry *e,
struct xt_table_info *newinfo,
- unsigned char *base,
- unsigned char *limit,
+ const unsigned char *base,
+ const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
- unsigned int valid_hooks,
- unsigned int *i)
+ unsigned int valid_hooks)
{
unsigned int h;
@@ -768,50 +788,42 @@ check_entry_size_and_hooks(struct ipt_entry *e,
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
-
- (*i)++;
return 0;
}
-static int
-cleanup_entry(struct ipt_entry *e, unsigned int *i)
+static void
+cleanup_entry(struct ipt_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct ipt_entry_target *t;
-
- if (i && (*i)-- == 0)
- return 1;
+ struct xt_entry_match *ematch;
/* Cleanup all matches */
- IPT_MATCH_ITERATE(e, cleanup_match, NULL);
+ xt_ematch_foreach(ematch, e)
+ cleanup_match(ematch, net);
t = ipt_get_target(e);
+ par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_IPV4;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
- return 0;
}
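
xt_entry_foreach()/xt_ematch_foreach() replace the callback macros with plain iteration: each ipt_entry records its own length in next_offset, so walking a table blob is just advancing a byte pointer. The sketch below models that layout with a simplified entry type; the field names and fixed-size records are illustrative only, not the kernel structures.

/* Simplified model of offset-based entry iteration: each record starts
 * with its total length, so the walk advances by that many bytes. */
#include <stdio.h>
#include <stdint.h>

struct fake_entry {
	uint16_t next_offset;   /* total size of this entry, in bytes */
	uint16_t verdict;
};

static void walk(const unsigned char *blob, size_t size)
{
	const unsigned char *pos = blob;

	while (pos < blob + size) {
		const struct fake_entry *e = (const struct fake_entry *)pos;

		printf("entry at %zu, verdict %u\n",
		       (size_t)(pos - blob), e->verdict);
		pos += e->next_offset;          /* jump to the next entry */
	}
}

int main(void)
{
	struct fake_entry entries[2] = {
		{ .next_offset = sizeof(struct fake_entry), .verdict = 1 },
		{ .next_offset = sizeof(struct fake_entry), .verdict = 2 },
	};

	walk((const unsigned char *)entries, sizeof(entries));
	return 0;
}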
/* Checks and translates the user-supplied table segment (held in
newinfo) */
static int
-translate_table(const char *name,
- unsigned int valid_hooks,
- struct xt_table_info *newinfo,
- void *entry0,
- unsigned int size,
- unsigned int number,
- const unsigned int *hook_entries,
- const unsigned int *underflows)
+translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ const struct ipt_replace *repl)
{
+ struct ipt_entry *iter;
unsigned int i;
- int ret;
+ int ret = 0;
- newinfo->size = size;
- newinfo->number = number;
+ newinfo->size = repl->size;
+ newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -822,49 +834,58 @@ translate_table(const char *name,
duprintf("translate_table: size %u\n", newinfo->size);
i = 0;
/* Walk through entries, checking offsets. */
- ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
- check_entry_size_and_hooks,
- newinfo,
- entry0,
- entry0 + size,
- hook_entries, underflows, valid_hooks, &i);
- if (ret != 0)
- return ret;
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = check_entry_size_and_hooks(iter, newinfo, entry0,
+ entry0 + repl->size,
+ repl->hook_entry,
+ repl->underflow,
+ repl->valid_hooks);
+ if (ret != 0)
+ return ret;
+ ++i;
+ }
- if (i != number) {
+ if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
- i, number);
+ i, repl->num_entries);
return -EINVAL;
}
/* Check hooks all assigned */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
/* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
+ if (!(repl->valid_hooks & (1 << i)))
continue;
if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
+ i, repl->hook_entry[i]);
return -EINVAL;
}
if (newinfo->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
+ i, repl->underflow[i]);
return -EINVAL;
}
}
- if (!mark_source_chains(newinfo, valid_hooks, entry0))
+ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
return -ELOOP;
/* Finally, each sanity check must pass */
i = 0;
- ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
- find_check_entry, name, size, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = find_check_entry(iter, net, repl->name, repl->size);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret != 0) {
- IPT_ENTRY_ITERATE(entry0, newinfo->size,
- cleanup_entry, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter, net);
+ }
return ret;
}
@@ -877,33 +898,11 @@ translate_table(const char *name,
return ret;
}
-/* Gets counters. */
-static inline int
-add_entry_to_counter(const struct ipt_entry *e,
- struct xt_counters total[],
- unsigned int *i)
-{
- ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
-static inline int
-set_entry_to_counter(const struct ipt_entry *e,
- struct ipt_counters total[],
- unsigned int *i)
-{
- SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
static void
get_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
+ struct ipt_entry *iter;
unsigned int cpu;
unsigned int i;
unsigned int curcpu;
@@ -919,32 +918,32 @@ get_counters(const struct xt_table_info *t,
curcpu = smp_processor_id();
i = 0;
- IPT_ENTRY_ITERATE(t->entries[curcpu],
- t->size,
- set_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[curcpu], t->size) {
+ SET_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i;
+ }
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
xt_info_wrlock(cpu);
- IPT_ENTRY_ITERATE(t->entries[cpu],
- t->size,
- add_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[cpu], t->size) {
+ ADD_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i; /* macro does multi eval of i */
+ }
xt_info_wrunlock(cpu);
}
local_bh_enable();
}
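
get_counters() now sums the per-CPU counter arrays inline: the current CPU seeds the totals with SET_COUNTER, every other CPU is folded in with ADD_COUNTER under its write lock, and the ++i stays outside the macro because the macro may evaluate its arguments more than once. A standalone sketch of the same per-CPU reduction, with plain arrays standing in for per-CPU data:

/* Model of per-CPU counter aggregation: one row per CPU, summed into
 * a totals array indexed by rule number. */
#include <stdio.h>

#define NCPU   2
#define NRULES 3

struct cnt { unsigned long long bytes, pkts; };

static void sum_counters(struct cnt percpu[NCPU][NRULES],
			 struct cnt totals[NRULES])
{
	unsigned int cpu, i;

	for (i = 0; i < NRULES; i++)           /* seed from the first CPU */
		totals[i] = percpu[0][i];

	for (cpu = 1; cpu < NCPU; cpu++)       /* fold in the rest */
		for (i = 0; i < NRULES; i++) {
			totals[i].bytes += percpu[cpu][i].bytes;
			totals[i].pkts  += percpu[cpu][i].pkts;
		}
}

int main(void)
{
	struct cnt percpu[NCPU][NRULES] = {
		{ {100, 1}, {200, 2}, {300, 3} },
		{ {10, 1},  {20, 2},  {30, 3}  },
	};
	struct cnt totals[NRULES];
	unsigned int i;

	sum_counters(percpu, totals);
	for (i = 0; i < NRULES; i++)
		printf("rule %u: %llu bytes / %llu packets\n",
		       i, totals[i].bytes, totals[i].pkts);
	return 0;
}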
-static struct xt_counters * alloc_counters(struct xt_table *table)
+static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -962,11 +961,11 @@ static struct xt_counters * alloc_counters(struct xt_table *table)
static int
copy_entries_to_user(unsigned int total_size,
- struct xt_table *table,
+ const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
- struct ipt_entry *e;
+ const struct ipt_entry *e;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
int ret = 0;
@@ -1018,7 +1017,7 @@ copy_entries_to_user(unsigned int total_size,
}
}
- t = ipt_get_target(e);
+ t = ipt_get_target_c(e);
if (copy_to_user(userptr + off + e->target_offset
+ offsetof(struct ipt_entry_target,
u.user.name),
@@ -1035,7 +1034,7 @@ copy_entries_to_user(unsigned int total_size,
}
#ifdef CONFIG_COMPAT
-static void compat_standard_from_user(void *dst, void *src)
+static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
@@ -1044,7 +1043,7 @@ static void compat_standard_from_user(void *dst, void *src)
memcpy(dst, &v, sizeof(v));
}
-static int compat_standard_to_user(void __user *dst, void *src)
+static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
@@ -1053,25 +1052,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
-static inline int
-compat_calc_match(struct ipt_entry_match *m, int *size)
-{
- *size += xt_compat_match_offset(m->u.kernel.match);
- return 0;
-}
-
-static int compat_calc_entry(struct ipt_entry *e,
+static int compat_calc_entry(const struct ipt_entry *e,
const struct xt_table_info *info,
- void *base, struct xt_table_info *newinfo)
+ const void *base, struct xt_table_info *newinfo)
{
- struct ipt_entry_target *t;
+ const struct xt_entry_match *ematch;
+ const struct ipt_entry_target *t;
unsigned int entry_offset;
int off, i, ret;
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - base;
- IPT_MATCH_ITERATE(e, compat_calc_match, &off);
- t = ipt_get_target(e);
+ xt_ematch_foreach(ematch, e)
+ off += xt_compat_match_offset(ematch->u.kernel.match);
+ t = ipt_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(AF_INET, entry_offset, off);
@@ -1092,7 +1086,9 @@ static int compat_calc_entry(struct ipt_entry *e,
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
+ struct ipt_entry *iter;
void *loc_cpu_entry;
+ int ret;
if (!newinfo || !info)
return -EINVAL;
@@ -1101,13 +1097,17 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
- return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
- compat_calc_entry, info, loc_cpu_entry,
- newinfo);
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
}
#endif
-static int get_info(struct net *net, void __user *user, int *len, int compat)
+static int get_info(struct net *net, void __user *user,
+ const int *len, int compat)
{
char name[IPT_TABLE_MAXNAMELEN];
struct xt_table *t;
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET);
private = &tmp;
@@ -1167,7 +1167,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
}
static int
-get_entries(struct net *net, struct ipt_get_entries __user *uptr, int *len)
+get_entries(struct net *net, struct ipt_get_entries __user *uptr,
+ const int *len)
{
int ret;
struct ipt_get_entries get;
@@ -1215,6 +1216,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
+ struct ipt_entry *iter;
ret = 0;
counters = vmalloc(num_counters * sizeof(struct xt_counters));
@@ -1257,8 +1259,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
- IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
- NULL);
+ xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+ cleanup_entry(iter, net);
+
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
sizeof(struct xt_counters) * num_counters) != 0)
@@ -1277,12 +1280,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
}
static int
-do_replace(struct net *net, void __user *user, unsigned int len)
+do_replace(struct net *net, const void __user *user, unsigned int len)
{
int ret;
struct ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct ipt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1303,9 +1307,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_table(tmp.name, tmp.valid_hooks,
- newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
- tmp.hook_entry, tmp.underflow);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1318,27 +1320,16 @@ do_replace(struct net *net, void __user *user, unsigned int len)
return 0;
free_newinfo_untrans:
- IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
static int
-add_counter_to_entry(struct ipt_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
-static int
-do_add_counters(struct net *net, void __user *user, unsigned int len, int compat)
+do_add_counters(struct net *net, const void __user *user,
+ unsigned int len, int compat)
{
unsigned int i, curcpu;
struct xt_counters_info tmp;
@@ -1351,6 +1342,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
const struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
+ struct ipt_entry *iter;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
@@ -1408,11 +1400,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len, int compat
curcpu = smp_processor_id();
loc_cpu_entry = private->entries[curcpu];
xt_info_wrlock(curcpu);
- IPT_ENTRY_ITERATE(loc_cpu_entry,
- private->size,
- add_counter_to_entry,
- paddc,
- &i);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+ ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+ ++i;
+ }
xt_info_wrunlock(curcpu);
unlock_up_free:
local_bh_enable();
@@ -1440,45 +1431,40 @@ struct compat_ipt_replace {
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
unsigned int *size, struct xt_counters *counters,
- unsigned int *i)
+ unsigned int i)
{
struct ipt_entry_target *t;
struct compat_ipt_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
- int ret;
+ const struct xt_entry_match *ematch;
+ int ret = 0;
- ret = -EFAULT;
origsize = *size;
ce = (struct compat_ipt_entry __user *)*dstptr;
- if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
- goto out;
-
- if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
- goto out;
+ if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+ sizeof(counters[i])) != 0)
+ return -EFAULT;
*dstptr += sizeof(struct compat_ipt_entry);
*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
- ret = IPT_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
+ xt_ematch_foreach(ematch, e) {
+ ret = xt_compat_match_to_user(ematch, dstptr, size);
+ if (ret != 0)
+ return ret;
+ }
target_offset = e->target_offset - (origsize - *size);
- if (ret)
- goto out;
t = ipt_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
- goto out;
- ret = -EFAULT;
+ return ret;
next_offset = e->next_offset - (origsize - *size);
- if (put_user(target_offset, &ce->target_offset))
- goto out;
- if (put_user(next_offset, &ce->next_offset))
- goto out;
-
- (*i)++;
+ if (put_user(target_offset, &ce->target_offset) != 0 ||
+ put_user(next_offset, &ce->next_offset) != 0)
+ return -EFAULT;
return 0;
-out:
- return ret;
}
static int
@@ -1486,7 +1472,7 @@ compat_find_calc_match(struct ipt_entry_match *m,
const char *name,
const struct ipt_ip *ip,
unsigned int hookmask,
- int *size, unsigned int *i)
+ int *size)
{
struct xt_match *match;
@@ -1500,47 +1486,32 @@ compat_find_calc_match(struct ipt_entry_match *m,
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
-
- (*i)++;
- return 0;
-}
-
-static int
-compat_release_match(struct ipt_entry_match *m, unsigned int *i)
-{
- if (i && (*i)-- == 0)
- return 1;
-
- module_put(m->u.kernel.match->me);
return 0;
}
-static int
-compat_release_entry(struct compat_ipt_entry *e, unsigned int *i)
+static void compat_release_entry(struct compat_ipt_entry *e)
{
struct ipt_entry_target *t;
-
- if (i && (*i)-- == 0)
- return 1;
+ struct xt_entry_match *ematch;
/* Cleanup all matches */
- COMPAT_IPT_MATCH_ITERATE(e, compat_release_match, NULL);
+ xt_ematch_foreach(ematch, e)
+ module_put(ematch->u.kernel.match->me);
t = compat_ipt_get_target(e);
module_put(t->u.kernel.target->me);
- return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
- unsigned char *base,
- unsigned char *limit,
- unsigned int *hook_entries,
- unsigned int *underflows,
- unsigned int *i,
+ const unsigned char *base,
+ const unsigned char *limit,
+ const unsigned int *hook_entries,
+ const unsigned int *underflows,
const char *name)
{
+ struct xt_entry_match *ematch;
struct ipt_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
@@ -1569,10 +1540,13 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
- ret = COMPAT_IPT_MATCH_ITERATE(e, compat_find_calc_match, name,
- &e->ip, e->comefrom, &off, &j);
- if (ret != 0)
- goto release_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = compat_find_calc_match(ematch, name,
+ &e->ip, e->comefrom, &off);
+ if (ret != 0)
+ goto release_matches;
+ ++j;
+ }
t = compat_ipt_get_target(e);
target = try_then_request_module(xt_find_target(AF_INET,
@@ -1604,14 +1578,16 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
-
- (*i)++;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
- IPT_MATCH_ITERATE(e, compat_release_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ module_put(ematch->u.kernel.match->me);
+ }
return ret;
}
@@ -1625,6 +1601,7 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
struct ipt_entry *de;
unsigned int origsize;
int ret, h;
+ struct xt_entry_match *ematch;
ret = 0;
origsize = *size;
@@ -1635,10 +1612,11 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
*dstptr += sizeof(struct ipt_entry);
*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
- ret = COMPAT_IPT_MATCH_ITERATE(e, xt_compat_match_from_user,
- dstptr, size);
- if (ret)
- return ret;
+ xt_ematch_foreach(ematch, e) {
+ ret = xt_compat_match_from_user(ematch, dstptr, size);
+ if (ret != 0)
+ return ret;
+ }
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ipt_get_target(e);
target = t->u.kernel.target;
@@ -1655,36 +1633,43 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
}
static int
-compat_check_entry(struct ipt_entry *e, const char *name,
- unsigned int *i)
+compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
{
+ struct xt_entry_match *ematch;
struct xt_mtchk_param mtpar;
unsigned int j;
- int ret;
+ int ret = 0;
j = 0;
+ mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ip;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV4;
- ret = IPT_MATCH_ITERATE(e, check_match, &mtpar, &j);
- if (ret)
- goto cleanup_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = check_match(ematch, &mtpar);
+ if (ret != 0)
+ goto cleanup_matches;
+ ++j;
+ }
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto cleanup_matches;
-
- (*i)++;
return 0;
cleanup_matches:
- IPT_MATCH_ITERATE(e, cleanup_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ cleanup_match(ematch, net);
+ }
return ret;
}
static int
-translate_compat_table(const char *name,
+translate_compat_table(struct net *net,
+ const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
@@ -1696,6 +1681,8 @@ translate_compat_table(const char *name,
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
+ struct compat_ipt_entry *iter0;
+ struct ipt_entry *iter1;
unsigned int size;
int ret;
@@ -1714,13 +1701,17 @@ translate_compat_table(const char *name,
j = 0;
xt_compat_lock(AF_INET);
/* Walk through entries, checking offsets. */
- ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
- check_compat_entry_size_and_hooks,
- info, &size, entry0,
- entry0 + total_size,
- hook_entries, underflows, &j, name);
- if (ret != 0)
- goto out_unlock;
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ entry0,
+ entry0 + total_size,
+ hook_entries,
+ underflows,
+ name);
+ if (ret != 0)
+ goto out_unlock;
+ ++j;
+ }
ret = -EINVAL;
if (j != number) {
@@ -1759,9 +1750,12 @@ translate_compat_table(const char *name,
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
size = total_size;
- ret = COMPAT_IPT_ENTRY_ITERATE(entry0, total_size,
- compat_copy_entry_from_user,
- &pos, &size, name, newinfo, entry1);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = compat_copy_entry_from_user(iter0, &pos, &size,
+ name, newinfo, entry1);
+ if (ret != 0)
+ break;
+ }
xt_compat_flush_offsets(AF_INET);
xt_compat_unlock(AF_INET);
if (ret)
@@ -1772,13 +1766,32 @@ translate_compat_table(const char *name,
goto free_newinfo;
i = 0;
- ret = IPT_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
- name, &i);
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ ret = compat_check_entry(iter1, net, name);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret) {
+ /*
+ * The first i matches need cleanup_entry (calls ->destroy)
+ * because they had called ->check already. The other j-i
+ * entries need only release.
+ */
+ int skip = i;
j -= i;
- COMPAT_IPT_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
- compat_release_entry, &j);
- IPT_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
+ xt_entry_foreach(iter0, entry0, newinfo->size) {
+ if (skip-- > 0)
+ continue;
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter1, net);
+ }
xt_free_table_info(newinfo);
return ret;
}
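
The comment above spells out the split cleanup: the first i translated entries have had ->check run, so they need the full cleanup_entry() (which calls ->destroy), while the remaining j - i compat entries were only looked up and merely need their module references dropped. A small model of the skip/limit bookkeeping, with hypothetical full_cleanup()/release_only() helpers:

/* Model of the two-list cleanup: 'i' items are fully checked and need
 * full teardown, the next 'j - i' items only need a reference drop. */
#include <stdio.h>

static void full_cleanup(unsigned int idx) { printf("destroy %u\n", idx); }
static void release_only(unsigned int idx) { printf("release %u\n", idx); }

static void unwind(unsigned int i, unsigned int j, unsigned int total)
{
	int skip = i;
	unsigned int idx;

	j -= i;                                 /* entries needing release only */
	for (idx = 0; idx < total; idx++) {
		if (skip-- > 0)                 /* handled by full teardown below */
			continue;
		if (j-- == 0)
			break;
		release_only(idx);
	}
	for (idx = 0; idx < total; idx++) {
		if (i-- == 0)
			break;
		full_cleanup(idx);
	}
}

int main(void) { unwind(2, 5, 8); return 0; }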
@@ -1796,7 +1809,11 @@ translate_compat_table(const char *name,
free_newinfo:
xt_free_table_info(newinfo);
out:
- COMPAT_IPT_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
return ret;
out_unlock:
xt_compat_flush_offsets(AF_INET);
@@ -1811,6 +1828,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
struct compat_ipt_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct ipt_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1833,7 +1851,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+ ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
@@ -1849,7 +1867,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return 0;
free_newinfo_untrans:
- IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1898,6 +1917,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
int ret = 0;
const void *loc_cpu_entry;
unsigned int i = 0;
+ struct ipt_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -1910,9 +1930,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
loc_cpu_entry = private->entries[raw_smp_processor_id()];
pos = userptr;
size = total_size;
- ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
- compat_copy_entry_to_user,
- &pos, &size, counters, &i);
+ xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+ ret = compat_copy_entry_to_user(iter, &pos,
+ &size, counters, i++);
+ if (ret != 0)
+ break;
+ }
vfree(counters);
return ret;
@@ -2086,11 +2109,7 @@ struct xt_table *ipt_register_table(struct net *net,
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(table->name, table->valid_hooks,
- newinfo, loc_cpu_entry, repl->size,
- repl->num_entries,
- repl->hook_entry,
- repl->underflow);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
@@ -2108,17 +2127,19 @@ out:
return ERR_PTR(ret);
}
-void ipt_unregister_table(struct xt_table *table)
+void ipt_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct module *table_owner = table->me;
+ struct ipt_entry *iter;
private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
- IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size)
+ cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 40ca2d240ab..0886f96c736 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -560,8 +560,7 @@ struct clusterip_seq_position {
static void *clusterip_seq_start(struct seq_file *s, loff_t *pos)
{
- const struct proc_dir_entry *pde = s->private;
- struct clusterip_config *c = pde->data;
+ struct clusterip_config *c = s->private;
unsigned int weight;
u_int32_t local_nodes;
struct clusterip_seq_position *idx;
@@ -632,10 +631,9 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
if (!ret) {
struct seq_file *sf = file->private_data;
- struct proc_dir_entry *pde = PDE(inode);
- struct clusterip_config *c = pde->data;
+ struct clusterip_config *c = PDE(inode)->data;
- sf->private = pde;
+ sf->private = c;
clusterip_config_get(c);
}
@@ -645,8 +643,7 @@ static int clusterip_proc_open(struct inode *inode, struct file *file)
static int clusterip_proc_release(struct inode *inode, struct file *file)
{
- struct proc_dir_entry *pde = PDE(inode);
- struct clusterip_config *c = pde->data;
+ struct clusterip_config *c = PDE(inode)->data;
int ret;
ret = seq_release(inode, file);
@@ -660,10 +657,9 @@ static int clusterip_proc_release(struct inode *inode, struct file *file)
static ssize_t clusterip_proc_write(struct file *file, const char __user *input,
size_t size, loff_t *ofs)
{
+ struct clusterip_config *c = PDE(file->f_path.dentry->d_inode)->data;
#define PROC_WRITELEN 10
char buffer[PROC_WRITELEN+1];
- const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
- struct clusterip_config *c = pde->data;
unsigned long nodenum;
if (copy_from_user(buffer, input, PROC_WRITELEN))
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 399061c3fd7..09a5d3f7cc4 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -338,7 +338,7 @@ struct compat_ipt_ulog_info {
char prefix[ULOG_PREFIX_LEN];
};
-static void ulog_tg_compat_from_user(void *dst, void *src)
+static void ulog_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ipt_ulog_info *cl = src;
struct ipt_ulog_info l = {
@@ -351,7 +351,7 @@ static void ulog_tg_compat_from_user(void *dst, void *src)
memcpy(dst, &l, sizeof(l));
}
-static int ulog_tg_compat_to_user(void __user *dst, void *src)
+static int ulog_tg_compat_to_user(void __user *dst, const void *src)
{
const struct ipt_ulog_info *l = src;
struct compat_ipt_ulog_info cl = {
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index df566cbd68e..c8dc9800d62 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -23,104 +23,32 @@ MODULE_DESCRIPTION("iptables filter table");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT))
-static struct
-{
- struct ipt_replace repl;
- struct ipt_standard entries[3];
- struct ipt_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "filter",
- .valid_hooks = FILTER_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
- .hook_entry = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
- },
- .underflow = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
- },
- },
- .entries = {
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IPT_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
+ .priority = NF_IP_PRI_FILTER,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-ipt_local_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_filter);
-}
-
static unsigned int
-ipt_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_filter);
-}
+ const struct net *net;
-static unsigned int
-ipt_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
+ if (hook == NF_INET_LOCAL_OUT &&
+ (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr)))
+ /* root is playing with raw sockets. */
return NF_ACCEPT;
- return ipt_do_table(skb, hook, in, out,
- dev_net(out)->ipv4.iptable_filter);
+
+ net = dev_net((in != NULL) ? in : out);
+ return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
}
-static struct nf_hook_ops ipt_ops[] __read_mostly = {
- {
- .hook = ipt_local_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_FILTER,
- },
- {
- .hook = ipt_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP_PRI_FILTER,
- },
- {
- .hook = ipt_local_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_FILTER,
- },
-};
+static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
static int forward = NF_ACCEPT;
@@ -128,9 +56,18 @@ module_param(forward, bool, 0000);
static int __net_init iptable_filter_net_init(struct net *net)
{
- /* Register table */
+ struct ipt_replace *repl;
+
+ repl = ipt_alloc_initial_table(&packet_filter);
+ if (repl == NULL)
+ return -ENOMEM;
+ /* Entry 1 is the FORWARD hook */
+ ((struct ipt_standard *)repl->entries)[1].target.verdict =
+ -forward - 1;
+
net->ipv4.iptable_filter =
- ipt_register_table(net, &packet_filter, &initial_table.repl);
+ ipt_register_table(net, &packet_filter, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.iptable_filter))
return PTR_ERR(net->ipv4.iptable_filter);
return 0;
@@ -138,7 +75,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
static void __net_exit iptable_filter_net_exit(struct net *net)
{
- ipt_unregister_table(net->ipv4.iptable_filter);
+ ipt_unregister_table(net, net->ipv4.iptable_filter);
}
static struct pernet_operations iptable_filter_net_ops = {
@@ -155,17 +92,16 @@ static int __init iptable_filter_init(void)
return -EINVAL;
}
- /* Entry 1 is the FORWARD hook */
- initial_table.entries[1].target.verdict = -forward - 1;
-
ret = register_pernet_subsys(&iptable_filter_net_ops);
if (ret < 0)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
- if (ret < 0)
+ filter_ops = xt_hook_link(&packet_filter, iptable_filter_hook);
+ if (IS_ERR(filter_ops)) {
+ ret = PTR_ERR(filter_ops);
goto cleanup_table;
+ }
return ret;
@@ -176,7 +112,7 @@ static int __init iptable_filter_init(void)
static void __exit iptable_filter_fini(void)
{
- nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+ xt_hook_unlink(&packet_filter, filter_ops);
unregister_pernet_subsys(&iptable_filter_net_ops);
}
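
iptable_filter (and, further down, mangle, raw and security in the same way) drops its hand-built initial_table blob and its near-identical per-hook functions: ipt_alloc_initial_table() builds the bootstrap ruleset from the xt_table description at run time, and xt_hook_link() registers hook ops for every valid hook, all pointing at one dispatcher that picks the netns from whichever device pointer is non-NULL. A rough standalone sketch of the "one dispatcher, bulk registration" shape; the types and helpers here are simplified stand-ins, not the kernel API.

/* Model of collapsing several per-hook callbacks into one dispatcher
 * that switches on the hook number it is handed. */
#include <stdio.h>

enum hook { HOOK_IN, HOOK_FORWARD, HOOK_OUT, HOOK_MAX };

typedef unsigned int (*hook_fn)(enum hook hook, const char *pkt);

static unsigned int filter_dispatch(enum hook hook, const char *pkt)
{
	if (hook == HOOK_OUT && pkt[0] == '\0')
		return 1;                       /* e.g. skip malformed local output */
	printf("filter table run for hook %d on \"%s\"\n", hook, pkt);
	return 0;                               /* accept */
}

/* One registration covering every hook the table claims, instead of a
 * separate function and ops entry per hook. */
static hook_fn registered[HOOK_MAX];

static void hook_link(unsigned int valid_hooks, hook_fn fn)
{
	unsigned int h;

	for (h = 0; h < HOOK_MAX; h++)
		if (valid_hooks & (1u << h))
			registered[h] = fn;
}

int main(void)
{
	hook_link((1 << HOOK_IN) | (1 << HOOK_FORWARD) | (1 << HOOK_OUT),
		  filter_dispatch);
	registered[HOOK_FORWARD](HOOK_FORWARD, "packet");
	registered[HOOK_OUT](HOOK_OUT, "");
	return 0;
}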
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index fae78c3076c..b9b83464cbf 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -27,101 +27,16 @@ MODULE_DESCRIPTION("iptables mangle table");
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))
-/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
-static const struct
-{
- struct ipt_replace repl;
- struct ipt_standard entries[5];
- struct ipt_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "mangle",
- .valid_hooks = MANGLE_VALID_HOOKS,
- .num_entries = 6,
- .size = sizeof(struct ipt_standard) * 5 + sizeof(struct ipt_error),
- .hook_entry = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
- [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
- [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
- },
- .underflow = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_IN] = sizeof(struct ipt_standard),
- [NF_INET_FORWARD] = sizeof(struct ipt_standard) * 2,
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 3,
- [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard) * 4,
- },
- },
- .entries = {
- IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
- },
- .term = IPT_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
+ .priority = NF_IP_PRI_MANGLE,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-ipt_pre_routing_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_mangle);
-}
-
-static unsigned int
-ipt_post_routing_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(out)->ipv4.iptable_mangle);
-}
-
-static unsigned int
-ipt_local_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_mangle);
-}
-
-static unsigned int
-ipt_forward_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_mangle);
-}
-
static unsigned int
-ipt_local_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
{
unsigned int ret;
const struct iphdr *iph;
@@ -141,7 +56,7 @@ ipt_local_hook(unsigned int hook,
daddr = iph->daddr;
tos = iph->tos;
- ret = ipt_do_table(skb, hook, in, out,
+ ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv4.iptable_mangle);
/* Reroute for ANY change. */
if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
@@ -158,49 +73,36 @@ ipt_local_hook(unsigned int hook,
return ret;
}
-static struct nf_hook_ops ipt_ops[] __read_mostly = {
- {
- .hook = ipt_pre_routing_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP_PRI_MANGLE,
- },
- {
- .hook = ipt_local_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_MANGLE,
- },
- {
- .hook = ipt_forward_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP_PRI_MANGLE,
- },
- {
- .hook = ipt_local_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_MANGLE,
- },
- {
- .hook = ipt_post_routing_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP_PRI_MANGLE,
- },
-};
+/* The work comes in here from netfilter.c. */
+static unsigned int
+iptable_mangle_hook(unsigned int hook,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ if (hook == NF_INET_LOCAL_OUT)
+ return ipt_mangle_out(skb, out);
+ if (hook == NF_INET_POST_ROUTING)
+ return ipt_do_table(skb, hook, in, out,
+ dev_net(out)->ipv4.iptable_mangle);
+ /* PREROUTING/INPUT/FORWARD: */
+ return ipt_do_table(skb, hook, in, out,
+ dev_net(in)->ipv4.iptable_mangle);
+}
+
+static struct nf_hook_ops *mangle_ops __read_mostly;
static int __net_init iptable_mangle_net_init(struct net *net)
{
- /* Register table */
+ struct ipt_replace *repl;
+
+ repl = ipt_alloc_initial_table(&packet_mangler);
+ if (repl == NULL)
+ return -ENOMEM;
net->ipv4.iptable_mangle =
- ipt_register_table(net, &packet_mangler, &initial_table.repl);
+ ipt_register_table(net, &packet_mangler, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.iptable_mangle))
return PTR_ERR(net->ipv4.iptable_mangle);
return 0;
@@ -208,7 +110,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
static void __net_exit iptable_mangle_net_exit(struct net *net)
{
- ipt_unregister_table(net->ipv4.iptable_mangle);
+ ipt_unregister_table(net, net->ipv4.iptable_mangle);
}
static struct pernet_operations iptable_mangle_net_ops = {
@@ -225,9 +127,11 @@ static int __init iptable_mangle_init(void)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
- if (ret < 0)
+ mangle_ops = xt_hook_link(&packet_mangler, iptable_mangle_hook);
+ if (IS_ERR(mangle_ops)) {
+ ret = PTR_ERR(mangle_ops);
goto cleanup_table;
+ }
return ret;
@@ -238,7 +142,7 @@ static int __init iptable_mangle_init(void)
static void __exit iptable_mangle_fini(void)
{
- nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+ xt_hook_unlink(&packet_mangler, mangle_ops);
unregister_pernet_subsys(&iptable_mangle_net_ops);
}
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 993edc23be0..06fb9d11953 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -9,90 +9,44 @@
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
-static const struct
-{
- struct ipt_replace repl;
- struct ipt_standard entries[2];
- struct ipt_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "raw",
- .valid_hooks = RAW_VALID_HOOKS,
- .num_entries = 3,
- .size = sizeof(struct ipt_standard) * 2 + sizeof(struct ipt_error),
- .hook_entry = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
- },
- .underflow = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard)
- },
- },
- .entries = {
- IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IPT_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
+ .priority = NF_IP_PRI_RAW,
};
/* The work comes in here from netfilter.c. */
static unsigned int
-ipt_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_raw);
-}
+ const struct net *net;
-static unsigned int
-ipt_local_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
+ if (hook == NF_INET_LOCAL_OUT &&
+ (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr)))
+ /* root is playing with raw sockets. */
return NF_ACCEPT;
- return ipt_do_table(skb, hook, in, out,
- dev_net(out)->ipv4.iptable_raw);
+
+ net = dev_net((in != NULL) ? in : out);
+ return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
}
-/* 'raw' is the very first table. */
-static struct nf_hook_ops ipt_ops[] __read_mostly = {
- {
- .hook = ipt_hook,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP_PRI_RAW,
- .owner = THIS_MODULE,
- },
- {
- .hook = ipt_local_hook,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_RAW,
- .owner = THIS_MODULE,
- },
-};
+static struct nf_hook_ops *rawtable_ops __read_mostly;
static int __net_init iptable_raw_net_init(struct net *net)
{
- /* Register table */
+ struct ipt_replace *repl;
+
+ repl = ipt_alloc_initial_table(&packet_raw);
+ if (repl == NULL)
+ return -ENOMEM;
net->ipv4.iptable_raw =
- ipt_register_table(net, &packet_raw, &initial_table.repl);
+ ipt_register_table(net, &packet_raw, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.iptable_raw))
return PTR_ERR(net->ipv4.iptable_raw);
return 0;
@@ -100,7 +54,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
static void __net_exit iptable_raw_net_exit(struct net *net)
{
- ipt_unregister_table(net->ipv4.iptable_raw);
+ ipt_unregister_table(net, net->ipv4.iptable_raw);
}
static struct pernet_operations iptable_raw_net_ops = {
@@ -117,9 +71,11 @@ static int __init iptable_raw_init(void)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
- if (ret < 0)
+ rawtable_ops = xt_hook_link(&packet_raw, iptable_raw_hook);
+ if (IS_ERR(rawtable_ops)) {
+ ret = PTR_ERR(rawtable_ops);
goto cleanup_table;
+ }
return ret;
@@ -130,7 +86,7 @@ static int __init iptable_raw_init(void)
static void __exit iptable_raw_fini(void)
{
- nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+ xt_hook_unlink(&packet_raw, rawtable_ops);
unregister_pernet_subsys(&iptable_raw_net_ops);
}
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 3bd3d6388da..cce2f64e6f2 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -27,109 +27,44 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT)
-static const struct
-{
- struct ipt_replace repl;
- struct ipt_standard entries[3];
- struct ipt_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "security",
- .valid_hooks = SECURITY_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
- .hook_entry = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
- },
- .underflow = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2,
- },
- },
- .entries = {
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IPT_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IPT_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV4,
+ .priority = NF_IP_PRI_SECURITY,
};
static unsigned int
-ipt_local_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_security);
-}
-
-static unsigned int
-ipt_forward_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+iptable_security_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ipt_do_table(skb, hook, in, out,
- dev_net(in)->ipv4.iptable_security);
-}
+ const struct net *net;
-static unsigned int
-ipt_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- /* Somebody is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
+ if (hook == NF_INET_LOCAL_OUT &&
+ (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr)))
+ /* Somebody is playing with raw sockets. */
return NF_ACCEPT;
- return ipt_do_table(skb, hook, in, out,
- dev_net(out)->ipv4.iptable_security);
+
+ net = dev_net((in != NULL) ? in : out);
+ return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
}
-static struct nf_hook_ops ipt_ops[] __read_mostly = {
- {
- .hook = ipt_local_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP_PRI_SECURITY,
- },
- {
- .hook = ipt_forward_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP_PRI_SECURITY,
- },
- {
- .hook = ipt_local_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV4,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP_PRI_SECURITY,
- },
-};
+static struct nf_hook_ops *sectbl_ops __read_mostly;
static int __net_init iptable_security_net_init(struct net *net)
{
- net->ipv4.iptable_security =
- ipt_register_table(net, &security_table, &initial_table.repl);
+ struct ipt_replace *repl;
+ repl = ipt_alloc_initial_table(&security_table);
+ if (repl == NULL)
+ return -ENOMEM;
+ net->ipv4.iptable_security =
+ ipt_register_table(net, &security_table, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.iptable_security))
return PTR_ERR(net->ipv4.iptable_security);
@@ -138,7 +73,7 @@ static int __net_init iptable_security_net_init(struct net *net)
static void __net_exit iptable_security_net_exit(struct net *net)
{
- ipt_unregister_table(net->ipv4.iptable_security);
+ ipt_unregister_table(net, net->ipv4.iptable_security);
}
static struct pernet_operations iptable_security_net_ops = {
@@ -154,9 +89,11 @@ static int __init iptable_security_init(void)
if (ret < 0)
return ret;
- ret = nf_register_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
- if (ret < 0)
+ sectbl_ops = xt_hook_link(&security_table, iptable_security_hook);
+ if (IS_ERR(sectbl_ops)) {
+ ret = PTR_ERR(sectbl_ops);
goto cleanup_table;
+ }
return ret;
@@ -167,7 +104,7 @@ cleanup_table:
static void __exit iptable_security_fini(void)
{
- nf_unregister_hooks(ipt_ops, ARRAY_SIZE(ipt_ops));
+ xt_hook_unlink(&security_table, sectbl_ops);
unregister_pernet_subsys(&iptable_security_net_ops);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a65..2bb1f87051c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -22,6 +22,7 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
@@ -210,7 +211,7 @@ static ctl_table ip_ct_sysctl_table[] = {
},
{
.procname = "ip_conntrack_buckets",
- .data = &nf_conntrack_htable_size,
+ .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
@@ -266,7 +267,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
return -EINVAL;
}
- h = nf_conntrack_find_get(sock_net(sk), &tuple);
+ h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
if (h) {
struct sockaddr_in sin;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda..2fb7b76da94 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < nf_conntrack_htable_size;
+ st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= nf_conntrack_htable_size)
+ if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7afd39b5b78..7404bde9599 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -18,6 +18,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>
static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
@@ -114,13 +115,14 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
static int
-icmp_error_message(struct net *net, struct sk_buff *skb,
+icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
enum ip_conntrack_info *ctinfo,
unsigned int hooknum)
{
struct nf_conntrack_tuple innertuple, origtuple;
const struct nf_conntrack_l4proto *innerproto;
const struct nf_conntrack_tuple_hash *h;
+ u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
NF_CT_ASSERT(skb->nfct == NULL);
@@ -146,7 +148,7 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
*ctinfo = IP_CT_RELATED;
- h = nf_conntrack_find_get(net, &innertuple);
+ h = nf_conntrack_find_get(net, zone, &innertuple);
if (!h) {
pr_debug("icmp_error_message: no match\n");
return -NF_ACCEPT;
@@ -163,7 +165,8 @@ icmp_error_message(struct net *net, struct sk_buff *skb,
/* Small and modified version of icmp_rcv */
static int
-icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
+icmp_error(struct net *net, struct nf_conn *tmpl,
+ struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
{
const struct icmphdr *icmph;
@@ -208,7 +211,7 @@ icmp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
icmph->type != ICMP_REDIRECT)
return NF_ACCEPT;
- return icmp_error_message(net, skb, ctinfo, hooknum);
+ return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 331ead3ebd1..cb763ae9ed9 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -17,6 +17,10 @@
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+#include <net/netfilter/nf_conntrack_zones.h>
/* Returns new sk_buff, or NULL */
static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
@@ -38,15 +42,22 @@ static int nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
+ u16 zone = NF_CT_DEFAULT_ZONE;
+
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ if (skb->nfct)
+ zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+#endif
+
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP_DEFRAG_CONNTRACK_BRIDGE_IN;
+ return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
if (hooknum == NF_INET_PRE_ROUTING)
- return IP_DEFRAG_CONNTRACK_IN;
+ return IP_DEFRAG_CONNTRACK_IN + zone;
else
- return IP_DEFRAG_CONNTRACK_OUT;
+ return IP_DEFRAG_CONNTRACK_OUT + zone;
}
static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
@@ -59,7 +70,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
/* Previously seen (loopback)? Ignore. Do this before
fragment check. */
- if (skb->nfct)
+ if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
#endif
#endif
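
With conntrack zones, the defragmentation "user" key is offset by the zone id so fragments belonging to different zones are never reassembled into the same datagram; the template conntrack attached to the skb is what carries the zone at this early hook. A compact illustration of keying a reassembly queue by base class plus zone, using made-up constants rather than the kernel's enum values:

/* Model of zone-aware defrag keys: the same base class plus the zone id
 * yields a distinct reassembly queue per zone. */
#include <stdio.h>

enum { DEFRAG_IN_BASE = 100, DEFRAG_OUT_BASE = 200 }; /* illustrative bases */

static unsigned int defrag_user(int is_prerouting, unsigned short zone)
{
	return (is_prerouting ? DEFRAG_IN_BASE : DEFRAG_OUT_BASE) + zone;
}

int main(void)
{
	/* Same direction, different zones -> different queues. */
	printf("zone 0: %u, zone 7: %u\n",
	       defrag_user(1, 0), defrag_user(1, 7));
	return 0;
}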
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd..4595281c286 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -30,14 +30,12 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_zones.h>
static DEFINE_SPINLOCK(nf_nat_lock);
static struct nf_conntrack_l3proto *l3proto __read_mostly;
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
__read_mostly;
@@ -72,15 +70,16 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
/* Original src, to ensure we map it consistently if poss. */
hash = jhash_3words((__force u32)tuple->src.u3.ip,
- (__force u32)tuple->src.u.all,
+ (__force u32)tuple->src.u.all ^ zone,
tuple->dst.protonum, 0);
- return ((u64)hash * nf_nat_htable_size) >> 32;
+ return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
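
The NAT bysource hash now mixes the zone into the jhash input and scales the 32-bit result into the per-netns table with a multiply-and-shift, ((u64)hash * size) >> 32, which maps a uniform 32-bit hash onto [0, size) without a modulo. A tiny standalone check of that mapping:

/* The multiply-shift trick: for a 32-bit hash h and table size s,
 * (uint64_t)h * s >> 32 always lands in [0, s). */
#include <stdio.h>
#include <stdint.h>

static uint32_t bucket(uint32_t hash, uint32_t size)
{
	return (uint32_t)(((uint64_t)hash * size) >> 32);
}

int main(void)
{
	uint32_t size = 4096;

	printf("%u %u %u\n",
	       bucket(0, size),              /* 0          */
	       bucket(0x80000000u, size),    /* size / 2   */
	       bucket(0xffffffffu, size));   /* size - 1   */
	return 0;
}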
/* Is this tuple already taken? (not by us) */
@@ -142,12 +141,12 @@ same_src(const struct nf_conn *ct,
/* Only called for SRC manip */
static int
-find_appropriate_src(struct net *net,
+find_appropriate_src(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
- unsigned int h = hash_by_src(tuple);
+ unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
const struct hlist_node *n;
@@ -155,7 +154,7 @@ find_appropriate_src(struct net *net,
rcu_read_lock();
hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
ct = nat->ct;
- if (same_src(ct, tuple)) {
+ if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
/* Copy source part from reply tuple. */
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -178,7 +177,7 @@ find_appropriate_src(struct net *net,
the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
-find_best_ips_proto(struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
@@ -212,7 +211,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
maxip = ntohl(range->max_ip);
j = jhash_2words((__force u32)tuple->src.u3.ip,
range->flags & IP_NAT_RANGE_PERSISTENT ?
- 0 : (__force u32)tuple->dst.u3.ip, 0);
+ 0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
j = ((u64)j * (maxip - minip + 1)) >> 32;
*var_ipp = htonl(minip + j);
}
@@ -232,6 +231,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
{
struct net *net = nf_ct_net(ct);
const struct nf_nat_protocol *proto;
+ u16 zone = nf_ct_zone(ct);
/* 1) If this srcip/proto/src-proto-part is currently mapped,
and that same mapping gives a unique tuple within the given
@@ -242,7 +242,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
manips not an issue. */
if (maniptype == IP_NAT_MANIP_SRC &&
!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
- if (find_appropriate_src(net, orig_tuple, tuple, range)) {
+ if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
return;
@@ -252,7 +252,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
/* 2) Select the least-used IP/proto combination in the given
range. */
*tuple = *orig_tuple;
- find_best_ips_proto(tuple, range, ct, maniptype);
+ find_best_ips_proto(zone, tuple, range, ct, maniptype);
/* 3) The per-protocol part of the manip is made to map into
the range to make a unique tuple. */
@@ -330,7 +330,8 @@ nf_nat_setup_info(struct nf_conn *ct,
if (have_to_hash) {
unsigned int srchash;
- srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ srchash = hash_by_src(net, nf_ct_zone(ct),
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate extension area */

nat = nfct_nat(ct);
@@ -679,8 +680,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
static int __net_init nf_nat_net_init(struct net *net)
{
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ /* Leave them the same for the moment. */
+ net->ipv4.nat_htable_size = net->ct.htable_size;
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+ &net->ipv4.nat_vmalloced, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
@@ -703,7 +706,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- nf_nat_htable_size);
+ net->ipv4.nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +727,6 @@ static int __init nf_nat_init(void)
return ret;
}
- /* Leave them the same for the moment. */
- nf_nat_htable_size = nf_conntrack_htable_size;
-
ret = register_pernet_subsys(&nf_nat_net_ops);
if (ret < 0)
goto cleanup_extend;
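
The nf_nat_core.c hunks above thread the conntrack zone through both the bysource hash and the same_src() match, so two connections with identical tuples in different zones never reuse each other's source mapping, and the per-net NAT hash now reuses the conntrack table size. A minimal user-space sketch of the zone-aware lookup idea (toy tuple and mix function here, not the kernel's jhash over nf_conntrack_tuple):

/*
 * Minimal user-space sketch of the zone-aware source lookup added above.
 * The kernel uses jhash over the real conntrack tuple; here a toy tuple
 * and a trivial mix function stand in, purely to show that the zone is
 * folded into the bucket choice and re-checked on every hash hit.
 */
#include <stdint.h>
#include <stdio.h>

struct tuple { uint32_t src_ip; uint16_t src_port; uint16_t zone; };

static unsigned int hash_by_src(const struct tuple *t, unsigned int htable_size)
{
	/* toy mixer, not the kernel's jhash_3words() */
	uint32_t h = t->src_ip * 2654435761u;

	h ^= (uint32_t)t->src_port << 16;
	h ^= t->zone;			/* zone perturbs the bucket */
	return h % htable_size;
}

static int same_mapping(const struct tuple *a, const struct tuple *b)
{
	/* a bucket hit is only reused if the zones match as well */
	return a->src_ip == b->src_ip &&
	       a->src_port == b->src_port &&
	       a->zone == b->zone;
}

int main(void)
{
	struct tuple a = { 0xc0a80001, 1025, 0 };	/* 192.168.0.1, zone 0 */
	struct tuple b = { 0xc0a80001, 1025, 1 };	/* same address, zone 1 */

	printf("bucket(a)=%u bucket(b)=%u same=%d\n",
	       hash_by_src(&a, 8192), hash_by_src(&b, 8192),
	       same_mapping(&a, &b));
	return 0;
}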
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index a1d5d58a58b..86e0e84ff0a 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -27,76 +27,29 @@ MODULE_ALIAS("ip_nat_ftp");
/* FIXME: Time out? --RR */
-static int
-mangle_rfc959_packet(struct sk_buff *skb,
- __be32 newip,
- u_int16_t port,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
+static int nf_nat_ftp_fmt_cmd(enum nf_ct_ftp_type type,
+ char *buffer, size_t buflen,
+ __be32 addr, u16 port)
{
- char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
-
- sprintf(buffer, "%u,%u,%u,%u,%u,%u",
- NIPQUAD(newip), port>>8, port&0xFF);
-
- pr_debug("calling nf_nat_mangle_tcp_packet\n");
-
- return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
- matchlen, buffer, strlen(buffer));
-}
-
-/* |1|132.235.1.2|6275| */
-static int
-mangle_eprt_packet(struct sk_buff *skb,
- __be32 newip,
- u_int16_t port,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- char buffer[sizeof("|1|255.255.255.255|65535|")];
-
- sprintf(buffer, "|1|%u.%u.%u.%u|%u|", NIPQUAD(newip), port);
-
- pr_debug("calling nf_nat_mangle_tcp_packet\n");
-
- return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
- matchlen, buffer, strlen(buffer));
-}
-
-/* |1|132.235.1.2|6275| */
-static int
-mangle_epsv_packet(struct sk_buff *skb,
- __be32 newip,
- u_int16_t port,
- unsigned int matchoff,
- unsigned int matchlen,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- char buffer[sizeof("|||65535|")];
-
- sprintf(buffer, "|||%u|", port);
-
- pr_debug("calling nf_nat_mangle_tcp_packet\n");
+ switch (type) {
+ case NF_CT_FTP_PORT:
+ case NF_CT_FTP_PASV:
+ return snprintf(buffer, buflen, "%u,%u,%u,%u,%u,%u",
+ ((unsigned char *)&addr)[0],
+ ((unsigned char *)&addr)[1],
+ ((unsigned char *)&addr)[2],
+ ((unsigned char *)&addr)[3],
+ port >> 8,
+ port & 0xFF);
+ case NF_CT_FTP_EPRT:
+ return snprintf(buffer, buflen, "|1|%pI4|%u|", &addr, port);
+ case NF_CT_FTP_EPSV:
+ return snprintf(buffer, buflen, "|||%u|", port);
+ }
- return nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
- matchlen, buffer, strlen(buffer));
+ return 0;
}
-static int (*mangle[])(struct sk_buff *, __be32, u_int16_t,
- unsigned int, unsigned int, struct nf_conn *,
- enum ip_conntrack_info)
-= {
- [NF_CT_FTP_PORT] = mangle_rfc959_packet,
- [NF_CT_FTP_PASV] = mangle_rfc959_packet,
- [NF_CT_FTP_EPRT] = mangle_eprt_packet,
- [NF_CT_FTP_EPSV] = mangle_epsv_packet
-};
-
/* So, this packet has hit the connection tracking matching code.
Mangle it, and change the expectation to match the new version. */
static unsigned int nf_nat_ftp(struct sk_buff *skb,
@@ -110,6 +63,8 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
u_int16_t port;
int dir = CTINFO2DIR(ctinfo);
struct nf_conn *ct = exp->master;
+ char buffer[sizeof("|1|255.255.255.255|65535|")];
+ unsigned int buflen;
pr_debug("FTP_NAT: type %i, off %u len %u\n", type, matchoff, matchlen);
@@ -132,11 +87,21 @@ static unsigned int nf_nat_ftp(struct sk_buff *skb,
if (port == 0)
return NF_DROP;
- if (!mangle[type](skb, newip, port, matchoff, matchlen, ct, ctinfo)) {
- nf_ct_unexpect_related(exp);
- return NF_DROP;
- }
+ buflen = nf_nat_ftp_fmt_cmd(type, buffer, sizeof(buffer), newip, port);
+ if (!buflen)
+ goto out;
+
+ pr_debug("calling nf_nat_mangle_tcp_packet\n");
+
+ if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
+ matchlen, buffer, buflen))
+ goto out;
+
return NF_ACCEPT;
+
+out:
+ nf_ct_unexpect_related(exp);
+ return NF_DROP;
}
static void __exit nf_nat_ftp_fini(void)
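
The rewrite above folds three nearly identical mangle_*() helpers into one bounded formatter plus a single call to nf_nat_mangle_tcp_packet(). A stand-alone sketch of that formatter (local enum and helper names, byte-wise address printing instead of the kernel's %pI4):

/*
 * Stand-alone sketch of the consolidated FTP command formatter above:
 * one bounded snprintf() per command type instead of three near-identical
 * mangle_*() helpers. Enum names only mirror the kernel's; the rest is
 * local to this example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum ftp_type { FTP_PORT, FTP_PASV, FTP_EPRT, FTP_EPSV };

static size_t ftp_fmt_cmd(enum ftp_type type, char *buf, size_t buflen,
			  uint32_t addr_be, uint16_t port)
{
	const unsigned char *a = (const unsigned char *)&addr_be;

	switch (type) {
	case FTP_PORT:
	case FTP_PASV:	/* PORT/PASV: h1,h2,h3,h4,p1,p2 */
		return snprintf(buf, buflen, "%u,%u,%u,%u,%u,%u",
				a[0], a[1], a[2], a[3], port >> 8, port & 0xFF);
	case FTP_EPRT:	/* EPRT: |1|a.b.c.d|port| */
		return snprintf(buf, buflen, "|1|%u.%u.%u.%u|%u|",
				a[0], a[1], a[2], a[3], port);
	case FTP_EPSV:	/* EPSV response: |||port| */
		return snprintf(buf, buflen, "|||%u|", port);
	}
	return 0;
}

int main(void)
{
	char buf[sizeof("|1|255.255.255.255|65535|")];
	unsigned char quad[4] = { 132, 235, 1, 2 };
	uint32_t addr_be;

	memcpy(&addr_be, quad, sizeof(addr_be));	/* network byte order */
	ftp_fmt_cmd(FTP_EPRT, buf, sizeof(buf), addr_be, 6275);
	puts(buf);					/* |1|132.235.1.2|6275| */
	return 0;
}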
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 7f10a6be019..4b6af4bb1f5 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -141,6 +141,17 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
return 1;
}
+void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s16 off)
+{
+ if (!off)
+ return;
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+ adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
+ nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
+}
+EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
+
/* Generic function for mangling variable-length address changes inside
* NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
* command in FTP).
@@ -149,14 +160,13 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
* skb enlargement, ...
*
* */
-int
-nf_nat_mangle_tcp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len)
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int match_offset,
+ unsigned int match_len,
+ const char *rep_buffer,
+ unsigned int rep_len, bool adjust)
{
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph;
@@ -202,16 +212,13 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb,
inet_proto_csum_replace2(&tcph->check, skb,
htons(oldlen), htons(datalen), 1);
- if (rep_len != match_len) {
- set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
- adjust_tcp_sequence(ntohl(tcph->seq),
- (int)rep_len - (int)match_len,
- ct, ctinfo);
- nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
- }
+ if (adjust && rep_len != match_len)
+ nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
+ (int)rep_len - (int)match_len);
+
return 1;
}
-EXPORT_SYMBOL(nf_nat_mangle_tcp_packet);
+EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
/* Generic function for mangling variable-length address changes inside
* NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
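
The new nf_nat_set_seq_adjust() and the 'adjust' parameter of __nf_nat_mangle_tcp_packet() let a helper defer TCP sequence fix-ups: each rewrite that changes the payload length shifts later sequence numbers by rep_len - match_len, and a caller doing several rewrites on one segment can record the accumulated delta once at the end. The arithmetic, reduced to a user-space sketch with made-up lengths:

/*
 * Sketch of why the 'adjust' flag matters: every in-place rewrite that
 * changes the payload length shifts everything that follows by
 * (rep_len - match_len). A helper that rewrites several fields in one
 * packet can accumulate the per-rewrite deltas and record a single
 * sequence adjustment afterwards, which is what the SIP helper does via
 * nf_nat_set_seq_adjust(). Plain C arithmetic, no kernel structures;
 * the rewrite lengths below are invented.
 */
#include <stdint.h>
#include <stdio.h>

struct rewrite { unsigned int match_len, rep_len; };

int main(void)
{
	/* e.g. an address, a port and a Content-Length field rewritten */
	struct rewrite r[] = { { 15, 13 }, { 4, 5 }, { 3, 3 } };
	int16_t off = 0;
	unsigned int seq = 1000;	/* sequence number of the mangled segment */

	for (unsigned int i = 0; i < sizeof(r) / sizeof(r[0]); i++)
		off += (int)r[i].rep_len - (int)r[i].match_len;

	printf("record one seq adjustment of %d starting at seq %u\n",
	       off, seq);
	return 0;
}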
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 9eb171056c6..4c060038d29 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
@@ -74,7 +75,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple_ip(&t);
- other_exp = nf_ct_expect_find_get(net, &t);
+ other_exp = nf_ct_expect_find_get(net, nf_ct_zone(ct), &t);
if (other_exp) {
nf_ct_unexpect_related(other_exp);
nf_ct_expect_put(other_exp);
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 9e81e0dfb4e..ab74cc0535e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,36 +28,6 @@
(1 << NF_INET_POST_ROUTING) | \
(1 << NF_INET_LOCAL_OUT))
-static const struct
-{
- struct ipt_replace repl;
- struct ipt_standard entries[3];
- struct ipt_error term;
-} nat_initial_table __net_initdata = {
- .repl = {
- .name = "nat",
- .valid_hooks = NAT_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct ipt_standard) * 3 + sizeof(struct ipt_error),
- .hook_entry = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
- },
- .underflow = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_POST_ROUTING] = sizeof(struct ipt_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ipt_standard) * 2
- },
- },
- .entries = {
- IPT_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
- IPT_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
- IPT_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IPT_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
@@ -186,8 +156,13 @@ static struct xt_target ipt_dnat_reg __read_mostly = {
static int __net_init nf_nat_rule_net_init(struct net *net)
{
- net->ipv4.nat_table = ipt_register_table(net, &nat_table,
- &nat_initial_table.repl);
+ struct ipt_replace *repl;
+
+ repl = ipt_alloc_initial_table(&nat_table);
+ if (repl == NULL)
+ return -ENOMEM;
+ net->ipv4.nat_table = ipt_register_table(net, &nat_table, repl);
+ kfree(repl);
if (IS_ERR(net->ipv4.nat_table))
return PTR_ERR(net->ipv4.nat_table);
return 0;
@@ -195,7 +170,7 @@ static int __net_init nf_nat_rule_net_init(struct net *net)
static void __net_exit nf_nat_rule_net_exit(struct net *net)
{
- ipt_unregister_table(net->ipv4.nat_table);
+ ipt_unregister_table(net, net->ipv4.nat_table);
}
static struct pernet_operations nf_nat_rule_net_ops = {
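
The static nat_initial_table above gives way to a template built at registration time by ipt_alloc_initial_table(), handed to ipt_register_table() and freed again. A small user-space sketch of that allocate/register/free ownership pattern (all names here are invented for the example):

/*
 * Ownership sketch of the per-net table setup above: the initial table is
 * built on demand, passed to the register function (which keeps its own
 * copy), and the temporary template is freed immediately. Only the
 * alloc/register/free pattern is the point.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct repl { char name[16]; unsigned int num_entries; };

static struct repl *alloc_initial_table(const char *name)
{
	struct repl *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	snprintf(r->name, sizeof(r->name), "%s", name);
	r->num_entries = 4;	/* PRE_ROUTING, POST_ROUTING, LOCAL_OUT, error */
	return r;
}

static int register_table(const struct repl *tmpl)
{
	/* a real implementation would deep-copy tmpl into per-net state */
	printf("registered %s with %u entries\n", tmpl->name, tmpl->num_entries);
	return 0;
}

int main(void)
{
	struct repl *tmpl = alloc_initial_table("nat");

	if (!tmpl)
		return 1;
	register_table(tmpl);
	free(tmpl);		/* template is not needed once registered */
	return 0;
}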
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 07d61a57613..11b538deaae 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -1,4 +1,4 @@
-/* SIP extension for UDP NAT alteration.
+/* SIP extension for NAT alteration.
*
* (C) 2005 by Christian Hentschel <chentschel@arnet.com.ar>
* based on RR's ip_nat_ftp.c and other modules.
@@ -15,6 +15,7 @@
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/udp.h>
+#include <linux/tcp.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
@@ -29,25 +30,42 @@ MODULE_DESCRIPTION("SIP NAT helper");
MODULE_ALIAS("ip_nat_sip");
-static unsigned int mangle_packet(struct sk_buff *skb,
+static unsigned int mangle_packet(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int matchoff, unsigned int matchlen,
const char *buffer, unsigned int buflen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
-
- if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, matchoff, matchlen,
- buffer, buflen))
- return 0;
+ struct tcphdr *th;
+ unsigned int baseoff;
+
+ if (nf_ct_protonum(ct) == IPPROTO_TCP) {
+ th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
+ baseoff = ip_hdrlen(skb) + th->doff * 4;
+ matchoff += dataoff - baseoff;
+
+ if (!__nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
+ matchoff, matchlen,
+ buffer, buflen, false))
+ return 0;
+ } else {
+ baseoff = ip_hdrlen(skb) + sizeof(struct udphdr);
+ matchoff += dataoff - baseoff;
+
+ if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo,
+ matchoff, matchlen,
+ buffer, buflen))
+ return 0;
+ }
/* Reload data pointer and adjust datalen value */
- *dptr = skb->data + ip_hdrlen(skb) + sizeof(struct udphdr);
+ *dptr = skb->data + dataoff;
*datalen += buflen - matchlen;
return 1;
}
-static int map_addr(struct sk_buff *skb,
+static int map_addr(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int matchoff, unsigned int matchlen,
union nf_inet_addr *addr, __be16 port)
@@ -76,11 +94,11 @@ static int map_addr(struct sk_buff *skb,
buflen = sprintf(buffer, "%pI4:%u", &newaddr, ntohs(newport));
- return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
+ return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
buffer, buflen);
}
-static int map_sip_addr(struct sk_buff *skb,
+static int map_sip_addr(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
enum sip_header_types type)
{
@@ -93,16 +111,18 @@ static int map_sip_addr(struct sk_buff *skb,
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, type, NULL,
&matchoff, &matchlen, &addr, &port) <= 0)
return 1;
- return map_addr(skb, dptr, datalen, matchoff, matchlen, &addr, port);
+ return map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
+ &addr, port);
}
-static unsigned int ip_nat_sip(struct sk_buff *skb,
+static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- unsigned int dataoff, matchoff, matchlen;
+ unsigned int coff, matchoff, matchlen;
+ enum sip_header_types hdr;
union nf_inet_addr addr;
__be16 port;
int request, in_header;
@@ -112,16 +132,21 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
if (ct_sip_parse_request(ct, *dptr, *datalen,
&matchoff, &matchlen,
&addr, &port) > 0 &&
- !map_addr(skb, dptr, datalen, matchoff, matchlen,
+ !map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
&addr, port))
return NF_DROP;
request = 1;
} else
request = 0;
+ if (nf_ct_protonum(ct) == IPPROTO_TCP)
+ hdr = SIP_HDR_VIA_TCP;
+ else
+ hdr = SIP_HDR_VIA_UDP;
+
/* Translate topmost Via header and parameters */
if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
- SIP_HDR_VIA, NULL, &matchoff, &matchlen,
+ hdr, NULL, &matchoff, &matchlen,
&addr, &port) > 0) {
unsigned int matchend, poff, plen, buflen, n;
char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
@@ -138,7 +163,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
goto next;
}
- if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
+ if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
&addr, port))
return NF_DROP;
@@ -153,8 +178,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) {
buflen = sprintf(buffer, "%pI4",
&ct->tuplehash[!dir].tuple.dst.u3.ip);
- if (!mangle_packet(skb, dptr, datalen, poff, plen,
- buffer, buflen))
+ if (!mangle_packet(skb, dataoff, dptr, datalen,
+ poff, plen, buffer, buflen))
return NF_DROP;
}
@@ -167,8 +192,8 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) {
buflen = sprintf(buffer, "%pI4",
&ct->tuplehash[!dir].tuple.src.u3.ip);
- if (!mangle_packet(skb, dptr, datalen, poff, plen,
- buffer, buflen))
+ if (!mangle_packet(skb, dataoff, dptr, datalen,
+ poff, plen, buffer, buflen))
return NF_DROP;
}
@@ -181,31 +206,45 @@ static unsigned int ip_nat_sip(struct sk_buff *skb,
htons(n) != ct->tuplehash[!dir].tuple.src.u.udp.port) {
__be16 p = ct->tuplehash[!dir].tuple.src.u.udp.port;
buflen = sprintf(buffer, "%u", ntohs(p));
- if (!mangle_packet(skb, dptr, datalen, poff, plen,
- buffer, buflen))
+ if (!mangle_packet(skb, dataoff, dptr, datalen,
+ poff, plen, buffer, buflen))
return NF_DROP;
}
}
next:
/* Translate Contact headers */
- dataoff = 0;
+ coff = 0;
in_header = 0;
- while (ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
+ while (ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
SIP_HDR_CONTACT, &in_header,
&matchoff, &matchlen,
&addr, &port) > 0) {
- if (!map_addr(skb, dptr, datalen, matchoff, matchlen,
+ if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen,
&addr, port))
return NF_DROP;
}
- if (!map_sip_addr(skb, dptr, datalen, SIP_HDR_FROM) ||
- !map_sip_addr(skb, dptr, datalen, SIP_HDR_TO))
+ if (!map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_FROM) ||
+ !map_sip_addr(skb, dataoff, dptr, datalen, SIP_HDR_TO))
return NF_DROP;
+
return NF_ACCEPT;
}
+static void ip_nat_sip_seq_adjust(struct sk_buff *skb, s16 off)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ const struct tcphdr *th;
+
+ if (nf_ct_protonum(ct) != IPPROTO_TCP || off == 0)
+ return;
+
+ th = (struct tcphdr *)(skb->data + ip_hdrlen(skb));
+ nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+}
+
/* Handles expected signalling connections and media streams */
static void ip_nat_sip_expected(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
@@ -232,7 +271,7 @@ static void ip_nat_sip_expected(struct nf_conn *ct,
}
}
-static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
+static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
struct nf_conntrack_expect *exp,
unsigned int matchoff,
@@ -279,8 +318,8 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb,
if (exp->tuple.dst.u3.ip != exp->saved_ip ||
exp->tuple.dst.u.udp.port != exp->saved_proto.udp.port) {
buflen = sprintf(buffer, "%pI4:%u", &newip, port);
- if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
- buffer, buflen))
+ if (!mangle_packet(skb, dataoff, dptr, datalen,
+ matchoff, matchlen, buffer, buflen))
goto err;
}
return NF_ACCEPT;
@@ -290,7 +329,7 @@ err:
return NF_DROP;
}
-static int mangle_content_len(struct sk_buff *skb,
+static int mangle_content_len(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
@@ -312,12 +351,13 @@ static int mangle_content_len(struct sk_buff *skb,
return 0;
buflen = sprintf(buffer, "%u", c_len);
- return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
+ return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
buffer, buflen);
}
-static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
- unsigned int dataoff, unsigned int *datalen,
+static int mangle_sdp_packet(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
char *buffer, int buflen)
@@ -326,16 +366,16 @@ static int mangle_sdp_packet(struct sk_buff *skb, const char **dptr,
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
unsigned int matchlen, matchoff;
- if (ct_sip_get_sdp_header(ct, *dptr, dataoff, *datalen, type, term,
+ if (ct_sip_get_sdp_header(ct, *dptr, sdpoff, *datalen, type, term,
&matchoff, &matchlen) <= 0)
return -ENOENT;
- return mangle_packet(skb, dptr, datalen, matchoff, matchlen,
+ return mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
buffer, buflen) ? 0 : -EINVAL;
}
-static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
- unsigned int dataoff,
- unsigned int *datalen,
+static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr)
@@ -344,16 +384,15 @@ static unsigned int ip_nat_sdp_addr(struct sk_buff *skb, const char **dptr,
unsigned int buflen;
buflen = sprintf(buffer, "%pI4", &addr->ip);
- if (mangle_sdp_packet(skb, dptr, dataoff, datalen, type, term,
+ if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff, type, term,
buffer, buflen))
return 0;
- return mangle_content_len(skb, dptr, datalen);
+ return mangle_content_len(skb, dataoff, dptr, datalen);
}
-static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
- const char **dptr,
- unsigned int *datalen,
+static unsigned int ip_nat_sdp_port(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
unsigned int matchoff,
unsigned int matchlen,
u_int16_t port)
@@ -362,16 +401,16 @@ static unsigned int ip_nat_sdp_port(struct sk_buff *skb,
unsigned int buflen;
buflen = sprintf(buffer, "%u", port);
- if (!mangle_packet(skb, dptr, datalen, matchoff, matchlen,
+ if (!mangle_packet(skb, dataoff, dptr, datalen, matchoff, matchlen,
buffer, buflen))
return 0;
- return mangle_content_len(skb, dptr, datalen);
+ return mangle_content_len(skb, dataoff, dptr, datalen);
}
-static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
- unsigned int dataoff,
- unsigned int *datalen,
+static unsigned int ip_nat_sdp_session(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int sdpoff,
const union nf_inet_addr *addr)
{
char buffer[sizeof("nnn.nnn.nnn.nnn")];
@@ -379,12 +418,12 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
/* Mangle session description owner and contact addresses */
buflen = sprintf(buffer, "%pI4", &addr->ip);
- if (mangle_sdp_packet(skb, dptr, dataoff, datalen,
+ if (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
SDP_HDR_OWNER_IP4, SDP_HDR_MEDIA,
buffer, buflen))
return 0;
- switch (mangle_sdp_packet(skb, dptr, dataoff, datalen,
+ switch (mangle_sdp_packet(skb, dataoff, dptr, datalen, sdpoff,
SDP_HDR_CONNECTION_IP4, SDP_HDR_MEDIA,
buffer, buflen)) {
case 0:
@@ -401,14 +440,13 @@ static unsigned int ip_nat_sdp_session(struct sk_buff *skb, const char **dptr,
return 0;
}
- return mangle_content_len(skb, dptr, datalen);
+ return mangle_content_len(skb, dataoff, dptr, datalen);
}
/* So, this packet has hit the connection tracking matching code.
Mangle it, and change the expectation to match the new version. */
-static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
- const char **dptr,
- unsigned int *datalen,
+static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp,
unsigned int mediaoff,
@@ -456,7 +494,8 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb,
/* Update media port. */
if (rtp_exp->tuple.dst.u.udp.port != rtp_exp->saved_proto.udp.port &&
- !ip_nat_sdp_port(skb, dptr, datalen, mediaoff, medialen, port))
+ !ip_nat_sdp_port(skb, dataoff, dptr, datalen,
+ mediaoff, medialen, port))
goto err2;
return NF_ACCEPT;
@@ -471,6 +510,7 @@ err1:
static void __exit nf_nat_sip_fini(void)
{
rcu_assign_pointer(nf_nat_sip_hook, NULL);
+ rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
@@ -482,12 +522,14 @@ static void __exit nf_nat_sip_fini(void)
static int __init nf_nat_sip_init(void)
{
BUG_ON(nf_nat_sip_hook != NULL);
+ BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
BUG_ON(nf_nat_sip_expect_hook != NULL);
BUG_ON(nf_nat_sdp_addr_hook != NULL);
BUG_ON(nf_nat_sdp_port_hook != NULL);
BUG_ON(nf_nat_sdp_session_hook != NULL);
BUG_ON(nf_nat_sdp_media_hook != NULL);
rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
+ rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
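
mangle_packet() now receives dataoff, the offset of the SIP message within the packet, and rebases the parser's match offsets onto the start of the TCP or UDP payload before mangling. The offset arithmetic in isolation, with illustrative numbers only:

/*
 * Offset arithmetic used by the reworked mangle_packet() above. The SIP
 * parser reports match offsets relative to the start of the SIP message
 * (dataoff); the TCP/UDP manglers want offsets relative to the start of
 * the transport payload (baseoff). The figures below are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ip_hdrlen = 20;
	unsigned int tcp_hdrlen = 32;		/* doff * 4, with options */
	unsigned int baseoff = ip_hdrlen + tcp_hdrlen;
	unsigned int dataoff = baseoff + 4;	/* SIP message starts 4 bytes
						 * into the TCP payload, e.g.
						 * after a CRLF keep-alive */
	unsigned int matchoff = 11;		/* offset within the SIP message */

	/* offset the mangler actually uses */
	printf("mangle at %u bytes into the TCP payload\n",
	       matchoff + dataoff - baseoff);
	return 0;
}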
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d9521f6f9ed..0b9c7ce3d6c 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1038,7 +1038,7 @@ static int snmp_parse_mangle(unsigned char *msg,
unsigned int cls, con, tag, vers, pdutype;
struct asn1_ctx ctx;
struct asn1_octstr comm;
- struct snmp_object **obj;
+ struct snmp_object *obj;
if (debug > 1)
hex_dump(msg, len);
@@ -1148,43 +1148,34 @@ static int snmp_parse_mangle(unsigned char *msg,
if (cls != ASN1_UNI || con != ASN1_CON || tag != ASN1_SEQ)
return 0;
- obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
- if (obj == NULL) {
- if (net_ratelimit())
- printk(KERN_WARNING "OOM in bsalg(%d)\n", __LINE__);
- return 0;
- }
-
while (!asn1_eoc_decode(&ctx, eoc)) {
unsigned int i;
- if (!snmp_object_decode(&ctx, obj)) {
- if (*obj) {
- kfree((*obj)->id);
- kfree(*obj);
+ if (!snmp_object_decode(&ctx, &obj)) {
+ if (obj) {
+ kfree(obj->id);
+ kfree(obj);
}
- kfree(obj);
return 0;
}
if (debug > 1) {
printk(KERN_DEBUG "bsalg: object: ");
- for (i = 0; i < (*obj)->id_len; i++) {
+ for (i = 0; i < obj->id_len; i++) {
if (i > 0)
printk(".");
- printk("%lu", (*obj)->id[i]);
+ printk("%lu", obj->id[i]);
}
- printk(": type=%u\n", (*obj)->type);
+ printk(": type=%u\n", obj->type);
}
- if ((*obj)->type == SNMP_IPADDR)
+ if (obj->type == SNMP_IPADDR)
mangle_address(ctx.begin, ctx.pointer - 4 , map, check);
- kfree((*obj)->id);
- kfree(*obj);
+ kfree(obj->id);
+ kfree(obj);
}
- kfree(obj);
if (!asn1_eoc_decode(&ctx, eoc))
return 0;
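
The snmp_parse_mangle() change above drops the kmalloc'd pointer holder: the decoder only needs a place to store a pointer, so a stack variable does, and the loop frees exactly what each decode allocated. A simplified user-space model of that ownership (fake decoder, no ASN.1):

/*
 * Memory-management shape of the snmp_parse_mangle() change: a stack
 * pointer replaces the heap-allocated pointer-to-pointer, and each loop
 * iteration releases what its decode allocated.
 */
#include <stdlib.h>
#include <stdio.h>

struct object { unsigned long *id; unsigned int id_len; };

static int decode_object(struct object **out)
{
	struct object *obj = malloc(sizeof(*obj));

	*out = NULL;
	if (!obj)
		return 0;
	obj->id_len = 3;
	obj->id = calloc(obj->id_len, sizeof(*obj->id));
	if (!obj->id) {
		free(obj);
		return 0;
	}
	*out = obj;
	return 1;
}

int main(void)
{
	struct object *obj;	/* stack slot, no extra allocation needed */

	for (int i = 0; i < 4; i++) {
		if (!decode_object(&obj))
			return 1;
		printf("object with %u sub-ids\n", obj->id_len);
		free(obj->id);	/* free exactly what the decoder allocated */
		free(obj);
	}
	return 0;
}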
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f25542c48b7..242ed230737 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -127,8 +127,8 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_SENTINEL
};
-static struct {
- char *name;
+static const struct {
+ const char *name;
int index;
} icmpmibmap[] = {
{ "DestUnreachs", ICMP_DEST_UNREACH },
@@ -280,7 +280,7 @@ static void icmpmsg_put(struct seq_file *seq)
count = 0;
for (i = 0; i < ICMPMSG_MIB_MAX; i++) {
- val = snmp_fold_field((void **) net->mib.icmpmsg_statistics, i);
+ val = snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics, i);
if (val) {
type[count] = i;
vals[count++] = val;
@@ -307,18 +307,18 @@ static void icmp_put(struct seq_file *seq)
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " Out%s", icmpmibmap[i].name);
seq_printf(seq, "\nIcmp: %lu %lu",
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INMSGS),
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_INERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **) net->mib.icmpmsg_statistics,
+ snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
icmpmibmap[i].index));
seq_printf(seq, " %lu %lu",
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
- snmp_fold_field((void **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTMSGS),
+ snmp_fold_field((void __percpu **) net->mib.icmp_statistics, ICMP_MIB_OUTERRORS));
for (i=0; icmpmibmap[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **) net->mib.icmpmsg_statistics,
+ snmp_fold_field((void __percpu **) net->mib.icmpmsg_statistics,
icmpmibmap[i].index | 0x100));
}
@@ -341,7 +341,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.ip_statistics,
+ snmp_fold_field((void __percpu **)net->mib.ip_statistics,
snmp4_ipstats_list[i].entry));
icmp_put(seq); /* RFC 2011 compatibility */
@@ -356,11 +356,11 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
/* MaxConn field is signed, RFC 2012 */
if (snmp4_tcp_list[i].entry == TCP_MIB_MAXCONN)
seq_printf(seq, " %ld",
- snmp_fold_field((void **)net->mib.tcp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
else
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.tcp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.tcp_statistics,
snmp4_tcp_list[i].entry));
}
@@ -371,7 +371,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdp:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.udp_statistics,
+ snmp_fold_field((void __percpu **)net->mib.udp_statistics,
snmp4_udp_list[i].entry));
/* the UDP and UDP-Lite MIBs are the same */
@@ -382,7 +382,7 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nUdpLite:");
for (i = 0; snmp4_udp_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.udplite_statistics,
+ snmp_fold_field((void __percpu **)net->mib.udplite_statistics,
snmp4_udp_list[i].entry));
seq_putc(seq, '\n');
@@ -419,7 +419,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nTcpExt:");
for (i = 0; snmp4_net_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.net_statistics,
+ snmp_fold_field((void __percpu **)net->mib.net_statistics,
snmp4_net_list[i].entry));
seq_puts(seq, "\nIpExt:");
@@ -429,7 +429,7 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
seq_puts(seq, "\nIpExt:");
for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
seq_printf(seq, " %lu",
- snmp_fold_field((void **)net->mib.ip_statistics,
+ snmp_fold_field((void __percpu **)net->mib.ip_statistics,
snmp4_ipextstats_list[i].entry));
seq_putc(seq, '\n');
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d62b05d3338..b2ba5581d2a 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -287,12 +287,12 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
if (!rt_hash_table[st->bucket].chain)
continue;
rcu_read_lock_bh();
- r = rcu_dereference(rt_hash_table[st->bucket].chain);
+ r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
while (r) {
if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
r->rt_genid == st->genid)
return r;
- r = rcu_dereference(r->u.dst.rt_next);
+ r = rcu_dereference_bh(r->u.dst.rt_next);
}
rcu_read_unlock_bh();
}
@@ -314,7 +314,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
rcu_read_lock_bh();
r = rt_hash_table[st->bucket].chain;
}
- return rcu_dereference(r);
+ return rcu_dereference_bh(r);
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
@@ -1990,8 +1990,13 @@ static int __mkroute_input(struct sk_buff *skb,
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
+ *
+ * The proxy ARP feature has been extended to allow ARP
+ * replies back out the same interface, to support
+ * Private VLAN switch technologies. See arp.c.
*/
- if (out_dev == in_dev) {
+ if (out_dev == in_dev &&
+ IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
@@ -2689,8 +2694,8 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
rcu_read_lock_bh();
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->u.dst.rt_next)) {
+ for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
+ rth = rcu_dereference_bh(rth->u.dst.rt_next)) {
if (rth->fl.fl4_dst == flp->fl4_dst &&
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
@@ -3008,8 +3013,8 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!rt_hash_table[h].chain)
continue;
rcu_read_lock_bh();
- for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
+ for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
+ rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) {
if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
continue;
if (rt_is_expired(rt))
@@ -3329,7 +3334,7 @@ static __net_initdata struct pernet_operations rt_secret_timer_ops = {
#ifdef CONFIG_NET_CLS_ROUTE
-struct ip_rt_acct *ip_rt_acct __read_mostly;
+struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */
static __initdata unsigned long rhash_entries;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef247..5c24db4a3c9 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
- ireq->wscale_ok, &rcv_wscale);
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(&rt->u.dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7e3712ce399..c1bc074f61b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -576,6 +576,20 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_thin_linear_timeouts",
+ .data = &sysctl_tcp_thin_linear_timeouts,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_thin_dupack",
+ .data = &sysctl_tcp_thin_dupack,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2..5901010fad5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
- struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
if (flags & MSG_OOB)
tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
int nonagle)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
if (tcp_send_head(sk)) {
- struct sk_buff *skb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (!(flags & MSG_MORE) || forced_push(tp))
- tcp_mark_push(tp, skb);
- tcp_mark_urg(tp, flags, skb);
+ tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+ tcp_mark_urg(tp, flags);
__tcp_push_pending_frames(sk, mss_now,
(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
}
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
{
struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
- if (sk->sk_route_caps & NETIF_F_SG) {
+ if (sg) {
if (sk_can_gso(sk))
tmp = 0;
else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int iovlen, flags;
int mss_now, size_goal;
- int err, copied;
+ int sg, err, copied;
long timeo;
lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
+ sg = sk->sk_route_caps & NETIF_F_SG;
+
while (--iovlen >= 0) {
int seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk, select_size(sk),
- sk->sk_allocation);
+ skb = sk_stream_alloc_skb(sk,
+ select_size(sk, sg),
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
@@ -997,9 +999,7 @@ new_segment:
/* We can extend the last page
* fragment. */
merge = 1;
- } else if (i == MAX_SKB_FRAGS ||
- (!i &&
- !(sk->sk_route_caps & NETIF_F_SG))) {
+ } else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
@@ -2229,6 +2229,20 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
}
break;
+ case TCP_THIN_LINEAR_TIMEOUTS:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_lto = val;
+ break;
+
+ case TCP_THIN_DUPACK:
+ if (val < 0 || val > 1)
+ err = -EINVAL;
+ else
+ tp->thin_dupack = val;
+ break;
+
case TCP_CORK:
/* When set indicates to always queue non-full frames.
* Later the user clears this option and we transmit
@@ -2788,10 +2802,10 @@ EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
{
int cpu;
for_each_possible_cpu(cpu) {
@@ -2808,7 +2822,7 @@ static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
void tcp_free_md5sig_pool(void)
{
- struct tcp_md5sig_pool **pool = NULL;
+ struct tcp_md5sig_pool * __percpu *pool = NULL;
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
@@ -2822,10 +2836,11 @@ void tcp_free_md5sig_pool(void)
EXPORT_SYMBOL(tcp_free_md5sig_pool);
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
+static struct tcp_md5sig_pool * __percpu *
+__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
- struct tcp_md5sig_pool **pool;
+ struct tcp_md5sig_pool * __percpu *pool;
pool = alloc_percpu(struct tcp_md5sig_pool *);
if (!pool)
@@ -2852,9 +2867,9 @@ out_free:
return NULL;
}
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
- struct tcp_md5sig_pool **pool;
+ struct tcp_md5sig_pool * __percpu *pool;
int alloc = 0;
retry:
@@ -2873,7 +2888,9 @@ retry:
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
- struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
+ struct tcp_md5sig_pool * __percpu *p;
+
+ p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) {
tcp_md5sig_users--;
@@ -2897,7 +2914,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
- struct tcp_md5sig_pool **p;
+ struct tcp_md5sig_pool * __percpu *p;
spin_lock_bh(&tcp_md5sig_pool_lock);
p = tcp_md5sig_pool;
if (p)
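
From user space, the two new socket options are ordinary integer booleans set with setsockopt(); the sysctls added in sysctl_net_ipv4.c (tcp_thin_linear_timeouts, tcp_thin_dupack) are the system-wide equivalents. A sketch, assuming the option constants are exported by the kernel headers of a tree carrying this series; the fallback values below are taken to match the series' uapi additions:

/*
 * User-space view of the two per-socket knobs added above.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

#ifndef TCP_THIN_LINEAR_TIMEOUTS
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* assumed to match this series */
#endif
#ifndef TCP_THIN_DUPACK
#define TCP_THIN_DUPACK 17		/* assumed to match this series */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* linear RTO backoff while the stream is thin */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_LINEAR_TIMEOUTS");
	/* fast retransmit on the first dupack while thin */
	if (setsockopt(fd, IPPROTO_TCP, TCP_THIN_DUPACK,
		       &one, sizeof(one)) < 0)
		perror("TCP_THIN_DUPACK");

	close(fd);
	return 0;
}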
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e02963249..788851ca8c5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,6 +89,8 @@ int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;
+int sysctl_tcp_thin_dupack __read_mostly;
+
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
@@ -2447,6 +2449,16 @@ static int tcp_time_to_recover(struct sock *sk)
return 1;
}
+ /* If a thin stream is detected, retransmit after the first
+ * received dupack. Employ this only if SACK is supported, in order
+ * to avoid a possible corner-case series of spurious retransmissions.
+ * Use it only if there is no unsent data.
+ */
+ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
+ tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
+ tcp_is_sack(tp) && !tcp_send_head(sk))
+ return 1;
+
return 0;
}
@@ -5783,11 +5795,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* tcp_ack considers this ACK as duplicate
* and does not calculate rtt.
- * Fix it at least with timestamps.
+ * Force it here.
*/
- if (tp->rx_opt.saw_tstamp &&
- tp->rx_opt.rcv_tsecr && !tp->srtt)
- tcp_ack_saw_tstamp(sk, 0);
+ tcp_ack_update_rtt(sk, 0, 0);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
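
The new branch in tcp_time_to_recover() retransmits after the first dupack when the stream is thin, SACK is in use, and nothing unsent could drive recovery anyway. A condensed predicate of that condition, assuming "thin" means only a handful of packets in flight (the series uses a small fixed threshold) and that dupack counting happens elsewhere:

/*
 * Sketch of the thin-stream early-recovery trigger; the in-flight
 * threshold is an assumption of this example, not the kernel constant.
 */
#include <stdbool.h>
#include <stdio.h>

#define THIN_STREAM_MAX_IN_FLIGHT 4	/* assumption for this sketch */

static bool thin_stream_time_to_recover(unsigned int packets_out,
					unsigned int dupacks,
					bool sack_enabled,
					bool unsent_data_queued,
					bool thin_dupack_enabled)
{
	if (!thin_dupack_enabled)
		return false;
	if (packets_out >= THIN_STREAM_MAX_IN_FLIGHT)
		return false;		/* not a thin stream */
	/* retransmit once at least one dupack is seen, but only with SACK
	 * and only when there is nothing unsent to trigger recovery anyway */
	return dupacks >= 1 && sack_enabled && !unsent_data_queued;
}

int main(void)
{
	printf("%d\n", thin_stream_time_to_recover(2, 1, true, false, true));
	return 0;
}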
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078..c3588b4fd97 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -742,9 +742,9 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
* This still operates on a request_sock only, not on a big
* socket.
*/
-static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
- struct request_sock *req,
- struct request_values *rvp)
+static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+ struct request_sock *req,
+ struct request_values *rvp)
{
const struct inet_request_sock *ireq = inet_rsk(req);
int err = -1;
@@ -775,10 +775,11 @@ static int __tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
return err;
}
-static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
- return __tcp_v4_send_synack(sk, NULL, req, rvp);
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ return tcp_v4_send_synack(sk, NULL, req, rvp);
}
/*
@@ -1192,10 +1193,11 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
- .rtx_syn_ack = tcp_v4_send_synack,
+ .rtx_syn_ack = tcp_v4_rtx_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
+ .syn_ack_timeout = tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
@@ -1373,8 +1375,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
}
tcp_rsk(req)->snt_isn = isn;
- if (__tcp_v4_send_synack(sk, dst, req,
- (struct request_values *)&tmp_ext) ||
+ if (tcp_v4_send_synack(sk, dst, req,
+ (struct request_values *)&tmp_ext) ||
want_cookie)
goto drop_and_free;
@@ -1649,6 +1651,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk)
goto no_tcp_socket;
+ if (iph->ttl < inet_sk(sk)->min_ttl)
+ goto discard_and_relse;
+
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
@@ -2425,12 +2430,12 @@ static struct tcp_seq_afinfo tcp4_seq_afinfo = {
},
};
-static int tcp4_proc_init_net(struct net *net)
+static int __net_init tcp4_proc_init_net(struct net *net)
{
return tcp_proc_register(net, &tcp4_seq_afinfo);
}
-static void tcp4_proc_exit_net(struct net *net)
+static void __net_exit tcp4_proc_exit_net(struct net *net)
{
tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640..4a1605d3f90 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
*/
void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale)
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd)
{
unsigned int space = (__space < 0 ? 0 : __space);
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
init_cwnd = 2;
else if (mss > 1460)
init_cwnd = 3;
- if (*rcv_wnd > init_cwnd * mss)
+ /* When initializing, use the value from init_rcv_wnd
+ * rather than the default computed above.
+ */
+ if (init_rcv_wnd &&
+ (*rcv_wnd > init_rcv_wnd * mss))
+ *rcv_wnd = init_rcv_wnd * mss;
+ else if (*rcv_wnd > init_cwnd * mss)
*rcv_wnd = init_cwnd * mss;
}
@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle)
{
- struct sk_buff *skb = tcp_send_head(sk);
-
- if (!skb)
- return;
-
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and
* all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
&req->rcv_wnd,
&req->window_clamp,
ireq->wscale_ok,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
}
@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
&tp->rcv_wnd,
&tp->window_clamp,
sysctl_tcp_window_scaling,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
tp->rx_opt.rcv_wscale = rcv_wscale;
tp->rcv_ssthresh = tp->rcv_wnd;
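
tcp_select_initial_window() now takes an init_rcv_wnd argument fed from the per-route RTAX_INITRWND metric (in segments); when the metric is set it caps the initial receive window instead of the historical 2/3/4-segment default. The clamping on its own, with illustrative values:

/*
 * Clamping logic added to tcp_select_initial_window(), pulled out into
 * plain C. init_rcv_wnd is the route-supplied limit in segments; zero
 * means "not set". Values in main() are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_initial_rcv_wnd(uint32_t rcv_wnd, uint32_t mss,
				       uint32_t init_rcv_wnd)
{
	uint32_t init_cwnd = 4;			/* default for small mss */

	if (mss > 1460 * 3)
		init_cwnd = 2;
	else if (mss > 1460)
		init_cwnd = 3;

	if (init_rcv_wnd && rcv_wnd > init_rcv_wnd * mss)
		rcv_wnd = init_rcv_wnd * mss;	/* route-supplied cap */
	else if (rcv_wnd > init_cwnd * mss)
		rcv_wnd = init_cwnd * mss;	/* historical default cap */
	return rcv_wnd;
}

int main(void)
{
	/* 64 KB advertised space, 1460-byte mss, route asks for 10 segments */
	printf("%u\n", clamp_initial_rcv_wnd(65535, 1460, 10));
	return 0;
}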
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8816a20c259..a17629b8912 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -29,6 +29,7 @@ int sysctl_tcp_keepalive_intvl __read_mostly = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
+int sysctl_tcp_thin_linear_timeouts __read_mostly;
static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
@@ -415,7 +416,25 @@ void tcp_retransmit_timer(struct sock *sk)
icsk->icsk_retransmits++;
out_reset_timer:
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ /* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
+ * used to reset the timer, set it to 0. Recalculate 'icsk_rto', as it
+ * might have been increased if the stream oscillated between thin and
+ * thick; the old value may already be too high compared to the value
+ * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
+ * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
+ * exponential backoff behaviour, to avoid continuing to hammer
+ * linear-timeout retransmissions into a black hole.
+ */
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ (tp->thin_lto || sysctl_tcp_thin_linear_timeouts) &&
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+ icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ }
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
__sk_dst_reset(sk);
@@ -474,6 +493,12 @@ static void tcp_synack_timer(struct sock *sk)
TCP_TIMEOUT_INIT, TCP_RTO_MAX);
}
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
+{
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
+}
+EXPORT_SYMBOL(tcp_syn_ack_timeout);
+
void tcp_set_keepalive(struct sock *sk, int val)
{
if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
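
The retransmit timer above keeps the RTO flat for thin established streams over the first few retransmissions and only then falls back to exponential backoff. The resulting RTO sequence, reduced to arithmetic (the retry limit and base RTO below are assumptions of this sketch):

/*
 * RTO evolution under the thin-stream branch of tcp_retransmit_timer(),
 * reduced to plain numbers. base_rto stands in for the value tcp_set_rto()
 * would recompute from the smoothed RTT; the retry limit is an assumption.
 */
#include <stdio.h>

#define THIN_LINEAR_RETRIES	6	/* assumption for this sketch */
#define RTO_MAX_MS		120000

static unsigned int next_rto(unsigned int rto, unsigned int base_rto,
			     unsigned int retransmits, int thin)
{
	if (thin && retransmits <= THIN_LINEAR_RETRIES)
		return base_rto < RTO_MAX_MS ? base_rto : RTO_MAX_MS;
	return rto * 2 < RTO_MAX_MS ? rto * 2 : RTO_MAX_MS;	/* exponential */
}

int main(void)
{
	unsigned int rto = 300;		/* ms */

	for (unsigned int r = 1; r <= 8; r++) {
		rto = next_rto(rto, 300, r, 1);
		printf("retransmit %u: rto=%u ms\n", r, rto);
	}
	return 0;
}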
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f0126fdd7e0..608a5446d05 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1117,7 +1117,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen, copied;
+ unsigned int ulen;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -1138,10 +1138,9 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- copied = len;
- if (copied > ulen)
- copied = ulen;
- else if (copied < ulen)
+ if (len > ulen)
+ len = ulen;
+ else if (len < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
@@ -1150,14 +1149,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied);
+ msg->msg_iov, len);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
@@ -1186,7 +1185,7 @@ try_again:
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = copied;
+ err = len;
if (flags & MSG_TRUNC)
err = ulen;
@@ -2027,12 +2026,12 @@ static struct udp_seq_afinfo udp4_seq_afinfo = {
},
};
-static int udp4_proc_init_net(struct net *net)
+static int __net_init udp4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udp4_seq_afinfo);
}
-static void udp4_proc_exit_net(struct net *net)
+static void __net_exit udp4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udp4_seq_afinfo);
}
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 66f79513f4a..6610bf76369 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -81,12 +81,12 @@ static struct udp_seq_afinfo udplite4_seq_afinfo = {
},
};
-static int udplite4_proc_init_net(struct net *net)
+static int __net_init udplite4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udplite4_seq_afinfo);
}
-static void udplite4_proc_exit_net(struct net *net)
+static void __net_exit udplite4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udplite4_seq_afinfo);
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64a..88fd8c5877e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -278,31 +278,31 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
static int snmp6_alloc_dev(struct inet6_dev *idev)
{
- if (snmp_mib_init((void **)idev->stats.ipv6,
+ if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
sizeof(struct ipstats_mib)) < 0)
goto err_ip;
- if (snmp_mib_init((void **)idev->stats.icmpv6,
+ if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
sizeof(struct icmpv6_mib)) < 0)
goto err_icmp;
- if (snmp_mib_init((void **)idev->stats.icmpv6msg,
+ if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
sizeof(struct icmpv6msg_mib)) < 0)
goto err_icmpmsg;
return 0;
err_icmpmsg:
- snmp_mib_free((void **)idev->stats.icmpv6);
+ snmp_mib_free((void __percpu **)idev->stats.icmpv6);
err_icmp:
- snmp_mib_free((void **)idev->stats.ipv6);
+ snmp_mib_free((void __percpu **)idev->stats.ipv6);
err_ip:
return -ENOMEM;
}
static void snmp6_free_dev(struct inet6_dev *idev)
{
- snmp_mib_free((void **)idev->stats.icmpv6msg);
- snmp_mib_free((void **)idev->stats.icmpv6);
- snmp_mib_free((void **)idev->stats.ipv6);
+ snmp_mib_free((void __percpu **)idev->stats.icmpv6msg);
+ snmp_mib_free((void __percpu **)idev->stats.icmpv6);
+ snmp_mib_free((void __percpu **)idev->stats.ipv6);
}
/* Nobody refers to this device, we may destroy it. */
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
if (p == &net->ipv6.devconf_dflt->forwarding)
return 0;
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *p = old;
return restart_syscall();
+ }
if (p == &net->ipv6.devconf_all->forwarding) {
__s32 newf = net->ipv6.devconf_all->forwarding;
@@ -989,8 +992,7 @@ struct ipv6_saddr_dst {
static inline int ipv6_saddr_preferred(int type)
{
- if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|
- IPV6_ADDR_LOOPBACK|IPV6_ADDR_RESERVED))
+ if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
return 1;
return 0;
}
@@ -2646,7 +2648,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&addrconf_hash_lock);
while ((ifa = *bifa) != NULL) {
- if (ifa->idev == idev) {
+ if (ifa->idev == idev &&
+ (how || !(ifa->flags&IFA_F_PERMANENT))) {
*bifa = ifa->lst_next;
ifa->lst_next = NULL;
addrconf_del_timer(ifa);
@@ -2686,18 +2689,30 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- while ((ifa = idev->addr_list) != NULL) {
- idev->addr_list = ifa->if_next;
- ifa->if_next = NULL;
- ifa->dead = 1;
- addrconf_del_timer(ifa);
- write_unlock_bh(&idev->lock);
+ bifa = &idev->addr_list;
+ while ((ifa = *bifa) != NULL) {
+ if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
+ /* Retain permanent address on admin down */
+ bifa = &ifa->if_next;
+
+ /* Restart DAD if needed when link comes back up */
+ if (!((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+ idev->cnf.accept_dad <= 0 ||
+ (ifa->flags & IFA_F_NODAD)))
+ ifa->flags |= IFA_F_TENTATIVE;
+ } else {
+ *bifa = ifa->if_next;
+ ifa->if_next = NULL;
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
- in6_ifa_put(ifa);
+ ifa->dead = 1;
+ write_unlock_bh(&idev->lock);
- write_lock_bh(&idev->lock);
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ in6_ifa_put(ifa);
+
+ write_lock_bh(&idev->lock);
+ }
}
write_unlock_bh(&idev->lock);
@@ -2789,14 +2804,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
read_lock_bh(&idev->lock);
if (ifp->dead)
goto out;
- spin_lock_bh(&ifp->lock);
+ spin_lock(&ifp->lock);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2804,7 +2819,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
}
if (!(idev->if_flags & IF_READY)) {
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
/*
* If the device is not ready:
@@ -2824,7 +2839,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
ip6_ins_rt(ifp->rt);
addrconf_dad_kick(ifp);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
out:
read_unlock_bh(&idev->lock);
}
@@ -2840,14 +2855,15 @@ static void addrconf_dad_timer(unsigned long data)
read_unlock_bh(&idev->lock);
goto out;
}
- spin_lock_bh(&ifp->lock);
+
+ spin_lock(&ifp->lock);
if (ifp->probes == 0) {
/*
* DAD was successful
*/
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2857,7 +2873,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->probes--;
addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
/* send a neighbour solicitation for our addr */
@@ -2905,12 +2921,12 @@ static void addrconf_dad_run(struct inet6_dev *idev) {
read_lock_bh(&idev->lock);
for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
- spin_lock_bh(&ifp->lock);
+ spin_lock(&ifp->lock);
if (!(ifp->flags & IFA_F_TENTATIVE)) {
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
continue;
}
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
addrconf_dad_kick(ifp);
}
read_unlock_bh(&idev->lock);
@@ -3027,14 +3043,14 @@ static const struct file_operations if6_fops = {
.release = seq_release_net,
};
-static int if6_proc_net_init(struct net *net)
+static int __net_init if6_proc_net_init(struct net *net)
{
if (!proc_net_fops_create(net, "if_inet6", S_IRUGO, &if6_fops))
return -ENOMEM;
return 0;
}
-static void if6_proc_net_exit(struct net *net)
+static void __net_exit if6_proc_net_exit(struct net *net)
{
proc_net_remove(net, "if_inet6");
}
@@ -3752,8 +3768,8 @@ static inline size_t inet6_if_nlmsg_size(void)
);
}
-static inline void __snmp6_fill_stats(u64 *stats, void **mib, int items,
- int bytes)
+static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
+ int items, int bytes)
{
int i;
int pad = bytes - sizeof(u64) * items;
@@ -3772,10 +3788,10 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
{
switch(attrtype) {
case IFLA_INET6_STATS:
- __snmp6_fill_stats(stats, (void **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
+ __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
break;
case IFLA_INET6_ICMP6STATS:
- __snmp6_fill_stats(stats, (void **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
+ __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
break;
}
}
@@ -4028,12 +4044,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_fixup_forwarding(ctl, valp, val);
+ if (ret)
+ *ppos = pos;
return ret;
}
@@ -4075,8 +4094,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
if (p == &net->ipv6.devconf_dflt->disable_ipv6)
return 0;
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *p = old;
return restart_syscall();
+ }
if (p == &net->ipv6.devconf_all->disable_ipv6) {
__s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4117,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_disable_ipv6(ctl, valp, val);
+ if (ret)
+ *ppos = pos;
return ret;
}
@@ -4402,8 +4427,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
static void addrconf_sysctl_register(struct inet6_dev *idev)
{
- neigh_sysctl_register(idev->dev, idev->nd_parms, NET_IPV6,
- NET_IPV6_NEIGH, "ipv6",
+ neigh_sysctl_register(idev->dev, idev->nd_parms, "ipv6",
&ndisc_ifinfo_sysctl_change);
__addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
idev, &idev->cnf);
@@ -4418,7 +4442,7 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
#endif
-static int addrconf_init_net(struct net *net)
+static int __net_init addrconf_init_net(struct net *net)
{
int err;
struct ipv6_devconf *all, *dflt;
@@ -4467,7 +4491,7 @@ err_alloc_all:
return err;
}
-static void addrconf_exit_net(struct net *net)
+static void __net_exit addrconf_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
__addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
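
addrconf_ifdown() above now distinguishes an administrative down (how == 0) from device removal: permanent addresses stay on the interface, flagged tentative so DAD reruns when the link returns, while everything else is unlinked and released. A stand-alone model of that list walk (invented flag values, plain malloc/free):

/*
 * Model of the addrconf_ifdown() walk: keep permanent addresses on an
 * admin down and mark them for a fresh DAD pass, drop the rest.
 */
#include <stdlib.h>
#include <stdio.h>

#define F_PERMANENT 0x1
#define F_TENTATIVE 0x2

struct addr { unsigned int flags; struct addr *next; };

static void ifdown(struct addr **head, int how)
{
	struct addr **pp = head;
	struct addr *a;

	while ((a = *pp) != NULL) {
		if (how == 0 && (a->flags & F_PERMANENT)) {
			a->flags |= F_TENTATIVE;	/* redo DAD on link-up */
			pp = &a->next;			/* keep it linked */
		} else {
			*pp = a->next;			/* unlink and release */
			free(a);
		}
	}
}

int main(void)
{
	struct addr *head = NULL;
	unsigned int flags[] = { F_PERMANENT, 0, F_PERMANENT };
	int kept = 0;

	for (int i = 2; i >= 0; i--) {
		struct addr *a = calloc(1, sizeof(*a));

		if (!a)
			return 1;
		a->flags = flags[i];
		a->next = head;
		head = a;
	}

	ifdown(&head, 0);	/* admin down: permanent addresses survive */

	for (struct addr *a = head; a; a = a->next)
		kept++;
	printf("%d addresses kept\n", kept);	/* prints 2 */
	return 0;
}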
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 3f82e9542ed..6b03826552e 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -72,7 +72,7 @@ int __ipv6_addr_type(const struct in6_addr *addr)
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.3 */
}
- return (IPV6_ADDR_RESERVED |
+ return (IPV6_ADDR_UNICAST |
IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_GLOBAL)); /* addr-select 3.4 */
}
EXPORT_SYMBOL(__ipv6_addr_type);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 12e69d364dd..37d14e735c2 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -971,41 +971,41 @@ static void ipv6_packet_cleanup(void)
static int __net_init ipv6_init_mibs(struct net *net)
{
- if (snmp_mib_init((void **)net->mib.udp_stats_in6,
+ if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
sizeof (struct udp_mib)) < 0)
return -ENOMEM;
- if (snmp_mib_init((void **)net->mib.udplite_stats_in6,
+ if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
sizeof (struct udp_mib)) < 0)
goto err_udplite_mib;
- if (snmp_mib_init((void **)net->mib.ipv6_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
sizeof(struct ipstats_mib)) < 0)
goto err_ip_mib;
- if (snmp_mib_init((void **)net->mib.icmpv6_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
sizeof(struct icmpv6_mib)) < 0)
goto err_icmp_mib;
- if (snmp_mib_init((void **)net->mib.icmpv6msg_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
sizeof(struct icmpv6msg_mib)) < 0)
goto err_icmpmsg_mib;
return 0;
err_icmpmsg_mib:
- snmp_mib_free((void **)net->mib.icmpv6_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
err_icmp_mib:
- snmp_mib_free((void **)net->mib.ipv6_statistics);
+ snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
err_ip_mib:
- snmp_mib_free((void **)net->mib.udplite_stats_in6);
+ snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
err_udplite_mib:
- snmp_mib_free((void **)net->mib.udp_stats_in6);
+ snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
return -ENOMEM;
}
-static void __net_exit ipv6_cleanup_mibs(struct net *net)
+static void ipv6_cleanup_mibs(struct net *net)
{
- snmp_mib_free((void **)net->mib.udp_stats_in6);
- snmp_mib_free((void **)net->mib.udplite_stats_in6);
- snmp_mib_free((void **)net->mib.ipv6_statistics);
- snmp_mib_free((void **)net->mib.icmpv6_statistics);
- snmp_mib_free((void **)net->mib.icmpv6msg_statistics);
+ snmp_mib_free((void __percpu **)net->mib.udp_stats_in6);
+ snmp_mib_free((void __percpu **)net->mib.udplite_stats_in6);
+ snmp_mib_free((void __percpu **)net->mib.ipv6_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmpv6_statistics);
+ snmp_mib_free((void __percpu **)net->mib.icmpv6msg_statistics);
}
static int __net_init inet6_net_init(struct net *net)
@@ -1042,7 +1042,7 @@ out:
#endif
}
-static void inet6_net_exit(struct net *net)
+static void __net_exit inet6_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
udp6_proc_exit(net);
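
The (void __percpu **) casts sprinkled through the MIB init/free calls above change nothing at run time: __percpu is a sparse address-space annotation that evaluates to nothing for the real compiler and exists only so that static checking (make C=1) can flag code that dereferences a per-cpu pointer directly. A compiler-only sketch of how such an annotation can vanish outside the checker; the macro and option names here are hypothetical, not the kernel's actual definitions:

    #include <stdio.h>

    /* Hypothetical: a checker build would see an address-space attribute,
     * a normal build sees nothing, so annotated code compiles unchanged. */
    #ifdef MY_CHECKER
    # define my_percpu __attribute__((noderef, address_space(3)))
    #else
    # define my_percpu
    #endif

    static int my_percpu *stats;    /* "per-cpu" pointer, annotation only */

    int main(void)
    {
        static int one_cpu_slot;

        stats = &one_cpu_slot;      /* fine for the real compiler */
        printf("%d\n", *stats);     /* a checker could warn about this */
        return 0;
    }
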
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index c2f300c314b..5ac89025f9d 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -614,7 +614,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
type != ICMPV6_PKT_TOOBIG)
return;
- x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
if (!x)
return;
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index f1c74c8ef9d..c4f6ca32fa7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -538,7 +538,7 @@ static const struct file_operations ac6_seq_fops = {
.release = seq_release_net,
};
-int ac6_proc_init(struct net *net)
+int __net_init ac6_proc_init(struct net *net)
{
if (!proc_net_fops_create(net, "anycast6", S_IRUGO, &ac6_seq_fops))
return -ENOMEM;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 668a46b655e..ee9b93bdd6a 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -365,7 +365,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
type != ICMPV6_PKT_TOOBIG)
return;
- x = xfrm_state_lookup(net, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4bac362b133..074f2c084f9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -481,7 +481,7 @@ looped_back:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_INHDRERRORS);
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ 0);
kfree_skb(skb);
return -1;
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b7aa7c64cc4..551882b9dfd 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -262,7 +262,7 @@ static struct fib_rules_ops fib6_rules_ops_template = {
.fro_net = &init_net,
};
-static int fib6_rules_net_init(struct net *net)
+static int __net_init fib6_rules_net_init(struct net *net)
{
struct fib_rules_ops *ops;
int err = -ENOMEM;
@@ -291,7 +291,7 @@ out_fib6_rules_ops:
goto out;
}
-static void fib6_rules_net_exit(struct net *net)
+static void __net_exit fib6_rules_net_exit(struct net *net)
{
fib_rules_unregister(net->ipv6.fib6_rules_ops);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 4ae661bc367..eb9abe24bdf 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -67,11 +67,6 @@
#include <asm/uaccess.h>
#include <asm/system.h>
-DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6_statistics);
-DEFINE_SNMP_STAT(struct icmpv6msg_mib, icmpv6msg_statistics) __read_mostly;
-EXPORT_SYMBOL(icmpv6msg_statistics);
-
/*
* The ICMP socket(s). This is the most convenient way to flow control
* our ICMP output as well as maintain a clean interface throughout
@@ -119,7 +114,7 @@ static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
*/
void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
kfree_skb(skb);
}
@@ -305,8 +300,7 @@ static inline void mip6_addr_swap(struct sk_buff *skb) {}
/*
* Send an ICMP message in response to a packet in error
*/
-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
- struct net_device *dev)
+void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
{
struct net *net = dev_net(skb->dev);
struct inet6_dev *idev = NULL;
@@ -951,7 +945,7 @@ ctl_table ipv6_icmp_table_template[] = {
{ },
};
-struct ctl_table *ipv6_icmp_sysctl_init(struct net *net)
+struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
{
struct ctl_table *table;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0e93ca56eb6..2f9847924fa 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -93,29 +93,20 @@ static __u32 rt_sernum;
static void fib6_gc_timer_cb(unsigned long arg);
-static struct fib6_walker_t fib6_walker_list = {
- .prev = &fib6_walker_list,
- .next = &fib6_walker_list,
-};
-
-#define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
+static LIST_HEAD(fib6_walkers);
+#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
static inline void fib6_walker_link(struct fib6_walker_t *w)
{
write_lock_bh(&fib6_walker_lock);
- w->next = fib6_walker_list.next;
- w->prev = &fib6_walker_list;
- w->next->prev = w;
- w->prev->next = w;
+ list_add(&w->lh, &fib6_walkers);
write_unlock_bh(&fib6_walker_lock);
}
static inline void fib6_walker_unlink(struct fib6_walker_t *w)
{
write_lock_bh(&fib6_walker_lock);
- w->next->prev = w->prev;
- w->prev->next = w->next;
- w->prev = w->next = w;
+ list_del(&w->lh);
write_unlock_bh(&fib6_walker_lock);
}
static __inline__ u32 fib6_new_sernum(void)
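
The walker hunk above swaps hand-rolled prev/next pointer surgery for the generic list helpers: the walker embeds a list node (w->lh), and link/unlink collapse to list_add()/list_del() on a LIST_HEAD. A self-contained sketch of that intrusive-list pattern with simplified helpers (the kernel's <linux/list.h> versions add poisoning, RCU variants and more):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
    }

    static void list_del(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Simplified walker: the list node is embedded, just like w->lh above. */
    struct walker {
        struct list_head lh;
        int id;
    };

    static struct list_head walkers = LIST_HEAD_INIT(walkers);

    int main(void)
    {
        struct walker a = { .id = 1 }, b = { .id = 2 };
        struct list_head *pos;

        list_add(&a.lh, &walkers);          /* fib6_walker_link() equivalent */
        list_add(&b.lh, &walkers);
        list_del(&a.lh);                    /* fib6_walker_unlink() equivalent */

        /* Roughly what FOR_WALKERS()/list_for_each_entry() expands to. */
        for (pos = walkers.next; pos != &walkers; pos = pos->next)
            printf("walker %d\n", container_of(pos, struct walker, lh)->id);
        return 0;
    }
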
@@ -239,7 +230,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
return NULL;
}
-static void fib6_tables_init(struct net *net)
+static void __net_init fib6_tables_init(struct net *net)
{
fib6_link_table(net, net->ipv6.fib6_main_tbl);
fib6_link_table(net, net->ipv6.fib6_local_tbl);
@@ -262,7 +253,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl, flags);
}
-static void fib6_tables_init(struct net *net)
+static void __net_init fib6_tables_init(struct net *net)
{
fib6_link_table(net, net->ipv6.fib6_main_tbl);
}
@@ -319,12 +310,26 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
w->root = &table->tb6_root;
if (cb->args[4] == 0) {
+ w->count = 0;
+ w->skip = 0;
+
read_lock_bh(&table->tb6_lock);
res = fib6_walk(w);
read_unlock_bh(&table->tb6_lock);
- if (res > 0)
+ if (res > 0) {
cb->args[4] = 1;
+ cb->args[5] = w->root->fn_sernum;
+ }
} else {
+ if (cb->args[5] != w->root->fn_sernum) {
+ /* Begin at the root if the tree changed */
+ cb->args[5] = w->root->fn_sernum;
+ w->state = FWS_INIT;
+ w->node = w->root;
+ w->skip = w->count;
+ } else
+ w->skip = 0;
+
read_lock_bh(&table->tb6_lock);
res = fib6_walk_continue(w);
read_unlock_bh(&table->tb6_lock);
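
The fib6_dump_table()/fib6_walk_continue() changes make an interrupted route dump resumable: the walker counts what it has already emitted (w->count), skips that many entries when it is called again (w->skip), and cb->args[5] remembers the tree's serial number so that a tree modified between callbacks is re-walked from the root instead of resuming at a stale position. A small, self-contained sketch of that skip-plus-generation idea over a plain array, with hypothetical names and none of the FIB trie machinery:

    #include <stdio.h>

    struct table {                  /* toy table standing in for the FIB */
        int items[8];
        unsigned int nitems;
        unsigned int sernum;        /* bumped on every modification */
    };

    struct dump_state {
        unsigned int skip;          /* entries already emitted */
        unsigned int sernum;        /* generation we were dumping */
    };

    /* Emit up to 'budget' entries, resuming after st->skip unless the
     * table changed underneath us, in which case restart from the top. */
    static int dump_some(const struct table *t, struct dump_state *st,
                         unsigned int budget)
    {
        unsigned int i, emitted = 0;

        if (st->sernum != t->sernum) {      /* table changed: restart */
            st->sernum = t->sernum;
            st->skip = 0;
        }
        for (i = st->skip; i < t->nitems && emitted < budget; i++, emitted++)
            printf("item %d\n", t->items[i]);
        st->skip = i;
        return i < t->nitems;               /* non-zero: more to dump */
    }

    int main(void)
    {
        struct table t = { { 1, 2, 3, 4, 5 }, 5, 7 };
        struct dump_state st = { 0, 7 };

        while (dump_some(&t, &st, 2))
            ;
        return 0;
    }
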
@@ -1250,9 +1255,18 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
w->leaf = fn->leaf;
case FWS_C:
if (w->leaf && fn->fn_flags&RTN_RTINFO) {
- int err = w->func(w);
+ int err;
+
+ if (w->count < w->skip) {
+ w->count++;
+ continue;
+ }
+
+ err = w->func(w);
if (err)
return err;
+
+ w->count++;
continue;
}
w->state = FWS_U;
@@ -1346,6 +1360,8 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
c.w.root = root;
c.w.func = fib6_clean_node;
c.w.prune = prune;
+ c.w.count = 0;
+ c.w.skip = 0;
c.func = func;
c.arg = arg;
c.net = net;
@@ -1469,7 +1485,7 @@ static void fib6_gc_timer_cb(unsigned long arg)
fib6_run_gc(0, (struct net *)arg);
}
-static int fib6_net_init(struct net *net)
+static int __net_init fib6_net_init(struct net *net)
{
setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 6e7bffa2205..e41eba8aacf 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -154,7 +154,7 @@ static void ip6_fl_gc(unsigned long dummy)
write_unlock(&ip6_fl_lock);
}
-static void ip6_fl_purge(struct net *net)
+static void __net_exit ip6_fl_purge(struct net *net)
{
int i;
@@ -735,7 +735,7 @@ static const struct file_operations ip6fl_seq_fops = {
.release = seq_release_net,
};
-static int ip6_flowlabel_proc_init(struct net *net)
+static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
if (!proc_net_fops_create(net, "ip6_flowlabel",
S_IRUGO, &ip6fl_seq_fops))
@@ -743,7 +743,7 @@ static int ip6_flowlabel_proc_init(struct net *net)
return 0;
}
-static void ip6_flowlabel_proc_fini(struct net *net)
+static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
proc_net_remove(net, "ip6_flowlabel");
}
@@ -754,11 +754,10 @@ static inline int ip6_flowlabel_proc_init(struct net *net)
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
- return ;
}
#endif
-static inline void ip6_flowlabel_net_exit(struct net *net)
+static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
ip6_fl_purge(net);
ip6_flowlabel_proc_fini(net);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 237e2dba6e9..e28f9203dec 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -216,8 +216,7 @@ resubmit:
IP6_INC_STATS_BH(net, idev,
IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_UNK_NEXTHDR, nhoff,
- skb->dev);
+ ICMPV6_UNK_NEXTHDR, nhoff);
}
} else
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb6d0972863..dabf108ad81 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -267,7 +267,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
if (net_ratelimit())
printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
@@ -402,6 +402,7 @@ int ip6_forward(struct sk_buff *skb)
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
+ u32 mtu;
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
@@ -441,8 +442,7 @@ int ip6_forward(struct sk_buff *skb)
if (hdr->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
- 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
@@ -504,15 +504,19 @@ int ip6_forward(struct sk_buff *skb)
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
- ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
+ ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
- if (skb->len > dst_mtu(dst)) {
+ mtu = dst_mtu(dst);
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+ if (skb->len > mtu) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS_BH(net,
ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
IP6_INC_STATS_BH(net,
@@ -622,12 +626,11 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
mtu = ip6_skb_dst_mtu(skb);
/* We must not fragment if the socket is set to force MTU discovery
- * or if the skb it not generated by a local socket. (This last
- * check should be redundant, but it's free.)
+ * or if the skb is not generated by a local socket.
*/
if (!skb->local_df) {
skb->dev = skb_dst(skb)->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
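
The ip6_forward() hunk above also stops advertising impossible MTUs: IPv6 guarantees a minimum link MTU of 1280 bytes, so a Packet Too Big built from a smaller dst_mtu() would ask the sender to go below what the protocol allows. The clamp is small enough to show as a standalone helper; IPV6_MIN_MTU is defined locally here just for the sketch:

    #include <assert.h>

    #define IPV6_MIN_MTU 1280   /* IPv6 minimum link MTU (local copy for the sketch) */

    /* Never report an MTU below the IPv6 minimum, mirroring the
     * "if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU;" added above. */
    static unsigned int ip6_effective_mtu(unsigned int dst_mtu)
    {
        return dst_mtu < IPV6_MIN_MTU ? IPV6_MIN_MTU : dst_mtu;
    }

    int main(void)
    {
        assert(ip6_effective_mtu(576) == 1280);
        assert(ip6_effective_mtu(1500) == 1500);
        return 0;
    }
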
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d453d07b0df..138980eec21 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -74,7 +74,6 @@ MODULE_LICENSE("GPL");
(addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
(HASH_SIZE - 1))
-static void ip6_fb_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
@@ -623,7 +622,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rt && rt->rt6i_dev)
skb2->dev = rt->rt6i_dev;
- icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
+ icmpv6_send(skb2, rel_type, rel_code, rel_info);
if (rt)
dst_release(&rt->u.dst);
@@ -1015,7 +1014,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
if (tel->encap_limit == 0) {
icmpv6_send(skb, ICMPV6_PARAMPROB,
- ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+ ICMPV6_HDR_FIELD, offset + 2);
return -1;
}
encap_limit = tel->encap_limit - 1;
@@ -1034,7 +1033,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
if (err != 0) {
if (err == -EMSGSIZE)
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
return -1;
}
@@ -1364,7 +1363,7 @@ static void ip6_tnl_dev_init(struct net_device *dev)
* Return: 0
**/
-static void ip6_fb_tnl_dev_init(struct net_device *dev)
+static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = dev_net(dev);
@@ -1388,7 +1387,7 @@ static struct xfrm6_tunnel ip6ip6_handler = {
.priority = 1,
};
-static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
+static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
int h;
struct ip6_tnl *t;
@@ -1407,7 +1406,7 @@ static void ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
unregister_netdevice_many(&list);
}
-static int ip6_tnl_init_net(struct net *net)
+static int __net_init ip6_tnl_init_net(struct net *net)
{
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
@@ -1436,7 +1435,7 @@ err_alloc_dev:
return err;
}
-static void ip6_tnl_exit_net(struct net *net)
+static void __net_exit ip6_tnl_exit_net(struct net *net)
{
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -1462,27 +1461,29 @@ static int __init ip6_tunnel_init(void)
{
int err;
- if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) {
+ err = register_pernet_device(&ip6_tnl_net_ops);
+ if (err < 0)
+ goto out_pernet;
+
+ err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
+ if (err < 0) {
printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
- err = -EAGAIN;
- goto out;
+ goto out_ip4ip6;
}
- if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) {
+ err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
+ if (err < 0) {
printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
- err = -EAGAIN;
- goto unreg_ip4ip6;
+ goto out_ip6ip6;
}
- err = register_pernet_device(&ip6_tnl_net_ops);
- if (err < 0)
- goto err_pernet;
return 0;
-err_pernet:
- xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
-unreg_ip4ip6:
+
+out_ip6ip6:
xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
-out:
+out_ip4ip6:
+ unregister_pernet_device(&ip6_tnl_net_ops);
+out_pernet:
return err;
}
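
The reworked ip6_tunnel_init() above does two things: it propagates the real return value of xfrm6_tunnel_register() instead of a blanket -EAGAIN, and it registers the pernet device first so that each error label undoes exactly what succeeded before it, in reverse order. A generic, self-contained sketch of that goto-unwind shape, with hypothetical step/undo functions:

    #include <stdio.h>

    static int step_a(void)  { puts("a up");   return 0; }
    static int step_b(void)  { puts("b up");   return 0; }
    static int step_c(void)  { puts("c up");   return -1; } /* pretend failure */
    static void undo_a(void) { puts("a down"); }
    static void undo_b(void) { puts("b down"); }

    /* Register a, then b, then c; on failure unwind in reverse order,
     * each label cleaning up only what already succeeded. */
    static int init_all(void)
    {
        int err;

        err = step_a();
        if (err)
            goto out;
        err = step_b();
        if (err)
            goto out_a;
        err = step_c();
        if (err)
            goto out_b;
        return 0;

    out_b:
        undo_b();
    out_a:
        undo_a();
    out:
        return err;
    }

    int main(void)
    {
        return init_all() ? 1 : 0;
    }
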
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c87..85cccd6ed0b 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -53,6 +53,7 @@
static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
+ struct net *net = dev_net(skb->dev);
__be32 spi;
struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
struct ip_comp_hdr *ipcomph =
@@ -63,7 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
spi = htonl(ntohs(ipcomph->cpi));
- x = xfrm_state_lookup(&init_net, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
+ x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
if (!x)
return;
@@ -74,14 +75,15 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
struct xfrm_state *t = NULL;
- t = xfrm_state_alloc(&init_net);
+ t = xfrm_state_alloc(net);
if (!t)
goto out;
t->id.proto = IPPROTO_IPV6;
- t->id.spi = xfrm6_tunnel_alloc_spi((xfrm_address_t *)&x->props.saddr);
+ t->id.spi = xfrm6_tunnel_alloc_spi(net, (xfrm_address_t *)&x->props.saddr);
if (!t->id.spi)
goto error;
@@ -90,6 +92,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x)
t->props.family = AF_INET6;
t->props.mode = x->props.mode;
memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr));
+ memcpy(&t->mark, &x->mark, sizeof(t->mark));
if (xfrm_init_state(t))
goto error;
@@ -108,13 +111,15 @@ error:
static int ipcomp6_tunnel_attach(struct xfrm_state *x)
{
+ struct net *net = xs_net(x);
int err = 0;
struct xfrm_state *t = NULL;
__be32 spi;
+ u32 mark = x->mark.m & x->mark.v;
- spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&x->props.saddr);
+ spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&x->props.saddr);
if (spi)
- t = xfrm_state_lookup(&init_net, (xfrm_address_t *)&x->id.daddr,
+ t = xfrm_state_lookup(net, mark, (xfrm_address_t *)&x->id.daddr,
spi, IPPROTO_IPV6, AF_INET6);
if (!t) {
t = ipcomp6_tunnel_create(x);
@@ -154,16 +159,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp6_tunnel_attach(x);
if (err)
- goto error_tunnel;
+ goto out;
}
err = 0;
out:
return err;
-error_tunnel:
- ipcomp_destroy(x);
-
- goto out;
}
static const struct xfrm_type ipcomp6_type =
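
The IPComp changes thread the SA mark through tunnel-state creation and lookup: the child state copies x->mark, and the lookup passes the masked value (x->mark.m & x->mark.v) so only a state whose mark agrees on the masked bits is found. A simplified sketch of that masked-match test with a local stand-in type (not the kernel's struct or helper):

    #include <assert.h>
    #include <stdint.h>

    /* Local stand-in: v is the expected value, m the bits that matter. */
    struct demo_mark {
        uint32_t v;
        uint32_t m;
    };

    /* An SA matches when the packet mark agrees on every masked bit. */
    static int mark_matches(const struct demo_mark *sa, uint32_t pkt_mark)
    {
        return (pkt_mark & sa->m) == (sa->v & sa->m);
    }

    int main(void)
    {
        struct demo_mark sa = { .v = 0x10, .m = 0xf0 };

        assert(mark_matches(&sa, 0x10));    /* masked bits agree    */
        assert(mark_matches(&sa, 0x1f));    /* low bits are ignored */
        assert(!mark_matches(&sa, 0x20));   /* masked bits differ   */
        return 0;
    }
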
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 1f9c44442e6..bcd97191596 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -793,10 +793,10 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
}
spin_unlock_bh(&im->mca_lock);
- write_lock_bh(&idev->mc_lock);
+ spin_lock_bh(&idev->mc_lock);
pmc->next = idev->mc_tomb;
idev->mc_tomb = pmc;
- write_unlock_bh(&idev->mc_lock);
+ spin_unlock_bh(&idev->mc_lock);
}
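
Every mc_lock acquisition converted in this file was already the exclusive write_lock_bh() variant, so the reader/writer distinction was buying nothing; a plain spinlock gives the same mutual exclusion with a simpler, cheaper primitive. As a rough userspace analogy only (POSIX threads rather than kernel locks; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    /* Userspace analogy: all users of this lock modify the list, so a plain
     * lock is enough; an rwlock would add reader/writer bookkeeping that
     * nobody benefits from. */
    static pthread_mutex_t tomb_lock = PTHREAD_MUTEX_INITIALIZER;
    static int tomb_count;

    static void *add_tomb(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&tomb_lock);
        tomb_count++;               /* exclusive update, like the pmc->next chaining */
        pthread_mutex_unlock(&tomb_lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        if (pthread_create(&t1, NULL, add_tomb, NULL) != 0)
            return 1;
        if (pthread_create(&t2, NULL, add_tomb, NULL) != 0) {
            pthread_join(t1, NULL);
            return 1;
        }
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        printf("%d\n", tomb_count); /* always 2 */
        return 0;
    }
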
static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
@@ -804,7 +804,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
struct ifmcaddr6 *pmc, *pmc_prev;
struct ip6_sf_list *psf, *psf_next;
- write_lock_bh(&idev->mc_lock);
+ spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
if (ipv6_addr_equal(&pmc->mca_addr, pmca))
@@ -817,7 +817,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
else
idev->mc_tomb = pmc->next;
}
- write_unlock_bh(&idev->mc_lock);
+ spin_unlock_bh(&idev->mc_lock);
+
if (pmc) {
for (psf=pmc->mca_tomb; psf; psf=psf_next) {
psf_next = psf->sf_next;
@@ -832,10 +833,10 @@ static void mld_clear_delrec(struct inet6_dev *idev)
{
struct ifmcaddr6 *pmc, *nextpmc;
- write_lock_bh(&idev->mc_lock);
+ spin_lock_bh(&idev->mc_lock);
pmc = idev->mc_tomb;
idev->mc_tomb = NULL;
- write_unlock_bh(&idev->mc_lock);
+ spin_unlock_bh(&idev->mc_lock);
for (; pmc; pmc = nextpmc) {
nextpmc = pmc->next;
@@ -1696,7 +1697,7 @@ static void mld_send_cr(struct inet6_dev *idev)
int type, dtype;
read_lock_bh(&idev->lock);
- write_lock_bh(&idev->mc_lock);
+ spin_lock(&idev->mc_lock);
/* deleted MCA's */
pmc_prev = NULL;
@@ -1730,7 +1731,7 @@ static void mld_send_cr(struct inet6_dev *idev)
} else
pmc_prev = pmc;
}
- write_unlock_bh(&idev->mc_lock);
+ spin_unlock(&idev->mc_lock);
/* change recs */
for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
@@ -2311,7 +2312,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
void ipv6_mc_init_dev(struct inet6_dev *idev)
{
write_lock_bh(&idev->lock);
- rwlock_init(&idev->mc_lock);
+ spin_lock_init(&idev->mc_lock);
idev->mc_gq_running = 0;
setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
(unsigned long)idev);
@@ -2646,7 +2647,7 @@ static const struct file_operations igmp6_mcf_seq_fops = {
.release = seq_release_net,
};
-static int igmp6_proc_init(struct net *net)
+static int __net_init igmp6_proc_init(struct net *net)
{
int err;
@@ -2666,23 +2667,22 @@ out_proc_net_igmp6:
goto out;
}
-static void igmp6_proc_exit(struct net *net)
+static void __net_exit igmp6_proc_exit(struct net *net)
{
proc_net_remove(net, "mcfilter6");
proc_net_remove(net, "igmp6");
}
#else
-static int igmp6_proc_init(struct net *net)
+static inline int igmp6_proc_init(struct net *net)
{
return 0;
}
-static void igmp6_proc_exit(struct net *net)
+static inline void igmp6_proc_exit(struct net *net)
{
- ;
}
#endif
-static int igmp6_net_init(struct net *net)
+static int __net_init igmp6_net_init(struct net *net)
{
int err;
@@ -2708,7 +2708,7 @@ out_sock_create:
goto out;
}
-static void igmp6_net_exit(struct net *net)
+static void __net_exit igmp6_net_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
igmp6_proc_exit(net);
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index f797e8c6f3b..2794b600283 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -56,7 +56,7 @@ static inline void *mip6_padn(__u8 *data, __u8 padlen)
static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
- icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos, skb->dev);
+ icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
}
static int mip6_mh_len(int type)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c4585279809..8bcc4b7db3b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1772,7 +1772,7 @@ int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *bu
#endif
-static int ndisc_net_init(struct net *net)
+static int __net_init ndisc_net_init(struct net *net)
{
struct ipv6_pinfo *np;
struct sock *sk;
@@ -1797,7 +1797,7 @@ static int ndisc_net_init(struct net *net)
return 0;
}
-static void ndisc_net_exit(struct net *net)
+static void __net_exit ndisc_net_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv6.ndisc_sk);
}
@@ -1820,8 +1820,7 @@ int __init ndisc_init(void)
neigh_table_init(&nd_tbl);
#ifdef CONFIG_SYSCTL
- err = neigh_sysctl_register(NULL, &nd_tbl.parms, NET_IPV6,
- NET_IPV6_NEIGH, "ipv6",
+ err = neigh_sysctl_register(NULL, &nd_tbl.parms, "ipv6",
&ndisc_ifinfo_sysctl_change);
if (err)
goto out_unregister_pernet;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c980..9210e312edf 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -29,6 +29,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
+#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -67,6 +68,12 @@ do { \
#define inline
#endif
+void *ip6t_alloc_initial_table(const struct xt_table *info)
+{
+ return xt_alloc_initial_table(ip6t, IP6T);
+}
+EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
+
/*
We keep a set of rules for each CPU, so we can avoid write-locking
them in the softirq when updating the counters and therefore
@@ -201,7 +208,7 @@ ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
/* Performance critical - called for every packet */
static inline bool
-do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
+do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
struct xt_match_param *par)
{
par->match = m->u.kernel.match;
@@ -215,7 +222,7 @@ do_match(struct ip6t_entry_match *m, const struct sk_buff *skb,
}
static inline struct ip6t_entry *
-get_entry(void *base, unsigned int offset)
+get_entry(const void *base, unsigned int offset)
{
return (struct ip6t_entry *)(base + offset);
}
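
Much of the churn below replaces the callback macros (IP6T_ENTRY_ITERATE, IP6T_MATCH_ITERATE) with open-coded xt_entry_foreach()/xt_ematch_foreach() loops. Both styles walk the same layout: rules are variable-length records packed back to back in one blob, each record's next_offset giving the distance to the next, which is also the arithmetic get_entry() above performs. A self-contained sketch of that traversal with a hypothetical record type:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical variable-length record: next_offset is the distance, in
     * bytes, from the start of this record to the start of the next one. */
    struct blob_entry {
        uint16_t next_offset;
        uint16_t payload_len;
        char payload[];
    };

    /* Walk records packed back to back in [base, base + size). */
    #define blob_for_each(pos, base, size)                                  \
        for ((pos) = (struct blob_entry *)(base);                           \
             (char *)(pos) < (char *)(base) + (size);                       \
             (pos) = (struct blob_entry *)((char *)(pos) + (pos)->next_offset))

    int main(void)
    {
        char *buf = malloc(64);
        struct blob_entry *e, *pos;
        size_t size = 2 * (sizeof(struct blob_entry) + 8);

        if (buf == NULL)
            return 1;
        e = (struct blob_entry *)buf;
        e->next_offset = sizeof(*e) + 8;    /* record 1 plus 8-byte payload area */
        e->payload_len = 3;
        memcpy(e->payload, "one", 3);
        e = (struct blob_entry *)(buf + e->next_offset);
        e->next_offset = sizeof(*e) + 8;    /* record 2 */
        e->payload_len = 3;
        memcpy(e->payload, "two", 3);

        blob_for_each(pos, buf, size)
            printf("%.*s\n", pos->payload_len, pos->payload);
        free(buf);
        return 0;
    }
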
@@ -229,6 +236,12 @@ static inline bool unconditional(const struct ip6t_ip6 *ipv6)
return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
+static inline const struct ip6t_entry_target *
+ip6t_get_target_c(const struct ip6t_entry *e)
+{
+ return ip6t_get_target((struct ip6t_entry *)e);
+}
+
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
/* This cries for unification! */
@@ -264,11 +277,11 @@ static struct nf_loginfo trace_loginfo = {
/* Mildly perf critical (only if packet tracing is on) */
static inline int
-get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
+get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
const char *hookname, const char **chainname,
const char **comment, unsigned int *rulenum)
{
- struct ip6t_standard_target *t = (void *)ip6t_get_target(s);
+ const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
/* Head of user chain: ERROR target with chainname */
@@ -294,17 +307,18 @@ get_chainname_rulenum(struct ip6t_entry *s, struct ip6t_entry *e,
return 0;
}
-static void trace_packet(struct sk_buff *skb,
+static void trace_packet(const struct sk_buff *skb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
const char *tablename,
- struct xt_table_info *private,
- struct ip6t_entry *e)
+ const struct xt_table_info *private,
+ const struct ip6t_entry *e)
{
- void *table_base;
+ const void *table_base;
const struct ip6t_entry *root;
const char *hookname, *chainname, *comment;
+ const struct ip6t_entry *iter;
unsigned int rulenum = 0;
table_base = private->entries[smp_processor_id()];
@@ -313,10 +327,10 @@ static void trace_packet(struct sk_buff *skb,
hookname = chainname = hooknames[hook];
comment = comments[NF_IP6_TRACE_COMMENT_RULE];
- IP6T_ENTRY_ITERATE(root,
- private->size - private->hook_entry[hook],
- get_chainname_rulenum,
- e, hookname, &chainname, &comment, &rulenum);
+ xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
+ if (get_chainname_rulenum(iter, e, hookname,
+ &chainname, &comment, &rulenum) != 0)
+ break;
nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
"TRACE: %s:%s:%s:%u ",
@@ -345,9 +359,9 @@ ip6t_do_table(struct sk_buff *skb,
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
const char *indev, *outdev;
- void *table_base;
+ const void *table_base;
struct ip6t_entry *e, *back;
- struct xt_table_info *private;
+ const struct xt_table_info *private;
struct xt_match_param mtpar;
struct xt_target_param tgpar;
@@ -378,22 +392,27 @@ ip6t_do_table(struct sk_buff *skb,
back = get_entry(table_base, private->underflow[hook]);
do {
- struct ip6t_entry_target *t;
+ const struct ip6t_entry_target *t;
+ const struct xt_entry_match *ematch;
IP_NF_ASSERT(e);
IP_NF_ASSERT(back);
if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
- &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
- IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
+ &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
+ no_match:
e = ip6t_next_entry(e);
continue;
}
+ xt_ematch_foreach(ematch, e)
+ if (do_match(ematch, skb, &mtpar) != 0)
+ goto no_match;
+
ADD_COUNTER(e->counters,
ntohs(ipv6_hdr(skb)->payload_len) +
sizeof(struct ipv6hdr), 1);
- t = ip6t_get_target(e);
+ t = ip6t_get_target_c(e);
IP_NF_ASSERT(t->u.kernel.target);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
@@ -475,7 +494,7 @@ ip6t_do_table(struct sk_buff *skb,
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
-mark_source_chains(struct xt_table_info *newinfo,
+mark_source_chains(const struct xt_table_info *newinfo,
unsigned int valid_hooks, void *entry0)
{
unsigned int hook;
@@ -493,8 +512,8 @@ mark_source_chains(struct xt_table_info *newinfo,
e->counters.pcnt = pos;
for (;;) {
- struct ip6t_standard_target *t
- = (void *)ip6t_get_target(e);
+ const struct ip6t_standard_target *t
+ = (void *)ip6t_get_target_c(e);
int visited = e->comefrom & (1 << hook);
if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
@@ -584,27 +603,23 @@ mark_source_chains(struct xt_table_info *newinfo,
return 1;
}
-static int
-cleanup_match(struct ip6t_entry_match *m, unsigned int *i)
+static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
{
struct xt_mtdtor_param par;
- if (i && (*i)-- == 0)
- return 1;
-
+ par.net = net;
par.match = m->u.kernel.match;
par.matchinfo = m->data;
par.family = NFPROTO_IPV6;
if (par.match->destroy != NULL)
par.match->destroy(&par);
module_put(par.match->me);
- return 0;
}
static int
-check_entry(struct ip6t_entry *e, const char *name)
+check_entry(const struct ip6t_entry *e, const char *name)
{
- struct ip6t_entry_target *t;
+ const struct ip6t_entry_target *t;
if (!ip6_checkentry(&e->ipv6)) {
duprintf("ip_tables: ip check failed %p %s.\n", e, name);
@@ -615,15 +630,14 @@ check_entry(struct ip6t_entry *e, const char *name)
e->next_offset)
return -EINVAL;
- t = ip6t_get_target(e);
+ t = ip6t_get_target_c(e);
if (e->target_offset + t->u.target_size > e->next_offset)
return -EINVAL;
return 0;
}
-static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
- unsigned int *i)
+static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
{
const struct ip6t_ip6 *ipv6 = par->entryinfo;
int ret;
@@ -638,13 +652,11 @@ static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
par.match->name);
return ret;
}
- ++*i;
return 0;
}
static int
-find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
- unsigned int *i)
+find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
{
struct xt_match *match;
int ret;
@@ -658,7 +670,7 @@ find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
}
m->u.kernel.match = match;
- ret = check_match(m, par, i);
+ ret = check_match(m, par);
if (ret)
goto err;
@@ -668,10 +680,11 @@ err:
return ret;
}
-static int check_target(struct ip6t_entry *e, const char *name)
+static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
{
struct ip6t_entry_target *t = ip6t_get_target(e);
struct xt_tgchk_param par = {
+ .net = net,
.table = name,
.entryinfo = e,
.target = t->u.kernel.target,
@@ -693,27 +706,32 @@ static int check_target(struct ip6t_entry *e, const char *name)
}
static int
-find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
- unsigned int *i)
+find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
+ unsigned int size)
{
struct ip6t_entry_target *t;
struct xt_target *target;
int ret;
unsigned int j;
struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
ret = check_entry(e, name);
if (ret)
return ret;
j = 0;
+ mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ipv6;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV6;
- ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
- if (ret != 0)
- goto cleanup_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = find_check_match(ematch, &mtpar);
+ if (ret != 0)
+ goto cleanup_matches;
+ ++j;
+ }
t = ip6t_get_target(e);
target = try_then_request_module(xt_find_target(AF_INET6,
@@ -727,27 +745,29 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
}
t->u.kernel.target = target;
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto err;
-
- (*i)++;
return 0;
err:
module_put(t->u.kernel.target->me);
cleanup_matches:
- IP6T_MATCH_ITERATE(e, cleanup_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ cleanup_match(ematch, net);
+ }
return ret;
}
-static bool check_underflow(struct ip6t_entry *e)
+static bool check_underflow(const struct ip6t_entry *e)
{
const struct ip6t_entry_target *t;
unsigned int verdict;
if (!unconditional(&e->ipv6))
return false;
- t = ip6t_get_target(e);
+ t = ip6t_get_target_c(e);
if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
return false;
verdict = ((struct ip6t_standard_target *)t)->verdict;
@@ -758,12 +778,11 @@ static bool check_underflow(struct ip6t_entry *e)
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
struct xt_table_info *newinfo,
- unsigned char *base,
- unsigned char *limit,
+ const unsigned char *base,
+ const unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
- unsigned int valid_hooks,
- unsigned int *i)
+ unsigned int valid_hooks)
{
unsigned int h;
@@ -800,50 +819,41 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
-
- (*i)++;
return 0;
}
-static int
-cleanup_entry(struct ip6t_entry *e, unsigned int *i)
+static void cleanup_entry(struct ip6t_entry *e, struct net *net)
{
struct xt_tgdtor_param par;
struct ip6t_entry_target *t;
-
- if (i && (*i)-- == 0)
- return 1;
+ struct xt_entry_match *ematch;
/* Cleanup all matches */
- IP6T_MATCH_ITERATE(e, cleanup_match, NULL);
+ xt_ematch_foreach(ematch, e)
+ cleanup_match(ematch, net);
t = ip6t_get_target(e);
+ par.net = net;
par.target = t->u.kernel.target;
par.targinfo = t->data;
par.family = NFPROTO_IPV6;
if (par.target->destroy != NULL)
par.target->destroy(&par);
module_put(par.target->me);
- return 0;
}
/* Checks and translates the user-supplied table segment (held in
newinfo) */
static int
-translate_table(const char *name,
- unsigned int valid_hooks,
- struct xt_table_info *newinfo,
- void *entry0,
- unsigned int size,
- unsigned int number,
- const unsigned int *hook_entries,
- const unsigned int *underflows)
+translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ const struct ip6t_replace *repl)
{
+ struct ip6t_entry *iter;
unsigned int i;
- int ret;
+ int ret = 0;
- newinfo->size = size;
- newinfo->number = number;
+ newinfo->size = repl->size;
+ newinfo->number = repl->num_entries;
/* Init all hooks to impossible value. */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
@@ -854,49 +864,58 @@ translate_table(const char *name,
duprintf("translate_table: size %u\n", newinfo->size);
i = 0;
/* Walk through entries, checking offsets. */
- ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
- check_entry_size_and_hooks,
- newinfo,
- entry0,
- entry0 + size,
- hook_entries, underflows, valid_hooks, &i);
- if (ret != 0)
- return ret;
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = check_entry_size_and_hooks(iter, newinfo, entry0,
+ entry0 + repl->size,
+ repl->hook_entry,
+ repl->underflow,
+ repl->valid_hooks);
+ if (ret != 0)
+ return ret;
+ ++i;
+ }
- if (i != number) {
+ if (i != repl->num_entries) {
duprintf("translate_table: %u not %u entries\n",
- i, number);
+ i, repl->num_entries);
return -EINVAL;
}
/* Check hooks all assigned */
for (i = 0; i < NF_INET_NUMHOOKS; i++) {
/* Only hooks which are valid */
- if (!(valid_hooks & (1 << i)))
+ if (!(repl->valid_hooks & (1 << i)))
continue;
if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
duprintf("Invalid hook entry %u %u\n",
- i, hook_entries[i]);
+ i, repl->hook_entry[i]);
return -EINVAL;
}
if (newinfo->underflow[i] == 0xFFFFFFFF) {
duprintf("Invalid underflow %u %u\n",
- i, underflows[i]);
+ i, repl->underflow[i]);
return -EINVAL;
}
}
- if (!mark_source_chains(newinfo, valid_hooks, entry0))
+ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
return -ELOOP;
/* Finally, each sanity check must pass */
i = 0;
- ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
- find_check_entry, name, size, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ ret = find_check_entry(iter, net, repl->name, repl->size);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret != 0) {
- IP6T_ENTRY_ITERATE(entry0, newinfo->size,
- cleanup_entry, &i);
+ xt_entry_foreach(iter, entry0, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter, net);
+ }
return ret;
}
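
Besides the iterator conversion, translate_table() now takes the ip6t_replace request itself rather than six values copied out of it, so the information travels as one unit and a future field does not have to be threaded through every call site. A tiny sketch of that refactor shape with hypothetical names:

    #include <stdio.h>

    /* Hypothetical request structure, standing in for struct ip6t_replace. */
    struct replace_req {
        const char *name;
        unsigned int valid_hooks;
        unsigned int num_entries;
        unsigned int size;
    };

    /* Before: translate(name, valid_hooks, num_entries, size, ...)
     * After:  one pointer; new fields do not change every call site. */
    static int translate(const struct replace_req *repl)
    {
        printf("%s: %u entries, %u bytes\n",
               repl->name, repl->num_entries, repl->size);
        return 0;
    }

    int main(void)
    {
        struct replace_req req = { "filter", 0x0e, 4, 1024 };

        return translate(&req);
    }
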
@@ -909,33 +928,11 @@ translate_table(const char *name,
return ret;
}
-/* Gets counters. */
-static inline int
-add_entry_to_counter(const struct ip6t_entry *e,
- struct xt_counters total[],
- unsigned int *i)
-{
- ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
-static inline int
-set_entry_to_counter(const struct ip6t_entry *e,
- struct ip6t_counters total[],
- unsigned int *i)
-{
- SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
-
- (*i)++;
- return 0;
-}
-
static void
get_counters(const struct xt_table_info *t,
struct xt_counters counters[])
{
+ struct ip6t_entry *iter;
unsigned int cpu;
unsigned int i;
unsigned int curcpu;
@@ -951,32 +948,32 @@ get_counters(const struct xt_table_info *t,
curcpu = smp_processor_id();
i = 0;
- IP6T_ENTRY_ITERATE(t->entries[curcpu],
- t->size,
- set_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[curcpu], t->size) {
+ SET_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i;
+ }
for_each_possible_cpu(cpu) {
if (cpu == curcpu)
continue;
i = 0;
xt_info_wrlock(cpu);
- IP6T_ENTRY_ITERATE(t->entries[cpu],
- t->size,
- add_entry_to_counter,
- counters,
- &i);
+ xt_entry_foreach(iter, t->entries[cpu], t->size) {
+ ADD_COUNTER(counters[i], iter->counters.bcnt,
+ iter->counters.pcnt);
+ ++i;
+ }
xt_info_wrunlock(cpu);
}
local_bh_enable();
}
-static struct xt_counters *alloc_counters(struct xt_table *table)
+static struct xt_counters *alloc_counters(const struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
- struct xt_table_info *private = table->private;
+ const struct xt_table_info *private = table->private;
/* We need atomic snapshot of counters: rest doesn't change
(other than comefrom, which userspace doesn't care
@@ -994,11 +991,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
static int
copy_entries_to_user(unsigned int total_size,
- struct xt_table *table,
+ const struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
- struct ip6t_entry *e;
+ const struct ip6t_entry *e;
struct xt_counters *counters;
const struct xt_table_info *private = table->private;
int ret = 0;
@@ -1050,7 +1047,7 @@ copy_entries_to_user(unsigned int total_size,
}
}
- t = ip6t_get_target(e);
+ t = ip6t_get_target_c(e);
if (copy_to_user(userptr + off + e->target_offset
+ offsetof(struct ip6t_entry_target,
u.user.name),
@@ -1067,7 +1064,7 @@ copy_entries_to_user(unsigned int total_size,
}
#ifdef CONFIG_COMPAT
-static void compat_standard_from_user(void *dst, void *src)
+static void compat_standard_from_user(void *dst, const void *src)
{
int v = *(compat_int_t *)src;
@@ -1076,7 +1073,7 @@ static void compat_standard_from_user(void *dst, void *src)
memcpy(dst, &v, sizeof(v));
}
-static int compat_standard_to_user(void __user *dst, void *src)
+static int compat_standard_to_user(void __user *dst, const void *src)
{
compat_int_t cv = *(int *)src;
@@ -1085,25 +1082,20 @@ static int compat_standard_to_user(void __user *dst, void *src)
return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
-static inline int
-compat_calc_match(struct ip6t_entry_match *m, int *size)
-{
- *size += xt_compat_match_offset(m->u.kernel.match);
- return 0;
-}
-
-static int compat_calc_entry(struct ip6t_entry *e,
+static int compat_calc_entry(const struct ip6t_entry *e,
const struct xt_table_info *info,
- void *base, struct xt_table_info *newinfo)
+ const void *base, struct xt_table_info *newinfo)
{
- struct ip6t_entry_target *t;
+ const struct xt_entry_match *ematch;
+ const struct ip6t_entry_target *t;
unsigned int entry_offset;
int off, i, ret;
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - base;
- IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
- t = ip6t_get_target(e);
+ xt_ematch_foreach(ematch, e)
+ off += xt_compat_match_offset(ematch->u.kernel.match);
+ t = ip6t_get_target_c(e);
off += xt_compat_target_offset(t->u.kernel.target);
newinfo->size -= off;
ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
@@ -1124,7 +1116,9 @@ static int compat_calc_entry(struct ip6t_entry *e,
static int compat_table_info(const struct xt_table_info *info,
struct xt_table_info *newinfo)
{
+ struct ip6t_entry *iter;
void *loc_cpu_entry;
+ int ret;
if (!newinfo || !info)
return -EINVAL;
@@ -1133,13 +1127,17 @@ static int compat_table_info(const struct xt_table_info *info,
memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
newinfo->initial_entries = 0;
loc_cpu_entry = info->entries[raw_smp_processor_id()];
- return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
- compat_calc_entry, info, loc_cpu_entry,
- newinfo);
+ xt_entry_foreach(iter, loc_cpu_entry, info->size) {
+ ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
+ if (ret != 0)
+ return ret;
+ }
+ return 0;
}
#endif
-static int get_info(struct net *net, void __user *user, int *len, int compat)
+static int get_info(struct net *net, void __user *user,
+ const int *len, int compat)
{
char name[IP6T_TABLE_MAXNAMELEN];
struct xt_table *t;
@@ -1164,10 +1162,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct ip6t_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET6);
private = &tmp;
@@ -1199,7 +1197,8 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
}
static int
-get_entries(struct net *net, struct ip6t_get_entries __user *uptr, int *len)
+get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
+ const int *len)
{
int ret;
struct ip6t_get_entries get;
@@ -1247,6 +1246,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
struct xt_table_info *oldinfo;
struct xt_counters *counters;
const void *loc_cpu_old_entry;
+ struct ip6t_entry *iter;
ret = 0;
counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
@@ -1290,8 +1290,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
/* Decrease module usage counts and free resource */
loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
- IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
- NULL);
+ xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
+ cleanup_entry(iter, net);
+
xt_free_table_info(oldinfo);
if (copy_to_user(counters_ptr, counters,
sizeof(struct xt_counters) * num_counters) != 0)
@@ -1310,12 +1311,13 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
}
static int
-do_replace(struct net *net, void __user *user, unsigned int len)
+do_replace(struct net *net, const void __user *user, unsigned int len)
{
int ret;
struct ip6t_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct ip6t_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1336,9 +1338,7 @@ do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_table(tmp.name, tmp.valid_hooks,
- newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
- tmp.hook_entry, tmp.underflow);
+ ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
if (ret != 0)
goto free_newinfo;
@@ -1351,27 +1351,15 @@ do_replace(struct net *net, void __user *user, unsigned int len)
return 0;
free_newinfo_untrans:
- IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
}
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
static int
-add_counter_to_entry(struct ip6t_entry *e,
- const struct xt_counters addme[],
- unsigned int *i)
-{
- ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
- (*i)++;
- return 0;
-}
-
-static int
-do_add_counters(struct net *net, void __user *user, unsigned int len,
+do_add_counters(struct net *net, const void __user *user, unsigned int len,
int compat)
{
unsigned int i, curcpu;
@@ -1385,6 +1373,7 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
const struct xt_table_info *private;
int ret = 0;
const void *loc_cpu_entry;
+ struct ip6t_entry *iter;
#ifdef CONFIG_COMPAT
struct compat_xt_counters_info compat_tmp;
@@ -1443,11 +1432,10 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
curcpu = smp_processor_id();
xt_info_wrlock(curcpu);
loc_cpu_entry = private->entries[curcpu];
- IP6T_ENTRY_ITERATE(loc_cpu_entry,
- private->size,
- add_counter_to_entry,
- paddc,
- &i);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size) {
+ ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
+ ++i;
+ }
xt_info_wrunlock(curcpu);
unlock_up_free:
@@ -1476,45 +1464,40 @@ struct compat_ip6t_replace {
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
unsigned int *size, struct xt_counters *counters,
- unsigned int *i)
+ unsigned int i)
{
struct ip6t_entry_target *t;
struct compat_ip6t_entry __user *ce;
u_int16_t target_offset, next_offset;
compat_uint_t origsize;
- int ret;
+ const struct xt_entry_match *ematch;
+ int ret = 0;
- ret = -EFAULT;
origsize = *size;
ce = (struct compat_ip6t_entry __user *)*dstptr;
- if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
- goto out;
-
- if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
- goto out;
+ if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
+ copy_to_user(&ce->counters, &counters[i],
+ sizeof(counters[i])) != 0)
+ return -EFAULT;
*dstptr += sizeof(struct compat_ip6t_entry);
*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
- ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
+ xt_ematch_foreach(ematch, e) {
+ ret = xt_compat_match_to_user(ematch, dstptr, size);
+ if (ret != 0)
+ return ret;
+ }
target_offset = e->target_offset - (origsize - *size);
- if (ret)
- goto out;
t = ip6t_get_target(e);
ret = xt_compat_target_to_user(t, dstptr, size);
if (ret)
- goto out;
- ret = -EFAULT;
+ return ret;
next_offset = e->next_offset - (origsize - *size);
- if (put_user(target_offset, &ce->target_offset))
- goto out;
- if (put_user(next_offset, &ce->next_offset))
- goto out;
-
- (*i)++;
+ if (put_user(target_offset, &ce->target_offset) != 0 ||
+ put_user(next_offset, &ce->next_offset) != 0)
+ return -EFAULT;
return 0;
-out:
- return ret;
}
static int
@@ -1522,7 +1505,7 @@ compat_find_calc_match(struct ip6t_entry_match *m,
const char *name,
const struct ip6t_ip6 *ipv6,
unsigned int hookmask,
- int *size, unsigned int *i)
+ int *size)
{
struct xt_match *match;
@@ -1536,47 +1519,32 @@ compat_find_calc_match(struct ip6t_entry_match *m,
}
m->u.kernel.match = match;
*size += xt_compat_match_offset(match);
-
- (*i)++;
- return 0;
-}
-
-static int
-compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
-{
- if (i && (*i)-- == 0)
- return 1;
-
- module_put(m->u.kernel.match->me);
return 0;
}
-static int
-compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
+static void compat_release_entry(struct compat_ip6t_entry *e)
{
struct ip6t_entry_target *t;
-
- if (i && (*i)-- == 0)
- return 1;
+ struct xt_entry_match *ematch;
/* Cleanup all matches */
- COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
+ xt_ematch_foreach(ematch, e)
+ module_put(ematch->u.kernel.match->me);
t = compat_ip6t_get_target(e);
module_put(t->u.kernel.target->me);
- return 0;
}
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
struct xt_table_info *newinfo,
unsigned int *size,
- unsigned char *base,
- unsigned char *limit,
- unsigned int *hook_entries,
- unsigned int *underflows,
- unsigned int *i,
+ const unsigned char *base,
+ const unsigned char *limit,
+ const unsigned int *hook_entries,
+ const unsigned int *underflows,
const char *name)
{
+ struct xt_entry_match *ematch;
struct ip6t_entry_target *t;
struct xt_target *target;
unsigned int entry_offset;
@@ -1605,10 +1573,13 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
entry_offset = (void *)e - (void *)base;
j = 0;
- ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
- &e->ipv6, e->comefrom, &off, &j);
- if (ret != 0)
- goto release_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = compat_find_calc_match(ematch, name,
+ &e->ipv6, e->comefrom, &off);
+ if (ret != 0)
+ goto release_matches;
+ ++j;
+ }
t = compat_ip6t_get_target(e);
target = try_then_request_module(xt_find_target(AF_INET6,
@@ -1640,14 +1611,16 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
/* Clear counters and comefrom */
memset(&e->counters, 0, sizeof(e->counters));
e->comefrom = 0;
-
- (*i)++;
return 0;
out:
module_put(t->u.kernel.target->me);
release_matches:
- IP6T_MATCH_ITERATE(e, compat_release_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ module_put(ematch->u.kernel.match->me);
+ }
return ret;
}
@@ -1661,6 +1634,7 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
struct ip6t_entry *de;
unsigned int origsize;
int ret, h;
+ struct xt_entry_match *ematch;
ret = 0;
origsize = *size;
@@ -1671,10 +1645,11 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
*dstptr += sizeof(struct ip6t_entry);
*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
- ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
- dstptr, size);
- if (ret)
- return ret;
+ xt_ematch_foreach(ematch, e) {
+ ret = xt_compat_match_from_user(ematch, dstptr, size);
+ if (ret != 0)
+ return ret;
+ }
de->target_offset = e->target_offset - (origsize - *size);
t = compat_ip6t_get_target(e);
target = t->u.kernel.target;
@@ -1690,36 +1665,44 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
return ret;
}
-static int compat_check_entry(struct ip6t_entry *e, const char *name,
- unsigned int *i)
+static int compat_check_entry(struct ip6t_entry *e, struct net *net,
+ const char *name)
{
unsigned int j;
- int ret;
+ int ret = 0;
struct xt_mtchk_param mtpar;
+ struct xt_entry_match *ematch;
j = 0;
+ mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ipv6;
mtpar.hook_mask = e->comefrom;
mtpar.family = NFPROTO_IPV6;
- ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
- if (ret)
- goto cleanup_matches;
+ xt_ematch_foreach(ematch, e) {
+ ret = check_match(ematch, &mtpar);
+ if (ret != 0)
+ goto cleanup_matches;
+ ++j;
+ }
- ret = check_target(e, name);
+ ret = check_target(e, net, name);
if (ret)
goto cleanup_matches;
-
- (*i)++;
return 0;
cleanup_matches:
- IP6T_MATCH_ITERATE(e, cleanup_match, &j);
+ xt_ematch_foreach(ematch, e) {
+ if (j-- == 0)
+ break;
+ cleanup_match(ematch, net);
+ }
return ret;
}
static int
-translate_compat_table(const char *name,
+translate_compat_table(struct net *net,
+ const char *name,
unsigned int valid_hooks,
struct xt_table_info **pinfo,
void **pentry0,
@@ -1731,8 +1714,10 @@ translate_compat_table(const char *name,
unsigned int i, j;
struct xt_table_info *newinfo, *info;
void *pos, *entry0, *entry1;
+ struct compat_ip6t_entry *iter0;
+ struct ip6t_entry *iter1;
unsigned int size;
- int ret;
+ int ret = 0;
info = *pinfo;
entry0 = *pentry0;
@@ -1749,13 +1734,17 @@ translate_compat_table(const char *name,
j = 0;
xt_compat_lock(AF_INET6);
/* Walk through entries, checking offsets. */
- ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
- check_compat_entry_size_and_hooks,
- info, &size, entry0,
- entry0 + total_size,
- hook_entries, underflows, &j, name);
- if (ret != 0)
- goto out_unlock;
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ entry0,
+ entry0 + total_size,
+ hook_entries,
+ underflows,
+ name);
+ if (ret != 0)
+ goto out_unlock;
+ ++j;
+ }
ret = -EINVAL;
if (j != number) {
@@ -1794,9 +1783,12 @@ translate_compat_table(const char *name,
entry1 = newinfo->entries[raw_smp_processor_id()];
pos = entry1;
size = total_size;
- ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
- compat_copy_entry_from_user,
- &pos, &size, name, newinfo, entry1);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ ret = compat_copy_entry_from_user(iter0, &pos, &size,
+ name, newinfo, entry1);
+ if (ret != 0)
+ break;
+ }
xt_compat_flush_offsets(AF_INET6);
xt_compat_unlock(AF_INET6);
if (ret)
@@ -1807,13 +1799,32 @@ translate_compat_table(const char *name,
goto free_newinfo;
i = 0;
- ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
- name, &i);
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ ret = compat_check_entry(iter1, net, name);
+ if (ret != 0)
+ break;
+ ++i;
+ }
if (ret) {
+ /*
+ * The first i matches need cleanup_entry (calls ->destroy)
+ * because they had called ->check already. The other j-i
+ * entries need only release.
+ */
+ int skip = i;
j -= i;
- COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
- compat_release_entry, &j);
- IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, &i);
+ xt_entry_foreach(iter0, entry0, newinfo->size) {
+ if (skip-- > 0)
+ continue;
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
+ xt_entry_foreach(iter1, entry1, newinfo->size) {
+ if (i-- == 0)
+ break;
+ cleanup_entry(iter1, net);
+ }
xt_free_table_info(newinfo);
return ret;
}
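
The error path above is the delicate part of the compat translation, as the new comment spells out: the first i translated entries have already run their ->check hooks and need the full cleanup_entry(), while the remaining j - i source entries only hold module references and merely need compat_release_entry(). A self-contained sketch of that two-tier unwind over an array, with hypothetical check/destroy/release functions:

    #include <stdio.h>

    #define N 5

    /* Toy stand-ins for compat_check_entry(), cleanup_entry() and
     * compat_release_entry(); entry 3 is made to fail its check. */
    static int check_one(int idx)    { printf("check %d\n", idx);   return idx == 3 ? -1 : 0; }
    static void destroy_one(int idx) { printf("destroy %d\n", idx); }
    static void release_one(int idx) { printf("release %d\n", idx); }

    int main(void)
    {
        int i, j = N, ret = 0;

        /* All N entries start out holding a reference (j of them). */
        for (i = 0; i < N; i++) {
            ret = check_one(i);
            if (ret)
                break;
        }
        if (ret) {
            int k, skip = i;

            j -= i;                     /* entries i..N-1 never passed check */
            for (k = 0; k < N; k++) {
                if (skip-- > 0)
                    continue;           /* first i entries handled below */
                if (j-- == 0)
                    break;
                release_one(k);         /* only drop the reference */
            }
            for (k = 0; k < i; k++)
                destroy_one(k);         /* checked entries: full teardown */
            return 1;
        }
        return 0;
    }
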
@@ -1831,7 +1842,11 @@ translate_compat_table(const char *name,
free_newinfo:
xt_free_table_info(newinfo);
out:
- COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
+ xt_entry_foreach(iter0, entry0, total_size) {
+ if (j-- == 0)
+ break;
+ compat_release_entry(iter0);
+ }
return ret;
out_unlock:
xt_compat_flush_offsets(AF_INET6);
@@ -1846,6 +1861,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
struct compat_ip6t_replace tmp;
struct xt_table_info *newinfo;
void *loc_cpu_entry;
+ struct ip6t_entry *iter;
if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
return -EFAULT;
@@ -1868,7 +1884,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
goto free_newinfo;
}
- ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+ ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
&newinfo, &loc_cpu_entry, tmp.size,
tmp.num_entries, tmp.hook_entry,
tmp.underflow);
@@ -1884,7 +1900,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
return 0;
free_newinfo_untrans:
- IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
+ cleanup_entry(iter, net);
free_newinfo:
xt_free_table_info(newinfo);
return ret;
@@ -1933,6 +1950,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
int ret = 0;
const void *loc_cpu_entry;
unsigned int i = 0;
+ struct ip6t_entry *iter;
counters = alloc_counters(table);
if (IS_ERR(counters))
@@ -1945,9 +1963,12 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
loc_cpu_entry = private->entries[raw_smp_processor_id()];
pos = userptr;
size = total_size;
- ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
- compat_copy_entry_to_user,
- &pos, &size, counters, &i);
+ xt_entry_foreach(iter, loc_cpu_entry, total_size) {
+ ret = compat_copy_entry_to_user(iter, &pos,
+ &size, counters, i++);
+ if (ret != 0)
+ break;
+ }
vfree(counters);
return ret;
@@ -2121,11 +2142,7 @@ struct xt_table *ip6t_register_table(struct net *net,
loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
memcpy(loc_cpu_entry, repl->entries, repl->size);
- ret = translate_table(table->name, table->valid_hooks,
- newinfo, loc_cpu_entry, repl->size,
- repl->num_entries,
- repl->hook_entry,
- repl->underflow);
+ ret = translate_table(net, newinfo, loc_cpu_entry, repl);
if (ret != 0)
goto out_free;
@@ -2142,17 +2159,19 @@ out:
return ERR_PTR(ret);
}
-void ip6t_unregister_table(struct xt_table *table)
+void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
struct module *table_owner = table->me;
+ struct ip6t_entry *iter;
private = xt_unregister_table(table);
/* Decrease module usage counts and free resources */
loc_cpu_entry = private->entries[raw_smp_processor_id()];
- IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+ xt_entry_foreach(iter, loc_cpu_entry, private->size)
+ cleanup_entry(iter, net);
if (private->number > private->initial_entries)
module_put(table_owner);
xt_free_table_info(private);
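
The ip6_tables.c hunks above replace the IP6T_ENTRY_ITERATE()/COMPAT_IP6T_ENTRY_ITERATE() callback macros with open-coded xt_entry_foreach() walks, which makes the error path's split explicit: the first i entries already passed ->check and need cleanup_entry(), the next j-i only need compat_release_entry(). A minimal standalone sketch of that offset-driven walk over a packed blob of variable-sized entries, with made-up names (fake_entry, fake_entry_foreach) standing in for the real xt API:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct fake_entry {
	uint16_t next_offset;   /* distance to the next entry in the blob */
	uint16_t verdict;
};

/* walk a packed blob of variable-sized entries, xt_entry_foreach()-style */
#define fake_entry_foreach(pos, base, size)                              \
	for ((pos) = (struct fake_entry *)(base);                         \
	     (char *)(pos) < (char *)(base) + (size);                     \
	     (pos) = (struct fake_entry *)((char *)(pos) + (pos)->next_offset))

int main(void)
{
	union { char bytes[64]; struct fake_entry align; } blob;
	struct fake_entry *e, *iter;
	unsigned int off = 0, i = 0;

	/* build three entries of different sizes in one packed blob */
	for (int sz = 16; sz <= 24; sz += 4) {
		e = (struct fake_entry *)(blob.bytes + off);
		memset(e, 0, sz);
		e->next_offset = sz;
		off += sz;
	}

	fake_entry_foreach(iter, blob.bytes, off)
		printf("entry %u at offset %td\n", i++,
		       (char *)iter - blob.bytes);
	return 0;
}
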
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 8311ca31816..dd8afbaf00a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,7 @@ send_unreach(struct net *net, struct sk_buff *skb_in, unsigned char code,
if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
skb_in->dev = net->loopback_dev;
- icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0, NULL);
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
}
static unsigned int
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ad378efd0eb..36b72cafc22 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -21,99 +21,26 @@ MODULE_DESCRIPTION("ip6tables filter table");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT))
-static struct
-{
- struct ip6t_replace repl;
- struct ip6t_standard entries[3];
- struct ip6t_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "filter",
- .valid_hooks = FILTER_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
- .hook_entry = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
- },
- .underflow = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2
- },
- },
- .entries = {
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IP6T_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
+ .priority = NF_IP6_PRI_FILTER,
};
/* The work comes in here from netfilter.c. */
static unsigned int
-ip6t_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(in)->ipv6.ip6table_filter);
-}
-
-static unsigned int
-ip6t_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
-#if 0
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr)) {
- if (net_ratelimit())
- printk("ip6t_hook: happy cracking.\n");
- return NF_ACCEPT;
- }
-#endif
+ const struct net *net = dev_net((in != NULL) ? in : out);
- return ip6t_do_table(skb, hook, in, out,
- dev_net(out)->ipv6.ip6table_filter);
+ return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
}
-static struct nf_hook_ops ip6t_ops[] __read_mostly = {
- {
- .hook = ip6t_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_FILTER,
- },
- {
- .hook = ip6t_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP6_PRI_FILTER,
- },
- {
- .hook = ip6t_local_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_FILTER,
- },
-};
+static struct nf_hook_ops *filter_ops __read_mostly;
/* Default to forward because I got too much mail already. */
static int forward = NF_ACCEPT;
@@ -121,9 +48,18 @@ module_param(forward, bool, 0000);
static int __net_init ip6table_filter_net_init(struct net *net)
{
- /* Register table */
+ struct ip6t_replace *repl;
+
+ repl = ip6t_alloc_initial_table(&packet_filter);
+ if (repl == NULL)
+ return -ENOMEM;
+ /* Entry 1 is the FORWARD hook */
+ ((struct ip6t_standard *)repl->entries)[1].target.verdict =
+ -forward - 1;
+
net->ipv6.ip6table_filter =
- ip6t_register_table(net, &packet_filter, &initial_table.repl);
+ ip6t_register_table(net, &packet_filter, repl);
+ kfree(repl);
if (IS_ERR(net->ipv6.ip6table_filter))
return PTR_ERR(net->ipv6.ip6table_filter);
return 0;
@@ -131,7 +67,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
static void __net_exit ip6table_filter_net_exit(struct net *net)
{
- ip6t_unregister_table(net->ipv6.ip6table_filter);
+ ip6t_unregister_table(net, net->ipv6.ip6table_filter);
}
static struct pernet_operations ip6table_filter_net_ops = {
@@ -148,17 +84,16 @@ static int __init ip6table_filter_init(void)
return -EINVAL;
}
- /* Entry 1 is the FORWARD hook */
- initial_table.entries[1].target.verdict = -forward - 1;
-
ret = register_pernet_subsys(&ip6table_filter_net_ops);
if (ret < 0)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
- if (ret < 0)
+ filter_ops = xt_hook_link(&packet_filter, ip6table_filter_hook);
+ if (IS_ERR(filter_ops)) {
+ ret = PTR_ERR(filter_ops);
goto cleanup_table;
+ }
return ret;
@@ -169,7 +104,7 @@ static int __init ip6table_filter_init(void)
static void __exit ip6table_filter_fini(void)
{
- nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
+ xt_hook_unlink(&packet_filter, filter_ops);
unregister_pernet_subsys(&ip6table_filter_net_ops);
}
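
The filter table now gets its initial ruleset from ip6t_alloc_initial_table() at net-init time, and the FORWARD policy is patched in as "-forward - 1" because standard target verdicts are stored in that negated, off-by-one encoding. A small standalone sketch of the encoding, assuming stand-in constants (FAKE_NF_*) rather than the values in linux/netfilter.h:

#include <stdio.h>

#define FAKE_NF_DROP   0
#define FAKE_NF_ACCEPT 1

static int encode_verdict(int v) { return -v - 1; }   /* what lands in the rule */
static int decode_verdict(int e) { return -e - 1; }   /* what the table walker sees */

int main(void)
{
	int forward = FAKE_NF_ACCEPT;            /* the module parameter */
	int stored  = encode_verdict(forward);

	printf("forward=%d stored=%d decoded=%d\n",
	       forward, stored, decode_verdict(stored));
	return 0;
}
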
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index a929c19d30e..7844e557c0e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -21,80 +21,17 @@ MODULE_DESCRIPTION("ip6tables mangle table");
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))
-static const struct
-{
- struct ip6t_replace repl;
- struct ip6t_standard entries[5];
- struct ip6t_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "mangle",
- .valid_hooks = MANGLE_VALID_HOOKS,
- .num_entries = 6,
- .size = sizeof(struct ip6t_standard) * 5 + sizeof(struct ip6t_error),
- .hook_entry = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
- [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
- },
- .underflow = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_IN] = sizeof(struct ip6t_standard),
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard) * 2,
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 3,
- [NF_INET_POST_ROUTING] = sizeof(struct ip6t_standard) * 4,
- },
- },
- .entries = {
- IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* POST_ROUTING */
- },
- .term = IP6T_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
+ .priority = NF_IP6_PRI_MANGLE,
};
-/* The work comes in here from netfilter.c. */
-static unsigned int
-ip6t_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(in)->ipv6.ip6table_mangle);
-}
-
-static unsigned int
-ip6t_post_routing_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(out)->ipv6.ip6table_mangle);
-}
-
static unsigned int
-ip6t_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
{
-
unsigned int ret;
struct in6_addr saddr, daddr;
u_int8_t hop_limit;
@@ -119,7 +56,7 @@ ip6t_local_out_hook(unsigned int hook,
/* flowlabel and prio (includes version, which shouldn't change either) */
flowlabel = *((u_int32_t *)ipv6_hdr(skb));
- ret = ip6t_do_table(skb, hook, in, out,
+ ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
dev_net(out)->ipv6.ip6table_mangle);
if (ret != NF_DROP && ret != NF_STOLEN &&
@@ -132,49 +69,33 @@ ip6t_local_out_hook(unsigned int hook,
return ret;
}
-static struct nf_hook_ops ip6t_ops[] __read_mostly = {
- {
- .hook = ip6t_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP6_PRI_MANGLE,
- },
- {
- .hook = ip6t_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_MANGLE,
- },
- {
- .hook = ip6t_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP6_PRI_MANGLE,
- },
- {
- .hook = ip6t_local_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_MANGLE,
- },
- {
- .hook = ip6t_post_routing_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_POST_ROUTING,
- .priority = NF_IP6_PRI_MANGLE,
- },
-};
+/* The work comes in here from netfilter.c. */
+static unsigned int
+ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ if (hook == NF_INET_LOCAL_OUT)
+ return ip6t_mangle_out(skb, out);
+ if (hook == NF_INET_POST_ROUTING)
+ return ip6t_do_table(skb, hook, in, out,
+ dev_net(out)->ipv6.ip6table_mangle);
+ /* INPUT/FORWARD */
+ return ip6t_do_table(skb, hook, in, out,
+ dev_net(in)->ipv6.ip6table_mangle);
+}
+static struct nf_hook_ops *mangle_ops __read_mostly;
static int __net_init ip6table_mangle_net_init(struct net *net)
{
- /* Register table */
+ struct ip6t_replace *repl;
+
+ repl = ip6t_alloc_initial_table(&packet_mangler);
+ if (repl == NULL)
+ return -ENOMEM;
net->ipv6.ip6table_mangle =
- ip6t_register_table(net, &packet_mangler, &initial_table.repl);
+ ip6t_register_table(net, &packet_mangler, repl);
+ kfree(repl);
if (IS_ERR(net->ipv6.ip6table_mangle))
return PTR_ERR(net->ipv6.ip6table_mangle);
return 0;
@@ -182,7 +103,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
- ip6t_unregister_table(net->ipv6.ip6table_mangle);
+ ip6t_unregister_table(net, net->ipv6.ip6table_mangle);
}
static struct pernet_operations ip6table_mangle_net_ops = {
@@ -199,9 +120,11 @@ static int __init ip6table_mangle_init(void)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
- if (ret < 0)
+ mangle_ops = xt_hook_link(&packet_mangler, ip6table_mangle_hook);
+ if (IS_ERR(mangle_ops)) {
+ ret = PTR_ERR(mangle_ops);
goto cleanup_table;
+ }
return ret;
@@ -212,7 +135,7 @@ static int __init ip6table_mangle_init(void)
static void __exit ip6table_mangle_fini(void)
{
- nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
+ xt_hook_unlink(&packet_mangler, mangle_ops);
unregister_pernet_subsys(&ip6table_mangle_net_ops);
}
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index ed1a1180f3b..aef31a29de9 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -8,85 +8,37 @@
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
-static const struct
-{
- struct ip6t_replace repl;
- struct ip6t_standard entries[2];
- struct ip6t_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "raw",
- .valid_hooks = RAW_VALID_HOOKS,
- .num_entries = 3,
- .size = sizeof(struct ip6t_standard) * 2 + sizeof(struct ip6t_error),
- .hook_entry = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
- },
- .underflow = {
- [NF_INET_PRE_ROUTING] = 0,
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard)
- },
- },
- .entries = {
- IP6T_STANDARD_INIT(NF_ACCEPT), /* PRE_ROUTING */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IP6T_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
+ .priority = NF_IP6_PRI_FIRST,
};
/* The work comes in here from netfilter.c. */
static unsigned int
-ip6t_pre_routing_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(in)->ipv6.ip6table_raw);
-}
+ const struct net *net = dev_net((in != NULL) ? in : out);
-static unsigned int
-ip6t_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(out)->ipv6.ip6table_raw);
+ return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
}
-static struct nf_hook_ops ip6t_ops[] __read_mostly = {
- {
- .hook = ip6t_pre_routing_hook,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP6_PRI_FIRST,
- .owner = THIS_MODULE,
- },
- {
- .hook = ip6t_local_out_hook,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_FIRST,
- .owner = THIS_MODULE,
- },
-};
+static struct nf_hook_ops *rawtable_ops __read_mostly;
static int __net_init ip6table_raw_net_init(struct net *net)
{
- /* Register table */
+ struct ip6t_replace *repl;
+
+ repl = ip6t_alloc_initial_table(&packet_raw);
+ if (repl == NULL)
+ return -ENOMEM;
net->ipv6.ip6table_raw =
- ip6t_register_table(net, &packet_raw, &initial_table.repl);
+ ip6t_register_table(net, &packet_raw, repl);
+ kfree(repl);
if (IS_ERR(net->ipv6.ip6table_raw))
return PTR_ERR(net->ipv6.ip6table_raw);
return 0;
@@ -94,7 +46,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
static void __net_exit ip6table_raw_net_exit(struct net *net)
{
- ip6t_unregister_table(net->ipv6.ip6table_raw);
+ ip6t_unregister_table(net, net->ipv6.ip6table_raw);
}
static struct pernet_operations ip6table_raw_net_ops = {
@@ -111,9 +63,11 @@ static int __init ip6table_raw_init(void)
return ret;
/* Register hooks */
- ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
- if (ret < 0)
+ rawtable_ops = xt_hook_link(&packet_raw, ip6table_raw_hook);
+ if (IS_ERR(rawtable_ops)) {
+ ret = PTR_ERR(rawtable_ops);
goto cleanup_table;
+ }
return ret;
@@ -124,7 +78,7 @@ static int __init ip6table_raw_init(void)
static void __exit ip6table_raw_fini(void)
{
- nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
+ xt_hook_unlink(&packet_raw, rawtable_ops);
unregister_pernet_subsys(&ip6table_raw_net_ops);
}
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 41b444c6093..0824d865aa9 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -26,106 +26,37 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT)
-static const struct
-{
- struct ip6t_replace repl;
- struct ip6t_standard entries[3];
- struct ip6t_error term;
-} initial_table __net_initdata = {
- .repl = {
- .name = "security",
- .valid_hooks = SECURITY_VALID_HOOKS,
- .num_entries = 4,
- .size = sizeof(struct ip6t_standard) * 3 + sizeof(struct ip6t_error),
- .hook_entry = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
- },
- .underflow = {
- [NF_INET_LOCAL_IN] = 0,
- [NF_INET_FORWARD] = sizeof(struct ip6t_standard),
- [NF_INET_LOCAL_OUT] = sizeof(struct ip6t_standard) * 2,
- },
- },
- .entries = {
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_IN */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* FORWARD */
- IP6T_STANDARD_INIT(NF_ACCEPT), /* LOCAL_OUT */
- },
- .term = IP6T_ERROR_INIT, /* ERROR */
-};
-
static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
+ .priority = NF_IP6_PRI_SECURITY,
};
static unsigned int
-ip6t_local_in_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(in)->ipv6.ip6table_security);
-}
-
-static unsigned int
-ip6t_forward_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- return ip6t_do_table(skb, hook, in, out,
- dev_net(in)->ipv6.ip6table_security);
-}
+ const struct net *net = dev_net((in != NULL) ? in : out);
-static unsigned int
-ip6t_local_out_hook(unsigned int hook,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
-{
- /* TBD: handle short packets via raw socket */
- return ip6t_do_table(skb, hook, in, out,
- dev_net(out)->ipv6.ip6table_security);
+ return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
}
-static struct nf_hook_ops ip6t_ops[] __read_mostly = {
- {
- .hook = ip6t_local_in_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_IN,
- .priority = NF_IP6_PRI_SECURITY,
- },
- {
- .hook = ip6t_forward_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_FORWARD,
- .priority = NF_IP6_PRI_SECURITY,
- },
- {
- .hook = ip6t_local_out_hook,
- .owner = THIS_MODULE,
- .pf = NFPROTO_IPV6,
- .hooknum = NF_INET_LOCAL_OUT,
- .priority = NF_IP6_PRI_SECURITY,
- },
-};
+static struct nf_hook_ops *sectbl_ops __read_mostly;
static int __net_init ip6table_security_net_init(struct net *net)
{
- net->ipv6.ip6table_security =
- ip6t_register_table(net, &security_table, &initial_table.repl);
+ struct ip6t_replace *repl;
+ repl = ip6t_alloc_initial_table(&security_table);
+ if (repl == NULL)
+ return -ENOMEM;
+ net->ipv6.ip6table_security =
+ ip6t_register_table(net, &security_table, repl);
+ kfree(repl);
if (IS_ERR(net->ipv6.ip6table_security))
return PTR_ERR(net->ipv6.ip6table_security);
@@ -134,7 +65,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
static void __net_exit ip6table_security_net_exit(struct net *net)
{
- ip6t_unregister_table(net->ipv6.ip6table_security);
+ ip6t_unregister_table(net, net->ipv6.ip6table_security);
}
static struct pernet_operations ip6table_security_net_ops = {
@@ -150,9 +81,11 @@ static int __init ip6table_security_init(void)
if (ret < 0)
return ret;
- ret = nf_register_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
- if (ret < 0)
+ sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
+ if (IS_ERR(sectbl_ops)) {
+ ret = PTR_ERR(sectbl_ops);
goto cleanup_table;
+ }
return ret;
@@ -163,7 +96,7 @@ cleanup_table:
static void __exit ip6table_security_fini(void)
{
- nf_unregister_hooks(ip6t_ops, ARRAY_SIZE(ip6t_ops));
+ xt_hook_unlink(&security_table, sectbl_ops);
unregister_pernet_subsys(&ip6table_security_net_ops);
}
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 0956ebabbff..996c3f41fec 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/nf_log.h>
@@ -191,15 +192,20 @@ out:
static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
struct sk_buff *skb)
{
+ u16 zone = NF_CT_DEFAULT_ZONE;
+
+ if (skb->nfct)
+ zone = nf_ct_zone((struct nf_conn *)skb->nfct);
+
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
- return IP6_DEFRAG_CONNTRACK_BRIDGE_IN;
+ return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
#endif
if (hooknum == NF_INET_PRE_ROUTING)
- return IP6_DEFRAG_CONNTRACK_IN;
+ return IP6_DEFRAG_CONNTRACK_IN + zone;
else
- return IP6_DEFRAG_CONNTRACK_OUT;
+ return IP6_DEFRAG_CONNTRACK_OUT + zone;
}
@@ -212,7 +218,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
struct sk_buff *reasm;
/* Previously seen (loopback)? */
- if (skb->nfct)
+ if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
return NF_ACCEPT;
reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
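
The defrag hunk above folds the conntrack zone into the defrag "user" value so that fragments from different zones are never reassembled into one packet. A standalone sketch of that key construction, with illustrative names and spacing (fake_defrag_user, 0x10000) rather than the real enum ip6_defrag_users layout:

#include <stdio.h>
#include <stdint.h>

enum fake_defrag_user {
	FAKE_DEFRAG_CONNTRACK_IN  = 0,
	FAKE_DEFRAG_CONNTRACK_OUT = 0x10000,   /* leave room for 16-bit zones */
};

static uint32_t make_defrag_key(enum fake_defrag_user base, uint16_t zone)
{
	return (uint32_t)base + zone;          /* distinct key per (direction, zone) */
}

int main(void)
{
	printf("zone 0 in : %#x\n", (unsigned)make_defrag_key(FAKE_DEFRAG_CONNTRACK_IN, 0));
	printf("zone 7 in : %#x\n", (unsigned)make_defrag_key(FAKE_DEFRAG_CONNTRACK_IN, 7));
	printf("zone 7 out: %#x\n", (unsigned)make_defrag_key(FAKE_DEFRAG_CONNTRACK_OUT, 7));
	return 0;
}
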
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index c7b8bd1d798..9be81776415 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
@@ -128,7 +129,7 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
}
static int
-icmpv6_error_message(struct net *net,
+icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int icmp6off,
enum ip_conntrack_info *ctinfo,
@@ -137,6 +138,7 @@ icmpv6_error_message(struct net *net,
struct nf_conntrack_tuple intuple, origtuple;
const struct nf_conntrack_tuple_hash *h;
const struct nf_conntrack_l4proto *inproto;
+ u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
NF_CT_ASSERT(skb->nfct == NULL);
@@ -163,7 +165,7 @@ icmpv6_error_message(struct net *net,
*ctinfo = IP_CT_RELATED;
- h = nf_conntrack_find_get(net, &intuple);
+ h = nf_conntrack_find_get(net, zone, &intuple);
if (!h) {
pr_debug("icmpv6_error: no match\n");
return -NF_ACCEPT;
@@ -179,7 +181,8 @@ icmpv6_error_message(struct net *net,
}
static int
-icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
+icmpv6_error(struct net *net, struct nf_conn *tmpl,
+ struct sk_buff *skb, unsigned int dataoff,
enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
{
const struct icmp6hdr *icmp6h;
@@ -215,7 +218,7 @@ icmpv6_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
if (icmp6h->icmp6_type >= 128)
return NF_ACCEPT;
- return icmpv6_error_message(net, skb, dataoff, ctinfo, hooknum);
+ return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 312c20adc83..f1171b74465 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,9 +45,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
-#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
-#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
struct nf_ct_frag6_skb_cb
{
@@ -63,6 +60,7 @@ struct nf_ct_frag6_queue
struct inet_frag_queue q;
__be32 id; /* fragment id */
+ u32 user;
struct in6_addr saddr;
struct in6_addr daddr;
@@ -471,7 +469,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
fp = skb_shinfo(head)->frag_list;
- if (NFCT_FRAG6_CB(fp)->orig == NULL)
+ if (fp && NFCT_FRAG6_CB(fp)->orig == NULL)
/* at above code, head skb is divided into two skbs. */
fp = fp->next;
@@ -597,12 +595,6 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
hdr = ipv6_hdr(clone);
fhdr = (struct frag_hdr *)skb_transport_header(clone);
- if (!(fhdr->frag_off & htons(0xFFF9))) {
- pr_debug("Invalid fragment offset\n");
- /* It is not a fragmented frame */
- goto ret_orig;
- }
-
if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
nf_ct_frag6_evictor();
@@ -669,8 +661,8 @@ int nf_ct_frag6_init(void)
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.secret_interval = 10 * 60 * HZ;
nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
- nf_init_frags.high_thresh = 256 * 1024;
- nf_init_frags.low_thresh = 192 * 1024;
+ nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH;
inet_frags_init_net(&nf_init_frags);
inet_frags_init(&nf_frags);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91..58344c0fbd1 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -59,7 +59,7 @@ static const struct file_operations sockstat6_seq_fops = {
.release = single_release_net,
};
-static struct snmp_mib snmp6_ipstats_list[] = {
+static const struct snmp_mib snmp6_ipstats_list[] = {
/* ipv6 mib according to RFC 2465 */
SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS),
SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS),
@@ -92,7 +92,7 @@ static struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_SENTINEL
};
-static struct snmp_mib snmp6_icmp6_list[] = {
+static const struct snmp_mib snmp6_icmp6_list[] = {
/* icmpv6 mib according to RFC 2466 */
SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
@@ -120,7 +120,7 @@ static const char *const icmp6type2name[256] = {
};
-static struct snmp_mib snmp6_udp6_list[] = {
+static const struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS),
SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS),
@@ -128,7 +128,7 @@ static struct snmp_mib snmp6_udp6_list[] = {
SNMP_MIB_SENTINEL
};
-static struct snmp_mib snmp6_udplite6_list[] = {
+static const struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS),
SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS),
SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS),
@@ -136,7 +136,7 @@ static struct snmp_mib snmp6_udplite6_list[] = {
SNMP_MIB_SENTINEL
};
-static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
+static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
{
char name[32];
int i;
@@ -170,8 +170,8 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
return;
}
-static inline void
-snmp6_seq_show_item(struct seq_file *seq, void **mib, struct snmp_mib *itemlist)
+static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
+ const struct snmp_mib *itemlist)
{
int i;
for (i=0; itemlist[i].name; i++)
@@ -183,14 +183,15 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
{
struct net *net = (struct net *)seq->private;
- snmp6_seq_show_item(seq, (void **)net->mib.ipv6_statistics,
+ snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics,
snmp6_ipstats_list);
- snmp6_seq_show_item(seq, (void **)net->mib.icmpv6_statistics,
+ snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq, (void **)net->mib.icmpv6msg_statistics);
- snmp6_seq_show_item(seq, (void **)net->mib.udp_stats_in6,
+ snmp6_seq_show_icmpv6msg(seq,
+ (void __percpu **)net->mib.icmpv6msg_statistics);
+ snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
snmp6_udp6_list);
- snmp6_seq_show_item(seq, (void **)net->mib.udplite_stats_in6,
+ snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
snmp6_udplite6_list);
return 0;
}
@@ -213,9 +214,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
struct inet6_dev *idev = (struct inet6_dev *)seq->private;
seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
- snmp6_seq_show_item(seq, (void **)idev->stats.ipv6, snmp6_ipstats_list);
- snmp6_seq_show_item(seq, (void **)idev->stats.icmpv6, snmp6_icmp6_list);
- snmp6_seq_show_icmpv6msg(seq, (void **)idev->stats.icmpv6msg);
+ snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6,
+ snmp6_ipstats_list);
+ snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6,
+ snmp6_icmp6_list);
+ snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg);
return 0;
}
@@ -259,7 +262,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
struct net *net = dev_net(idev->dev);
if (!net->mib.proc_net_devsnmp6)
return -ENOENT;
- if (!idev || !idev->stats.proc_dir_entry)
+ if (!idev->stats.proc_dir_entry)
return -EINVAL;
remove_proc_entry(idev->stats.proc_dir_entry->name,
net->mib.proc_net_devsnmp6);
@@ -267,7 +270,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
return 0;
}
-static int ipv6_proc_init_net(struct net *net)
+static int __net_init ipv6_proc_init_net(struct net *net)
{
if (!proc_net_fops_create(net, "sockstat6", S_IRUGO,
&sockstat6_seq_fops))
@@ -288,7 +291,7 @@ proc_dev_snmp6_fail:
return -ENOMEM;
}
-static void ipv6_proc_exit_net(struct net *net)
+static void __net_exit ipv6_proc_exit_net(struct net *net)
{
proc_net_remove(net, "sockstat6");
proc_net_remove(net, "dev_snmp6");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 926ce8eeffa..ed31c37c6e3 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1275,7 +1275,7 @@ static const struct file_operations raw6_seq_fops = {
.release = seq_release_net,
};
-static int raw6_init_net(struct net *net)
+static int __net_init raw6_init_net(struct net *net)
{
if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
return -ENOMEM;
@@ -1283,7 +1283,7 @@ static int raw6_init_net(struct net *net)
return 0;
}
-static void raw6_exit_net(struct net *net)
+static void __net_exit raw6_exit_net(struct net *net)
{
proc_net_remove(net, "raw6");
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2cddea3bd6b..a555156e977 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -228,7 +228,7 @@ static void ip6_frag_expire(unsigned long data)
pointer directly, device might already disappeared.
*/
fq->q.fragments->dev = dev;
- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+ icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
rcu_read_unlock();
out:
@@ -237,8 +237,7 @@ out:
}
static __inline__ struct frag_queue *
-fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
- struct inet6_dev *idev)
+fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -254,13 +253,9 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
if (q == NULL)
- goto oom;
+ return NULL;
return container_of(q, struct frag_queue, q);
-
-oom:
- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
- return NULL;
}
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
@@ -606,8 +601,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
- if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- ip6_dst_idev(skb_dst(skb)))) != NULL) {
+ fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
+ if (fq != NULL) {
int ret;
spin_lock(&fq->q.lock);
@@ -672,7 +667,7 @@ static struct ctl_table ip6_frags_ctl_table[] = {
{ }
};
-static int ip6_frags_ns_sysctl_register(struct net *net)
+static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
@@ -702,7 +697,7 @@ err_alloc:
return -ENOMEM;
}
-static void ip6_frags_ns_sysctl_unregister(struct net *net)
+static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
@@ -745,10 +740,10 @@ static inline void ip6_frags_sysctl_unregister(void)
}
#endif
-static int ipv6_frags_init_net(struct net *net)
+static int __net_init ipv6_frags_init_net(struct net *net)
{
- net->ipv6.frags.high_thresh = 256 * 1024;
- net->ipv6.frags.low_thresh = 192 * 1024;
+ net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
inet_frags_init_net(&net->ipv6.frags);
@@ -756,7 +751,7 @@ static int ipv6_frags_init_net(struct net *net)
return ip6_frags_ns_sysctl_register(net);
}
-static void ipv6_frags_exit_net(struct net *net)
+static void __net_exit ipv6_frags_exit_net(struct net *net)
{
ip6_frags_ns_sysctl_unregister(net);
inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c2bd74c5f8d..b08879e97f2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -909,7 +909,7 @@ static void ip6_link_failure(struct sk_buff *skb)
{
struct rt6_info *rt;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
@@ -1873,7 +1873,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
switch (ipstats_mib_noroutes) {
case IPSTATS_MIB_INNOROUTES:
type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
- if (type == IPV6_ADDR_ANY || type == IPV6_ADDR_RESERVED) {
+ if (type == IPV6_ADDR_ANY) {
IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INADDRERRORS);
break;
@@ -1884,7 +1884,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
ipstats_mib_noroutes);
break;
}
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
kfree_skb(skb);
return 0;
}
@@ -2612,7 +2612,7 @@ ctl_table ipv6_route_table_template[] = {
{ }
};
-struct ctl_table *ipv6_route_sysctl_init(struct net *net)
+struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
struct ctl_table *table;
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
}
#endif
-static int ip6_route_net_init(struct net *net)
+static int __net_init ip6_route_net_init(struct net *net)
{
int ret = -ENOMEM;
@@ -2702,7 +2702,7 @@ out_ip6_dst_ops:
goto out;
}
-static void ip6_route_net_exit(struct net *net)
+static void __net_exit ip6_route_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
proc_net_remove(net, "ipv6_route");
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 976e68244b9..b1eea811be4 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -62,7 +62,6 @@
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
-static void ipip6_fb_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);
@@ -364,7 +363,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
goto out;
}
- INIT_RCU_HEAD(&p->rcu_head);
p->next = t->prl;
p->addr = a->addr;
p->flags = a->flags;
@@ -745,7 +743,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
}
@@ -1120,7 +1118,7 @@ static void ipip6_tunnel_init(struct net_device *dev)
ipip6_tunnel_bind_dev(dev);
}
-static void ipip6_fb_tunnel_init(struct net_device *dev)
+static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -1145,7 +1143,7 @@ static struct xfrm_tunnel sit_handler = {
.priority = 1,
};
-static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
+static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
{
int prio;
@@ -1162,7 +1160,7 @@ static void sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
}
}
-static int sit_init_net(struct net *net)
+static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
int err;
@@ -1195,7 +1193,7 @@ err_alloc_dev:
return err;
}
-static void sit_exit_net(struct net *net)
+static void __net_exit sit_exit_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
LIST_HEAD(list);
@@ -1228,15 +1226,14 @@ static int __init sit_init(void)
printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
- if (xfrm4_tunnel_register(&sit_handler, AF_INET6) < 0) {
- printk(KERN_INFO "sit init: Can't add protocol\n");
- return -EAGAIN;
- }
-
err = register_pernet_device(&sit_net_ops);
if (err < 0)
- xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
-
+ return err;
+ err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
+ if (err < 0) {
+ unregister_pernet_device(&sit_net_ops);
+ printk(KERN_INFO "sit init: Can't add protocol\n");
+ }
return err;
}
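
The sit.c init change above reorders registration: the pernet device ops are registered first, and a failure of the tunnel handler registration unwinds them. A minimal standalone sketch of that register-then-roll-back ordering, with step_a()/step_b() standing in for register_pernet_device() and xfrm4_tunnel_register():

#include <stdio.h>

static int step_a(void)  { puts("A registered");      return 0; }
static void undo_a(void) { puts("A unregistered");              }
static int step_b(void)  { puts("B register failed"); return -1; }

static int init(void)
{
	int err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		undo_a();       /* roll back what already succeeded */
	return err;
}

int main(void)
{
	return init() ? 1 : 0;
}
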
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06576c..34d1f0690d7 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -269,7 +269,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
- ireq->wscale_ok, &rcv_wscale);
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index c690736885b..f841d93bf98 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -55,7 +55,7 @@ struct ctl_path net_ipv6_ctl_path[] = {
};
EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
-static int ipv6_sysctl_net_init(struct net *net)
+static int __net_init ipv6_sysctl_net_init(struct net *net)
{
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
@@ -98,7 +98,7 @@ out_ipv6_table:
goto out;
}
-static void ipv6_sysctl_net_exit(struct net *net)
+static void __net_exit ipv6_sysctl_net_exit(struct net *net)
{
struct ctl_table *ipv6_table;
struct ctl_table *ipv6_route_table;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd595a40..6963a6b6763 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -520,6 +520,13 @@ done:
return err;
}
+static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
+ struct request_values *rvp)
+{
+ TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ return tcp_v6_send_synack(sk, req, rvp);
+}
+
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
@@ -876,7 +883,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
+ printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest));
@@ -890,10 +897,11 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
.family = AF_INET6,
.obj_size = sizeof(struct tcp6_request_sock),
- .rtx_syn_ack = tcp_v6_send_synack,
+ .rtx_syn_ack = tcp_v6_rtx_synack,
.send_ack = tcp_v6_reqsk_send_ack,
.destructor = tcp_v6_reqsk_destructor,
- .send_reset = tcp_v6_send_reset
+ .send_reset = tcp_v6_send_reset,
+ .syn_ack_timeout = tcp_syn_ack_timeout,
};
#ifdef CONFIG_TCP_MD5SIG
@@ -2105,7 +2113,7 @@ static struct tcp_seq_afinfo tcp6_seq_afinfo = {
},
};
-int tcp6_proc_init(struct net *net)
+int __net_init tcp6_proc_init(struct net *net)
{
return tcp_proc_register(net, &tcp6_seq_afinfo);
}
@@ -2174,18 +2182,18 @@ static struct inet_protosw tcpv6_protosw = {
INET_PROTOSW_ICSK,
};
-static int tcpv6_net_init(struct net *net)
+static int __net_init tcpv6_net_init(struct net *net)
{
return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
SOCK_RAW, IPPROTO_TCP, net);
}
-static void tcpv6_net_exit(struct net *net)
+static void __net_exit tcpv6_net_exit(struct net *net)
{
inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
-static void tcpv6_net_exit_batch(struct list_head *net_exit_list)
+static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 51e2832d13a..e17bc1dfc1a 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -98,7 +98,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
if (!handler->handler(skb))
return 0;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
@@ -116,7 +116,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
if (!handler->handler(skb))
return 0;
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
drop:
kfree_skb(skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 69ebdbe78c4..52b8347ae3b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen, copied;
+ unsigned int ulen;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +341,9 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- copied = len;
- if (copied > ulen)
- copied = ulen;
- else if (copied < ulen)
+ if (len > ulen)
+ len = ulen;
+ else if (len < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +354,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied );
+ msg->msg_iov,len);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
@@ -411,7 +410,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}
- err = copied;
+ err = len;
if (flags & MSG_TRUNC)
err = ulen;
@@ -681,12 +680,11 @@ static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
+ struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
- struct net_device *dev = skb->dev;
struct in6_addr *saddr, *daddr;
u32 ulen = 0;
- struct net *net = dev_net(skb->dev);
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto short_packet;
@@ -745,7 +743,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
proto == IPPROTO_UDPLITE);
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
@@ -1396,7 +1394,7 @@ static struct udp_seq_afinfo udp6_seq_afinfo = {
},
};
-int udp6_proc_init(struct net *net)
+int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 6ea6938919e..5f48fadc27f 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -104,12 +104,12 @@ static struct udp_seq_afinfo udplite6_seq_afinfo = {
},
};
-static int udplite6_proc_init_net(struct net *net)
+static int __net_init udplite6_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udplite6_seq_afinfo);
}
-static void udplite6_proc_exit_net(struct net *net)
+static void __net_exit udplite6_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udplite6_seq_afinfo);
}
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 9084582d236..2bc98ede123 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -101,7 +101,7 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
break;
}
- x = xfrm_state_lookup_byaddr(net, dst, src, proto, AF_INET6);
+ x = xfrm_state_lookup_byaddr(net, skb->mark, dst, src, proto, AF_INET6);
if (!x)
continue;
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index c4f4eef032a..0c92112dcba 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -38,7 +38,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 438831d3359..fa85a7d22dc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -30,6 +30,25 @@
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/mutex.h>
+#include <net/netns/generic.h>
+
+#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
+#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
+
+#define XFRM6_TUNNEL_SPI_MIN 1
+#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
+
+struct xfrm6_tunnel_net {
+ struct hlist_head spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
+ struct hlist_head spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
+ u32 spi;
+};
+
+static int xfrm6_tunnel_net_id __read_mostly;
+static inline struct xfrm6_tunnel_net *xfrm6_tunnel_pernet(struct net *net)
+{
+ return net_generic(net, xfrm6_tunnel_net_id);
+}
/*
* xfrm_tunnel_spi things are for allocating unique id ("spi")
@@ -46,19 +65,8 @@ struct xfrm6_tunnel_spi {
static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
-static u32 xfrm6_tunnel_spi;
-
-#define XFRM6_TUNNEL_SPI_MIN 1
-#define XFRM6_TUNNEL_SPI_MAX 0xffffffff
-
static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
-#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
-#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
-
-static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE];
-static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE];
-
static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
{
unsigned h;
@@ -76,50 +84,14 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
}
-
-static int xfrm6_tunnel_spi_init(void)
-{
- int i;
-
- xfrm6_tunnel_spi = 0;
- xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
- sizeof(struct xfrm6_tunnel_spi),
- 0, SLAB_HWCACHE_ALIGN,
- NULL);
- if (!xfrm6_tunnel_spi_kmem)
- return -ENOMEM;
-
- for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
- INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
- for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
- INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
- return 0;
-}
-
-static void xfrm6_tunnel_spi_fini(void)
-{
- int i;
-
- for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
- if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
- return;
- }
- for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
- if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
- return;
- }
- rcu_barrier();
- kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
- xfrm6_tunnel_spi_kmem = NULL;
-}
-
-static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
+static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos;
hlist_for_each_entry_rcu(x6spi, pos,
- &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
+ &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr) {
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0)
return x6spi;
@@ -128,13 +100,13 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
return NULL;
}
-__be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
rcu_read_lock_bh();
- x6spi = __xfrm6_tunnel_spi_lookup(saddr);
+ x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
spi = x6spi ? x6spi->spi : 0;
rcu_read_unlock_bh();
return htonl(spi);
@@ -142,14 +114,15 @@ __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
-static int __xfrm6_tunnel_spi_check(u32 spi)
+static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
int index = xfrm6_tunnel_spi_hash_byspi(spi);
struct hlist_node *pos;
hlist_for_each_entry(x6spi, pos,
- &xfrm6_tunnel_spi_byspi[index],
+ &xfrm6_tn->spi_byspi[index],
list_byspi) {
if (x6spi->spi == spi)
return -1;
@@ -157,61 +130,61 @@ static int __xfrm6_tunnel_spi_check(u32 spi)
return index;
}
-static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
+static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
u32 spi;
struct xfrm6_tunnel_spi *x6spi;
int index;
- if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN ||
- xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX)
- xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN;
+ if (xfrm6_tn->spi < XFRM6_TUNNEL_SPI_MIN ||
+ xfrm6_tn->spi >= XFRM6_TUNNEL_SPI_MAX)
+ xfrm6_tn->spi = XFRM6_TUNNEL_SPI_MIN;
else
- xfrm6_tunnel_spi++;
+ xfrm6_tn->spi++;
- for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
- index = __xfrm6_tunnel_spi_check(spi);
+ for (spi = xfrm6_tn->spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
+ index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
- for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) {
- index = __xfrm6_tunnel_spi_check(spi);
+ for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
+ index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
}
spi = 0;
goto out;
alloc_spi:
- xfrm6_tunnel_spi = spi;
+ xfrm6_tn->spi = spi;
x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, GFP_ATOMIC);
if (!x6spi)
goto out;
- INIT_RCU_HEAD(&x6spi->rcu_head);
memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr));
x6spi->spi = spi;
atomic_set(&x6spi->refcnt, 1);
- hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]);
+ hlist_add_head_rcu(&x6spi->list_byspi, &xfrm6_tn->spi_byspi[index]);
index = xfrm6_tunnel_spi_hash_byaddr(saddr);
- hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]);
+ hlist_add_head_rcu(&x6spi->list_byaddr, &xfrm6_tn->spi_byaddr[index]);
out:
return spi;
}
-__be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
- x6spi = __xfrm6_tunnel_spi_lookup(saddr);
+ x6spi = __xfrm6_tunnel_spi_lookup(net, saddr);
if (x6spi) {
atomic_inc(&x6spi->refcnt);
spi = x6spi->spi;
} else
- spi = __xfrm6_tunnel_alloc_spi(saddr);
+ spi = __xfrm6_tunnel_alloc_spi(net, saddr);
spin_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
@@ -225,15 +198,16 @@ static void x6spi_destroy_rcu(struct rcu_head *head)
container_of(head, struct xfrm6_tunnel_spi, rcu_head));
}
-void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
+void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr)
{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos, *n;
spin_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, pos, n,
- &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
+ &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
@@ -263,10 +237,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_tunnel_rcv(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb->dev);
struct ipv6hdr *iph = ipv6_hdr(skb);
__be32 spi;
- spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
+ spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
}
@@ -326,7 +301,9 @@ static int xfrm6_tunnel_init_state(struct xfrm_state *x)
static void xfrm6_tunnel_destroy(struct xfrm_state *x)
{
- xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr);
+ struct net *net = xs_net(x);
+
+ xfrm6_tunnel_free_spi(net, (xfrm_address_t *)&x->props.saddr);
}
static const struct xfrm_type xfrm6_tunnel_type = {
@@ -351,34 +328,73 @@ static struct xfrm6_tunnel xfrm46_tunnel_handler = {
.priority = 2,
};
+static int __net_init xfrm6_tunnel_net_init(struct net *net)
+{
+ struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
+ unsigned int i;
+
+ for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&xfrm6_tn->spi_byaddr[i]);
+ for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
+ INIT_HLIST_HEAD(&xfrm6_tn->spi_byspi[i]);
+ xfrm6_tn->spi = 0;
+
+ return 0;
+}
+
+static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
+{
+}
+
+static struct pernet_operations xfrm6_tunnel_net_ops = {
+ .init = xfrm6_tunnel_net_init,
+ .exit = xfrm6_tunnel_net_exit,
+ .id = &xfrm6_tunnel_net_id,
+ .size = sizeof(struct xfrm6_tunnel_net),
+};
+
static int __init xfrm6_tunnel_init(void)
{
- if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
- goto err;
- if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6))
- goto unreg;
- if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET))
- goto dereg6;
- if (xfrm6_tunnel_spi_init() < 0)
- goto dereg46;
+ int rv;
+
+ xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
+ sizeof(struct xfrm6_tunnel_spi),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!xfrm6_tunnel_spi_kmem)
+ return -ENOMEM;
+ rv = register_pernet_subsys(&xfrm6_tunnel_net_ops);
+ if (rv < 0)
+ goto out_pernet;
+ rv = xfrm_register_type(&xfrm6_tunnel_type, AF_INET6);
+ if (rv < 0)
+ goto out_type;
+ rv = xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6);
+ if (rv < 0)
+ goto out_xfrm6;
+ rv = xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET);
+ if (rv < 0)
+ goto out_xfrm46;
return 0;
-dereg46:
- xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
-dereg6:
+out_xfrm46:
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
-unreg:
+out_xfrm6:
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
-err:
- return -EAGAIN;
+out_type:
+ unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+out_pernet:
+ kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
+ return rv;
}
static void __exit xfrm6_tunnel_fini(void)
{
- xfrm6_tunnel_spi_fini();
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
+ unregister_pernet_subsys(&xfrm6_tunnel_net_ops);
+ kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
}
module_init(xfrm6_tunnel_init);
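
xfrm6_tunnel now keeps its SPI hash tables and counter in per-namespace state reached through net_generic() instead of file-scope globals; only the kmem cache stays global. A standalone sketch of that lookup-by-subsystem-id idea, where struct fake_net, fake_net_generic() and fake_tunnel_id are all invented stand-ins:

#include <stdio.h>
#include <stdlib.h>

#define MAX_SUBSYS 4

struct fake_net {
	void *gen[MAX_SUBSYS];          /* per-subsystem private pointers */
};

struct fake_tunnel_state {
	unsigned int next_spi;
};

static int fake_tunnel_id;              /* assigned at "registration" time */

static void *fake_net_generic(struct fake_net *net, int id)
{
	return net->gen[id];
}

int main(void)
{
	struct fake_net net_a = { { NULL } }, net_b = { { NULL } };
	struct fake_tunnel_state *sa, *sb;

	fake_tunnel_id = 0;                     /* "registered" subsystem slot */
	net_a.gen[fake_tunnel_id] = calloc(1, sizeof(*sa));
	net_b.gen[fake_tunnel_id] = calloc(1, sizeof(*sb));

	sa = fake_net_generic(&net_a, fake_tunnel_id);
	sb = fake_net_generic(&net_b, fake_tunnel_id);

	/* each namespace advances its own SPI counter independently */
	sa->next_spi = 1;
	sb->next_spi = 100;
	printf("net_a spi=%u net_b spi=%u\n", sa->next_spi, sb->next_spi);

	free(sa);
	free(sb);
	return 0;
}
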
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 576178482f8..26b5bfcf1d0 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -13,45 +13,15 @@
#include <net/tcp_states.h>
#include <net/ipx.h>
-static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
-{
- struct ipx_interface *i;
-
- list_for_each_entry(i, &ipx_interfaces, node)
- if (!pos--)
- goto out;
- i = NULL;
-out:
- return i;
-}
-
-static struct ipx_interface *ipx_interfaces_next(struct ipx_interface *i)
-{
- struct ipx_interface *rc = NULL;
-
- if (i->node.next != &ipx_interfaces)
- rc = list_entry(i->node.next, struct ipx_interface, node);
- return rc;
-}
-
static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
{
- loff_t l = *pos;
-
spin_lock_bh(&ipx_interfaces_lock);
- return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&ipx_interfaces, *pos);
}
static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ipx_interface *i;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- i = ipx_interfaces_head();
- else
- i = ipx_interfaces_next(v);
- return i;
+ return seq_list_next(v, &ipx_interfaces, pos);
}
static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
@@ -63,7 +33,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
{
struct ipx_interface *i;
- if (v == SEQ_START_TOKEN) {
+ if (v == &ipx_interfaces) {
seq_puts(seq, "Network Node_Address Primary Device "
"Frame_Type");
#ifdef IPX_REFCNT_DEBUG
@@ -73,7 +43,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
goto out;
}
- i = v;
+ i = list_entry(v, struct ipx_interface, node);
seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
i->if_node[0], i->if_node[1], i->if_node[2],
@@ -89,53 +59,15 @@ out:
return 0;
}
-static struct ipx_route *ipx_routes_head(void)
-{
- struct ipx_route *rc = NULL;
-
- if (!list_empty(&ipx_routes))
- rc = list_entry(ipx_routes.next, struct ipx_route, node);
- return rc;
-}
-
-static struct ipx_route *ipx_routes_next(struct ipx_route *r)
-{
- struct ipx_route *rc = NULL;
-
- if (r->node.next != &ipx_routes)
- rc = list_entry(r->node.next, struct ipx_route, node);
- return rc;
-}
-
-static __inline__ struct ipx_route *ipx_get_route_idx(loff_t pos)
-{
- struct ipx_route *r;
-
- list_for_each_entry(r, &ipx_routes, node)
- if (!pos--)
- goto out;
- r = NULL;
-out:
- return r;
-}
-
static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
{
- loff_t l = *pos;
read_lock_bh(&ipx_routes_lock);
- return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&ipx_routes, *pos);
}
static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ipx_route *r;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- r = ipx_routes_head();
- else
- r = ipx_routes_next(v);
- return r;
+ return seq_list_next(v, &ipx_routes, pos);
}
static void ipx_seq_route_stop(struct seq_file *seq, void *v)
@@ -147,11 +79,13 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
{
struct ipx_route *rt;
- if (v == SEQ_START_TOKEN) {
+ if (v == &ipx_routes) {
seq_puts(seq, "Network Router_Net Router_Node\n");
goto out;
}
- rt = v;
+
+ rt = list_entry(v, struct ipx_route, node);
+
seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
if (rt->ir_routed)
seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
@@ -226,9 +160,9 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
spin_unlock_bh(&i->if_sklist_lock);
sk = NULL;
for (;;) {
- i = ipx_interfaces_next(i);
- if (!i)
+ if (i->node.next == &ipx_interfaces)
break;
+ i = list_entry(i->node.next, struct ipx_interface, node);
spin_lock_bh(&i->if_sklist_lock);
if (!hlist_empty(&i->if_sklist)) {
sk = sk_head(&i->if_sklist);
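The ipx_proc rewrite drops the hand-rolled cursor helpers in favour of the generic seq_list_start_head()/seq_list_next() iterators, which walk a plain list_head and return the head pointer itself as the header token (hence the v == &ipx_interfaces checks). A hedged sketch of a seq_file built on the same helpers, with placeholder foo_* names and locking omitted:

#include <linux/list.h>
#include <linux/seq_file.h>

struct foo_entry {
        struct list_head node;
        int value;
};

static LIST_HEAD(foo_list);

static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
{
        /* &foo_list for *pos == 0 (header), the pos'th entry otherwise */
        return seq_list_start_head(&foo_list, *pos);
}

static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &foo_list, pos);
}

static void foo_seq_stop(struct seq_file *seq, void *v)
{
}

static int foo_seq_show(struct seq_file *seq, void *v)
{
        struct foo_entry *e;

        if (v == &foo_list) {           /* plays the role of SEQ_START_TOKEN */
                seq_puts(seq, "Value\n");
                return 0;
        }
        e = list_entry(v, struct foo_entry, node);
        seq_printf(seq, "%d\n", e->value);
        return 0;
}

static const struct seq_operations foo_seq_ops = {
        .start  = foo_seq_start,
        .next   = foo_seq_next,
        .stop   = foo_seq_stop,
        .show   = foo_seq_show,
};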
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324..8b85d774e47 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -496,9 +496,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
IRDA_DEBUG(0, "%s()\n", __func__ );
- if (!tty)
- return;
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -1007,9 +1004,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- if (!tty)
- return;
-
/* ircomm_tty_flush_buffer(tty); */
ircomm_tty_shutdown(self);
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3cb92..e486dc89ea5 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1128,34 +1128,14 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
*/
static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
{
- int i = 1;
- struct irlan_cb *self;
-
rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- list_for_each_entry(self, &irlans, dev_list) {
- if (*pos == i)
- return self;
- ++i;
- }
- return NULL;
+ return seq_list_start_head(&irlans, *pos);
}
/* Return entry after v, and increment pos */
static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct list_head *nxt;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- nxt = irlans.next;
- else
- nxt = ((struct irlan_cb *)v)->dev_list.next;
-
- return (nxt == &irlans) ? NULL
- : list_entry(nxt, struct irlan_cb, dev_list);
+ return seq_list_next(v, &irlans, pos);
}
/* End of reading /proc file */
@@ -1170,10 +1150,10 @@ static void irlan_seq_stop(struct seq_file *seq, void *v)
*/
static int irlan_seq_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
+ if (v == &irlans)
seq_puts(seq, "IrLAN instances:\n");
else {
- struct irlan_cb *self = v;
+ struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d340110f5c0..9616c32d107 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -321,14 +321,15 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
}
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* hardware_set_filter(NULL); */
irlan_set_multicast_filter(self, TRUE);
}
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
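The irlan_eth change is part of the tree-wide switch from reading dev->mc_count directly to the netdev_mc_count()/netdev_mc_empty() accessors. The usual shape of a set_multicast_list handler built on them looks roughly like this (HW_MAX_FILTERS is an invented per-device limit and the branch bodies are placeholders):

#include <linux/netdevice.h>

#define HW_MAX_FILTERS  16      /* hypothetical number of hardware filter slots */

static void foo_set_multicast_list(struct net_device *dev)
{
        if (dev->flags & IFF_PROMISC) {
                /* accept every frame */
        } else if ((dev->flags & IFF_ALLMULTI) ||
                   netdev_mc_count(dev) > HW_MAX_FILTERS) {
                /* list too large for the hardware: accept all multicast */
        } else if (!netdev_mc_empty(dev)) {
                /* walk the list and program the hardware filter */
        } else {
                /* no multicast listeners: clear the filter */
        }
}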
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b..6b3602de359 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
/* Query PPP channel and unit number */
case PPPIOCGCHAN:
+ lock_kernel();
if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
(int __user *)argp))
err = 0;
+ unlock_kernel();
break;
case PPPIOCGUNIT:
lock_kernel();
if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
(int __user *)argp))
- err = 0;
+ err = 0;
+ unlock_kernel();
break;
/* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 476b307bd80..69b5b75f543 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -124,7 +124,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
return ret;
}
-static struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
+static const struct nla_policy irda_nl_policy[IRDA_NL_ATTR_MAX + 1] = {
[IRDA_NL_ATTR_IFNAME] = { .type = NLA_NUL_STRING,
.len = IFNAMSIZ-1 },
[IRDA_NL_ATTR_MODE] = { .type = NLA_U32 },
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef647..36870788264 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -41,10 +41,10 @@ struct netns_pfkey {
struct hlist_head table;
atomic_t socks_nr;
};
-static DECLARE_WAIT_QUEUE_HEAD(pfkey_table_wait);
-static DEFINE_RWLOCK(pfkey_table_lock);
-static atomic_t pfkey_table_users = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pfkey_mutex);
+#define DUMMY_MARK 0
+static struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
@@ -108,50 +108,6 @@ static void pfkey_sock_destruct(struct sock *sk)
atomic_dec(&net_pfkey->socks_nr);
}
-static void pfkey_table_grab(void)
-{
- write_lock_bh(&pfkey_table_lock);
-
- if (atomic_read(&pfkey_table_users)) {
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue_exclusive(&pfkey_table_wait, &wait);
- for(;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&pfkey_table_users) == 0)
- break;
- write_unlock_bh(&pfkey_table_lock);
- schedule();
- write_lock_bh(&pfkey_table_lock);
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&pfkey_table_wait, &wait);
- }
-}
-
-static __inline__ void pfkey_table_ungrab(void)
-{
- write_unlock_bh(&pfkey_table_lock);
- wake_up(&pfkey_table_wait);
-}
-
-static __inline__ void pfkey_lock_table(void)
-{
- /* read_lock() synchronizes us to pfkey_table_grab */
-
- read_lock(&pfkey_table_lock);
- atomic_inc(&pfkey_table_users);
- read_unlock(&pfkey_table_lock);
-}
-
-static __inline__ void pfkey_unlock_table(void)
-{
- if (atomic_dec_and_test(&pfkey_table_users))
- wake_up(&pfkey_table_wait);
-}
-
-
static const struct proto_ops pfkey_ops;
static void pfkey_insert(struct sock *sk)
@@ -159,16 +115,16 @@ static void pfkey_insert(struct sock *sk)
struct net *net = sock_net(sk);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
- pfkey_table_grab();
- sk_add_node(sk, &net_pfkey->table);
- pfkey_table_ungrab();
+ mutex_lock(&pfkey_mutex);
+ sk_add_node_rcu(sk, &net_pfkey->table);
+ mutex_unlock(&pfkey_mutex);
}
static void pfkey_remove(struct sock *sk)
{
- pfkey_table_grab();
- sk_del_node_init(sk);
- pfkey_table_ungrab();
+ mutex_lock(&pfkey_mutex);
+ sk_del_node_init_rcu(sk);
+ mutex_unlock(&pfkey_mutex);
}
static struct proto key_proto = {
@@ -223,6 +179,8 @@ static int pfkey_release(struct socket *sock)
sock_orphan(sk);
sock->sk = NULL;
skb_queue_purge(&sk->sk_write_queue);
+
+ synchronize_rcu();
sock_put(sk);
return 0;
@@ -277,8 +235,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
if (!skb)
return -ENOMEM;
- pfkey_lock_table();
- sk_for_each(sk, node, &net_pfkey->table) {
+ rcu_read_lock();
+ sk_for_each_rcu(sk, node, &net_pfkey->table) {
struct pfkey_sock *pfk = pfkey_sk(sk);
int err2;
@@ -309,7 +267,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
if ((broadcast_flags & BROADCAST_REGISTERED) && err)
err = err2;
}
- pfkey_unlock_table();
+ rcu_read_unlock();
if (one_sk != NULL)
err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
@@ -691,7 +649,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, struct sadb_
if (!xaddr)
return NULL;
- return xfrm_state_lookup(net, xaddr, sa->sadb_sa_spi, proto, family);
+ return xfrm_state_lookup(net, DUMMY_MARK, xaddr, sa->sadb_sa_spi, proto, family);
}
#define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1)))
@@ -1360,7 +1318,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
}
if (hdr->sadb_msg_seq) {
- x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq);
+ x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
if (x && xfrm_addr_cmp(&x->id.daddr, xdaddr, family)) {
xfrm_state_put(x);
x = NULL;
@@ -1368,7 +1326,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
}
if (!x)
- x = xfrm_find_acq(net, mode, reqid, proto, xdaddr, xsaddr, 1, family);
+ x = xfrm_find_acq(net, &dummy_mark, mode, reqid, proto, xdaddr, xsaddr, 1, family);
if (x == NULL)
return -ENOENT;
@@ -1417,7 +1375,7 @@ static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, struct sadb_msg *
if (hdr->sadb_msg_seq == 0 || hdr->sadb_msg_errno == 0)
return 0;
- x = xfrm_find_acq_byseq(net, hdr->sadb_msg_seq);
+ x = xfrm_find_acq_byseq(net, DUMMY_MARK, hdr->sadb_msg_seq);
if (x == NULL)
return 0;
@@ -1712,6 +1670,23 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, struct sadb_msg
return 0;
}
+static int unicast_flush_resp(struct sock *sk, struct sadb_msg *ihdr)
+{
+ struct sk_buff *skb;
+ struct sadb_msg *hdr;
+
+ skb = alloc_skb(sizeof(struct sadb_msg) + 16, GFP_ATOMIC);
+ if (!skb)
+ return -ENOBUFS;
+
+ hdr = (struct sadb_msg *) skb_put(skb, sizeof(struct sadb_msg));
+ memcpy(hdr, ihdr, sizeof(struct sadb_msg));
+ hdr->sadb_msg_errno = (uint8_t) 0;
+ hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+}
+
static int key_notify_sa_flush(struct km_event *c)
{
struct sk_buff *skb;
@@ -1740,7 +1715,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
unsigned proto;
struct km_event c;
struct xfrm_audit audit_info;
- int err;
+ int err, err2;
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
if (proto == 0)
@@ -1750,8 +1725,13 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
audit_info.sessionid = audit_get_sessionid(current);
audit_info.secid = 0;
err = xfrm_state_flush(net, proto, &audit_info);
- if (err)
- return err;
+ err2 = unicast_flush_resp(sk, hdr);
+ if (err || err2) {
+ if (err == -ESRCH) /* empty table - go quietly */
+ err = 0;
+ return err ? err : err2;
+ }
+
c.data.proto = proto;
c.seq = hdr->sadb_msg_seq;
c.pid = hdr->sadb_msg_pid;
@@ -2346,7 +2326,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
return err;
}
- xp = xfrm_policy_bysel_ctx(net, XFRM_POLICY_TYPE_MAIN,
+ xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
1, &err);
security_xfrm_policy_free(pol_ctx);
@@ -2594,8 +2574,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
return -EINVAL;
delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
- xp = xfrm_policy_byid(net, XFRM_POLICY_TYPE_MAIN, dir,
- pol->sadb_x_policy_id, delete, &err);
+ xp = xfrm_policy_byid(net, DUMMY_MARK, XFRM_POLICY_TYPE_MAIN,
+ dir, pol->sadb_x_policy_id, delete, &err);
if (xp == NULL)
return -ENOENT;
@@ -2706,14 +2686,19 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
struct net *net = sock_net(sk);
struct km_event c;
struct xfrm_audit audit_info;
- int err;
+ int err, err2;
audit_info.loginuid = audit_get_loginuid(current);
audit_info.sessionid = audit_get_sessionid(current);
audit_info.secid = 0;
err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
- if (err)
+ err2 = unicast_flush_resp(sk, hdr);
+ if (err || err2) {
+ if (err == -ESRCH) /* empty table - old silent behavior */
+ return 0;
return err;
+ }
+
c.data.type = XFRM_POLICY_TYPE_MAIN;
c.event = XFRM_MSG_FLUSHPOLICY;
c.pid = hdr->sadb_msg_pid;
@@ -3019,12 +3004,11 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, struct km_e
static u32 get_acqseq(void)
{
u32 res;
- static u32 acqseq;
- static DEFINE_SPINLOCK(acqseq_lock);
+ static atomic_t acqseq;
- spin_lock_bh(&acqseq_lock);
- res = (++acqseq ? : ++acqseq);
- spin_unlock_bh(&acqseq_lock);
+ do {
+ res = atomic_inc_return(&acqseq);
+ } while (!res);
return res;
}
@@ -3655,9 +3639,8 @@ static const struct net_proto_family pfkey_family_ops = {
#ifdef CONFIG_PROC_FS
static int pfkey_seq_show(struct seq_file *f, void *v)
{
- struct sock *s;
+ struct sock *s = sk_entry(v);
- s = (struct sock *)v;
if (v == SEQ_START_TOKEN)
seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
else
@@ -3676,19 +3659,9 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
{
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
- struct sock *s;
- struct hlist_node *node;
- loff_t pos = *ppos;
- read_lock(&pfkey_table_lock);
- if (pos == 0)
- return SEQ_START_TOKEN;
-
- sk_for_each(s, node, &net_pfkey->table)
- if (pos-- == 1)
- return s;
-
- return NULL;
+ rcu_read_lock();
+ return seq_hlist_start_head_rcu(&net_pfkey->table, *ppos);
}
static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3696,15 +3669,12 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
- ++*ppos;
- return (v == SEQ_START_TOKEN) ?
- sk_head(&net_pfkey->table) :
- sk_next((struct sock *)v);
+ return seq_hlist_next_rcu(v, &net_pfkey->table, ppos);
}
static void pfkey_seq_stop(struct seq_file *f, void *v)
{
- read_unlock(&pfkey_table_lock);
+ rcu_read_unlock();
}
static const struct seq_operations pfkey_seq_ops = {
@@ -3738,17 +3708,17 @@ static int __net_init pfkey_init_proc(struct net *net)
return 0;
}
-static void pfkey_exit_proc(struct net *net)
+static void __net_exit pfkey_exit_proc(struct net *net)
{
proc_net_remove(net, "pfkey");
}
#else
-static int __net_init pfkey_init_proc(struct net *net)
+static inline int pfkey_init_proc(struct net *net)
{
return 0;
}
-static void pfkey_exit_proc(struct net *net)
+static inline void pfkey_exit_proc(struct net *net)
{
}
#endif
@@ -3794,9 +3764,9 @@ static struct pernet_operations pfkey_net_ops = {
static void __exit ipsec_pfkey_exit(void)
{
- unregister_pernet_subsys(&pfkey_net_ops);
xfrm_unregister_km(&pfkeyv2_mgr);
sock_unregister(PF_KEY);
+ unregister_pernet_subsys(&pfkey_net_ops);
proto_unregister(&key_proto);
}
@@ -3807,21 +3777,22 @@ static int __init ipsec_pfkey_init(void)
if (err != 0)
goto out;
- err = sock_register(&pfkey_family_ops);
+ err = register_pernet_subsys(&pfkey_net_ops);
if (err != 0)
goto out_unregister_key_proto;
+ err = sock_register(&pfkey_family_ops);
+ if (err != 0)
+ goto out_unregister_pernet;
err = xfrm_register_km(&pfkeyv2_mgr);
if (err != 0)
goto out_sock_unregister;
- err = register_pernet_subsys(&pfkey_net_ops);
- if (err != 0)
- goto out_xfrm_unregister_km;
out:
return err;
-out_xfrm_unregister_km:
- xfrm_unregister_km(&pfkeyv2_mgr);
+
out_sock_unregister:
sock_unregister(PF_KEY);
+out_unregister_pernet:
+ unregister_pernet_subsys(&pfkey_net_ops);
out_unregister_key_proto:
proto_unregister(&key_proto);
goto out;
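Beyond the RCU conversion of the pfkey socket table, the get_acqseq() change above is a reusable idiom: a lock-free sequence-number generator that reserves 0 as "no sequence" and simply retries when the 32-bit counter wraps to it. A standalone sketch of that idiom, with generic names not tied to af_key:

#include <linux/types.h>
#include <asm/atomic.h>

static u32 get_seq(void)
{
        static atomic_t seq = ATOMIC_INIT(0);
        u32 res;

        do {
                res = atomic_inc_return(&seq); /* atomic, no spinlock needed */
        } while (!res);                        /* skip 0 on wraparound */

        return res;
}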
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad0..e35d907fba2 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,6 +47,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
#define dprintk(args...)
#endif
+/* Maybe we'll add some more in the future. */
+#define LLC_CMSG_PKTINFO 1
+
+
/**
* llc_ui_next_link_no - return the next unused link number for a sap
* @sap: Address of sap to get link number from.
@@ -136,6 +140,7 @@ static struct proto llc_proto = {
.name = "LLC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct llc_sock),
+ .slab_flags = SLAB_DESTROY_BY_RCU,
};
/**
@@ -192,10 +197,8 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED)) {
- llc_sap_put(llc->sap);
+ if (!sock_flag(sk, SOCK_ZAPPED))
llc_sap_remove_socket(llc->sap, sk);
- }
release_sock(sk);
if (llc->dev)
dev_put(llc->dev);
@@ -255,7 +258,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = -ENODEV;
- llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
+ if (sk->sk_bound_dev_if) {
+ llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
+ dev_put(llc->dev);
+ llc->dev = NULL;
+ }
+ } else
+ llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
if (!llc->dev)
goto out;
rc = -EUSERS;
@@ -306,7 +316,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
goto out;
rc = -ENODEV;
rtnl_lock();
- llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac);
+ if (sk->sk_bound_dev_if) {
+ llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ if (llc->dev) {
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = llc->dev->type;
+ if (llc_mac_null(addr->sllc_mac))
+ memcpy(addr->sllc_mac, llc->dev->dev_addr,
+ IFHWADDRLEN);
+ if (addr->sllc_arphrd != llc->dev->type ||
+ !llc_mac_match(addr->sllc_mac,
+ llc->dev->dev_addr)) {
+ rc = -EINVAL;
+ dev_put(llc->dev);
+ llc->dev = NULL;
+ }
+ }
+ } else
+ llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
+ addr->sllc_mac);
rtnl_unlock();
if (!llc->dev)
goto out;
@@ -322,7 +350,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
rc = -EBUSY; /* some other network layer is using the sap */
if (!sap)
goto out;
- llc_sap_hold(sap);
} else {
struct llc_addr laddr, daddr;
struct sock *ask;
@@ -591,6 +618,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
return rc;
}
+static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+{
+ struct llc_sock *llc = llc_sk(skb->sk);
+
+ if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ struct llc_pktinfo info;
+
+ info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ llc_pdu_decode_da(skb, info.lpi_mac);
+ put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
+ }
+}
+
/**
* llc_ui_accept - accept a new incoming connection.
* @sock: Socket which connections arrive on.
@@ -812,6 +853,8 @@ copy_uaddr:
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
msg->msg_namelen = sizeof(*uaddr);
}
+ if (llc_sk(sk)->cmsg_flags)
+ llc_cmsg_rcv(msg, skb);
goto out;
}
@@ -1030,6 +1073,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
goto out;
llc->rw = opt;
break;
+ case LLC_OPT_PKTINFO:
+ if (opt)
+ llc->cmsg_flags |= LLC_CMSG_PKTINFO;
+ else
+ llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
+ break;
default:
rc = -ENOPROTOOPT;
goto out;
@@ -1083,6 +1132,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
val = llc->k; break;
case LLC_OPT_RX_WIN:
val = llc->rw; break;
+ case LLC_OPT_PKTINFO:
+ val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
+ break;
default:
rc = -ENOPROTOOPT;
goto out;
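The af_llc additions expose the receiving interface and addressing of each PDU via a new LLC_OPT_PKTINFO socket option plus an SOL_LLC control message carrying struct llc_pktinfo. A hedged userspace sketch of how a receiver could opt in and read it back; the constant and field names are taken from the hunk above, everything else (buffer sizes, error handling) is illustrative only:

#include <string.h>
#include <sys/socket.h>
#include <linux/llc.h>

#ifndef SOL_LLC
#define SOL_LLC 268                     /* value from <linux/socket.h> */
#endif

static void read_llc_pktinfo(int fd)
{
        char data[1500], cbuf[CMSG_SPACE(sizeof(struct llc_pktinfo))];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
        };
        struct cmsghdr *cm;
        int on = 1;

        /* ask the kernel to attach packet info to every received PDU */
        setsockopt(fd, SOL_LLC, LLC_OPT_PKTINFO, &on, sizeof(on));

        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
                if (cm->cmsg_level == SOL_LLC && cm->cmsg_type == LLC_OPT_PKTINFO) {
                        struct llc_pktinfo info;

                        memcpy(&info, CMSG_DATA(cm), sizeof(info));
                        /* info.lpi_ifindex, info.lpi_sap and info.lpi_mac now
                         * describe where and to whom the PDU was delivered */
                }
        }
}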
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018..a8dde9b010d 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -468,6 +468,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
return rc;
}
+static inline bool llc_estab_match(const struct llc_sap *sap,
+ const struct llc_addr *daddr,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return llc->laddr.lsap == laddr->lsap &&
+ llc->daddr.lsap == daddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac) &&
+ llc_mac_match(llc->daddr.mac, daddr->mac);
+}
+
/**
* __llc_lookup_established - Finds connection for the remote/local sap/mac
* @sap: SAP
@@ -484,23 +497,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
struct llc_addr *laddr)
{
struct sock *rc;
- struct hlist_node *node;
-
- read_lock(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
-
- if (llc->laddr.lsap == laddr->lsap &&
- llc->daddr.lsap == daddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac) &&
- llc_mac_match(llc->daddr.mac, daddr->mac)) {
- sock_hold(rc);
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_estab_match(sap, daddr, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_estab_match(sap, daddr, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
goto found;
}
}
rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
found:
- read_unlock(&sap->sk_list.lock);
+ rcu_read_unlock();
return rc;
}
@@ -516,6 +541,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
return sk;
}
+static inline bool llc_listener_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac);
+}
+
+static struct sock *__llc_lookup_listener(struct llc_sap *sap,
+ struct llc_addr *laddr)
+{
+ struct sock *rc;
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_listener_match(sap, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_listener_match(sap, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
+ goto found;
+ }
+ }
+ rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
+found:
+ rcu_read_unlock();
+ return rc;
+}
+
/**
* llc_lookup_listener - Finds listener for local MAC + SAP
* @sap: SAP
@@ -529,24 +601,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
static struct sock *llc_lookup_listener(struct llc_sap *sap,
struct llc_addr *laddr)
{
- struct sock *rc;
- struct hlist_node *node;
+ static struct llc_addr null_addr;
+ struct sock *rc = __llc_lookup_listener(sap, laddr);
- read_lock(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
+ if (!rc)
+ rc = __llc_lookup_listener(sap, &null_addr);
- if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
- llc->laddr.lsap == laddr->lsap &&
- (llc_mac_match(llc->laddr.mac, laddr->mac) ||
- llc_mac_null(llc->laddr.mac))) {
- sock_hold(rc);
- goto found;
- }
- }
- rc = NULL;
-found:
- read_unlock(&sap->sk_list.lock);
return rc;
}
@@ -647,15 +707,22 @@ static int llc_find_offset(int state, int ev_type)
* @sap: SAP
* @sk: socket
*
- * This function adds a socket to sk_list of a SAP.
+ * This function adds a socket to the hash tables of a SAP.
*/
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
{
+ struct llc_sock *llc = llc_sk(sk);
+ struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
+ struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
+
llc_sap_hold(sap);
- write_lock_bh(&sap->sk_list.lock);
llc_sk(sk)->sap = sap;
- sk_add_node(sk, &sap->sk_list.list);
- write_unlock_bh(&sap->sk_list.lock);
+
+ spin_lock_bh(&sap->sk_lock);
+ sap->sk_count++;
+ sk_nulls_add_node_rcu(sk, laddr_hb);
+ hlist_add_head(&llc->dev_hash_node, dev_hb);
+ spin_unlock_bh(&sap->sk_lock);
}
/**
@@ -663,14 +730,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
* @sap: SAP
* @sk: socket
*
- * This function removes a connection from sk_list.list of a SAP if
+ * This function removes a connection from the hash tables of a SAP if
* the connection was in this list.
*/
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
{
- write_lock_bh(&sap->sk_list.lock);
- sk_del_node_init(sk);
- write_unlock_bh(&sap->sk_list.lock);
+ struct llc_sock *llc = llc_sk(sk);
+
+ spin_lock_bh(&sap->sk_lock);
+ sk_nulls_del_node_init_rcu(sk);
+ hlist_del(&llc->dev_hash_node);
+ sap->sk_count--;
+ spin_unlock_bh(&sap->sk_lock);
llc_sap_put(sap);
}
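The lookups rewritten in llc_conn.c depend on SLAB_DESTROY_BY_RCU (set on llc_proto in af_llc.c above): RCU only guarantees the memory stays a struct sock for the duration of the read-side section, not that it is still the same socket, so every hit must be pinned with atomic_inc_not_zero() and then re-checked, and the walk restarts when the nulls marker at the end of the chain is not the one expected for this bucket. The pattern, pulled out of the three lookup functions into one generic sketch (match() is a caller-supplied predicate):

#include <net/sock.h>

static struct sock *nulls_lookup(struct hlist_nulls_head *head, int slot,
                                 bool (*match)(const struct sock *sk))
{
        struct hlist_nulls_node *node;
        struct sock *sk;

        rcu_read_lock();
again:
        sk_nulls_for_each_rcu(sk, node, head) {
                if (!match(sk))
                        continue;
                /* the slab object may be freed and reused at any time:
                 * pin it first, then verify it still matches */
                if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                        goto again;
                if (unlikely(!match(sk))) {
                        sock_put(sk);
                        continue;
                }
                goto found;
        }
        sk = NULL;
        /* a foreign nulls value means the entry we were on was moved
         * to another chain mid-walk: restart from the bucket head */
        if (get_nulls_value(node) != slot)
                goto again;
found:
        rcu_read_unlock();
        return sk;                      /* caller owns a reference if non-NULL */
}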
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a6..78167e81dfe 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
#include <net/llc.h>
LIST_HEAD(llc_sap_list);
-DEFINE_RWLOCK(llc_sap_list_lock);
+DEFINE_SPINLOCK(llc_sap_list_lock);
/**
* llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
static struct llc_sap *llc_sap_alloc(void)
{
struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
+ int i;
if (sap) {
/* sap->laddr.mac - leave as a null, it's filled by bind */
sap->state = LLC_SAP_STATE_ACTIVE;
- rwlock_init(&sap->sk_list.lock);
+ spin_lock_init(&sap->sk_lock);
+ for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
+ INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
atomic_set(&sap->refcnt, 1);
}
return sap;
}
-/**
- * llc_add_sap - add sap to station list
- * @sap: Address of the sap
- *
- * Adds a sap to the LLC's station sap list.
- */
-static void llc_add_sap(struct llc_sap *sap)
-{
- list_add_tail(&sap->node, &llc_sap_list);
-}
-
-/**
- * llc_del_sap - del sap from station list
- * @sap: Address of the sap
- *
- * Removes a sap to the LLC's station sap list.
- */
-static void llc_del_sap(struct llc_sap *sap)
-{
- write_lock_bh(&llc_sap_list_lock);
- list_del(&sap->node);
- write_unlock_bh(&llc_sap_list_lock);
-}
-
static struct llc_sap *__llc_sap_find(unsigned char sap_value)
{
struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
*/
struct llc_sap *llc_sap_find(unsigned char sap_value)
{
- struct llc_sap* sap;
+ struct llc_sap *sap;
- read_lock_bh(&llc_sap_list_lock);
+ rcu_read_lock_bh();
sap = __llc_sap_find(sap_value);
if (sap)
llc_sap_hold(sap);
- read_unlock_bh(&llc_sap_list_lock);
+ rcu_read_unlock_bh();
return sap;
}
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
{
struct llc_sap *sap = NULL;
- write_lock_bh(&llc_sap_list_lock);
+ spin_lock_bh(&llc_sap_list_lock);
if (__llc_sap_find(lsap)) /* SAP already exists */
goto out;
sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
goto out;
sap->laddr.lsap = lsap;
sap->rcv_func = func;
- llc_add_sap(sap);
+ list_add_tail_rcu(&sap->node, &llc_sap_list);
out:
- write_unlock_bh(&llc_sap_list_lock);
+ spin_unlock_bh(&llc_sap_list_lock);
return sap;
}
@@ -142,8 +121,14 @@ out:
*/
void llc_sap_close(struct llc_sap *sap)
{
- WARN_ON(!hlist_empty(&sap->sk_list.list));
- llc_del_sap(sap);
+ WARN_ON(sap->sk_count);
+
+ spin_lock_bh(&llc_sap_list_lock);
+ list_del_rcu(&sap->node);
+ spin_unlock_bh(&llc_sap_list_lock);
+
+ synchronize_rcu();
+
kfree(sap);
}
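llc_core.c now guards the SAP list with a writer-side spinlock and RCU for readers; the detail that matters is in llc_sap_close(), which must let a grace period elapse after list_del_rcu() before the memory may be kfree()d. A minimal sketch of that writer-side pattern on a hypothetical item list:

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
        struct list_head node;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

static void item_add(struct item *it)
{
        spin_lock_bh(&item_lock);
        list_add_tail_rcu(&it->node, &item_list);
        spin_unlock_bh(&item_lock);
}

static void item_del(struct item *it)
{
        spin_lock_bh(&item_lock);
        list_del_rcu(&it->node);
        spin_unlock_bh(&item_lock);

        synchronize_rcu();      /* no reader can still hold a pointer to 'it' */
        kfree(it);
}

Readers then traverse the list inside an RCU read-side section, which is what the rcu_read_lock_bh() in llc_sap_find() above provides.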
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc85..b38a1079a98 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
int llc_mac_hdr_init(struct sk_buff *skb,
const unsigned char *sa, const unsigned char *da)
{
- int rc = 0;
+ int rc = -EINVAL;
switch (skb->dev->type) {
-#ifdef CONFIG_TR
- case ARPHRD_IEEE802_TR: {
- struct net_device *dev = skb->dev;
- struct trh_hdr *trh;
-
- skb_push(skb, sizeof(*trh));
- skb_reset_mac_header(skb);
- trh = tr_hdr(skb);
- trh->ac = AC;
- trh->fc = LLC_FRAME;
- if (sa)
- memcpy(trh->saddr, sa, dev->addr_len);
- else
- memset(trh->saddr, 0, dev->addr_len);
- if (da) {
- memcpy(trh->daddr, da, dev->addr_len);
- tr_source_route(skb, trh, dev);
- skb_reset_mac_header(skb);
- }
- break;
- }
-#endif
+ case ARPHRD_IEEE802_TR:
case ARPHRD_ETHER:
- case ARPHRD_LOOPBACK: {
- unsigned short len = skb->len;
- struct ethhdr *eth;
-
- skb_push(skb, sizeof(*eth));
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- eth->h_proto = htons(len);
- memcpy(eth->h_dest, da, ETH_ALEN);
- memcpy(eth->h_source, sa, ETH_ALEN);
+ case ARPHRD_LOOPBACK:
+ rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
+ skb->len);
+ if (rc > 0)
+ rc = 0;
break;
- }
default:
- printk(KERN_WARNING "device type not supported: %d\n",
- skb->dev->type);
- rc = -EINVAL;
+ WARN(1, "device type not supported: %d\n", skb->dev->type);
}
return rc;
}
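llc_mac_hdr_init() now defers to dev_hard_header(), so the per-device header_ops build the link-layer header (including Token Ring source routing) and the open-coded ethhdr/trh_hdr handling goes away. The call reduces to something like the sketch below; note that dev_hard_header() returns the header length on success, which the caller collapses to 0:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int build_llc_mac_header(struct sk_buff *skb,
                                const unsigned char *sa,
                                const unsigned char *da)
{
        int rc;

        /* dev->header_ops->create() pushes the right header; for 802.2
         * the frame length goes where the EtherType normally sits */
        rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa, skb->len);
        return rc > 0 ? 0 : rc;
}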
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6..7af1ff2d1f1 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
static struct sock *llc_get_sk_idx(loff_t pos)
{
- struct list_head *sap_entry;
struct llc_sap *sap;
- struct hlist_node *node;
struct sock *sk = NULL;
-
- list_for_each(sap_entry, &llc_sap_list) {
- sap = list_entry(sap_entry, struct llc_sap, node);
-
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(sk, node, &sap->sk_list.list) {
- if (!pos)
- goto found;
- --pos;
+ int i;
+
+ list_for_each_entry_rcu(sap, &llc_sap_list, node) {
+ spin_lock_bh(&sap->sk_lock);
+ for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
+ struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
+ struct hlist_nulls_node *node;
+
+ sk_nulls_for_each(sk, node, head) {
+ if (!pos)
+ goto found; /* keep the lock */
+ --pos;
+ }
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
sk = NULL;
found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
{
loff_t l = *pos;
- read_lock_bh(&llc_sap_list_lock);
+ rcu_read_lock_bh();
return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
}
+static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
+{
+ struct hlist_nulls_node *node;
+ struct sock *sk = NULL;
+
+ while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
+ sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
+ goto out;
+
+out:
+ return sk;
+}
+
static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
sk = v;
- next = sk_next(sk);
+ next = sk_nulls_next(sk);
if (next) {
sk = next;
goto out;
}
llc = llc_sk(sk);
sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
- sk = NULL;
- for (;;) {
- if (sap->node.next == &llc_sap_list)
- break;
- sap = list_entry(sap->node.next, struct llc_sap, node);
- read_lock_bh(&sap->sk_list.lock);
- if (!hlist_empty(&sap->sk_list.list)) {
- sk = sk_head(&sap->sk_list.list);
- break;
- }
- read_unlock_bh(&sap->sk_list.lock);
+ sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
+ if (sk)
+ goto out;
+ spin_unlock_bh(&sap->sk_lock);
+ list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
+ spin_lock_bh(&sap->sk_lock);
+ sk = laddr_hash_next(sap, -1);
+ if (sk)
+ break; /* keep the lock */
+ spin_unlock_bh(&sap->sk_lock);
}
out:
return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
struct llc_sock *llc = llc_sk(sk);
struct llc_sap *sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
- read_unlock_bh(&llc_sap_list_lock);
+ rcu_read_unlock_bh();
}
static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42c..ad6e6e1cf22 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -297,6 +297,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
llc_sap_state_process(sap, skb);
}
+static inline bool llc_dgram_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_DGRAM &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac);
+}
+
/**
* llc_lookup_dgram - Finds dgram socket for the local sap/mac
* @sap: SAP
@@ -309,25 +320,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
const struct llc_addr *laddr)
{
struct sock *rc;
- struct hlist_node *node;
-
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
-
- if (rc->sk_type == SOCK_DGRAM &&
- llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac)) {
- sock_hold(rc);
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock_bh();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_dgram_match(sap, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_dgram_match(sap, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
goto found;
}
}
rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
found:
- read_unlock_bh(&sap->sk_list.lock);
+ rcu_read_unlock_bh();
return rc;
}
+static inline bool llc_mcast_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sk_buff *skb,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_DGRAM &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc->dev == skb->dev;
+}
+
+static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
+ struct sock **stack, int count)
+{
+ struct sk_buff *skb1;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ skb1 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb1) {
+ sock_put(stack[i]);
+ continue;
+ }
+
+ llc_sap_rcv(sap, skb1, stack[i]);
+ sock_put(stack[i]);
+ }
+}
+
/**
* llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
* @sap: SAP
@@ -340,32 +394,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
const struct llc_addr *laddr,
struct sk_buff *skb)
{
- struct sock *sk;
+ int i = 0, count = 256 / sizeof(struct sock *);
+ struct sock *sk, *stack[count];
struct hlist_node *node;
+ struct llc_sock *llc;
+ struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(sk, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(sk);
- struct sk_buff *skb1;
+ spin_lock_bh(&sap->sk_lock);
+ hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
- if (sk->sk_type != SOCK_DGRAM)
- continue;
+ sk = &llc->sk;
- if (llc->laddr.lsap != laddr->lsap)
+ if (!llc_mcast_match(sap, laddr, skb, sk))
continue;
- if (llc->dev != skb->dev)
- continue;
-
- skb1 = skb_clone(skb, GFP_ATOMIC);
- if (!skb1)
- break;
-
sock_hold(sk);
- llc_sap_rcv(sap, skb1, sk);
- sock_put(sk);
+ if (i < count)
+ stack[i++] = sk;
+ else {
+ llc_do_mcast(sap, skb, stack, i);
+ i = 0;
+ }
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
+
+ llc_do_mcast(sap, skb, stack, i);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e..a952b7f8c64 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
---help---
This option collects various mac80211 debug settings.
-config MAC80211_DEBUG_PACKET_ALIGNMENT
- bool "Enable packet alignment debugging"
- depends on MAC80211_DEBUG_MENU
- ---help---
- This option is recommended for driver authors and strongly
- discouraged for everybody else, it will trigger a warning
- when a driver hands mac80211 a buffer that is aligned in
- a way that will cause problems with the IP stack on some
- architectures.
-
- Say N unless you're writing a mac80211 based driver.
-
config MAC80211_NOINLINE
bool "Do not inline TX/RX handlers"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8..04420291e7a 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
sta_info.o \
wep.o \
wpa.o \
- scan.o \
+ scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
ibss.o \
- mlme.o \
+ mlme.o work.o \
iface.o \
rate.o \
michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3..a978e666ed6 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,8 +41,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- if (drv_ampdu_action(local, &sta->sdata->vif,
- IEEE80211_AMPDU_RX_STOP,
+ if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
&sta->sta, tid, NULL))
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
@@ -83,12 +82,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
u16 initiator, u16 reason)
{
- struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
rcu_read_unlock();
return;
@@ -136,7 +134,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer "
- "for addba resp frame\n", sdata->dev->name);
+ "for addba resp frame\n", sdata->name);
return;
}
@@ -144,10 +142,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -281,8 +279,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto end;
}
- ret = drv_ampdu_action(local, &sta->sdata->vif,
- IEEE80211_AMPDU_RX_START,
+ ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
&sta->sta, tid, &start_seq_num);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5..5538e1b4a69 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,17 +58,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
- "for addba request frame\n", sdata->dev->name);
+ "for addba request frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -104,7 +104,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer for "
- "bar frame\n", sdata->dev->name);
+ "bar frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +113,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_BACK_REQ);
memcpy(bar->ra, ra, ETH_ALEN);
- memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
bar_control |= (u16)(tid << 12);
@@ -144,7 +144,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- ret = drv_ampdu_action(local, &sta->sdata->vif,
+ ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
&sta->sta, tid, NULL);
@@ -179,7 +179,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
/* check if the TID waits for addBA response */
spin_lock_bh(&sta->lock);
- if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+ if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
+ HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
@@ -236,6 +237,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
+ if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Disassociation is in progress. "
+ "Denying BA session request\n");
+#endif
+ return -EINVAL;
+ }
+
if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
@@ -301,10 +310,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
* call back right away, it must see that the flow has begun */
*state |= HT_ADDBA_REQUESTED_MSK;
- start_seq_num = sta->tid_seq[tid];
+ start_seq_num = sta->tid_seq[tid] >> 4;
- ret = drv_ampdu_action(local, &sdata->vif,
- IEEE80211_AMPDU_TX_START,
+ ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
pubsta, tid, &start_seq_num);
if (ret) {
@@ -420,7 +428,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
ieee80211_agg_splice_finish(local, sta, tid);
spin_unlock(&local->ampdu_lock);
- drv_ampdu_action(local, &sta->sdata->vif,
+ drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
&sta->sta, tid, NULL);
}
@@ -441,7 +449,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
}
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +497,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_WARNING "%s: Not enough memory, "
- "dropping start BA session", skb->dev->name);
+ "dropping start BA session", sdata->name);
#endif
return;
}
@@ -564,7 +572,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
#endif /* CONFIG_MAC80211_HT_DEBUG */
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +629,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_WARNING "%s: Not enough memory, "
- "dropping stop BA session", skb->dev->name);
+ "dropping stop BA session", sdata->name);
#endif
return;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9ae1a4760b5..b7116ef84a3 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1,7 +1,7 @@
/*
* mac80211 configuration hooks for cfg80211
*
- * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
*
* This file is GPLv2 as found in COPYING.
*/
@@ -78,17 +78,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret;
- if (netif_running(dev))
+ if (ieee80211_sdata_running(sdata))
return -EBUSY;
if (!nl80211_params_check(type, params))
return -EINVAL;
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
ret = ieee80211_if_change_type(sdata, type);
if (ret)
return ret;
@@ -150,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
if (mac_addr) {
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta) {
ieee80211_key_free(key);
err = -ENOENT;
@@ -181,7 +179,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
if (mac_addr) {
ret = -ENOENT;
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta)
goto out_unlock;
@@ -228,7 +226,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
if (mac_addr) {
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta)
goto out;
@@ -415,15 +413,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sta_info *sta;
int ret = -ENOENT;
rcu_read_lock();
- /* XXX: verify sta->dev == dev */
-
- sta = sta_info_get(local, mac);
+ sta = sta_info_get_bss(sdata, mac);
if (sta) {
ret = 0;
sta_set_sinfo(sta, sinfo);
@@ -519,6 +515,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
if (old)
memcpy(new->tail, old->tail, new_tail_len);
+ sdata->vif.bss_conf.dtim_period = new->dtim_period;
+
rcu_assign_pointer(sdata->u.ap.beacon, new);
synchronize_rcu();
@@ -732,7 +730,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
} else
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (compare_ether_addr(mac, dev->dev_addr) == 0)
+ if (compare_ether_addr(mac, sdata->vif.addr) == 0)
return -EINVAL;
if (is_multicast_ether_addr(mac))
@@ -751,9 +749,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
- rcu_read_lock();
-
- err = sta_info_insert(sta);
+ err = sta_info_insert_rcu(sta);
if (err) {
rcu_read_unlock();
return err;
@@ -772,27 +768,13 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (mac) {
- rcu_read_lock();
-
- /* XXX: get sta belonging to dev */
- sta = sta_info_get(local, mac);
- if (!sta) {
- rcu_read_unlock();
- return -ENOENT;
- }
-
- sta_info_unlink(&sta);
- rcu_read_unlock();
-
- sta_info_destroy(sta);
- } else
- sta_info_flush(local, sdata);
+ if (mac)
+ return sta_info_destroy_addr_bss(sdata, mac);
+ sta_info_flush(local, sdata);
return 0;
}
@@ -801,14 +783,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
u8 *mac,
struct station_parameters *params)
{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wiphy_priv(wiphy);
struct sta_info *sta;
struct ieee80211_sub_if_data *vlansdata;
rcu_read_lock();
- /* XXX: get sta belonging to dev */
- sta = sta_info_get(local, mac);
+ sta = sta_info_get_bss(sdata, mac);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -847,7 +829,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
u8 *dst, u8 *next_hop)
{
- struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
struct mesh_path *mpath;
struct sta_info *sta;
@@ -856,7 +837,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
rcu_read_lock();
- sta = sta_info_get(local, next_hop);
+ sta = sta_info_get(sdata, next_hop);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -895,7 +876,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
struct net_device *dev,
u8 *dst, u8 *next_hop)
{
- struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
struct mesh_path *mpath;
struct sta_info *sta;
@@ -904,7 +884,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
rcu_read_lock();
- sta = sta_info_get(local, next_hop);
+ sta = sta_info_get(sdata, next_hop);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -1092,6 +1072,13 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
params->use_short_preamble;
changed |= BSS_CHANGED_ERP_PREAMBLE;
}
+
+ if (!sdata->vif.bss_conf.use_short_slot &&
+ sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) {
+ sdata->vif.bss_conf.use_short_slot = true;
+ changed |= BSS_CHANGED_ERP_SLOT;
+ }
+
if (params->use_short_slot_time >= 0) {
sdata->vif.bss_conf.use_short_slot =
params->use_short_slot_time;
@@ -1135,6 +1122,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
p.cw_max = params->cwmax;
p.cw_min = params->cwmin;
p.txop = params->txop;
+
+ /*
+ * Setting tx queue params disables u-apsd because it's only
+ * called in master mode.
+ */
+ p.uapsd = false;
+
if (drv_conf_tx(local, params->queue, &p)) {
printk(KERN_DEBUG "%s: failed to set TX queue "
"parameters for queue %d\n",
@@ -1237,6 +1231,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct ieee80211_local *local = wiphy_priv(wiphy);
int err;
+ if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
+ err = drv_set_coverage_class(local, wiphy->coverage_class);
+
+ if (err)
+ return err;
+ }
+
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
err = drv_set_rts_threshold(local, wiphy->rts_threshold);
@@ -1324,6 +1325,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
}
#endif
+int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
+{
+ const u8 *ap;
+ enum ieee80211_smps_mode old_req;
+ int err;
+
+ old_req = sdata->u.mgd.req_smps;
+ sdata->u.mgd.req_smps = smps_mode;
+
+ if (old_req == smps_mode &&
+ smps_mode != IEEE80211_SMPS_AUTOMATIC)
+ return 0;
+
+ /*
+ * If not associated, or current association is not an HT
+ * association, there's no need to send an action frame.
+ */
+ if (!sdata->u.mgd.associated ||
+ sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
+ mutex_lock(&sdata->local->iflist_mtx);
+ ieee80211_recalc_smps(sdata->local, sdata);
+ mutex_unlock(&sdata->local->iflist_mtx);
+ return 0;
+ }
+
+ ap = sdata->u.mgd.associated->bssid;
+
+ if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
+ if (sdata->u.mgd.powersave)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_OFF;
+ }
+
+ /* send SM PS frame to AP */
+ err = ieee80211_send_smps_action(sdata, smps_mode,
+ ap, ap);
+ if (err)
+ sdata->u.mgd.req_smps = old_req;
+
+ return err;
+}
+
static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout)
{
@@ -1344,6 +1389,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
sdata->u.mgd.powersave = enabled;
conf->dynamic_ps_timeout = timeout;
+ /* no change, but if automatic follow powersave */
+ mutex_lock(&sdata->u.mgd.mtx);
+ __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
+ mutex_unlock(&sdata->u.mgd.mtx);
+
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
@@ -1359,39 +1409,52 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- int i, err = -EINVAL;
- u32 target_rate;
- struct ieee80211_supported_band *sband;
+ int i;
+
+ /*
+ * This _could_ be supported by providing a hook for
+ * drivers for this function, but at this point it
+ * doesn't seem worth bothering.
+ */
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return -EOPNOTSUPP;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
- * target_rate = X, rate->fixed = 1 means only rate X
- * target_rate = X, rate->fixed = 0 means all rates <= X */
- sdata->max_ratectrl_rateidx = -1;
- sdata->force_unicast_rateidx = -1;
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
- if (mask->fixed)
- target_rate = mask->fixed / 100;
- else if (mask->maxrate)
- target_rate = mask->maxrate / 100;
- else
- return 0;
+ return 0;
+}
- for (i=0; i< sband->n_bitrates; i++) {
- struct ieee80211_rate *brate = &sband->bitrates[i];
- int this_rate = brate->bitrate;
+static int ieee80211_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (target_rate == this_rate) {
- sdata->max_ratectrl_rateidx = i;
- if (mask->fixed)
- sdata->force_unicast_rateidx = i;
- err = 0;
- break;
- }
- }
+ return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
+ duration, cookie);
+}
- return err;
+static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
+}
+
+static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ const u8 *buf, size_t len, u64 *cookie)
+{
+ return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan,
+ channel_type, buf, len, cookie);
}
struct cfg80211_ops mac80211_config_ops = {
@@ -1440,4 +1503,7 @@ struct cfg80211_ops mac80211_config_ops = {
CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
.set_power_mgmt = ieee80211_set_power_mgmt,
.set_bitrate_mask = ieee80211_set_bitrate_mask,
+ .remain_on_channel = ieee80211_remain_on_channel,
+ .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
+ .action = ieee80211_action,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41..637929b65cc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,130 @@ static const struct file_operations noack_ops = {
.open = mac80211_open_file_generic
};
+static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t uapsd_queues_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ unsigned long val;
+ char buf[10];
+ size_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &val);
+
+ if (ret)
+ return -EINVAL;
+
+ if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ return -ERANGE;
+
+ local->uapsd_queues = val;
+
+ return count;
+}
+
+static const struct file_operations uapsd_queues_ops = {
+ .read = uapsd_queues_read,
+ .write = uapsd_queues_write,
+ .open = mac80211_open_file_generic
+};
+
+static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t uapsd_max_sp_len_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ unsigned long val;
+ char buf[10];
+ size_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &val);
+
+ if (ret)
+ return -EINVAL;
+
+ if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ return -ERANGE;
+
+ local->uapsd_max_sp_len = val;
+
+ return count;
+}
+
+static const struct file_operations uapsd_max_sp_len_ops = {
+ .read = uapsd_max_sp_len_read,
+ .write = uapsd_max_sp_len_write,
+ .open = mac80211_open_file_generic
+};
+
+static ssize_t channel_type_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ const char *buf;
+
+ switch (local->hw.conf.channel_type) {
+ case NL80211_CHAN_NO_HT:
+ buf = "no ht\n";
+ break;
+ case NL80211_CHAN_HT20:
+ buf = "ht20\n";
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ buf = "ht40-\n";
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ buf = "ht40+\n";
+ break;
+ default:
+ buf = "???";
+ break;
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static const struct file_operations channel_type_ops = {
+ .read = channel_type_read,
+ .open = mac80211_open_file_generic
+};
+
static ssize_t queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -314,6 +438,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
+ DEBUGFS_ADD(uapsd_queues);
+ DEBUGFS_ADD(uapsd_max_sp_len);
+ DEBUGFS_ADD(channel_type);
statsd = debugfs_create_dir("statistics", phyd);
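
The three new hardware-level entries registered above land in the per-phy debugfs directory next to "queues" and "noack". A quick way to poke them from user space (a sketch; the "phy0" name and the usual /sys/kernel/debug mount point are assumptions about the running system, not part of the patch):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ieee80211/phy0/uapsd_queues";
	char line[32];
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("current AC bitmap: %s", line);	/* e.g. "0xf\n" */

	fseek(f, 0, SEEK_SET);
	/* request U-APSD for all four ACs; takes effect on the next association */
	fputs("0xf", f);
	fclose(f);
	return 0;
}
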
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630d..d12e743cb4e 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -56,7 +56,7 @@ KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
KEY_FILE(tx_rx_count, D);
-KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n");
+KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
KEY_OPS(ifindex);
static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906..9affe2cd185 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -41,6 +41,30 @@ static ssize_t ieee80211_if_read(
return ret;
}
+static ssize_t ieee80211_if_write(
+ struct ieee80211_sub_if_data *sdata,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
+{
+ u8 *buf;
+ ssize_t ret = -ENODEV;
+
+ buf = kzalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, userbuf, count)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ rtnl_lock();
+ if (sdata->dev->reg_state == NETREG_REGISTERED)
+ ret = (*write)(sdata, buf, count);
+ rtnl_unlock();
+
+ /* the copied buffer must be freed on every exit path */
+ kfree(buf);
+ return ret;
+}
+
#define IEEE80211_IF_FMT(name, field, format_string) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +95,7 @@ static ssize_t ieee80211_if_fmt_##name( \
return scnprintf(buf, buflen, "%pM\n", sdata->field); \
}
-#define __IEEE80211_IF_FILE(name) \
+#define __IEEE80211_IF_FILE(name, _write) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
@@ -82,22 +106,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
} \
static const struct file_operations name##_ops = { \
.read = ieee80211_if_read_##name, \
+ .write = (_write), \
.open = mac80211_open_file_generic, \
}
+#define __IEEE80211_IF_FILE_W(name) \
+static ssize_t ieee80211_if_write_##name(struct file *file, \
+ const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return ieee80211_if_write(file->private_data, userbuf, count, \
+ ppos, ieee80211_if_parse_##name); \
+} \
+__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
+
+
#define IEEE80211_IF_FILE(name, field, format) \
IEEE80211_IF_FMT_##format(name, field) \
- __IEEE80211_IF_FILE(name)
+ __IEEE80211_IF_FILE(name, NULL)
/* common attributes */
IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
-IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC);
-IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC);
+IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
+ HEX);
+IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
+ HEX);
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
-IEEE80211_IF_FILE(capab, u.mgd.capab, HEX);
+
+static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
+{
+ struct ieee80211_local *local = sdata->local;
+ int err;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
+ smps_mode == IEEE80211_SMPS_STATIC)
+ return -EINVAL;
+
+ /* auto should be dynamic if in PS mode */
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
+ (smps_mode == IEEE80211_SMPS_DYNAMIC ||
+ smps_mode == IEEE80211_SMPS_AUTOMATIC))
+ return -EINVAL;
+
+ /* supported only on managed interfaces for now */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&local->iflist_mtx);
+ err = __ieee80211_request_smps(sdata, smps_mode);
+ mutex_unlock(&local->iflist_mtx);
+
+ return err;
+}
+
+static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+};
+
+static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ return snprintf(buf, buflen, "request: %s\nused: %s\n",
+ smps_modes[sdata->u.mgd.req_smps],
+ smps_modes[sdata->u.mgd.ap_smps]);
+}
+
+static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
+ const char *buf, int buflen)
+{
+ enum ieee80211_smps_mode mode;
+
+ for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
+ if (strncmp(buf, smps_modes[mode], buflen) == 0) {
+ int err = ieee80211_set_smps(sdata, mode);
+ if (!err)
+ return buflen;
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+
+__IEEE80211_IF_FILE_W(smps);
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +210,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
return scnprintf(buf, buflen, "%u\n",
skb_queue_len(&sdata->u.ap.ps_bc_buf));
}
-__IEEE80211_IF_FILE(num_buffered_multicast);
+__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +255,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
#endif
-#define DEBUGFS_ADD(name, type) \
+#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
sdata, &name##_ops);
+#define DEBUGFS_ADD_MODE(name, mode) \
+ debugfs_create_file(#name, mode, sdata->debugfs.dir, \
+ sdata, &name##_ops);
+
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, sta);
- DEBUGFS_ADD(force_unicast_rateidx, sta);
- DEBUGFS_ADD(max_ratectrl_rateidx, sta);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(bssid, sta);
- DEBUGFS_ADD(aid, sta);
- DEBUGFS_ADD(capab, sta);
+ DEBUGFS_ADD(bssid);
+ DEBUGFS_ADD(aid);
+ DEBUGFS_ADD_MODE(smps, 0600);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, ap);
- DEBUGFS_ADD(force_unicast_rateidx, ap);
- DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(num_sta_ps, ap);
- DEBUGFS_ADD(dtim_count, ap);
- DEBUGFS_ADD(num_buffered_multicast, ap);
+ DEBUGFS_ADD(num_sta_ps);
+ DEBUGFS_ADD(dtim_count);
+ DEBUGFS_ADD(num_buffered_multicast);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, wds);
- DEBUGFS_ADD(force_unicast_rateidx, wds);
- DEBUGFS_ADD(max_ratectrl_rateidx, wds);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(peer, wds);
+ DEBUGFS_ADD(peer);
}
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, vlan);
- DEBUGFS_ADD(force_unicast_rateidx, vlan);
- DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +385,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
}
}
-static int notif_registered;
-
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
{
char buf[10+IFNAMSIZ];
- if (!notif_registered)
- return;
-
- sprintf(buf, "netdev:%s", sdata->dev->name);
+ sprintf(buf, "netdev:%s", sdata->name);
sdata->debugfs.dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
add_files(sdata);
@@ -304,58 +404,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.dir = NULL;
}
-static int netdev_notify(struct notifier_block *nb,
- unsigned long state,
- void *ndev)
+void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
- struct net_device *dev = ndev;
struct dentry *dir;
- struct ieee80211_sub_if_data *sdata;
- char buf[10+IFNAMSIZ];
-
- if (state != NETDEV_CHANGENAME)
- return 0;
-
- if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
- return 0;
-
- if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
- return 0;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ char buf[10 + IFNAMSIZ];
dir = sdata->debugfs.dir;
if (!dir)
- return 0;
+ return;
- sprintf(buf, "netdev:%s", dev->name);
+ sprintf(buf, "netdev:%s", sdata->name);
if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
"dir to %s\n", buf);
-
- return 0;
-}
-
-static struct notifier_block mac80211_debugfs_netdev_notifier = {
- .notifier_call = netdev_notify,
-};
-
-void ieee80211_debugfs_netdev_init(void)
-{
- int err;
-
- err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
- if (err) {
- printk(KERN_ERR
- "mac80211: failed to install netdev notifier,"
- " disabling per-netdev debugfs!\n");
- } else
- notif_registered = 1;
-}
-
-void ieee80211_debugfs_netdev_exit(void)
-{
- unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
- notif_registered = 0;
}
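
With the __IEEE80211_IF_FILE_W() helper in place, a single line such as "__IEEE80211_IF_FILE_W(smps);" stands for roughly the following (a sketch expanded by hand from the macros above; the read wrapper body is cut off by the hunk boundary, so its exact form is inferred):

static ssize_t ieee80211_if_write_smps(struct file *file,
				       const char __user *userbuf,
				       size_t count, loff_t *ppos)
{
	return ieee80211_if_write(file->private_data, userbuf, count,
				  ppos, ieee80211_if_parse_smps);
}

static ssize_t ieee80211_if_read_smps(struct file *file,
				      char __user *userbuf,
				      size_t count, loff_t *ppos)
{
	return ieee80211_if_read(file->private_data, userbuf, count, ppos,
				 ieee80211_if_fmt_smps);
}

static const struct file_operations smps_ops = {
	.read = ieee80211_if_read_smps,
	.write = ieee80211_if_write_smps,
	.open = mac80211_open_file_generic,
};

so DEBUGFS_ADD_MODE(smps, 0600) in add_sta_files() creates a per-interface "smps" file that reads back the requested and used modes and accepts "auto", "off", "static" or "dynamic" writes.
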
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b73..79025e79f4d 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
#ifdef CONFIG_MAC80211_DEBUGFS
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_netdev_init(void);
-void ieee80211_debugfs_netdev_exit(void);
+void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
#else
static inline void ieee80211_debugfs_add_netdev(
struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
static inline void ieee80211_debugfs_remove_netdev(
struct ieee80211_sub_if_data *sdata)
{}
-static inline void ieee80211_debugfs_netdev_init(void)
-{}
-
-static inline void ieee80211_debugfs_netdev_exit(void)
+static inline void ieee80211_debugfs_rename_netdev(
+ struct ieee80211_sub_if_data *sdata)
{}
#endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c808..d92800bb2d2 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
-STA_FILE(dev, sdata->dev->name, S);
+STA_FILE(dev, sdata->name, S);
STA_FILE(rx_packets, rx_packets, LU);
STA_FILE(tx_packets, tx_packets, LU);
STA_FILE(rx_bytes, rx_bytes, LU);
@@ -120,36 +120,38 @@ STA_OPS(last_seq_ctrl);
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[30 + STA_TID_NUM * 70], *p = buf;
+ char buf[64 + STA_TID_NUM * 40], *p = buf;
int i;
struct sta_info *sta = file->private_data;
spin_lock_bh(&sta->lock);
- p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
+ p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
sta->ampdu_mlme.dialog_token_allocator + 1);
+ p += scnprintf(p, sizeof(buf) + buf - p,
+ "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
for (i = 0; i < STA_TID_NUM; i++) {
- p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i);
- p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
sta->ampdu_mlme.tid_state_rx[i]);
- p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
sta->ampdu_mlme.tid_state_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
sta->ampdu_mlme.tid_state_rx[i] ?
sta->ampdu_mlme.tid_rx[i]->ssn : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
sta->ampdu_mlme.tid_state_tx[i]);
- p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
sta->ampdu_mlme.tid_state_tx[i] ?
sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
sta->ampdu_mlme.tid_state_tx[i] ?
sta->ampdu_mlme.tid_tx[i]->ssn : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d",
+ p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
sta->ampdu_mlme.tid_state_tx[i] ?
skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+ p += scnprintf(p, sizeof(buf) + buf - p, "\n");
}
spin_unlock_bh(&sta->lock);
@@ -160,7 +162,12 @@ STA_OPS(agg_status);
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[200], *p = buf;
+#define PRINT_HT_CAP(_cond, _str) \
+ do { \
+ if (_cond) \
+ p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+ } while (0)
+ char buf[512], *p = buf;
int i;
struct sta_info *sta = file->private_data;
struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +175,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
htc->ht_supported ? "" : "not ");
if (htc->ht_supported) {
- p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap);
+ p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
+
+ PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
+ PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
+ PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
+
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
+
+ PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
+ PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
+ PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
+ PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
+
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
+
+ PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
+
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
+ "7935 bytes");
+ PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+
+ /*
+ * For beacons and probe response this would mean the BSS
+ * does or does not allow the usage of DSSS/CCK HT40.
+ * Otherwise it means the STA does or does not use
+ * DSSS/CCK HT40.
+ */
+ PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
+ PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
+
+ /* BIT(13) is reserved */
+
+ PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
+
+ PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
+
p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
htc->ampdu_factor, htc->ampdu_density);
p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
+
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
htc->mcs.rx_mask[i]);
- p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n",
- le16_to_cpu(htc->mcs.rx_highest));
+ p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+
+ /* If not set this is meaningless */
+ if (le16_to_cpu(htc->mcs.rx_highest)) {
+ p += scnprintf(p, sizeof(buf)+buf-p,
+ "MCS rx highest: %d Mbps\n",
+ le16_to_cpu(htc->mcs.rx_highest));
+ }
+
p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
htc->mcs.tx_params);
}
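
Decoding the HT Capabilities Info field the way the expanded sta_ht_capa_read() now does is plain mask-and-shift work; a tiny stand-alone illustration (the sample value and program are ours, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cap = 0x016e;	/* arbitrary example HT Capabilities Info value */

	printf("cap: %#.4x\n", (unsigned int)cap);
	printf("%s\n", (cap & (1 << 1)) ? "HT20/HT40" : "HT20");

	switch ((cap >> 2) & 0x3) {	/* SM Power Save subfield, bits 2-3 */
	case 0: puts("Static SM Power Save"); break;
	case 1: puts("Dynamic SM Power Save"); break;
	case 3: puts("SM Power Save disabled"); break;
	}

	if (cap & (1 << 5))
		puts("RX HT20 SGI");
	if (cap & (1 << 6))
		puts("RX HT40 SGI");

	/* RX STBC stream count lives in bits 8-9 */
	printf("RX STBC streams: %d\n", (cap >> 8) & 0x3);
	return 0;
}

For 0x016e this prints HT20/HT40, SM Power Save disabled, both short-GI lines and one RX STBC stream, matching what the debugfs file would show for the same bits.
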
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff6..c3d844093a2 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
{
int ret;
+ might_sleep();
+
local->started = true;
smp_mb();
ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
static inline void drv_stop(struct ieee80211_local *local)
{
+ might_sleep();
+
local->ops->stop(&local->hw);
trace_drv_stop(local);
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
}
static inline int drv_add_interface(struct ieee80211_local *local,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
- int ret = local->ops->add_interface(&local->hw, conf);
- trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->add_interface(&local->hw, vif);
+ trace_drv_add_interface(local, vif_to_sdata(vif), ret);
return ret;
}
static inline void drv_remove_interface(struct ieee80211_local *local,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
- local->ops->remove_interface(&local->hw, conf);
- trace_drv_remove_interface(local, conf->mac_addr, conf->vif);
+ might_sleep();
+
+ local->ops->remove_interface(&local->hw, vif);
+ trace_drv_remove_interface(local, vif_to_sdata(vif));
}
static inline int drv_config(struct ieee80211_local *local, u32 changed)
{
- int ret = local->ops->config(&local->hw, changed);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->config(&local->hw, changed);
trace_drv_config(local, changed, ret);
return ret;
}
static inline void drv_bss_info_changed(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *info,
u32 changed)
{
+ might_sleep();
+
if (local->ops->bss_info_changed)
- local->ops->bss_info_changed(&local->hw, vif, info, changed);
- trace_drv_bss_info_changed(local, vif, info, changed);
+ local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
+ trace_drv_bss_info_changed(local, sdata, info, changed);
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,36 +122,53 @@ static inline int drv_set_tim(struct ieee80211_local *local,
}
static inline int drv_set_key(struct ieee80211_local *local,
- enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ enum set_key_cmd cmd,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key);
- trace_drv_set_key(local, cmd, vif, sta, key, ret);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
+ trace_drv_set_key(local, cmd, sdata, sta, key, ret);
return ret;
}
static inline void drv_update_tkip_key(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_key_conf *conf,
- const u8 *address, u32 iv32,
+ struct sta_info *sta, u32 iv32,
u16 *phase1key)
{
+ struct ieee80211_sta *ista = NULL;
+
+ if (sta)
+ ista = &sta->sta;
+
if (local->ops->update_tkip_key)
- local->ops->update_tkip_key(&local->hw, conf, address,
- iv32, phase1key);
- trace_drv_update_tkip_key(local, conf, address, iv32);
+ local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
+ ista, iv32, phase1key);
+ trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
}
static inline int drv_hw_scan(struct ieee80211_local *local,
struct cfg80211_scan_request *req)
{
- int ret = local->ops->hw_scan(&local->hw, req);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->hw_scan(&local->hw, req);
trace_drv_hw_scan(local, req, ret);
return ret;
}
static inline void drv_sw_scan_start(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(&local->hw);
trace_drv_sw_scan_start(local);
@@ -143,6 +176,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
static inline void drv_sw_scan_complete(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->sw_scan_complete)
local->ops->sw_scan_complete(&local->hw);
trace_drv_sw_scan_complete(local);
@@ -153,6 +188,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
{
int ret = -EOPNOTSUPP;
+ might_sleep();
+
if (local->ops->get_stats)
ret = local->ops->get_stats(&local->hw, stats);
trace_drv_get_stats(local, stats, ret);
@@ -172,43 +209,93 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
u32 value)
{
int ret = 0;
+
+ might_sleep();
+
if (local->ops->set_rts_threshold)
ret = local->ops->set_rts_threshold(&local->hw, value);
trace_drv_set_rts_threshold(local, value, ret);
return ret;
}
+static inline int drv_set_coverage_class(struct ieee80211_local *local,
+ u8 value)
+{
+ int ret = 0;
+ might_sleep();
+
+ if (local->ops->set_coverage_class)
+ local->ops->set_coverage_class(&local->hw, value);
+ else
+ ret = -EOPNOTSUPP;
+
+ trace_drv_set_coverage_class(local, value, ret);
+ return ret;
+}
+
static inline void drv_sta_notify(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
if (local->ops->sta_notify)
- local->ops->sta_notify(&local->hw, vif, cmd, sta);
- trace_drv_sta_notify(local, vif, cmd, sta);
+ local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
+ trace_drv_sta_notify(local, sdata, cmd, sta);
+}
+
+static inline int drv_sta_add(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ if (local->ops->sta_add)
+ ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
+ else if (local->ops->sta_notify)
+ local->ops->sta_notify(&local->hw, &sdata->vif,
+ STA_NOTIFY_ADD, sta);
+
+ trace_drv_sta_add(local, sdata, sta, ret);
+
+ return ret;
+}
+
+static inline void drv_sta_remove(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ might_sleep();
+
+ if (local->ops->sta_remove)
+ local->ops->sta_remove(&local->hw, &sdata->vif, sta);
+ else if (local->ops->sta_notify)
+ local->ops->sta_notify(&local->hw, &sdata->vif,
+ STA_NOTIFY_REMOVE, sta);
+
+ trace_drv_sta_remove(local, sdata, sta);
}
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, queue, params);
trace_drv_conf_tx(local, queue, params, ret);
return ret;
}
-static inline int drv_get_tx_stats(struct ieee80211_local *local,
- struct ieee80211_tx_queue_stats *stats)
-{
- int ret = local->ops->get_tx_stats(&local->hw, stats);
- trace_drv_get_tx_stats(local, stats, ret);
- return ret;
-}
-
static inline u64 drv_get_tsf(struct ieee80211_local *local)
{
u64 ret = -1ULL;
+
+ might_sleep();
+
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw);
trace_drv_get_tsf(local, ret);
@@ -217,6 +304,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
{
+ might_sleep();
+
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, tsf);
trace_drv_set_tsf(local, tsf);
@@ -224,6 +313,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
static inline void drv_reset_tsf(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw);
trace_drv_reset_tsf(local);
@@ -232,6 +323,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
{
int ret = 1;
+
+ might_sleep();
+
if (local->ops->tx_last_beacon)
ret = local->ops->tx_last_beacon(&local->hw);
trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +333,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
}
static inline int drv_ampdu_action(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn)
{
int ret = -EOPNOTSUPP;
if (local->ops->ampdu_action)
- ret = local->ops->ampdu_action(&local->hw, vif, action,
+ ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
sta, tid, ssn);
- trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret);
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
return ret;
}
static inline void drv_rfkill_poll(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->rfkill_poll)
local->ops->rfkill_poll(&local->hw);
}
+
+static inline void drv_flush(struct ieee80211_local *local, bool drop)
+{
+ might_sleep();
+
+ trace_drv_flush(local, drop);
+ if (local->ops->flush)
+ local->ops->flush(&local->hw, drop);
+}
#endif /* __MAC80211_DRIVER_OPS */
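
For drivers, the interesting part of the drv_sta_add()/drv_sta_remove() wrappers above is the fallback: a driver that only implements the old sta_notify() callback keeps receiving STA_NOTIFY_ADD/REMOVE, while one that implements the new callbacks may sleep and can even refuse the station. A driver-side sketch (everything named mydrv_* is hypothetical, and the prototypes are inferred from the wrapper call sites above; include/net/mac80211.h has the authoritative declarations):

#include <net/mac80211.h>

struct mydrv_priv {
	/* ... device state ... */
	int n_stations;
};

/* stand-ins for real firmware commands */
static int mydrv_fw_add_station(struct mydrv_priv *priv, const u8 *addr)
{
	priv->n_stations++;
	return 0;
}

static void mydrv_fw_remove_station(struct mydrv_priv *priv, const u8 *addr)
{
	priv->n_stations--;
}

static int mydrv_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	struct mydrv_priv *priv = hw->priv;

	/* may sleep here: allocate a firmware station context, etc. */
	return mydrv_fw_add_station(priv, sta->addr);
}

static void mydrv_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mydrv_priv *priv = hw->priv;

	mydrv_fw_remove_station(priv, sta->addr);
}

static const struct ieee80211_ops mydrv_ops = {
	/* .tx, .start, .stop, .config, ... */
	.sta_add = mydrv_sta_add,
	.sta_remove = mydrv_sta_remove,
};
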
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ee94ea0c67e..41baf730a5c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
#define STA_PR_FMT " sta:%pM"
#define STA_PR_ARG __entry->sta_addr
-#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif)
-#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif
-#define VIF_PR_FMT " vif:%p(%d)"
-#define VIF_PR_ARG __entry->vif, __entry->vif_type
+#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
+ __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
+ __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+#define VIF_PR_FMT " vif:%s(%d)"
+#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
TRACE_EVENT(drv_start,
TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
TRACE_EVENT(drv_add_interface,
TP_PROTO(struct ieee80211_local *local,
- const u8 *addr,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
int ret),
- TP_ARGS(local, addr, vif, ret),
+ TP_ARGS(local, sdata, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->addr, addr, 6);
+ memcpy(__entry->addr, sdata->vif.addr, 6);
__entry->ret = ret;
),
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
);
TRACE_EVENT(drv_remove_interface,
- TP_PROTO(struct ieee80211_local *local,
- const u8 *addr, struct ieee80211_vif *vif),
+ TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, addr, vif),
+ TP_ARGS(local, sdata),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->addr, addr, 6);
+ memcpy(__entry->addr, sdata->vif.addr, 6);
),
TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
__field(u8, short_frame_max_tx_count)
__field(int, center_freq)
__field(int, channel_type)
+ __field(int, smps)
),
TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
__entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
__entry->center_freq = local->hw.conf.channel->center_freq;
__entry->channel_type = local->hw.conf.channel_type;
+ __entry->smps = local->hw.conf.smps_mode;
),
TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
TRACE_EVENT(drv_bss_info_changed,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *info,
u32 changed),
- TP_ARGS(local, vif, info, changed),
+ TP_ARGS(local, sdata, info, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
TRACE_EVENT(drv_set_key,
TP_PROTO(struct ieee80211_local *local,
- enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key, int ret),
- TP_ARGS(local, cmd, vif, sta, key, ret),
+ TP_ARGS(local, cmd, sdata, sta, key, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -329,26 +331,29 @@ TRACE_EVENT(drv_set_key,
TRACE_EVENT(drv_update_tkip_key,
TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_key_conf *conf,
- const u8 *address, u32 iv32),
+ struct ieee80211_sta *sta, u32 iv32),
- TP_ARGS(local, conf, address, iv32),
+ TP_ARGS(local, sdata, conf, sta, iv32),
TP_STRUCT__entry(
LOCAL_ENTRY
- __array(u8, addr, 6)
+ VIF_ENTRY
+ STA_ENTRY
__field(u32, iv32)
),
TP_fast_assign(
LOCAL_ASSIGN;
- memcpy(__entry->addr, address, 6);
+ VIF_ASSIGN;
+ STA_ASSIGN;
__entry->iv32 = iv32;
),
TP_printk(
- LOCAL_PR_FMT " addr:%pM iv32:%#x",
- LOCAL_PR_ARG, __entry->addr, __entry->iv32
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " iv32:%#x",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->iv32
)
);
@@ -489,13 +494,36 @@ TRACE_EVENT(drv_set_rts_threshold,
)
);
+TRACE_EVENT(drv_set_coverage_class,
+ TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+
+ TP_ARGS(local, value, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u8, value)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " value:%d ret:%d",
+ LOCAL_PR_ARG, __entry->value, __entry->ret
+ )
+);
+
TRACE_EVENT(drv_sta_notify,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta),
- TP_ARGS(local, vif, cmd, sta),
+ TP_ARGS(local, sdata, cmd, sta),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -517,59 +545,88 @@ TRACE_EVENT(drv_sta_notify,
)
);
-TRACE_EVENT(drv_conf_tx,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
- const struct ieee80211_tx_queue_params *params,
- int ret),
+TRACE_EVENT(drv_sta_add,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, int ret),
- TP_ARGS(local, queue, params, ret),
+ TP_ARGS(local, sdata, sta, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u16, queue)
- __field(u16, txop)
- __field(u16, cw_min)
- __field(u16, cw_max)
- __field(u8, aifs)
+ VIF_ENTRY
+ STA_ENTRY
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->queue = queue;
+ VIF_ASSIGN;
+ STA_ASSIGN;
__entry->ret = ret;
- __entry->txop = params->txop;
- __entry->cw_max = params->cw_max;
- __entry->cw_min = params->cw_min;
- __entry->aifs = params->aifs;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d ret:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
)
);
-TRACE_EVENT(drv_get_tx_stats,
+TRACE_EVENT(drv_sta_remove,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_tx_queue_stats *stats,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sdata, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_conf_tx,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ const struct ieee80211_tx_queue_params *params,
int ret),
- TP_ARGS(local, stats, ret),
+ TP_ARGS(local, queue, params, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u16, txop)
+ __field(u16, cw_min)
+ __field(u16, cw_max)
+ __field(u8, aifs)
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->queue = queue;
__entry->ret = ret;
+ __entry->txop = params->txop;
+ __entry->cw_max = params->cw_max;
+ __entry->cw_min = params->cw_min;
+ __entry->aifs = params->aifs;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT " queue:%d ret:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->ret
)
);
@@ -656,12 +713,12 @@ TRACE_EVENT(drv_tx_last_beacon,
TRACE_EVENT(drv_ampdu_action,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn, int ret),
- TP_ARGS(local, vif, action, sta, tid, ssn, ret),
+ TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -680,7 +737,7 @@ TRACE_EVENT(drv_ampdu_action,
__entry->ret = ret;
__entry->action = action;
__entry->tid = tid;
- __entry->ssn = *ssn;
+ __entry->ssn = ssn ? *ssn : 0;
),
TP_printk(
@@ -688,6 +745,27 @@ TRACE_EVENT(drv_ampdu_action,
LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
)
);
+
+TRACE_EVENT(drv_flush,
+ TP_PROTO(struct ieee80211_local *local, bool drop),
+
+ TP_ARGS(local, drop),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, drop)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->drop = drop;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " drop:%d",
+ LOCAL_PR_ARG, __entry->drop
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7dcee68072..bb677a73b7c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -125,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
- "for delba frame\n", sdata->dev->name);
+ "for delba frame\n", sdata->name);
return;
}
@@ -133,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -185,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
spin_unlock_bh(&sta->lock);
}
}
+
+int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps, const u8 *da,
+ const u8 *bssid)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *action_frame;
+
+ /* 27 = header + category + action + smps mode */
+ skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 27);
+ memcpy(action_frame->da, da, ETH_ALEN);
+ memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_HT;
+ action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
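+ /* fall through: treat it as "off" */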
+ case IEEE80211_SMPS_OFF:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DISABLED;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DYNAMIC;
+ break;
+ }
+
+ /* we'll want to know the TX status of this frame */
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ ieee80211_tx_skb(sdata, skb);
+
+ return 0;
+}
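
The "27" passed to dev_alloc_skb()/skb_put() above works out as follows (a worked breakdown, not part of the patch):

	24 bytes  3-address management frame header (fc/dur/da/sa/bssid/seq)
	 1 byte   action category (WLAN_CATEGORY_HT)
	 1 byte   HT action code (WLAN_HT_ACTION_SMPS)
	 1 byte   SM PS control field
	--------
	27 bytes

Inside a function this could be pinned down with something along the lines of
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.action.u.ht_smps.smps_control) + 1 != 27);
though the patch simply hard-codes the constant with the comment above.
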
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5..f3e94248674 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -117,7 +117,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
memset(mgmt->da, 0xff, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +187,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss)
{
+ struct cfg80211_bss *cbss =
+ container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_supported_band *sband;
u32 basic_rates;
int i, j;
- u16 beacon_int = bss->cbss.beacon_interval;
+ u16 beacon_int = cbss->beacon_interval;
if (beacon_int < 10)
beacon_int = 10;
- sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band];
+ sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
basic_rates = 0;
@@ -212,12 +214,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
}
- __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid,
+ __ieee80211_sta_join_ibss(sdata, cbss->bssid,
beacon_int,
- bss->cbss.channel,
+ cbss->channel,
basic_rates,
- bss->cbss.capability,
- bss->cbss.tsf);
+ cbss->capability,
+ cbss->tsf);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +231,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
int freq;
+ struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
struct sta_info *sta;
struct ieee80211_channel *channel;
@@ -252,7 +255,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (sta) {
u32 prev_rates;
@@ -266,16 +269,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: updated supp_rates set "
"for %pM based on beacon info (0x%llx | "
"0x%llx -> 0x%llx)\n",
- sdata->dev->name,
+ sdata->name,
sta->sta.addr,
(unsigned long long) prev_rates,
(unsigned long long) supp_rates,
(unsigned long long) sta->sta.supp_rates[band]);
#endif
- } else
- ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
-
- rcu_read_unlock();
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
+ supp_rates, GFP_KERNEL);
+ }
}
bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
@@ -283,25 +288,23 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (!bss)
return;
+ cbss = container_of((void *)bss, struct cfg80211_bss, priv);
+
/* was just updated in ieee80211_bss_info_update */
- beacon_timestamp = bss->cbss.tsf;
+ beacon_timestamp = cbss->tsf;
/* check if we need to merge IBSS */
- /* merge only on beacons (???) */
- if (!beacon)
- goto put_bss;
-
/* we use a fixed BSSID */
- if (sdata->u.ibss.bssid)
+ if (sdata->u.ibss.fixed_bssid)
goto put_bss;
/* not an IBSS */
- if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS))
+ if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
goto put_bss;
/* different channel */
- if (bss->cbss.channel != local->oper_channel)
+ if (cbss->channel != local->oper_channel)
goto put_bss;
/* different SSID */
@@ -311,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* same BSSID */
- if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
+ if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
goto put_bss;
if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,10 +367,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: beacon TSF higher than "
"local TSF - IBSS merge with BSSID %pM\n",
- sdata->dev->name, mgmt->bssid);
+ sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
- ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
+ ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
+ supp_rates, GFP_KERNEL);
}
put_bss:
@@ -380,7 +384,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* must be callable in atomic context.
*/
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid,u8 *addr, u32 supp_rates)
+ u8 *bssid,u8 *addr, u32 supp_rates,
+ gfp_t gfp)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
@@ -394,7 +399,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
if (net_ratelimit())
printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->dev->name, addr);
+ sdata->name, addr);
return NULL;
}
@@ -406,10 +411,10 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
- wiphy_name(local->hw.wiphy), addr, sdata->dev->name);
+ wiphy_name(local->hw.wiphy), addr, sdata->name);
#endif
- sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, addr, gfp);
if (!sta)
return NULL;
@@ -421,9 +426,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
rate_control_rate_init(sta);
+ /* If it fails, maybe we raced another insertion? */
if (sta_info_insert(sta))
- return NULL;
-
+ return sta_info_get(sdata, addr);
return sta;
}
@@ -449,6 +454,9 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
return active;
}
+/*
+ * This function is called with state == IEEE80211_IBSS_MLME_JOINED
+ */
static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
{
@@ -470,7 +478,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
return;
printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
- "IBSS networks with same SSID (merge)\n", sdata->dev->name);
+ "IBSS networks with same SSID (merge)\n", sdata->name);
ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
}
@@ -492,13 +500,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
* random number generator get different BSSID. */
get_random_bytes(bssid, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++)
- bssid[i] ^= sdata->dev->dev_addr[i];
+ bssid[i] ^= sdata->vif.addr[i];
bssid[0] &= ~0x01;
bssid[0] |= 0x02;
}
printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
- sdata->dev->name, bssid);
+ sdata->name, bssid);
sband = local->hw.wiphy->bands[ifibss->channel->band];
@@ -514,11 +522,15 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
capability, 0);
}
+/*
+ * This function is called with state == IEEE80211_IBSS_MLME_SEARCH
+ */
+
static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss;
+ struct cfg80211_bss *cbss;
struct ieee80211_channel *chan = NULL;
const u8 *bssid = NULL;
int active_ibss;
@@ -527,7 +539,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
active_ibss = ieee80211_sta_active_ibss(sdata);
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
- sdata->dev->name, active_ibss);
+ sdata->name, active_ibss);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
if (active_ibss)
@@ -542,21 +554,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
chan = ifibss->channel;
if (!is_zero_ether_addr(ifibss->bssid))
bssid = ifibss->bssid;
- bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid,
- ifibss->ssid, ifibss->ssid_len,
- WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
+ cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
+ ifibss->ssid, ifibss->ssid_len,
+ WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (cbss) {
+ struct ieee80211_bss *bss;
- if (bss) {
+ bss = (void *)cbss->priv;
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
- "%pM\n", bss->cbss.bssid, ifibss->bssid);
+ "%pM\n", cbss->bssid, ifibss->bssid);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
" based on configured SSID\n",
- sdata->dev->name, bss->cbss.bssid);
+ sdata->name, cbss->bssid);
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_rx_bss_put(local, bss);
@@ -568,18 +582,14 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
/* Selected IBSS not found in current scan results - try to scan */
- if (ifibss->state == IEEE80211_IBSS_MLME_JOINED &&
- !ieee80211_sta_active_ibss(sdata)) {
- mod_timer(&ifibss->timer,
- round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- } else if (time_after(jiffies, ifibss->last_scan_completed +
+ if (time_after(jiffies, ifibss->last_scan_completed +
IEEE80211_SCAN_INTERVAL)) {
printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
- "join\n", sdata->dev->name);
+ "join\n", sdata->name);
ieee80211_request_internal_scan(sdata, ifibss->ssid,
ifibss->ssid_len);
- } else if (ifibss->state != IEEE80211_IBSS_MLME_JOINED) {
+ } else {
int interval = IEEE80211_SCAN_INTERVAL;
if (time_after(jiffies, ifibss->ibss_join_req +
@@ -589,7 +599,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
}
printk(KERN_DEBUG "%s: IBSS not allowed on"
- " %d MHz\n", sdata->dev->name,
+ " %d MHz\n", sdata->name,
local->hw.conf.channel->center_freq);
/* No IBSS found - decrease scan interval and continue
@@ -597,7 +607,6 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
interval = IEEE80211_SCAN_INTERVAL_SLOW;
}
- ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
mod_timer(&ifibss->timer,
round_jiffies(jiffies + interval));
}
@@ -623,7 +632,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
" (tx_last_beacon=%d)\n",
- sdata->dev->name, mgmt->sa, mgmt->da,
+ sdata->name, mgmt->sa, mgmt->da,
mgmt->bssid, tx_last_beacon);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
@@ -641,13 +650,13 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
"from %pM\n",
- sdata->dev->name, mgmt->sa);
+ sdata->name, mgmt->sa);
#endif
return;
}
if (pos[1] != 0 &&
(pos[1] != ifibss->ssid_len ||
- !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
+ memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
/* Ignore ProbeReq for foreign SSID */
return;
}
@@ -661,7 +670,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
memcpy(resp->da, mgmt->sa, ETH_ALEN);
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
- sdata->dev->name, resp->da);
+ sdata->name, resp->da);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -675,7 +684,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
size_t baselen;
struct ieee802_11_elems elems;
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -748,7 +757,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
if (WARN_ON(local->suspended))
return;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
@@ -831,7 +840,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d64..241533e1bc0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2,7 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
- * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -58,6 +58,15 @@ struct ieee80211_local;
#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
+#define IEEE80211_DEFAULT_UAPSD_QUEUES \
+ (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+
+#define IEEE80211_DEFAULT_MAX_SP_LEN \
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+
struct ieee80211_fragment_entry {
unsigned long first_frag_time;
unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
struct ieee80211_bss {
- /* Yes, this is a hack */
- struct cfg80211_bss cbss;
-
/* don't want to look up all the time */
size_t ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
u8 dtim_period;
bool wmm_used;
+ bool uapsd_supported;
unsigned long last_probe_resp;
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
struct ieee80211_tx_data {
struct sk_buff *skb;
- struct net_device *dev;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
@@ -228,31 +234,77 @@ struct mesh_preq_queue {
u8 flags;
};
-enum ieee80211_mgd_state {
- IEEE80211_MGD_STATE_IDLE,
- IEEE80211_MGD_STATE_PROBE,
- IEEE80211_MGD_STATE_AUTH,
- IEEE80211_MGD_STATE_ASSOC,
+enum ieee80211_work_type {
+ IEEE80211_WORK_ABORT,
+ IEEE80211_WORK_DIRECT_PROBE,
+ IEEE80211_WORK_AUTH,
+ IEEE80211_WORK_ASSOC,
+ IEEE80211_WORK_REMAIN_ON_CHANNEL,
};
-struct ieee80211_mgd_work {
+/**
+ * enum work_done_result - indicates what to do after work was done
+ *
+ * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
+ * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
+ * should be requeued.
+ */
+enum work_done_result {
+ WORK_DONE_DESTROY,
+ WORK_DONE_REQUEUE,
+};
+
+struct ieee80211_work {
struct list_head list;
- struct ieee80211_bss *bss;
- int ie_len;
- u8 prev_bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
+
+ struct rcu_head rcu_head;
+
+ struct ieee80211_sub_if_data *sdata;
+
+ enum work_done_result (*done)(struct ieee80211_work *wk,
+ struct sk_buff *skb);
+
+ struct ieee80211_channel *chan;
+ enum nl80211_channel_type chan_type;
+
unsigned long timeout;
- enum ieee80211_mgd_state state;
- u16 auth_alg, auth_transaction;
+ enum ieee80211_work_type type;
+
+ u8 filter_ta[ETH_ALEN];
- int tries;
+ bool started;
- u8 key[WLAN_KEY_LEN_WEP104];
- u8 key_len, key_idx;
+ union {
+ struct {
+ int tries;
+ u16 algorithm, transaction;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+ u8 key_len, key_idx;
+ bool privacy;
+ } probe_auth;
+ struct {
+ struct cfg80211_bss *bss;
+ const u8 *supp_rates;
+ const u8 *ht_information_ie;
+ enum ieee80211_smps_mode smps;
+ int tries;
+ u16 capability;
+ u8 prev_bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 supp_rates_len;
+ bool wmm_used, use_11n, uapsd_used;
+ } assoc;
+ struct {
+ u32 duration;
+ } remain;
+ };
+ int ie_len;
/* must be last */
- u8 ie[0]; /* for auth or assoc frame, not probe */
+ u8 ie[0];
};
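
A remain-on-channel request, for example, maps onto this generic item roughly like so (a sketch under the assumption that the new work code exposes a queueing helper such as ieee80211_add_work(); none of this is taken verbatim from the patch):

static enum work_done_result roc_done(struct ieee80211_work *wk,
				      struct sk_buff *skb)
{
	/* tell cfg80211 the off-channel period is over, etc. */
	return WORK_DONE_DESTROY;
}

static int queue_roc(struct ieee80211_sub_if_data *sdata,
		     struct ieee80211_channel *chan,
		     enum nl80211_channel_type channel_type,
		     unsigned int duration_ms)
{
	struct ieee80211_work *wk;

	wk = kzalloc(sizeof(*wk), GFP_KERNEL);
	if (!wk)
		return -ENOMEM;

	wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
	wk->chan = chan;
	wk->chan_type = channel_type;
	wk->sdata = sdata;
	wk->done = roc_done;
	wk->remain.duration = duration_ms;

	ieee80211_add_work(wk);		/* assumed helper; queues onto work_list */
	return 0;
}
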
/* flags used in struct ieee80211_if_managed.flags */
@@ -260,15 +312,11 @@ enum ieee80211_sta_flags {
IEEE80211_STA_BEACON_POLL = BIT(0),
IEEE80211_STA_CONNECTION_POLL = BIT(1),
IEEE80211_STA_CONTROL_PORT = BIT(2),
- IEEE80211_STA_WMM_ENABLED = BIT(3),
IEEE80211_STA_DISABLE_11N = BIT(4),
IEEE80211_STA_CSA_RECEIVED = BIT(5),
IEEE80211_STA_MFP_ENABLED = BIT(6),
-};
-
-/* flags for MLME request */
-enum ieee80211_sta_request {
- IEEE80211_STA_REQ_SCAN,
+ IEEE80211_STA_UAPSD_ENABLED = BIT(7),
+ IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
};
struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
int probe_send_count;
struct mutex mtx;
- struct ieee80211_bss *associated;
- struct ieee80211_mgd_work *old_associate_work;
- struct list_head work_list;
+ struct cfg80211_bss *associated;
u8 bssid[ETH_ALEN];
u16 aid;
- u16 capab;
struct sk_buff_head skb_queue;
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
-
- unsigned long request;
+ enum ieee80211_smps_mode req_smps, /* requested smps mode */
+ ap_smps; /* smps mode AP thinks we're in */
unsigned int flags;
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
int drop_unencrypted;
+ char name[IFNAMSIZ];
+
/*
* keep track of whether the HT opmode (stored in
* vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
*/
struct ieee80211_if_ap *bss;
- int force_unicast_rateidx; /* forced TX rateidx for unicast frames */
- int max_ratectrl_rateidx; /* max TX rateidx for rate control */
+ /* bitmap of allowed (non-MCS) rate indexes for rate control */
+ u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
union {
struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
/*
+ * work stuff, potentially off-channel (in the future)
+ */
+ struct mutex work_mtx;
+ struct list_head work_list;
+ struct timer_list work_timer;
+ struct work_struct work_work;
+ struct sk_buff_head work_skb_queue;
+
+ /*
* private workqueue to mac80211. mac80211 makes this accessible
* via ieee80211_queue_work()
*/
@@ -586,6 +642,9 @@ struct ieee80211_local {
/* used for uploading changed mc list */
struct work_struct reconfig_filter;
+ /* used to reconfigure hardware SM PS */
+ struct work_struct recalc_smps;
+
/* aggregated multicast list */
struct dev_addr_list *mc_list;
int mc_count;
@@ -630,15 +689,18 @@ struct ieee80211_local {
/* Station data */
/*
- * The lock only protects the list, hash, timer and counter
- * against manipulation, reads are done in RCU. Additionally,
- * the lock protects each BSS's TIM bitmap.
+ * The mutex only protects the list and counter,
+ * reads are done in RCU.
+ * Additionally, the lock protects the hash table,
+ * the pending list and each BSS's TIM bitmap.
*/
+ struct mutex sta_mtx;
spinlock_t sta_lock;
unsigned long num_sta;
- struct list_head sta_list;
+ struct list_head sta_list, sta_pending_list;
struct sta_info *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
+ struct work_struct sta_finish_work;
int sta_generation;
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -689,6 +751,10 @@ struct ieee80211_local {
enum nl80211_channel_type oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
+ /* Temporary remain-on-channel for off-channel operations */
+ struct ieee80211_channel *tmp_channel;
+ enum nl80211_channel_type tmp_channel_type;
+
/* SNMP counters */
/* dot11CountersTable */
u32 dot11TransmittedFragmentCount;
@@ -708,10 +774,6 @@ struct ieee80211_local {
assoc_led_name[32], radio_led_name[32];
#endif
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct work_struct sta_debugfs_add;
-#endif
-
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
/* TX/RX handler statistics */
unsigned int tx_handlers_drop;
@@ -745,8 +807,22 @@ struct ieee80211_local {
int wifi_wme_noack_test;
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
+ /*
+ * Bitmask of enabled u-apsd queues,
+ * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
+ * to take effect.
+ */
+ unsigned int uapsd_queues;
+
+ /*
+ * Maximum number of buffered frames AP can deliver during a
+ * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
+ * Needs a new association to take effect.
+ */
+ unsigned int uapsd_max_sp_len;
+
bool pspolling;
- bool scan_ps_enabled;
+ bool offchannel_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
@@ -760,6 +836,8 @@ struct ieee80211_local {
int user_power_level; /* in dBm */
int power_constr_level; /* in dBm */
+ enum ieee80211_smps_mode smps_mode;
+
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +952,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+extern bool ieee80211_disable_40mhz_24ghz;
+
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -886,6 +966,10 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
struct cfg80211_disassoc_request *req,
void *cookie);
+int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ const u8 *buf, size_t len, u64 *cookie);
ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -905,7 +989,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
ieee80211_rx_result
ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 *addr, u32 supp_rates);
+ u8 *bssid, u8 *addr, u32 supp_rates,
+ gfp_t gfp);
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params);
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
@@ -937,7 +1022,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
+/* off-channel helpers */
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+void ieee80211_offchannel_return(struct ieee80211_local *local,
+ bool enable_beaconing);
+
/* interface handling */
+int ieee80211_iface_init(void);
+void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device **new_dev, enum nl80211_iftype type,
struct vif_params *params);
@@ -948,6 +1041,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_recalc_idle(struct ieee80211_local *local);
+static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
+{
+ return netif_running(sdata->dev);
+}
+
/* tx handling */
void ieee80211_clear_tx_pending(struct ieee80211_local *local);
void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1074,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
+int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps, const u8 *da,
+ const u8 *bssid);
void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1187,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
struct ieee802_11_elems *elems,
enum ieee80211_band band);
+int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+void ieee80211_recalc_smps(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *forsdata);
+
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
+size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+
+/* internal work items */
+void ieee80211_work_init(struct ieee80211_local *local);
+void ieee80211_add_work(struct ieee80211_work *wk);
+void free_work(struct ieee80211_work *wk);
+void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
+ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie);
+int ieee80211_wk_cancel_remain_on_channel(
+ struct ieee80211_sub_if_data *sdata, u64 cookie);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
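
The new struct ieee80211_work above combines a type discriminator, a union of per-type state (probe_auth, assoc, remain) and a trailing variable-length IE buffer. For readers less familiar with that pattern, here is a minimal, self-contained userspace sketch of the same idea; the names are illustrative only and are not the mac80211 definitions.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum work_type { WORK_PROBE_AUTH, WORK_ASSOC, WORK_REMAIN_ON_CHANNEL };

struct work_item {
	enum work_type type;
	union {
		struct { int tries; unsigned short algorithm; } probe_auth;
		struct { int tries; unsigned short capability; } assoc;
		struct { unsigned int duration_ms; } remain;
	};
	size_t ie_len;
	unsigned char ie[];	/* flexible array: extra IEs follow the struct */
};

static struct work_item *alloc_work(enum work_type type,
				    const unsigned char *ie, size_t ie_len)
{
	/* one allocation covers the fixed part plus the trailing IEs */
	struct work_item *wk = calloc(1, sizeof(*wk) + ie_len);

	if (!wk)
		return NULL;
	wk->type = type;
	wk->ie_len = ie_len;
	memcpy(wk->ie, ie, ie_len);
	return wk;
}

int main(void)
{
	static const unsigned char ssid_ie[] = { 0x00, 0x04, 't', 'e', 's', 't' };
	struct work_item *wk = alloc_work(WORK_PROBE_AUTH, ssid_ie, sizeof(ssid_ie));

	if (!wk)
		return 1;
	wk->probe_auth.tries = 1;
	printf("type=%d tries=%d ie_len=%zu\n",
	       wk->type, wk->probe_auth.tries, wk->ie_len);
	free(wk);
	return 0;
}

The union keeps the per-type state small while the flexible array lets one allocation carry the auth or assoc IEs, which is why "ie[0] must be last" in the real structure.
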
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 32abae3ce32..0793d7a8d74 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -62,6 +62,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int ieee80211_change_mac(struct net_device *dev, void *addr)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sockaddr *sa = addr;
+ int ret;
+
+ if (ieee80211_sdata_running(sdata))
+ return -EBUSY;
+
+ ret = eth_mac_addr(dev, sa);
+
+ if (ret == 0)
+ memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+
+ return ret;
+}
+
static inline int identical_mac_addr_allowed(int type1, int type2)
{
return type1 == NL80211_IFTYPE_MONITOR ||
@@ -82,7 +99,6 @@ static int ieee80211_open(struct net_device *dev)
struct ieee80211_sub_if_data *nsdata;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- struct ieee80211_if_init_conf conf;
u32 changed = 0;
int res;
u32 hw_reconf_flags = 0;
@@ -97,7 +113,7 @@ static int ieee80211_open(struct net_device *dev)
list_for_each_entry(nsdata, &local->interfaces, list) {
struct net_device *ndev = nsdata->dev;
- if (ndev != dev && netif_running(ndev)) {
+ if (ndev != dev && ieee80211_sdata_running(nsdata)) {
/*
* Allow only a single IBSS interface to be up at any
* time. This is restricted because beacon distribution
@@ -183,7 +199,7 @@ static int ieee80211_open(struct net_device *dev)
struct net_device *ndev = nsdata->dev;
/*
- * No need to check netif_running since we do not allow
+ * No need to check whether it is running since we do not allow
* it to start up with this invalid address.
*/
if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -234,10 +250,7 @@ static int ieee80211_open(struct net_device *dev)
ieee80211_configure_filter(local);
break;
default:
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = dev->dev_addr;
- res = drv_add_interface(local, &conf);
+ res = drv_add_interface(local, &sdata->vif);
if (res)
goto err_stop;
@@ -320,7 +333,7 @@ static int ieee80211_open(struct net_device *dev)
return 0;
err_del_interface:
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
err_stop:
if (!local->open_count)
drv_stop(local);
@@ -335,7 +348,6 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
unsigned long flags;
struct sk_buff *skb, *tmp;
@@ -348,6 +360,11 @@ static int ieee80211_stop(struct net_device *dev)
netif_tx_stop_all_queues(dev);
/*
+ * Purge work for this interface.
+ */
+ ieee80211_work_purge(sdata);
+
+ /*
* Now delete all active aggregation sessions.
*/
rcu_read_lock();
@@ -514,12 +531,9 @@ static int ieee80211_stop(struct net_device *dev)
BSS_CHANGED_BEACON_ENABLED);
}
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = dev->dev_addr;
/* disable all keys for as long as this netdev is down */
ieee80211_disable_keys(sdata);
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
}
sdata->bss = NULL;
@@ -659,7 +673,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
};
@@ -681,10 +695,14 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ if (!ieee80211_is_data(hdr->frame_control)) {
skb->priority = 7;
return ieee802_1d_to_ac[skb->priority];
}
+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ skb->priority = 0;
+ return ieee802_1d_to_ac[skb->priority];
+ }
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
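
The monitor-queue hunk above applies the usual 802.1D priority to WMM access category mapping: non-data frames get priority 7, non-QoS data gets priority 0, and QoS data takes the TID from the QoS control field. A standalone sketch of that selection with an illustrative priority-to-AC table in mac80211's queue numbering (0 = voice ... 3 = background); the real table lives in mac80211's WME code.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

/* 802.1D priority -> AC queue, 0 = VO, 1 = VI, 2 = BE, 3 = BK */
static const int prio_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static int select_queue(int is_data, int is_qos_data, int tid)
{
	int prio;

	if (!is_data)
		prio = 7;		/* management/control: highest priority */
	else if (!is_qos_data)
		prio = 0;		/* plain data: best effort */
	else
		prio = tid & 0x7;	/* QoS data: TID from the QoS control field */

	return prio_to_ac[prio];
}

int main(void)
{
	printf("mgmt frame   -> queue %d\n", select_queue(0, 0, 0));
	printf("non-QoS data -> queue %d\n", select_queue(1, 0, 0));
	printf("QoS data TID 5 -> queue %d\n", select_queue(1, 1, 5));
	return 0;
}
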
@@ -779,7 +797,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
* and goes into the requested mode.
*/
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
return -EBUSY;
/* Purge and reset type-dependent state. */
@@ -833,6 +851,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
sdata = netdev_priv(ndev);
ndev->ieee80211_ptr = &sdata->wdev;
+ memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+ memcpy(sdata->name, ndev->name, IFNAMSIZ);
/* initialise type-independent data */
sdata->wdev.wiphy = local->hw.wiphy;
@@ -844,8 +864,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
INIT_LIST_HEAD(&sdata->key_list);
- sdata->force_unicast_rateidx = -1;
- sdata->max_ratectrl_rateidx = -1;
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ struct ieee80211_supported_band *sband;
+ sband = local->hw.wiphy->bands[i];
+ sdata->rc_rateidx_mask[i] =
+ sband ? (1 << sband->n_bitrates) - 1 : 0;
+ }
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
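
The rc_rateidx_mask initialisation above uses the common trick of turning a count of n legacy bitrates into an n-bit all-ones mask, (1 << n) - 1, per band. A tiny worked example; the band sizes here are illustrative, not taken from any particular driver.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

int main(void)
{
	/* e.g. 12 bitrates on 2.4 GHz, 8 on 5 GHz (illustrative counts) */
	int n_bitrates[2] = { 12, 8 };
	int i;

	for (i = 0; i < 2; i++) {
		unsigned int mask =
			n_bitrates[i] ? (1u << n_bitrates[i]) - 1 : 0;

		printf("band %d: n=%d mask=0x%x\n", i, n_bitrates[i], mask);
	}
	return 0;
}
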
@@ -938,6 +962,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
wiphy_name(local->hw.wiphy));
#endif
+ drv_flush(local, false);
+
local->hw.conf.flags |= IEEE80211_CONF_IDLE;
return IEEE80211_CONF_CHANGE_IDLE;
}
@@ -947,16 +973,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int count = 0;
+ if (!list_empty(&local->work_list))
+ return ieee80211_idle_off(local, "working");
+
if (local->scanning)
return ieee80211_idle_off(local, "scanning");
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
/* do not count disabled managed interfaces */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated &&
- list_empty(&sdata->u.mgd.work_list))
+ !sdata->u.mgd.associated)
continue;
/* do not count unused IBSS interfaces */
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -984,3 +1012,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
if (chg)
ieee80211_hw_config(local, chg);
}
+
+static int netdev_notify(struct notifier_block *nb,
+ unsigned long state,
+ void *ndev)
+{
+ struct net_device *dev = ndev;
+ struct ieee80211_sub_if_data *sdata;
+
+ if (state != NETDEV_CHANGENAME)
+ return 0;
+
+ if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
+ return 0;
+
+ if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
+ return 0;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ memcpy(sdata->name, dev->name, IFNAMSIZ);
+
+ ieee80211_debugfs_rename_netdev(sdata);
+ return 0;
+}
+
+static struct notifier_block mac80211_netdev_notifier = {
+ .notifier_call = netdev_notify,
+};
+
+int ieee80211_iface_init(void)
+{
+ return register_netdevice_notifier(&mac80211_netdev_notifier);
+}
+
+void ieee80211_iface_exit(void)
+{
+ unregister_netdevice_notifier(&mac80211_netdev_notifier);
+}
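
The iface.c changes end by registering a netdevice notifier so NETDEV_CHANGENAME events can be mirrored into the interface's private name copy. Below is a minimal sketch of the same pattern as a standalone module, assuming (as in kernels of this vintage) that the notifier's data pointer is the struct net_device itself; the function and module names are made up for the example.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-3.11 convention, as in this tree */

	if (event == NETDEV_CHANGENAME)
		printk(KERN_DEBUG "interface renamed to %s\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_netdev_nb);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_netdev_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
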
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e..8160d9c5372 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
- ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
+ ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
if (!ret) {
spin_lock_bh(&todo_lock);
@@ -181,7 +181,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
- ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif,
+ ret = drv_set_key(key->local, DISABLE_KEY, sdata,
sta, &key->conf);
if (ret)
@@ -421,7 +421,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
*/
/* same here, the AP could be using QoS */
- ap = sta_info_get(key->local, key->sdata->u.mgd.bssid);
+ ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
if (ap) {
if (test_sta_flags(ap, WLAN_STA_WME))
key->conf.flags |=
@@ -443,7 +443,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
add_todo(old_key, KEY_FLAG_TODO_DELETE);
add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +509,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
{
ASSERT_RTNL();
- if (WARN_ON(!netif_running(sdata->dev)))
+ if (WARN_ON(!ieee80211_sdata_running(sdata)))
return;
ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e9..bdc2968c2bb 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
};
+enum ieee80211_internal_tkip_state {
+ TKIP_STATE_NOT_INIT,
+ TKIP_STATE_PHASE1_DONE,
+ TKIP_STATE_PHASE1_HW_UPLOADED,
+};
+
struct tkip_ctx {
u32 iv32;
u16 iv16;
u16 p1k[5];
- int initialized;
+ enum ieee80211_internal_tkip_state state;
};
struct ieee80211_key {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d2d94881f1..06c33b68d8e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
-#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
-#include "debugfs_netdev.h"
+
+
+bool ieee80211_disable_40mhz_24ghz;
+module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
+MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
+ "Disable 40MHz support in the 2.4GHz band");
void ieee80211_configure_filter(struct ieee80211_local *local)
{
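
The hunk above adds a writable module parameter. For reference, the general shape of a boolean parameter exposed under /sys/module/<module>/parameters is sketched below; the names are illustrative, not mac80211's.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_disable_feature;	/* defaults to false */
module_param(example_disable_feature, bool, 0644);
MODULE_PARM_DESC(example_disable_feature,
		 "Disable the example feature");

static int __init example_param_init(void)
{
	pr_debug("example_disable_feature=%d\n", example_disable_feature);
	return 0;
}

static void __exit example_param_exit(void)
{
}

module_init(example_param_init);
module_exit(example_param_exit);
MODULE_LICENSE("GPL");

With mode 0644 the value can be flipped at runtime through sysfs, which matches how the 40 MHz disable switch above is meant to be used.
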
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
if (scan_chan) {
chan = scan_chan;
channel_type = NL80211_CHAN_NO_HT;
+ } else if (local->tmp_channel) {
+ chan = scan_chan = local->tmp_channel;
+ channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
+ if (!conf_is_ht(&local->hw.conf)) {
+ /*
+ * mac80211.h documents that this is only valid
+ * when the channel is set to an HT type, and
+ * that otherwise STATIC is used.
+ */
+ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
+ } else if (local->hw.conf.smps_mode != local->smps_mode) {
+ local->hw.conf.smps_mode = local->smps_mode;
+ changed |= IEEE80211_CONF_CHANGE_SMPS;
+ }
+
if (scan_chan)
power = chan->max_power;
else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
else if (sdata->vif.type == NL80211_IFTYPE_AP)
- sdata->vif.bss_conf.bssid = sdata->dev->dev_addr;
+ sdata->vif.bss_conf.bssid = sdata->vif.addr;
else if (ieee80211_vif_is_mesh(&sdata->vif)) {
sdata->vif.bss_conf.bssid = zero;
} else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (local->quiescing || !netif_running(sdata->dev) ||
+ if (local->quiescing || !ieee80211_sdata_running(sdata) ||
test_bit(SCAN_SW_SCANNING, &local->scanning)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
}
}
- drv_bss_info_changed(local, &sdata->vif,
- &sdata->vif.bss_conf, changed);
+ drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
}
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_restart_hw);
+static void ieee80211_recalc_smps_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, recalc_smps);
+
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_smps(local, NULL);
+ mutex_unlock(&local->iflist_mtx);
+}
+
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops)
{
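
ieee80211_recalc_smps_work above is the standard deferred-work idiom: embed a work_struct in the owning object, recover that object with container_of in the handler, and take the locks the recalculation needs there. A minimal sketch under assumed names:

/* Illustrative sketch only -- not part of the patch. */
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_dev {
	struct mutex lock;
	int value;
	struct work_struct recalc_work;
};

static void example_recalc_work(struct work_struct *work)
{
	struct example_dev *edev =
		container_of(work, struct example_dev, recalc_work);

	mutex_lock(&edev->lock);
	edev->value++;			/* the actual recalculation goes here */
	mutex_unlock(&edev->lock);
}

static struct example_dev *example_dev_alloc(void)
{
	struct example_dev *edev = kzalloc(sizeof(*edev), GFP_KERNEL);

	if (!edev)
		return NULL;
	mutex_init(&edev->lock);
	INIT_WORK(&edev->recalc_work, example_recalc_work);
	return edev;
}

/* callers kick the deferred recalculation from any context */
static inline void example_request_recalc(struct example_dev *edev)
{
	schedule_work(&edev->recalc_work);
}
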
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
WIPHY_FLAG_4ADDR_STATION;
wiphy->privid = mac80211_wiphy_privid;
- /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
- wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
- sizeof(struct cfg80211_bss);
+ wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
local = wiphy_priv(wiphy);
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
+ local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
+ local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
INIT_LIST_HEAD(&local->interfaces);
mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
+ ieee80211_work_init(local);
+
INIT_WORK(&local->restart_work, ieee80211_restart_work);
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
+ local->smps_mode = IEEE80211_SMPS_OFF;
INIT_WORK(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
+ && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
+ "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
+
/*
* Calculate scan IE length -- we need this to alloc
* memory and to subtract from the driver limit. It
@@ -522,8 +558,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
debugfs_hw_add(local);
+ /*
+ * If the driver doesn't specify a max listen interval, use 5,
+ * which should be a safe default.
+ */
if (local->hw.max_listen_interval == 0)
- local->hw.max_listen_interval = 1;
+ local->hw.max_listen_interval = 5;
local->hw.conf.listen_interval = local->hw.max_listen_interval;
@@ -674,11 +714,19 @@ static int __init ieee80211_init(void)
ret = rc80211_pid_init();
if (ret)
- return ret;
+ goto err_pid;
- ieee80211_debugfs_netdev_init();
+ ret = ieee80211_iface_init();
+ if (ret)
+ goto err_netdev;
return 0;
+ err_netdev:
+ rc80211_pid_exit();
+ err_pid:
+ rc80211_minstrel_exit();
+
+ return ret;
}
static void __exit ieee80211_exit(void)
@@ -695,7 +743,7 @@ static void __exit ieee80211_exit(void)
if (mesh_allocated)
ieee80211s_stop();
- ieee80211_debugfs_netdev_exit();
+ ieee80211_iface_exit();
}
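
The reworked ieee80211_init() error path follows the usual kernel convention: one goto label per registration step, unwinding previously registered pieces in reverse order. The same shape in a self-contained userspace sketch with made-up register/unregister pairs:

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static int register_a(void) { puts("register a"); return 0; }
static void unregister_a(void) { puts("unregister a"); }
static int register_b(void) { puts("register b"); return 0; }
static void unregister_b(void) { puts("unregister b"); }
static int register_c(void) { puts("register c"); return -1; /* simulate failure */ }

static int init_all(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto err_a;
	ret = register_b();
	if (ret)
		goto err_b;
	ret = register_c();
	if (ret)
		goto err_c;
	return 0;

 err_c:
	unregister_b();		/* undo in reverse registration order */
 err_b:
	unregister_a();
 err_a:
	return ret;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
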
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a433142959..61080c5fad5 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -457,7 +457,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: running mesh housekeeping\n",
- sdata->dev->name);
+ sdata->name);
#endif
ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +565,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
/* ignore ProbeResp to foreign address */
if (stype == IEEE80211_STYPE_PROBE_RESP &&
- compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
+ compare_ether_addr(mgmt->da, sdata->vif.addr))
return;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +645,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct sk_buff *skb;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f8..ce84237ebad 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -128,9 +128,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -222,7 +222,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, ra, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +335,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
bool process = true;
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (!sta) {
rcu_read_unlock();
return 0;
@@ -374,7 +374,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
- if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
@@ -486,7 +486,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREQ from %pM\n", orig_addr);
- if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
mhwmp_dbg("PREQ is for us\n");
forward = false;
reply = true;
@@ -579,7 +579,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
* replies
*/
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
- if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
/* destination, no forwarding required */
return;
@@ -890,7 +890,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
target_flags = MP_F_RF;
spin_unlock_bh(&mpath->state_lock);
- mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
+ mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
cpu_to_le32(mpath->sn), broadcast_addr, 0,
ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +939,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
+ !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED)) {
mesh_queue_preq(mpath,
@@ -1010,7 +1010,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr,
+ mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
0, MESH_TTL, 0, 0, 0, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae..2312efe04c6 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -260,7 +260,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -377,7 +377,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -605,7 +605,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct mesh_path *mpath;
u32 sn = 0;
- if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
+ if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a424..bc4e20e57ff 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -102,7 +102,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (local->num_sta >= MESH_MAX_PLINKS)
return NULL;
- sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
if (!sta)
return NULL;
@@ -169,7 +169,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
mgmt->u.action.category = MESH_PLINK_CATEGORY;
mgmt->u.action.u.plink_action.action_code = action;
@@ -234,14 +234,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
rcu_read_lock();
- sta = sta_info_get(local, hw_addr);
+ sta = sta_info_get(sdata, hw_addr);
if (!sta) {
+ rcu_read_unlock();
+
sta = mesh_plink_alloc(sdata, hw_addr, rates);
- if (!sta) {
- rcu_read_unlock();
+ if (!sta)
return;
- }
- if (sta_info_insert(sta)) {
+ if (sta_info_insert_rcu(sta)) {
rcu_read_unlock();
return;
}
@@ -455,7 +455,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (!sta && ftype != PLINK_OPEN) {
mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
@@ -485,9 +485,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (!sta) {
/* ftype == PLINK_OPEN */
u32 rates;
+
+ rcu_read_unlock();
+
if (!mesh_plink_free_count(sdata)) {
mpl_dbg("Mesh plink error: no more free plinks\n");
- rcu_read_unlock();
return;
}
@@ -495,10 +497,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
if (!sta) {
mpl_dbg("Mesh plink error: plink table full\n");
- rcu_read_unlock();
return;
}
- if (sta_info_insert(sta)) {
+ if (sta_info_insert_rcu(sta)) {
rcu_read_unlock();
return;
}
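
The mesh_plink changes move rcu_read_unlock() ahead of the allocation because mesh_plink_alloc() now uses GFP_KERNEL, which may sleep and is therefore not allowed inside an RCU read-side critical section. Below is a hedged sketch of the resulting lookup-or-create shape; the peer_* helpers are placeholders standing in for the sta_info lookup/alloc/insert routines, not real APIs, and the "insert returns with the RCU read lock held" convention mirrors what the hunk relies on.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct peer;	/* opaque for the sketch */

/* placeholder helpers, standing in for the real lookup/insert routines */
struct peer *peer_lookup_rcu(const u8 *addr);
int peer_insert_rcu(struct peer *peer);	/* returns with RCU read lock held */
struct peer *peer_alloc(const u8 *addr, gfp_t gfp);

static void peer_update(const u8 *addr)
{
	struct peer *peer;

	rcu_read_lock();
	peer = peer_lookup_rcu(addr);
	if (!peer) {
		/* drop the read lock: GFP_KERNEL may sleep */
		rcu_read_unlock();

		peer = peer_alloc(addr, GFP_KERNEL);
		if (!peer)
			return;
		if (peer_insert_rcu(peer)) {
			rcu_read_unlock();
			return;
		}
	}

	/* ... use peer under the RCU read lock ... */
	rcu_read_unlock();
}
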
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 05a18f43e1b..41812a15eea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -27,10 +27,6 @@
#include "rate.h"
#include "led.h"
-#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
-#define IEEE80211_AUTH_MAX_TRIES 3
-#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
-#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_MAX_PROBE_TRIES 5
/*
@@ -75,11 +71,8 @@ enum rx_mgmt_action {
/* caller must call cfg80211_send_disassoc() */
RX_MGMT_CFG80211_DISASSOC,
- /* caller must call cfg80211_auth_timeout() & free work */
- RX_MGMT_CFG80211_AUTH_TO,
-
- /* caller must call cfg80211_assoc_timeout() & free work */
- RX_MGMT_CFG80211_ASSOC_TO,
+ /* caller must tell cfg80211 about internal error */
+ RX_MGMT_CFG80211_ASSOC_ERROR,
};
/* utils */
@@ -122,27 +115,6 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
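
ecw2cw() above expands an exponent-coded contention window from the WMM parameter record into the actual CW value, CW = 2^ECW - 1. Two worked values as a tiny standalone check:

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

int main(void)
{
	printf("ECW 4  -> CW %d\n", ecw2cw(4));	/* 15 */
	printf("ECW 10 -> CW %d\n", ecw2cw(10));	/* 1023 */
	return 0;
}
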
-static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < bss->supp_rates_len; i++) {
- int rate = (bss->supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
/*
* ieee80211_enable_ht should be called only after the operating band
* has been determined as ht configuration depends on the hw's
@@ -202,7 +174,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, 0);
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_HT_CHANGED);
@@ -228,209 +200,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb;
- struct ieee80211_mgmt *mgmt;
- u8 *pos;
- const u8 *ies, *ht_ie;
- int i, len, count, rates_len, supp_rates_len;
- u16 capab;
- int wmm = 0;
- struct ieee80211_supported_band *sband;
- u32 rates = 0;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + 200 + wk->ie_len +
- wk->ssid_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
- "frame\n", sdata->dev->name);
- return;
- }
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
- capab = ifmgd->capab;
-
- if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
- }
-
- if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
- capab |= WLAN_CAPABILITY_PRIVACY;
- if (wk->bss->wmm_used)
- wmm = 1;
-
- /* get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode) */
- rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
-
- if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
- (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
- capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
-
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
- memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wk->prev_bssid)) {
- skb_put(skb, 10);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_REASSOC_REQ);
- mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.reassoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
- ETH_ALEN);
- } else {
- skb_put(skb, 4);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ASSOC_REQ);
- mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.assoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- }
-
- /* SSID */
- ies = pos = skb_put(skb, 2 + wk->ssid_len);
- *pos++ = WLAN_EID_SSID;
- *pos++ = wk->ssid_len;
- memcpy(pos, wk->ssid, wk->ssid_len);
-
- /* add all rates which were marked to be used above */
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- len = sband->n_bitrates;
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- if (++count == 8)
- break;
- }
- }
-
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
- }
-
- if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
- /* 1. power capabilities */
- pos = skb_put(skb, 4);
- *pos++ = WLAN_EID_PWR_CAPABILITY;
- *pos++ = 2;
- *pos++ = 0; /* min tx power */
- *pos++ = local->hw.conf.channel->max_power; /* max tx power */
-
- /* 2. supported channels */
- /* TODO: get this in reg domain format */
- pos = skb_put(skb, 2 * sband->n_channels + 2);
- *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
- *pos++ = 2 * sband->n_channels;
- for (i = 0; i < sband->n_channels; i++) {
- *pos++ = ieee80211_frequency_to_channel(
- sband->channels[i].center_freq);
- *pos++ = 1; /* one channel in the subband*/
- }
- }
-
- if (wk->ie_len && wk->ie) {
- pos = skb_put(skb, wk->ie_len);
- memcpy(pos, wk->ie, wk->ie_len);
- }
-
- if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
- pos = skb_put(skb, 9);
- *pos++ = WLAN_EID_VENDOR_SPECIFIC;
- *pos++ = 7; /* len */
- *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
- *pos++ = 0x50;
- *pos++ = 0xf2;
- *pos++ = 2; /* WME */
- *pos++ = 0; /* WME info */
- *pos++ = 1; /* WME ver */
- *pos++ = 0;
- }
-
- /* wmm support is a must to HT */
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise
- * ciphers in HT mode. We still associate in non-ht
- * mode (11a/b/g) if any one of these ciphers is
- * configured as pairwise.
- */
- if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
- sband->ht_cap.ht_supported &&
- (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
- ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
- (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
- struct ieee80211_ht_info *ht_info =
- (struct ieee80211_ht_info *)(ht_ie + 2);
- u16 cap = sband->ht_cap.cap;
- __le16 tmp;
- u32 flags = local->hw.conf.channel->flags;
-
- switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- }
-
- tmp = cpu_to_le16(cap);
- pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
- /* TODO: needs a define here for << 2 */
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density << 2);
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- }
-
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
-}
-
-
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
void *cookie)
@@ -443,7 +212,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "deauth/disassoc frame\n", sdata->dev->name);
+ "deauth/disassoc frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +220,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
skb_put(skb, 2);
@@ -476,30 +245,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_pspoll *pspoll;
struct sk_buff *skb;
- u16 fc;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "pspoll frame\n", sdata->dev->name);
+ skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
+ if (!skb)
return;
- }
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
- memset(pspoll, 0, sizeof(*pspoll));
- fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
- pspoll->frame_control = cpu_to_le16(fc);
- pspoll->aid = cpu_to_le16(ifmgd->aid);
-
- /* aid in PS-Poll has its two MSBs each set to 1 */
- pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
- memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
- memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
+ pspoll = (struct ieee80211_pspoll *) skb->data;
+ pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -510,30 +264,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
int powersave)
{
struct sk_buff *skb;
+ struct ieee80211_hdr_3addr *nullfunc;
+
+ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+ if (!skb)
+ return;
+
+ nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
+ if (powersave)
+ nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
__le16 fc;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
- "frame\n", sdata->dev->name);
+ printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
+ "nullfunc frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(nullfunc, 0, 24);
+ nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
+ memset(nullfunc, 0, 30);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
- if (powersave)
- fc |= cpu_to_le16(IEEE80211_FCTL_PM);
+ IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
nullfunc->frame_control = fc;
memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
- memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -546,7 +317,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
mutex_lock(&ifmgd->mtx);
@@ -557,7 +328,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
/* XXX: shouldn't really modify cfg80211-owned data! */
- ifmgd->associated->cbss.channel = sdata->local->oper_channel;
+ ifmgd->associated->channel = sdata->local->oper_channel;
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +355,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
struct ieee80211_bss *bss)
{
+ struct cfg80211_bss *cbss =
+ container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_channel *new_ch;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +390,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifmgd->chswitch_timer,
jiffies +
msecs_to_jiffies(sw_elem->count *
- bss->cbss.beacon_interval));
+ cbss->beacon_interval));
}
}
@@ -661,8 +434,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
} else {
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
ieee80211_send_nullfunc(local, sdata, 1);
- conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ conf->flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
}
@@ -691,8 +467,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
return;
}
+ if (!list_empty(&local->work_list)) {
+ local->ps_sdata = NULL;
+ goto change;
+ }
+
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
@@ -701,7 +482,8 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
}
if (count == 1 && found->u.mgd.powersave &&
- found->u.mgd.associated && list_empty(&found->u.mgd.work_list) &&
+ found->u.mgd.associated &&
+ found->u.mgd.associated->beacon_ies &&
!(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL))) {
s32 beaconint_us;
@@ -715,20 +497,29 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
if (beaconint_us > latency) {
local->ps_sdata = NULL;
} else {
- u8 dtimper = found->vif.bss_conf.dtim_period;
+ struct ieee80211_bss *bss;
int maxslp = 1;
+ u8 dtimper;
+
+ bss = (void *)found->u.mgd.associated->priv;
+ dtimper = bss->dtim_period;
- if (dtimper > 1)
+ /* If the TIM IE is invalid, pretend the value is 1 */
+ if (!dtimper)
+ dtimper = 1;
+ else if (dtimper > 1)
maxslp = min_t(int, dtimper,
latency / beaconint_us);
local->hw.conf.max_sleep_period = maxslp;
+ local->hw.conf.ps_dtim_period = dtimper;
local->ps_sdata = found;
}
} else {
local->ps_sdata = NULL;
}
+ change:
ieee80211_change_ps(local);
}
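
The powersave recalculation above picks the deepest sleep the AP's DTIM schedule and the requested latency allow: with the beacon interval in TUs (1 TU = 1024 microseconds), the interface may sleep up to min(dtim_period, latency / beacon_interval) beacons, and an invalid TIM element is treated as a DTIM period of 1. A worked example with made-up numbers:

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>

static int max_sleep_period(int beacon_int_tu, int dtim_period, long latency_us)
{
	long beaconint_us = beacon_int_tu * 1024L;	/* 1 TU = 1024 microseconds */
	int maxslp = 1;

	if (!dtim_period)		/* invalid TIM element: pretend it is 1 */
		dtim_period = 1;

	if (beaconint_us > latency_us)
		return 0;		/* PS not worthwhile for this latency budget */

	if (dtim_period > 1) {
		int by_latency = (int)(latency_us / beaconint_us);

		maxslp = dtim_period < by_latency ? dtim_period : by_latency;
	}
	return maxslp;
}

int main(void)
{
	/* e.g. 100 TU beacons, DTIM period 3, 1 second latency budget */
	printf("max sleep = %d beacons\n", max_sleep_period(100, 3, 1000000L));
	return 0;
}
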
@@ -753,6 +544,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
struct ieee80211_sub_if_data *sdata = local->ps_sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* can only happen when PS was just disabled anyway */
if (!sdata)
@@ -761,11 +553,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
ieee80211_send_nullfunc(local, sdata, 1);
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+ (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -786,9 +583,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_tx_queue_params params;
size_t left;
int count;
- u8 *pos;
+ u8 *pos, uapsd_queues = 0;
- if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED))
+ if (local->hw.queues < 4)
return;
if (!wmm_param)
@@ -796,6 +593,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
return;
+
+ if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
+ uapsd_queues = local->uapsd_queues;
+
count = wmm_param[6] & 0x0f;
if (count == ifmgd->wmm_last_param_set)
return;
@@ -810,6 +611,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
for (; left >= 4; left -= 4, pos += 4) {
int aci = (pos[0] >> 5) & 0x03;
int acm = (pos[0] >> 4) & 0x01;
+ bool uapsd = false;
int queue;
switch (aci) {
@@ -817,22 +619,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
queue = 3;
if (acm)
local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd = true;
break;
case 2: /* AC_VI */
queue = 1;
if (acm)
local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd = true;
break;
case 3: /* AC_VO */
queue = 0;
if (acm)
local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd = true;
break;
case 0: /* AC_BE */
default:
queue = 2;
if (acm)
local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd = true;
break;
}
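
The switch above walks the WMM parameter records, maps each ACI to a hardware queue and, when u-APSD was negotiated, marks the AC as u-APSD-enabled if the corresponding bit is set in the local uapsd_queues bitmask. A compact sketch of that mapping; the bit values below are illustrative stand-ins for the per-AC QoS-info flags, not the kernel's constants.

/* Illustrative sketch only -- not part of the patch. */
#include <stdio.h>
#include <stdbool.h>

/* illustrative stand-ins for the per-AC QoS-info flags */
#define EX_UAPSD_AC_VO	0x01
#define EX_UAPSD_AC_VI	0x02
#define EX_UAPSD_AC_BK	0x04
#define EX_UAPSD_AC_BE	0x08

struct ac_map { int queue; unsigned int uapsd_bit; };

/* ACI 0..3 = BE, BK, VI, VO; queue numbering: 0 = VO ... 3 = BK */
static const struct ac_map aci_map[4] = {
	{ 2, EX_UAPSD_AC_BE },
	{ 3, EX_UAPSD_AC_BK },
	{ 1, EX_UAPSD_AC_VI },
	{ 0, EX_UAPSD_AC_VO },
};

int main(void)
{
	unsigned int uapsd_queues = EX_UAPSD_AC_VO | EX_UAPSD_AC_VI;
	int aci;

	for (aci = 0; aci < 4; aci++) {
		bool uapsd = uapsd_queues & aci_map[aci].uapsd_bit;

		printf("ACI %d -> queue %d, uapsd=%d\n",
		       aci, aci_map[aci].queue, uapsd);
	}
	return 0;
}
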
@@ -840,11 +650,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
params.cw_min = ecw2cw(pos[1] & 0x0f);
params.txop = get_unaligned_le16(pos + 2);
+ params.uapsd = uapsd;
+
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
- "cWmin=%d cWmax=%d txop=%d\n",
+ "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
wiphy_name(local->hw.wiphy), queue, aci, acm,
- params.aifs, params.cw_min, params.cw_max, params.txop);
+ params.aifs, params.cw_min, params.cw_max, params.txop,
+ params.uapsd);
#endif
if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
printk(KERN_DEBUG "%s: failed to set TX queue "
@@ -871,6 +684,8 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
}
use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
+ if (sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
+ use_short_slot = true;
if (use_protection != bss_conf->use_cts_prot) {
bss_conf->use_cts_prot = use_protection;
@@ -891,25 +706,23 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
}
static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
+ struct cfg80211_bss *cbss,
u32 bss_info_changed)
{
+ struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss = wk->bss;
bss_info_changed |= BSS_CHANGED_ASSOC;
/* set timing information */
- sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval;
- sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
- sdata->vif.bss_conf.dtim_period = bss->dtim_period;
+ sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
+ sdata->vif.bss_conf.timestamp = cbss->tsf;
bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
- bss->cbss.capability, bss->has_erp_value, bss->erp_value);
+ cbss->capability, bss->has_erp_value, bss->erp_value);
- sdata->u.mgd.associated = bss;
- sdata->u.mgd.old_associate_work = wk;
- memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
+ sdata->u.mgd.associated = cbss;
+ memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
/* just to be sure */
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -940,99 +753,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_smps(local, sdata);
mutex_unlock(&local->iflist_mtx);
netif_tx_start_all_queues(sdata->dev);
netif_carrier_on(sdata->dev);
}
-static enum rx_mgmt_action __must_check
-ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_AUTH_TO;
- }
-
- printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid,
- wk->tries);
-
- /*
- * Direct probe is sent to broadcast address as some APs
- * will not answer to direct packet in unassociated state.
- */
- ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
-}
-
-
-static enum rx_mgmt_action __must_check
-ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: authentication with AP %pM"
- " timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_AUTH_TO;
- }
-
- printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
-
- ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
- wk->bss->cbss.bssid, NULL, 0, 0);
- wk->auth_transaction = 2;
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
-}
-
-static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
- bool deauth)
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -1045,21 +773,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->associated))
return;
- memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
ifmgd->associated = NULL;
memset(ifmgd->bssid, 0, ETH_ALEN);
- if (deauth) {
- kfree(ifmgd->old_associate_work);
- ifmgd->old_associate_work = NULL;
- } else {
- struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
-
- wk->state = IEEE80211_MGD_STATE_IDLE;
- list_add(&wk->list, &ifmgd->work_list);
- }
-
/*
* we need to commit the associated = NULL change because the
* scan code uses that to determine whether this iface should
@@ -1078,9 +796,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
netif_carrier_off(sdata->dev);
rcu_read_lock();
- sta = sta_info_get(local, bssid);
- if (sta)
+ sta = sta_info_get(sdata, bssid);
+ if (sta) {
+ set_sta_flags(sta, WLAN_STA_DISASSOC);
ieee80211_sta_tear_down_BA_sessions(sta);
+ }
rcu_read_unlock();
changed |= ieee80211_reset_erp_info(sdata);
@@ -1113,57 +833,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_BSSID;
ieee80211_bss_info_change_notify(sdata, changed);
- rcu_read_lock();
-
- sta = sta_info_get(local, bssid);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
-
- sta_info_unlink(&sta);
-
- rcu_read_unlock();
-
- sta_info_destroy(sta);
-}
-
-static enum rx_mgmt_action __must_check
-ieee80211_associate(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
- printk(KERN_DEBUG "%s: association with AP %pM"
- " timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_ASSOC_TO;
- }
-
- printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
- ieee80211_send_assoc(sdata, wk);
-
- wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
+ sta_info_destroy_addr(sdata, bssid);
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1189,8 +859,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
- ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid,
+ ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
ssid + 2, ssid[1], NULL, 0);
ifmgd->probe_send_count++;
@@ -1204,12 +874,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool already = false;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (sdata->local->scanning)
return;
+ if (sdata->local->tmp_channel)
+ return;
+
mutex_lock(&ifmgd->mtx);
if (!ifmgd->associated)
@@ -1218,7 +891,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (beacon && net_ratelimit())
printk(KERN_DEBUG "%s: detected beacon loss from AP "
- "- sending probe request\n", sdata->dev->name);
+ "- sending probe request\n", sdata->name);
#endif
/*
@@ -1271,88 +944,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_beacon_loss);
-static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- wk->state = IEEE80211_MGD_STATE_IDLE;
- printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
-}
-
-
-static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt,
- size_t len)
-{
- u8 *pos;
- struct ieee802_11_elems elems;
-
- pos = mgmt->u.auth.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
- if (!elems.challenge)
- return;
- ieee80211_send_auth(sdata, 3, wk->auth_alg,
- elems.challenge - 2, elems.challenge_len + 2,
- wk->bss->cbss.bssid,
- wk->key, wk->key_len, wk->key_idx);
- wk->auth_transaction = 4;
-}
-
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len)
-{
- u16 auth_alg, auth_transaction, status_code;
-
- if (wk->state != IEEE80211_MGD_STATE_AUTH)
- return RX_MGMT_NONE;
-
- if (len < 24 + 6)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
-
- auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
- auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- status_code = le16_to_cpu(mgmt->u.auth.status_code);
-
- if (auth_alg != wk->auth_alg ||
- auth_transaction != wk->auth_transaction)
- return RX_MGMT_NONE;
-
- if (status_code != WLAN_STATUS_SUCCESS) {
- list_del(&wk->list);
- kfree(wk);
- return RX_MGMT_CFG80211_AUTH;
- }
-
- switch (wk->auth_alg) {
- case WLAN_AUTH_OPEN:
- case WLAN_AUTH_LEAP:
- case WLAN_AUTH_FT:
- ieee80211_auth_completed(sdata, wk);
- return RX_MGMT_CFG80211_AUTH;
- case WLAN_AUTH_SHARED_KEY:
- if (wk->auth_transaction == 4) {
- ieee80211_auth_completed(sdata, wk);
- return RX_MGMT_CFG80211_AUTH;
- } else
- ieee80211_auth_challenge(sdata, wk, mgmt, len);
- break;
- }
-
- return RX_MGMT_NONE;
-}
-
-
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1364,23 +957,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
ASSERT_MGD_MTX(ifmgd);
- if (wk)
- bssid = wk->bss->cbss.bssid;
- else
- bssid = ifmgd->associated->cbss.bssid;
+ bssid = ifmgd->associated->bssid;
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
- sdata->dev->name, bssid, reason_code);
+ sdata->name, bssid, reason_code);
- if (!wk) {
- ieee80211_set_disassoc(sdata, true);
- ieee80211_recalc_idle(sdata->local);
- } else {
- list_del(&wk->list);
- kfree(wk);
- }
+ ieee80211_set_disassoc(sdata);
+ ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DEAUTH;
}
@@ -1401,123 +986,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->associated))
return RX_MGMT_NONE;
- if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN)))
+ if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
- sdata->dev->name, mgmt->sa, reason_code);
+ sdata->name, mgmt->sa, reason_code);
- ieee80211_set_disassoc(sdata, false);
+ ieee80211_set_disassoc(sdata);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- bool reassoc)
+static bool ieee80211_assoc_success(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
+ struct cfg80211_bss *cbss = wk->assoc.bss;
+ u8 *pos;
u32 rates, basic_rates;
- u16 capab_info, status_code, aid;
+ u16 capab_info, aid;
struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- u8 *pos;
u32 changed = 0;
- int i, j;
- bool have_higher_than_11mbit = false, newsta = false;
+ int i, j, err;
+ bool have_higher_than_11mbit = false;
u16 ap_ht_cap_flags;
- /*
- * AssocResp and ReassocResp have identical structure, so process both
- * of them in this function.
- */
-
- if (len < 24 + 6)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
+ /* AssocResp and ReassocResp have identical structure */
- capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
- status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
-
- printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
- "status=%d aid=%d)\n",
- sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
- capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
-
- pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
-
- if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
- elems.timeout_int && elems.timeout_int_len == 5 &&
- elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
- u32 tu, ms;
- tu = get_unaligned_le32(elems.timeout_int + 1);
- ms = tu * 1024 / 1000;
- printk(KERN_DEBUG "%s: AP rejected association temporarily; "
- "comeback duration %u TU (%u ms)\n",
- sdata->dev->name, tu, ms);
- wk->timeout = jiffies + msecs_to_jiffies(ms);
- if (ms > IEEE80211_ASSOC_TIMEOUT)
- run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
- return RX_MGMT_NONE;
- }
-
- if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
- sdata->dev->name, status_code);
- wk->state = IEEE80211_MGD_STATE_IDLE;
- return RX_MGMT_CFG80211_ASSOC;
- }
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
- "set\n", sdata->dev->name, aid);
+ "set\n", sdata->name, aid);
aid &= ~(BIT(15) | BIT(14));
+ pos = mgmt->u.assoc_resp.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+
if (!elems.supp_rates) {
printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
- sdata->dev->name);
- return RX_MGMT_NONE;
+ sdata->name);
+ return false;
}
- printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
ifmgd->aid = aid;
- rcu_read_lock();
-
- /* Add STA entry for the AP */
- sta = sta_info_get(local, wk->bss->cbss.bssid);
+ sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
if (!sta) {
- newsta = true;
-
- rcu_read_unlock();
-
- sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
- if (!sta) {
- printk(KERN_DEBUG "%s: failed to alloc STA entry for"
- " the AP\n", sdata->dev->name);
- return RX_MGMT_NONE;
- }
-
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
- WLAN_STA_ASSOC_AP);
- if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
-
- rcu_read_lock();
+ printk(KERN_DEBUG "%s: failed to alloc STA entry for"
+ " the AP\n", sdata->name);
+ return false;
}
+ set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
+ WLAN_STA_ASSOC_AP);
+ if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
+ set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+
rates = 0;
basic_rates = 0;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1580,40 +1114,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (elems.wmm_param)
set_sta_flags(sta, WLAN_STA_WME);
- if (newsta) {
- int err = sta_info_insert(sta);
- if (err) {
- printk(KERN_DEBUG "%s: failed to insert STA entry for"
- " the AP (error %d)\n", sdata->dev->name, err);
- rcu_read_unlock();
- return RX_MGMT_NONE;
- }
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG "%s: failed to insert STA entry for"
+ " the AP (error %d)\n", sdata->name, err);
+ return false;
}
- rcu_read_unlock();
-
if (elems.wmm_param)
ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
elems.wmm_param_len);
else
ieee80211_set_wmm_default(sdata);
+ local->oper_channel = wk->chan;
+
if (elems.ht_info_elem && elems.wmm_param &&
- (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
+ (sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- wk->bss->cbss.bssid,
- ap_ht_cap_flags);
-
- /* delete work item -- must be before set_associated for PS */
- list_del(&wk->list);
+ cbss->bssid, ap_ht_cap_flags);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
bss_conf->aid = aid;
bss_conf->assoc_capability = capab_info;
- /* this will take ownership of wk */
- ieee80211_set_associated(sdata, wk, changed);
+ ieee80211_set_associated(sdata, cbss, changed);
+
+ /*
+ * If we're using 4-addr mode, let the AP know that we're
+ * doing so, so that it can create the STA VLAN on its side
+ */
+ if (ifmgd->use_4addr)
+ ieee80211_send_4addr_nullfunc(local, sdata);
/*
* Start timer to probe the connection to the AP now.
@@ -1622,7 +1156,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
mod_beacon_timer(sdata);
- return RX_MGMT_CFG80211_ASSOC;
+ return true;
}
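For reference on the AID handling in ieee80211_assoc_success() above: per IEEE 802.11, the AID field of an (Re)Association Response carries the association ID in its low 14 bits with bits 15:14 set to 1, so a field value of 0xC001 decodes to AID 1. The mask ~(BIT(15) | BIT(14)) strips those two bits before the value is stored in ifmgd->aid and bss_conf->aid.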
@@ -1637,6 +1171,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
int freq;
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
+ bool need_ps = false;
+
+ if (sdata->u.mgd.associated) {
+ bss = (void *)sdata->u.mgd.associated->priv;
+ /* not previously set so we may need to recalc */
+ need_ps = !bss->dtim_period;
+ }
if (elems->ds_params && elems->ds_params_len == 1)
freq = ieee80211_channel_to_frequency(elems->ds_params[0]);
@@ -1656,8 +1197,14 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (!sdata->u.mgd.associated)
return;
+ if (need_ps) {
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_ps(local, -1);
+ mutex_unlock(&local->iflist_mtx);
+ }
+
if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
- (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid,
+ (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
ETH_ALEN) == 0)) {
struct ieee80211_channel_sw_ie *sw_elem =
(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1667,19 +1214,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status)
+ struct sk_buff *skb)
{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_if_managed *ifmgd;
- size_t baselen;
+ struct ieee80211_rx_status *rx_status = (void *) skb->cb;
+ size_t baselen, len = skb->len;
struct ieee802_11_elems elems;
ifmgd = &sdata->u.mgd;
ASSERT_MGD_MTX(ifmgd);
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1691,17 +1238,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
- /* direct probe may be part of the association flow */
- if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
- printk(KERN_DEBUG "%s: direct probe responded\n",
- sdata->dev->name);
- wk->tries = 0;
- wk->state = IEEE80211_MGD_STATE_AUTH;
- WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
- }
-
if (ifmgd->associated &&
- memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 &&
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL)) {
ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1774,7 +1312,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (!ifmgd->associated)
return;
- bssid = ifmgd->associated->cbss.bssid;
+ bssid = ifmgd->associated->bssid;
/*
* And in theory even frames from a different AP we were just
@@ -1787,7 +1325,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: cancelling probereq poll due "
- "to a received beacon\n", sdata->dev->name);
+ "to a received beacon\n", sdata->name);
}
#endif
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1865,7 +1403,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (WARN_ON(!sta)) {
rcu_read_unlock();
return;
@@ -1913,9 +1451,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_AUTH:
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
case IEEE80211_STYPE_DEAUTH:
case IEEE80211_STYPE_DISASSOC:
case IEEE80211_STYPE_ACTION:
@@ -1933,7 +1468,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_mgd_work *wk;
enum rx_mgmt_action rma = RX_MGMT_NONE;
u16 fc;
@@ -1944,20 +1478,17 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated &&
- memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid,
- ETH_ALEN) == 0) {
+ memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_BEACON:
ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
rx_status);
break;
case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt,
- skb->len, rx_status);
+ ieee80211_rx_mgmt_probe_resp(sdata, skb);
break;
case IEEE80211_STYPE_DEAUTH:
- rma = ieee80211_rx_mgmt_deauth(sdata, NULL,
- mgmt, skb->len);
+ rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_DISASSOC:
rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
@@ -1968,7 +1499,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
- ifmgd->associated);
+ (void *)ifmgd->associated->priv);
break;
}
mutex_unlock(&ifmgd->mtx);
@@ -1989,58 +1520,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
goto out;
}
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
- continue;
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_AUTH:
- rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_ASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
- skb->len, false);
- break;
- case IEEE80211_STYPE_REASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
- skb->len, true);
- break;
- case IEEE80211_STYPE_DEAUTH:
- rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
- skb->len);
- break;
- }
- /*
- * We've processed this frame for that work, so it can't
- * belong to another work struct.
- * NB: this is also required for correctness because the
- * called functions can free 'wk', and for 'rma'!
- */
- break;
- }
-
mutex_unlock(&ifmgd->mtx);
- switch (rma) {
- case RX_MGMT_NONE:
- /* no action */
- break;
- case RX_MGMT_CFG80211_AUTH:
- cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
- break;
- case RX_MGMT_CFG80211_ASSOC:
- cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
- break;
- case RX_MGMT_CFG80211_DEAUTH:
+ if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
out:
kfree_skb(skb);
@@ -2068,12 +1552,8 @@ static void ieee80211_sta_work(struct work_struct *work)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd;
struct sk_buff *skb;
- struct ieee80211_mgd_work *wk, *tmp;
- LIST_HEAD(free_work);
- enum rx_mgmt_action rma;
- bool anybusy = false;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
@@ -2104,7 +1584,7 @@ static void ieee80211_sta_work(struct work_struct *work)
ifmgd->associated) {
u8 bssid[ETH_ALEN];
- memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(ifmgd, ifmgd->probe_timeout);
@@ -2126,7 +1606,7 @@ static void ieee80211_sta_work(struct work_struct *work)
printk(KERN_DEBUG "No probe response from AP %pM"
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
- ieee80211_set_disassoc(sdata, true);
+ ieee80211_set_disassoc(sdata);
ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
@@ -2141,87 +1621,7 @@ static void ieee80211_sta_work(struct work_struct *work)
}
}
-
- ieee80211_recalc_idle(local);
-
- list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
- if (time_is_after_jiffies(wk->timeout)) {
- /*
- * This work item isn't supposed to be worked on
- * right now, but take care to adjust the timer
- * properly.
- */
- run_again(ifmgd, wk->timeout);
- continue;
- }
-
- switch (wk->state) {
- default:
- WARN_ON(1);
- /* fall through */
- case IEEE80211_MGD_STATE_IDLE:
- /* nothing */
- rma = RX_MGMT_NONE;
- break;
- case IEEE80211_MGD_STATE_PROBE:
- rma = ieee80211_direct_probe(sdata, wk);
- break;
- case IEEE80211_MGD_STATE_AUTH:
- rma = ieee80211_authenticate(sdata, wk);
- break;
- case IEEE80211_MGD_STATE_ASSOC:
- rma = ieee80211_associate(sdata, wk);
- break;
- }
-
- switch (rma) {
- case RX_MGMT_NONE:
- /* no action required */
- break;
- case RX_MGMT_CFG80211_AUTH_TO:
- case RX_MGMT_CFG80211_ASSOC_TO:
- list_del(&wk->list);
- list_add(&wk->list, &free_work);
- wk->tries = rma; /* small abuse but only local */
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
- }
-
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (wk->state != IEEE80211_MGD_STATE_IDLE) {
- anybusy = true;
- break;
- }
- }
- if (!anybusy &&
- test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
- ieee80211_queue_delayed_work(&local->hw,
- &local->scan_work,
- round_jiffies_relative(0));
-
mutex_unlock(&ifmgd->mtx);
-
- list_for_each_entry_safe(wk, tmp, &free_work, list) {
- switch (wk->tries) {
- case RX_MGMT_CFG80211_AUTH_TO:
- cfg80211_send_auth_timeout(sdata->dev,
- wk->bss->cbss.bssid);
- break;
- case RX_MGMT_CFG80211_ASSOC_TO:
- cfg80211_send_assoc_timeout(sdata->dev,
- wk->bss->cbss.bssid);
- break;
- default:
- WARN(1, "unexpected: %d", wk->tries);
- }
-
- list_del(&wk->list);
- kfree(wk);
- }
-
- ieee80211_recalc_idle(local);
}
static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2330,14 +1730,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
skb_queue_head_init(&ifmgd->skb_queue);
- INIT_LIST_HEAD(&ifmgd->work_list);
-
- ifmgd->capab = WLAN_CAPABILITY_ESS;
ifmgd->flags = 0;
- if (sdata->local->hw.queues >= 4)
- ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
mutex_init(&ifmgd->mtx);
+
+ if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
+ ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
+ else
+ ifmgd->req_smps = IEEE80211_SMPS_OFF;
}
/* scan finished notification */
@@ -2368,12 +1768,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
}
/* config hooks */
+static enum work_done_result
+ieee80211_probe_auth_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
+{
+ if (!skb) {
+ cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
+ return WORK_DONE_DESTROY;
+ }
+
+ if (wk->type == IEEE80211_WORK_AUTH) {
+ cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
+ return WORK_DONE_DESTROY;
+ }
+
+ mutex_lock(&wk->sdata->u.mgd.mtx);
+ ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
+
+ wk->type = IEEE80211_WORK_AUTH;
+ wk->probe_auth.tries = 0;
+ return WORK_DONE_REQUEUE;
+}
+
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
- struct ieee80211_mgd_work *wk;
+ struct ieee80211_work *wk;
u16 auth_alg;
switch (req->auth_type) {
@@ -2397,7 +1819,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (!wk)
return -ENOMEM;
- wk->bss = (void *)req->bss;
+ memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
if (req->ie && req->ie_len) {
memcpy(wk->ie, req->ie, req->ie_len);
@@ -2405,68 +1827,83 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
}
if (req->key && req->key_len) {
- wk->key_len = req->key_len;
- wk->key_idx = req->key_idx;
- memcpy(wk->key, req->key, req->key_len);
+ wk->probe_auth.key_len = req->key_len;
+ wk->probe_auth.key_idx = req->key_idx;
+ memcpy(wk->probe_auth.key, req->key, req->key_len);
}
ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- memcpy(wk->ssid, ssid + 2, ssid[1]);
- wk->ssid_len = ssid[1];
-
- wk->state = IEEE80211_MGD_STATE_PROBE;
- wk->auth_alg = auth_alg;
- wk->timeout = jiffies; /* run right away */
+ memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
+ wk->probe_auth.ssid_len = ssid[1];
- /*
- * XXX: if still associated need to tell AP that we're going
- * to sleep and then change channel etc.
- */
- sdata->local->oper_channel = req->bss->channel;
- ieee80211_hw_config(sdata->local, 0);
+ wk->probe_auth.algorithm = auth_alg;
+ wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
- mutex_lock(&ifmgd->mtx);
- list_add(&wk->list, &sdata->u.mgd.work_list);
- mutex_unlock(&ifmgd->mtx);
+ /* if we already have a probe, don't probe again */
+ if (req->bss->proberesp_ies)
+ wk->type = IEEE80211_WORK_AUTH;
+ else
+ wk->type = IEEE80211_WORK_DIRECT_PROBE;
+ wk->chan = req->bss->channel;
+ wk->sdata = sdata;
+ wk->done = ieee80211_probe_auth_done;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
+ ieee80211_add_work(wk);
return 0;
}
-int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_assoc_request *req)
+static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_work *wk, *found = NULL;
- int i, err;
+ struct ieee80211_mgmt *mgmt;
+ u16 status;
- mutex_lock(&ifmgd->mtx);
+ if (!skb) {
+ cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
+ return WORK_DONE_DESTROY;
+ }
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (&wk->bss->cbss == req->bss &&
- wk->state == IEEE80211_MGD_STATE_IDLE) {
- found = wk;
- break;
+ mgmt = (void *)skb->data;
+ status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+
+ if (status == WLAN_STATUS_SUCCESS) {
+ mutex_lock(&wk->sdata->u.mgd.mtx);
+ if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
+ /* oops -- internal error -- send timeout for now */
+ cfg80211_send_assoc_timeout(wk->sdata->dev,
+ wk->filter_ta);
+ return WORK_DONE_DESTROY;
}
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
}
- if (!found) {
- err = -ENOLINK;
- goto out;
- }
+ cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
+ return WORK_DONE_DESTROY;
+}
- list_del(&found->list);
+int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_request *req)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss *bss = (void *)req->bss->priv;
+ struct ieee80211_work *wk;
+ const u8 *ssid;
+ int i;
- wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL);
- if (!wk) {
- list_add(&found->list, &ifmgd->work_list);
- err = -ENOMEM;
- goto out;
+ mutex_lock(&ifmgd->mtx);
+ if (ifmgd->associated) {
+ mutex_unlock(&ifmgd->mtx);
+ return -EALREADY;
}
+ mutex_unlock(&ifmgd->mtx);
- list_add(&wk->list, &ifmgd->work_list);
+ wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
+ if (!wk)
+ return -ENOMEM;
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
@@ -2474,8 +1911,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
- sdata->local->oper_channel = req->bss->channel;
- ieee80211_hw_config(sdata->local, 0);
if (req->ie && req->ie_len) {
memcpy(wk->ie, req->ie, req->ie_len);
@@ -2483,12 +1918,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
} else
wk->ie_len = 0;
+ wk->assoc.bss = req->bss;
+
+ memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
+
+ /* new association always uses requested smps mode */
+ if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
+ if (ifmgd->powersave)
+ ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
+ else
+ ifmgd->ap_smps = IEEE80211_SMPS_OFF;
+ } else
+ ifmgd->ap_smps = ifmgd->req_smps;
+
+ wk->assoc.smps = ifmgd->ap_smps;
+ /*
+ * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
+ * We still associate in non-HT mode (11a/b/g) if any one of these
+ * ciphers is configured as pairwise.
+ * We can set this to true for non-11n hardware, that'll be checked
+ * separately along with the peer capabilities.
+ */
+ wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
+ wk->assoc.capability = req->bss->capability;
+ wk->assoc.wmm_used = bss->wmm_used;
+ wk->assoc.supp_rates = bss->supp_rates;
+ wk->assoc.supp_rates_len = bss->supp_rates_len;
+ wk->assoc.ht_information_ie =
+ ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
+
+ if (bss->wmm_used && bss->uapsd_supported &&
+ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
+ wk->assoc.uapsd_used = true;
+ ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
+ } else {
+ wk->assoc.uapsd_used = false;
+ ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
+ }
+
+ ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+ memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
+ wk->assoc.ssid_len = ssid[1];
+
if (req->prev_bssid)
- memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN);
+ memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
- wk->state = IEEE80211_MGD_STATE_ASSOC;
- wk->tries = 0;
- wk->timeout = jiffies; /* run right away */
+ wk->type = IEEE80211_WORK_ASSOC;
+ wk->chan = req->bss->channel;
+ wk->sdata = sdata;
+ wk->done = ieee80211_assoc_done;
if (req->use_mfp) {
ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2503,69 +1981,65 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
else
ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
-
- err = 0;
-
- out:
- mutex_unlock(&ifmgd->mtx);
- return err;
+ ieee80211_add_work(wk);
+ return 0;
}
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_deauth_request *req,
void *cookie)
{
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_work *wk;
- const u8 *bssid = NULL;
- bool not_auth_yet = false;
+ struct ieee80211_work *wk;
+ const u8 *bssid = req->bss->bssid;
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
+ if (ifmgd->associated == req->bss) {
bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata, true);
- } else list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (&wk->bss->cbss == req->bss) {
- bssid = req->bss->bssid;
- if (wk->state == IEEE80211_MGD_STATE_PROBE)
- not_auth_yet = true;
- list_del(&wk->list);
- kfree(wk);
- break;
- }
- }
-
- /*
- * If somebody requests authentication and we haven't
- * sent out an auth frame yet there's no need to send
- * out a deauth frame either. If the state was PROBE,
- * then this is the case. If it's AUTH we have sent a
- * frame, and if it's IDLE we have completed the auth
- * process already.
- */
- if (not_auth_yet) {
+ ieee80211_set_disassoc(sdata);
mutex_unlock(&ifmgd->mtx);
- __cfg80211_auth_canceled(sdata->dev, bssid);
- return 0;
- }
+ } else {
+ bool not_auth_yet = false;
- /*
- * cfg80211 should catch this ... but it's racy since
- * we can receive a deauth frame, process it, hand it
- * to cfg80211 while that's in a locked section already
- * trying to tell us that the user wants to disconnect.
- */
- if (!bssid) {
mutex_unlock(&ifmgd->mtx);
- return -ENOLINK;
- }
- mutex_unlock(&ifmgd->mtx);
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+
+ if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
+ wk->type != IEEE80211_WORK_AUTH)
+ continue;
+
+ if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+ continue;
+
+ not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
+ list_del_rcu(&wk->list);
+ free_work(wk);
+ break;
+ }
+ mutex_unlock(&local->work_mtx);
+
+ /*
+ * If somebody requests authentication and we haven't
+ * sent out an auth frame yet there's no need to send
+ * out a deauth frame either. If the state was PROBE,
+ * then this is the case. If it's AUTH we have sent a
+ * frame, and if it's IDLE we have completed the auth
+ * process already.
+ */
+ if (not_auth_yet) {
+ __cfg80211_auth_canceled(sdata->dev, bssid);
+ return 0;
+ }
+ }
printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
- sdata->dev->name, bssid, req->reason_code);
+ sdata->name, bssid, req->reason_code);
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2590,15 +2064,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
* to cfg80211 while that's in a locked section already
* trying to tell us that the user wants to disconnect.
*/
- if (&ifmgd->associated->cbss != req->bss) {
+ if (ifmgd->associated != req->bss) {
mutex_unlock(&ifmgd->mtx);
return -ENOLINK;
}
printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
- sdata->dev->name, req->bss->bssid, req->reason_code);
+ sdata->name, req->bss->bssid, req->reason_code);
- ieee80211_set_disassoc(sdata, false);
+ ieee80211_set_disassoc(sdata);
mutex_unlock(&ifmgd->mtx);
@@ -2610,3 +2084,38 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return 0;
}
+
+int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ const u8 *buf, size_t len, u64 *cookie)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct sk_buff *skb;
+
+ /* Check that we are on the requested channel for transmission */
+ if ((chan != local->tmp_channel ||
+ channel_type != local->tmp_channel_type) &&
+ (chan != local->oper_channel ||
+ channel_type != local->oper_channel_type))
+ return -EBUSY;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
+ if (!skb)
+ return -ENOMEM;
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ memcpy(skb_put(skb, len), buf, len);
+
+ if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
+ IEEE80211_SKB_CB(skb)->flags |=
+ IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+ skb->dev = sdata->dev;
+ ieee80211_tx_skb(sdata, skb);
+
+ *cookie = (unsigned long) skb;
+ return 0;
+}
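In ieee80211_mgd_action() above, the cookie returned to the caller is simply the skb's address stored into the u64; since the frame is sent with IEEE80211_TX_CTL_REQ_TX_STATUS, the same value can later be handed back with the frame's TX status so that user space can match an acknowledgement (or the lack of one) to this particular transmit request.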
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 00000000000..c36b1911987
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,170 @@
+/*
+ * Off-channel operation helpers
+ *
+ * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2005, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+/*
+ * inform AP that we will go to sleep so that it will buffer the frames
+ * while we scan
+ */
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ local->offchannel_ps_enabled = false;
+
+ /* FIXME: what to do when local->pspolling is true? */
+
+ del_timer_sync(&local->dynamic_ps_timer);
+ cancel_work_sync(&local->dynamic_ps_enable_work);
+
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ local->offchannel_ps_enabled = true;
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+
+ if (!(local->offchannel_ps_enabled) ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ /*
+ * If power save was enabled, no need to send a nullfunc
+ * frame because AP knows that we are sleeping. But if the
+ * hardware is creating the nullfunc frame for power save
+ * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
+ * enabled) and power save was enabled, the firmware just
+ * sent a null frame with power save disabled. So we need
+ * to send a new nullfunc frame to inform the AP that we
+ * are again sleeping.
+ */
+ ieee80211_send_nullfunc(local, sdata, 1);
+}
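Put differently, the only case in which ieee80211_offchannel_ps_enable() does not transmit a nullfunc is when power save was already enabled and the stack builds the nullfunc frames itself (IEEE80211_HW_PS_NULLFUNC_STACK): the AP already believes the station is asleep and no firmware-generated frame has told it otherwise. In every other combination a fresh nullfunc with the power-management bit set is sent before leaving the channel.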
+
+/* inform AP that we are awake again, unless power save is enabled */
+static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!local->ps_sdata)
+ ieee80211_send_nullfunc(local, sdata, 0);
+ else if (local->offchannel_ps_enabled) {
+ /*
+ * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
+ * will send a nullfunc frame with the powersave bit set
+ * even though the AP already knows that we are sleeping.
+ * This could be avoided by sending a null frame with power
+ * save bit disabled before enabling the power save, but
+ * this doesn't gain anything.
+ *
+ * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
+ * to send a nullfunc frame because AP already knows that
+ * we are sleeping, let's just enable power save mode in
+ * hardware.
+ */
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+ /*
+ * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
+ * had been running before leaving the operating channel,
+ * restart the timer now and send a nullfunc frame to inform
+ * the AP that we are awake.
+ */
+ ieee80211_send_nullfunc(local, sdata, 0);
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+ }
+}
+
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ /* disable beaconing */
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ ieee80211_bss_info_change_notify(
+ sdata, BSS_CHANGED_BEACON_ENABLED);
+
+ /*
+ * only handle non-STA interfaces here, STA interfaces
+ * are handled in ieee80211_offchannel_stop_station(),
+ * e.g., from the background scan state machine.
+ *
+ * In addition, do not stop monitor interface to allow it to be
+ * used from user space controlled off-channel operations.
+ */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ netif_tx_stop_all_queues(sdata->dev);
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
+
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ /*
+ * notify the AP about us leaving the channel and stop all STA interfaces
+ */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ netif_tx_stop_all_queues(sdata->dev);
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata);
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
+
+void ieee80211_offchannel_return(struct ieee80211_local *local,
+ bool enable_beaconing)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ /* Tell AP we're back */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_disable(sdata);
+ }
+
+ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ netif_tx_wake_all_queues(sdata->dev);
+
+ /* re-enable beaconing */
+ if (enable_beaconing &&
+ (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+ ieee80211_bss_info_change_notify(
+ sdata, BSS_CHANGED_BEACON_ENABLED);
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988f..0e64484e861 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,9 +10,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
- unsigned long flags;
ieee80211_scan_cancel(local);
@@ -56,22 +54,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
rcu_read_unlock();
/* remove STAs */
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- if (local->ops->sta_notify) {
+ if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
- &sta->sta);
+ drv_sta_remove(local, sdata, &sta->sta);
}
mesh_plink_quiesce(sta);
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
/* remove all interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -93,17 +90,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
break;
}
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
/* disable beaconing */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = sdata->dev->dev_addr;
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
}
/* stop hardware - this must stop RX */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb9..0b299d236fa 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@ static const struct file_operations rcname_ops = {
};
#endif
-struct rate_control_ref *rate_control_alloc(const char *name,
+static struct rate_control_ref *rate_control_alloc(const char *name,
struct ieee80211_local *local)
{
struct dentry *debugfsdir = NULL;
@@ -207,6 +207,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
}
+static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
+{
+ u8 i;
+
+ if (basic_rates == 0)
+ return; /* assume basic rates unknown and accept rate */
+ if (*idx < 0)
+ return;
+ if (basic_rates & (1 << *idx))
+ return; /* selected rate is a basic rate */
+
+ for (i = *idx + 1; i <= max_rate_idx; i++) {
+ if (basic_rates & (1 << i)) {
+ *idx = i;
+ return;
+ }
+ }
+
+ /* could not find a basic rate; use original selection */
+}
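The helper above bumps the rate-control selection up to the lowest basic rate for broadcast/multicast frames sent by an AP. A minimal, self-contained sketch of the same bitmap walk (standalone C with hypothetical names, not kernel code):

    #include <stdio.h>

    /* Bump *idx to the lowest basic rate at or above the current selection. */
    static void pick_broadcast_rate(signed char *idx, unsigned int basic_rates,
                                    unsigned char max_rate_idx)
    {
            unsigned char i;

            if (basic_rates == 0 || *idx < 0)
                    return;                 /* nothing known, keep selection */
            if (basic_rates & (1u << *idx))
                    return;                 /* already a basic rate */

            for (i = *idx + 1; i <= max_rate_idx; i++) {
                    if (basic_rates & (1u << i)) {
                            *idx = i;
                            return;
                    }
            }
            /* no basic rate above the selection -- keep the original index */
    }

    int main(void)
    {
            signed char idx = 0;            /* lowest rate in the band */

            /* basic rates configured at bitrate-table indexes 4, 6 and 8 */
            pick_broadcast_rate(&idx, (1u << 4) | (1u << 6) | (1u << 8), 11);
            printf("chosen rate index: %d\n", idx); /* prints 4 */
            return 0;
    }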
+
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +239,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
+ if (!sta && txrc->ap)
+ rc_send_low_broadcast(&info->control.rates[0].idx,
+ txrc->bss_conf->basic_rates,
+ txrc->sband->n_bitrates);
return true;
}
return false;
}
EXPORT_SYMBOL(rate_control_send_low);
+static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+ int n_bitrates, u32 mask)
+{
+ int j;
+
+ /* See whether the selected rate or anything below it is allowed. */
+ for (j = rate->idx; j >= 0; j--) {
+ if (mask & (1 << j)) {
+ /* Okay, found a suitable rate. Use it. */
+ rate->idx = j;
+ return;
+ }
+ }
+
+ /* Try to find a higher rate that would be allowed */
+ for (j = rate->idx + 1; j < n_bitrates; j++) {
+ if (mask & (1 << j)) {
+ /* Okay, found a suitable rate. Use it. */
+ rate->idx = j;
+ return;
+ }
+ }
+
+ /*
+ * Uh.. No suitable rate exists. This should not really happen with
+ * sane TX rate mask configurations. However, should someone manage to
+ * configure supported rates and TX rate mask in incompatible way,
+ * allow the frame to be transmitted with whatever the rate control
+ * selected.
+ */
+}
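rate_idx_match_mask() above prefers the selected index or the nearest allowed rate below it, and only searches upwards if nothing below is allowed. A compact standalone illustration of that search order (hypothetical names, not kernel code):

    /* Clamp idx to a rate allowed by mask: first search down, then up. */
    static int match_rate_mask(int idx, int n_bitrates, unsigned int mask)
    {
            int j;

            for (j = idx; j >= 0; j--)
                    if (mask & (1u << j))
                            return j;       /* selected rate or next lower */
            for (j = idx + 1; j < n_bitrates; j++)
                    if (mask & (1u << j))
                            return j;       /* nothing below, take next higher */
            return idx;                     /* mask excludes everything: keep it */
    }

    /*
     * Example: with mask 0x0c (only indexes 2 and 3 allowed) and 12 bitrates,
     * match_rate_mask(5, 12, 0x0c) == 3 and match_rate_mask(1, 12, 0x0c) == 2.
     */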
+
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc)
@@ -233,6 +290,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *ista = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
int i;
+ u32 mask;
if (sta) {
ista = &sta->sta;
@@ -245,23 +303,34 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 1;
}
- if (sta && sdata->force_unicast_rateidx > -1) {
- info->control.rates[0].idx = sdata->force_unicast_rateidx;
- } else {
- ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
- info->flags |= IEEE80211_TX_INTFL_RCALGO;
- }
+ if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return;
+
+ ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
/*
- * try to enforce the maximum rate the user wanted
+ * Try to enforce the rateidx mask the user wanted. Skip this if the
+ * default mask (allow all rates) is used to save some processing for
+ * the common case.
*/
- if (sdata->max_ratectrl_rateidx > -1)
+ mask = sdata->rc_rateidx_mask[info->band];
+ if (mask != (1 << txrc->sband->n_bitrates) - 1) {
+ if (sta) {
+ /* Filter out rates that the STA does not support */
+ mask &= sta->sta.supp_rates[info->band];
+ }
+ /*
+ * Make sure the rate index selected for each TX rate is
+ * included in the configured mask and change the rate indexes
+ * if needed.
+ */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ /* Rate masking supports only legacy rates for now */
if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
continue;
- info->control.rates[i].idx =
- min_t(s8, info->control.rates[i].idx,
- sdata->max_ratectrl_rateidx);
+ rate_idx_match_mask(&info->control.rates[i],
+ txrc->sband->n_bitrates, mask);
+ }
}
BUG_ON(info->control.rates[0].idx < 0);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e2..b6108bca96d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -26,10 +26,6 @@ struct rate_control_ref {
struct kref kref;
};
-/* Get a reference to the rate control algorithm. If `name' is NULL, get the
- * first available algorithm. */
-struct rate_control_ref *rate_control_alloc(const char *name,
- struct ieee80211_local *local);
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc);
@@ -44,10 +40,11 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
- ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
+ if (!ref)
+ return;
+
+ ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
}
@@ -115,7 +112,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
#endif
}
-/* functions for rate control related to a device */
+/* Get a reference to the rate control algorithm. If `name' is NULL, get the
+ * first available algorithm. */
int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
const char *name);
void rate_control_deinitialize(struct ieee80211_local *local);
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 29bc4c51623..2652a374974 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -157,9 +157,7 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
/* In case nothing happened during the previous control interval, turn
* the sharpening factor on. */
- period = (HZ * pinfo->sampling_period + 500) / 1000;
- if (!period)
- period = 1;
+ period = msecs_to_jiffies(pinfo->sampling_period);
if (jiffies - spinfo->last_sample > 2 * period)
spinfo->sharp_cnt = pinfo->sharpen_duration;
@@ -252,9 +250,7 @@ static void rate_control_pid_tx_status(void *priv, struct ieee80211_supported_ba
}
/* Update PID controller state. */
- period = (HZ * pinfo->sampling_period + 500) / 1000;
- if (!period)
- period = 1;
+ period = msecs_to_jiffies(pinfo->sampling_period);
if (time_after(jiffies, spinfo->last_sample + period))
rate_control_pid_sample(pinfo, sband, sta, spinfo);
}
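As a sanity check on the conversion change above: with HZ = 100 and the default 125 ms sampling period, the removed expression gives (100 * 125 + 500) / 1000 = 13 jiffies, and msecs_to_jiffies(125) rounds up to the same 13 jiffies. The two only differ in rounding direction (round-to-nearest versus round-up), so results can deviate by at most one jiffy, and msecs_to_jiffies() never returns 0 for a non-zero period, which is what the removed "if (!period) period = 1;" guarded against.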
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 82a30c1bf3a..b5c48de81d8 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2,7 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
- * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -283,15 +283,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
skb->protocol = htons(ETH_P_802_2);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
continue;
if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
if (prev_dev) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
@@ -361,7 +361,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
* boundary. In the case of regular frames, this simply means aligning the
* payload to a four-byte boundary (because either the IP header is directly
* contained, or IV/RFC1042 headers that have a length divisible by four are
- * in front of it).
+ * in front of it). If the payload data is not properly aligned and the
+ * architecture doesn't support efficient unaligned operations, mac80211
+ * will align the data.
*
* With A-MSDU frames, however, the payload data address must yield two modulo
* four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +377,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
*/
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- int hdrlen;
-
-#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
- return;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ WARN_ONCE((unsigned long)rx->skb->data & 1,
+ "unaligned packet at 0x%p\n", rx->skb->data);
#endif
-
- if (WARN_ONCE((unsigned long)rx->skb->data & 1,
- "unaligned packet at 0x%p\n", rx->skb->data))
- return;
-
- if (!ieee80211_is_data_present(hdr->frame_control))
- return;
-
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (rx->flags & IEEE80211_RX_AMSDU)
- hdrlen += ETH_HLEN;
- WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
- "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
}
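A worked example for the alignment comment above: a QoS data frame has a 26-byte 802.11 header, so if the header starts at an address that is 2 modulo 4 the payload begins on a 4-byte boundary, and the 8-byte RFC 1042/SNAP header (or a 4- or 8-byte IV) in front of the IP header keeps it 4-byte aligned (2 + 26 + 8 = 36). For A-MSDU frames the payload must instead start at 2 modulo 4, because the 14-byte 802.3 subframe header then brings the contained IP header back onto a 4-byte boundary (2 + 14 = 16).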
@@ -476,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- char *dev_addr = rx->sdata->dev->dev_addr;
+ char *dev_addr = rx->sdata->vif.addr;
if (ieee80211_is_data(hdr->frame_control)) {
if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1008,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
atomic_inc(&sdata->bss->num_sta_ps);
set_sta_flags(sta, WLAN_STA_PS_STA);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
+ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
@@ -1038,13 +1025,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
return;
}
@@ -1124,6 +1111,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
+
+ /*
+ * If we receive a 4-addr nullfunc frame from a STA
+ * that was not moved to a 4-addr STA vlan yet, drop
+ * the frame to the monitor interface, to make sure
+ * that hostapd sees it
+ */
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !rx->sdata->u.vlan.sta)))
+ return RX_DROP_MONITOR;
/*
* Update counter and free packet here to avoid
* counting this as a dropped packet.
@@ -1156,7 +1155,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: RX reassembly removed oldest "
"fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
"addr1=%pM addr2=%pM\n",
- sdata->dev->name, idx,
+ sdata->name, idx,
jiffies - entry->first_frag_time, entry->seq,
entry->last_frag, hdr->addr1, hdr->addr2);
#endif
@@ -1398,6 +1397,21 @@ ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
ieee80211_is_data(fc) &&
(rx->key || rx->sdata->drop_unencrypted)))
return -EACCES;
+
+ return 0;
+}
+
+static int
+ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+ __le16 fc = hdr->frame_control;
+ int res;
+
+ res = ieee80211_drop_unencrypted(rx, fc);
+ if (unlikely(res))
+ return res;
+
if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key))
@@ -1424,7 +1438,6 @@ static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1449,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
(sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
return -1;
- return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
+ return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
}
/*
@@ -1453,7 +1466,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* of whether the frame was encrypted or not.
*/
if (ehdr->h_proto == htons(ETH_P_PAE) &&
- (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 ||
+ (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
return true;
@@ -1472,7 +1485,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct net_device *dev = sdata->dev;
- struct ieee80211_local *local = rx->local;
struct sk_buff *skb, *xmit_skb;
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
struct sta_info *dsta;
@@ -1495,8 +1507,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
printk(KERN_DEBUG "%s: failed to clone "
"multicast frame\n", dev->name);
} else {
- dsta = sta_info_get(local, skb->data);
- if (dsta && dsta->sdata->dev == dev) {
+ dsta = sta_info_get(sdata, skb->data);
+ if (dsta) {
/*
* The destination station is associated to
* this AP (in this VLAN), so send the frame
@@ -1512,7 +1524,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if (skb) {
int align __maybe_unused;
-#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* 'align' will only take the values 0 or 2 here
* since all frames are required to be aligned
@@ -1556,16 +1568,10 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
struct net_device *dev = rx->sdata->dev;
- struct ieee80211_local *local = rx->local;
- u16 ethertype;
- u8 *payload;
- struct sk_buff *skb = rx->skb, *frame = NULL;
+ struct sk_buff *skb = rx->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- const struct ethhdr *eth;
- int remaining, err;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
+ struct sk_buff_head frame_list;
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -1576,94 +1582,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(rx->flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- err = __ieee80211_data_to_8023(rx);
- if (unlikely(err))
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !rx->sdata->u.vlan.sta)
return RX_DROP_UNUSABLE;
- skb->dev = dev;
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* skip the wrapping header */
- eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
- if (!eth)
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ rx->sdata->u.vlan.sta) ||
+ (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+ rx->sdata->u.mgd.use_4addr)))
return RX_DROP_UNUSABLE;
- while (skb != frame) {
- u8 padding;
- __be16 len = eth->h_proto;
- unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
-
- remaining = skb->len;
- memcpy(dst, eth->h_dest, ETH_ALEN);
- memcpy(src, eth->h_source, ETH_ALEN);
-
- padding = ((4 - subframe_len) & 0x3);
- /* the last MSDU has no padding */
- if (subframe_len > remaining)
- return RX_DROP_UNUSABLE;
+ skb->dev = dev;
+ __skb_queue_head_init(&frame_list);
- skb_pull(skb, sizeof(struct ethhdr));
- /* if last subframe reuse skb */
- if (remaining <= subframe_len + padding)
- frame = skb;
- else {
- /*
- * Allocate and reserve two bytes more for payload
- * alignment since sizeof(struct ethhdr) is 14.
- */
- frame = dev_alloc_skb(
- ALIGN(local->hw.extra_tx_headroom, 4) +
- subframe_len + 2);
-
- if (frame == NULL)
- return RX_DROP_UNUSABLE;
-
- skb_reserve(frame,
- ALIGN(local->hw.extra_tx_headroom, 4) +
- sizeof(struct ethhdr) + 2);
- memcpy(skb_put(frame, ntohs(len)), skb->data,
- ntohs(len));
-
- eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
- padding);
- if (!eth) {
- dev_kfree_skb(frame);
- return RX_DROP_UNUSABLE;
- }
- }
+ ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
+ rx->sdata->vif.type,
+ rx->local->hw.extra_tx_headroom);
- skb_reset_network_header(frame);
- frame->dev = dev;
- frame->priority = skb->priority;
- rx->skb = frame;
-
- payload = frame->data;
- ethertype = (payload[6] << 8) | payload[7];
-
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload,
- bridge_tunnel_header) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel
- * encapsulation and replace EtherType */
- skb_pull(frame, 6);
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
- } else {
- memcpy(skb_push(frame, sizeof(__be16)),
- &len, sizeof(__be16));
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
- }
+ while (!skb_queue_empty(&frame_list)) {
+ rx->skb = __skb_dequeue(&frame_list);
if (!ieee80211_frame_allowed(rx, fc)) {
- if (skb == frame) /* last frame */
- return RX_DROP_UNUSABLE;
- dev_kfree_skb(frame);
+ dev_kfree_skb(rx->skb);
continue;
}
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rx->skb->len;
ieee80211_deliver_skb(rx);
}
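
The rewritten handler above no longer open-codes subframe parsing; it lets ieee80211_amsdu_to_8023s() fill a local sk_buff_head and then drains that queue, freeing any subframe that fails the permission check. A minimal sketch of the same drain pattern using only the generic skbuff queue helpers (deliver_one() is a hypothetical callback, not a mac80211 function):

    #include <linux/skbuff.h>

    /* Drain a queue of decapsulated frames; deliver_one() consumes the skb
     * when it returns true, rejected frames are freed here. */
    static void drain_frame_list(struct sk_buff_head *frames,
                                 bool (*deliver_one)(struct sk_buff *skb))
    {
            struct sk_buff *skb;

            while (!skb_queue_empty(frames)) {
                    skb = __skb_dequeue(frames);
                    if (!deliver_one(skb))
                            dev_kfree_skb(skb);     /* rejected subframe */
            }
    }
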
@@ -1721,7 +1667,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* Frame has reached destination. Don't forward */
if (!is_multicast_ether_addr(hdr->addr1) &&
- compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0)
+ compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
return RX_CONTINUE;
mesh_hdr->ttl--;
@@ -1738,10 +1684,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb && net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- sdata->dev->name);
+ sdata->name);
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1788,6 +1734,7 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_local *local = rx->local;
struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
__le16 fc = hdr->frame_control;
@@ -1819,6 +1766,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
dev->stats.rx_packets++;
dev->stats.rx_bytes += rx->skb->len;
+ if (ieee80211_is_data(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+ }
+
ieee80211_deliver_skb(rx);
return RX_QUEUED;
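
The added hunk only rearms local->dynamic_ps_timer on unicast data, so power save kicks back in after dynamic_ps_timeout of idle time. A small sketch of that rearm-on-activity idiom, using the setup_timer()/unsigned long callback API this tree uses elsewhere (idle_timer, idle_timeout and note_activity are made-up names):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list idle_timer;

    static void idle_timeout(unsigned long data)
    {
            /* fires only after 'timeout_ms' with no note_activity() calls:
             * this is where power save would be re-enabled */
    }

    /* call on every qualifying RX/TX event */
    static void note_activity(unsigned int timeout_ms)
    {
            /* mod_timer() (re)arms the timer whether or not it is pending */
            mod_timer(&idle_timer, jiffies + msecs_to_jiffies(timeout_ms));
    }

    static void idle_timer_init(void)
    {
            setup_timer(&idle_timer, idle_timeout, 0);
    }
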
@@ -1872,7 +1826,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
- if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
+ if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
/* Not to own unicast address */
return;
}
@@ -1896,7 +1850,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(resp, 0, 24);
memcpy(resp->da, mgmt->sa, ETH_ALEN);
- memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -1916,23 +1870,25 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+ struct sk_buff *nskb;
+ struct ieee80211_rx_status *status;
int len = rx->skb->len;
if (!ieee80211_is_action(mgmt->frame_control))
return RX_CONTINUE;
- if (!rx->sta)
- return RX_DROP_MONITOR;
+ /* drop too small frames */
+ if (len < IEEE80211_MIN_ACTION_SIZE)
+ return RX_DROP_UNUSABLE;
- if (!(rx->flags & IEEE80211_RX_RA_MATCH))
- return RX_DROP_MONITOR;
+ if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+ return RX_DROP_UNUSABLE;
- if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
- return RX_DROP_MONITOR;
+ if (!(rx->flags & IEEE80211_RX_RA_MATCH))
+ return RX_DROP_UNUSABLE;
- /* all categories we currently handle have action_code */
- if (len < IEEE80211_MIN_ACTION_SIZE + 1)
- return RX_DROP_MONITOR;
+ if (ieee80211_drop_unencrypted_mgmt(rx))
+ return RX_DROP_UNUSABLE;
switch (mgmt->u.action.category) {
case WLAN_CATEGORY_BACK:
@@ -1945,7 +1901,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_AP)
- return RX_DROP_MONITOR;
+ break;
+
+ /* verify action_code is present */
+ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+ break;
switch (mgmt->u.action.u.addba_req.action_code) {
case WLAN_ACTION_ADDBA_REQ:
@@ -1953,45 +1913,49 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
sizeof(mgmt->u.action.u.addba_req)))
return RX_DROP_MONITOR;
ieee80211_process_addba_request(local, rx->sta, mgmt, len);
- break;
+ goto handled;
case WLAN_ACTION_ADDBA_RESP:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.addba_resp)))
- return RX_DROP_MONITOR;
+ break;
ieee80211_process_addba_resp(local, rx->sta, mgmt, len);
- break;
+ goto handled;
case WLAN_ACTION_DELBA:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.delba)))
- return RX_DROP_MONITOR;
+ break;
ieee80211_process_delba(sdata, rx->sta, mgmt, len);
- break;
+ goto handled;
}
break;
case WLAN_CATEGORY_SPECTRUM_MGMT:
if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
- return RX_DROP_MONITOR;
+ break;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return RX_DROP_MONITOR;
+ break;
+
+ /* verify action_code is present */
+ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+ break;
switch (mgmt->u.action.u.measurement.action_code) {
case WLAN_ACTION_SPCT_MSR_REQ:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.measurement)))
- return RX_DROP_MONITOR;
+ break;
ieee80211_process_measurement_req(sdata, mgmt, len);
- break;
+ goto handled;
case WLAN_ACTION_SPCT_CHL_SWITCH:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.chan_switch)))
- return RX_DROP_MONITOR;
+ break;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return RX_DROP_MONITOR;
+ break;
if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
- return RX_DROP_MONITOR;
+ break;
return ieee80211_sta_rx_mgmt(sdata, rx->skb);
}
@@ -1999,30 +1963,64 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
case WLAN_CATEGORY_SA_QUERY:
if (len < (IEEE80211_MIN_ACTION_SIZE +
sizeof(mgmt->u.action.u.sa_query)))
- return RX_DROP_MONITOR;
+ break;
+
switch (mgmt->u.action.u.sa_query.action) {
case WLAN_ACTION_SA_QUERY_REQUEST:
if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return RX_DROP_MONITOR;
+ break;
ieee80211_process_sa_query_req(sdata, mgmt, len);
- break;
- case WLAN_ACTION_SA_QUERY_RESPONSE:
- /*
- * SA Query response is currently only used in AP mode
- * and it is processed in user space.
- */
- return RX_CONTINUE;
+ goto handled;
}
break;
- default:
- /* do not process rejected action frames */
- if (mgmt->u.action.category & 0x80)
- return RX_DROP_MONITOR;
+ }
- return RX_CONTINUE;
+ /*
+ * For AP mode, hostapd is responsible for handling any action
+ * frames that we didn't handle, including returning unknown
+ * ones. For all other modes we will return them to the sender,
+ * setting the 0x80 bit in the action category, as required by
+ * 802.11-2007 7.3.1.11.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ return RX_DROP_MONITOR;
+
+ /*
+ * Getting here means the kernel doesn't know how to handle
+ * it, but maybe userspace does ... include returned frames
+ * so userspace can register for those to know whether the ones
+ * it transmitted were processed or returned.
+ */
+ status = IEEE80211_SKB_RXCB(rx->skb);
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ cfg80211_rx_action(rx->sdata->dev, status->freq,
+ rx->skb->data, rx->skb->len,
+ GFP_ATOMIC))
+ goto handled;
+
+ /* do not return rejected action frames */
+ if (mgmt->u.action.category & 0x80)
+ return RX_DROP_UNUSABLE;
+
+ nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
+ GFP_ATOMIC);
+ if (nskb) {
+ struct ieee80211_mgmt *mgmt = (void *)nskb->data;
+
+ mgmt->u.action.category |= 0x80;
+ memcpy(mgmt->da, mgmt->sa, ETH_ALEN);
+ memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
+
+ memset(nskb->cb, 0, sizeof(nskb->cb));
+
+ ieee80211_tx_skb(rx->sdata, nskb);
}
- rx->sta->rx_packets++;
+ handled:
+ if (rx->sta)
+ rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
return RX_QUEUED;
}
@@ -2031,13 +2029,17 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+ ieee80211_rx_result rxs;
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
- if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
- return RX_DROP_MONITOR;
+ if (ieee80211_drop_unencrypted_mgmt(rx))
+ return RX_DROP_UNUSABLE;
+
+ rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
+ if (rxs != RX_CONTINUE)
+ return rxs;
if (ieee80211_vif_is_mesh(&sdata->vif))
return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
@@ -2143,7 +2145,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
skb->protocol = htons(ETH_P_802_2);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2280,7 +2282,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
if (!bssid && !sdata->u.mgd.use_4addr)
return 0;
if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
+ compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2297,7 +2299,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2308,13 +2310,13 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
rate_idx = 0; /* TODO: HT rates */
else
rate_idx = status->rate_idx;
- rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
- BIT(rate_idx));
+ rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
+ hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
}
break;
case NL80211_IFTYPE_MESH_POINT:
if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2325,11 +2327,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
if (!bssid) {
- if (compare_ether_addr(sdata->dev->dev_addr,
+ if (compare_ether_addr(sdata->vif.addr,
hdr->addr1))
return 0;
} else if (!ieee80211_bssid_match(bssid,
- sdata->dev->dev_addr)) {
+ sdata->vif.addr)) {
if (!(rx->flags & IEEE80211_RX_IN_SCAN))
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2368,6 +2370,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
int prepares;
struct ieee80211_sub_if_data *prev = NULL;
struct sk_buff *skb_new;
+ struct sta_info *sta, *tmp;
+ bool found_sta = false;
hdr = (struct ieee80211_hdr *)skb->data;
memset(&rx, 0, sizeof(rx));
@@ -2384,68 +2388,87 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
- rx.sta = sta_info_get(local, hdr->addr2);
- if (rx.sta)
- rx.sdata = rx.sta->sdata;
-
- if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
- if (prepares) {
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr, &rx);
- } else
- prev = rx.sdata;
+ if (ieee80211_is_data(hdr->frame_control)) {
+ for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ rx.sta = sta;
+ found_sta = true;
+ rx.sdata = sta->sdata;
+
+ rx.flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
+ if (prepares) {
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr, &rx);
+ } else
+ prev = rx.sdata;
+ }
}
- } else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
+ }
+ if (!found_sta) {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(sdata, &rx, hdr);
+ /*
+ * frame is destined for this interface, but if it's
+ * not also for the previous one we handle that after
+ * the loop to avoid copying the SKB once too much
+ */
- if (!prepares)
- continue;
+ if (!prev) {
+ prev = sdata;
+ continue;
+ }
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- rx.sdata = sdata;
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr, &rx);
- continue;
- }
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
- /*
- * frame is destined for this interface, but if it's not
- * also for the previous one we handle that after the
- * loop to avoid copying the SKB once too much
- */
+ rx.flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(prev, &rx, hdr);
+
+ if (!prepares)
+ goto next;
- if (!prev) {
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ rx.sdata = prev;
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr,
+ &rx);
+ goto next;
+ }
+
+ /*
+ * frame was destined for the previous interface
+ * so invoke RX handlers for it
+ */
+
+ skb_new = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_new) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: failed to copy "
+ "multicast frame for %s\n",
+ wiphy_name(local->hw.wiphy),
+ prev->name);
+ goto next;
+ }
+ ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
+next:
prev = sdata;
- continue;
}
- /*
- * frame was destined for the previous interface
- * so invoke RX handlers for it
- */
+ if (prev) {
+ rx.sta = sta_info_get_bss(prev, hdr->addr2);
- skb_new = skb_copy(skb, GFP_ATOMIC);
- if (!skb_new) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: failed to copy "
- "multicast frame for %s\n",
- wiphy_name(local->hw.wiphy),
- prev->dev->name);
- continue;
+ rx.flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(prev, &rx, hdr);
+
+ if (!prepares)
+ prev = NULL;
}
- ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
- prev = sdata;
}
if (prev)
ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
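
The rewritten demultiplexing loop above keeps a prev pointer so that every interface except the last match is handed an skb_copy() and the final match consumes the original, saving one copy per frame. Stripped to its essentials, the pattern looks roughly like this (struct iface, wants_frame() and handle() are placeholders, not mac80211 types):

    #include <linux/skbuff.h>
    #include <linux/list.h>
    #include <linux/gfp.h>

    struct iface { struct list_head list; /* ... */ };

    static void demux(struct list_head *iface_list, struct sk_buff *skb,
                      bool (*wants_frame)(struct iface *),
                      void (*handle)(struct iface *, struct sk_buff *))
    {
            struct iface *iface, *prev = NULL;
            struct sk_buff *copy;

            list_for_each_entry(iface, iface_list, list) {
                    if (!wants_frame(iface))
                            continue;
                    if (prev) {
                            /* an earlier match also wants it: give it a copy */
                            copy = skb_copy(skb, GFP_ATOMIC);
                            if (copy)
                                    handle(prev, copy);
                    }
                    prev = iface;
            }

            if (prev)
                    handle(prev, skb);      /* last match gets the original */
            else
                    dev_kfree_skb(skb);     /* nobody wanted it */
    }
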
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f934c9620b7..b822dce9786 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,7 +12,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/wireless.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
@@ -29,16 +28,19 @@ struct ieee80211_bss *
ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
u8 *ssid, u8 ssid_len)
{
- return (void *)cfg80211_get_bss(local->hw.wiphy,
- ieee80211_get_channel(local->hw.wiphy,
- freq),
- bssid, ssid, ssid_len,
- 0, 0);
+ struct cfg80211_bss *cbss;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy,
+ ieee80211_get_channel(local->hw.wiphy, freq),
+ bssid, ssid, ssid_len, 0, 0);
+ if (!cbss)
+ return NULL;
+ return (void *)cbss->priv;
}
static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
{
- struct ieee80211_bss *bss = (void *)cbss;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
kfree(bss_mesh_id(bss));
kfree(bss_mesh_cfg(bss));
@@ -47,7 +49,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss)
{
- cfg80211_put_bss((struct cfg80211_bss *)bss);
+ if (!bss)
+ return;
+ cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
+}
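
ieee80211_bss now lives in the private area cfg80211 allocates behind each cfg80211_bss, so the two conversions are a cast of the priv[] member one way and container_of() on it the other way. A toy illustration of that embedding (struct outer and struct inner are invented here):

    #include <linux/kernel.h>       /* container_of() */
    #include <linux/types.h>

    struct outer {
            int refcount;
            /* per-user private data is placed directly after the public part */
            u8 priv[0] __attribute__((aligned(sizeof(void *))));
    };

    struct inner {
            int state;
    };

    static struct inner *outer_to_inner(struct outer *o)
    {
            return (struct inner *)o->priv;
    }

    static struct outer *inner_to_outer(struct inner *i)
    {
            /* walk back from the priv[] member to its containing structure */
            return container_of((void *)i, struct outer, priv);
    }
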
+
+static bool is_uapsd_supported(struct ieee802_11_elems *elems)
+{
+ u8 qos_info;
+
+ if (elems->wmm_info && elems->wmm_info_len == 7
+ && elems->wmm_info[5] == 1)
+ qos_info = elems->wmm_info[6];
+ else if (elems->wmm_param && elems->wmm_param_len == 24
+ && elems->wmm_param[5] == 1)
+ qos_info = elems->wmm_param[6];
+ else
+ /* no valid wmm information or parameter element found */
+ return false;
+
+ return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
}
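
is_uapsd_supported() above leans on the layout of the WMM vendor element: three OUI bytes, OUI type, OUI subtype, then a version byte at index 5 and the QoS Info byte at index 6, whose top bit is what IEEE80211_WMM_IE_AP_QOSINFO_UAPSD selects. A standalone restatement of the check, with those offsets spelled out as assumptions:

    #include <linux/types.h>

    /* data points at the WMM information/parameter element body, len is its length */
    static bool wmm_ie_advertises_uapsd(const u8 *data, size_t len)
    {
            /* OUI(3) + OUI type(1) + OUI subtype(1) + version(1) + QoS Info(1) */
            if (len < 7)
                    return false;
            if (data[5] != 1)               /* only WMM version 1 is understood */
                    return false;
            return data[6] & 0x80;          /* bit 7 of QoS Info: U-APSD supported */
    }
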
struct ieee80211_bss *
@@ -59,6 +80,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee80211_channel *channel,
bool beacon)
{
+ struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen;
s32 signal = 0;
@@ -68,13 +90,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
signal = (rx_status->signal * 100) / local->hw.max_signal;
- bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel,
- mgmt, len, signal, GFP_ATOMIC);
+ cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
+ mgmt, len, signal, GFP_ATOMIC);
- if (!bss)
+ if (!cbss)
return NULL;
- bss->cbss.free_priv = ieee80211_rx_bss_free;
+ cbss->free_priv = ieee80211_rx_bss_free;
+ bss = (void *)cbss->priv;
/* save the ERP value so that it is available at association time */
if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -88,10 +111,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
bss->dtim_period = tim_ie->dtim_period;
}
- /* set default value for buggy AP/no TIM element */
- if (bss->dtim_period == 0)
- bss->dtim_period = 1;
-
bss->supp_rates_len = 0;
if (elems->supp_rates) {
clen = IEEE80211_MAX_SUPP_RATES - bss->supp_rates_len;
@@ -111,6 +130,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
}
bss->wmm_used = elems->wmm_param || elems->wmm_info;
+ bss->uapsd_supported = is_uapsd_supported(elems);
if (!beacon)
bss->last_probe_resp = jiffies;
@@ -147,7 +167,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
presp = ieee80211_is_probe_resp(fc);
if (presp) {
/* ignore ProbeResp to foreign address */
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return RX_DROP_MONITOR;
presp = true;
@@ -220,82 +240,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
return true;
}
-/*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
- */
-static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
-
- local->scan_ps_enabled = false;
-
- /* FIXME: what to do when local->pspolling is true? */
-
- del_timer_sync(&local->dynamic_ps_timer);
- cancel_work_sync(&local->dynamic_ps_enable_work);
-
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->scan_ps_enabled = true;
- local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
-
- if (!(local->scan_ps_enabled) ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
- /*
- * If power save was enabled, no need to send a nullfunc
- * frame because AP knows that we are sleeping. But if the
- * hardware is creating the nullfunc frame for power save
- * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
- * enabled) and power save was enabled, the firmware just
- * sent a null frame with power save disabled. So we need
- * to send a new nullfunc frame to inform the AP that we
- * are again sleeping.
- */
- ieee80211_send_nullfunc(local, sdata, 1);
-}
-
-/* inform AP that we are awake again, unless power save is enabled */
-static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
-
- if (!local->ps_sdata)
- ieee80211_send_nullfunc(local, sdata, 0);
- else if (local->scan_ps_enabled) {
- /*
- * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
- * will send a nullfunc frame with the powersave bit set
- * even though the AP already knows that we are sleeping.
- * This could be avoided by sending a null frame with power
- * save bit disabled before enabling the power save, but
- * this doesn't gain anything.
- *
- * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
- * to send a nullfunc frame because AP already knows that
- * we are sleeping, let's just enable power save mode in
- * hardware.
- */
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- } else if (local->hw.conf.dynamic_ps_timeout > 0) {
- /*
- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
- * had been running before leaving the operating channel,
- * restart the timer now and send a nullfunc frame to inform
- * the AP that we are awake.
- */
- ieee80211_send_nullfunc(local, sdata, 0);
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
- }
-}
-
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
bool was_hw_scan;
mutex_lock(&local->scan_mtx);
@@ -344,41 +291,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
drv_sw_scan_complete(local);
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.associated) {
- ieee80211_scan_ps_disable(sdata);
- netif_tx_wake_all_queues(sdata->dev);
- }
- } else
- netif_tx_wake_all_queues(sdata->dev);
-
- /* re-enable beaconing */
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_return(local, true);
done:
ieee80211_recalc_idle(local);
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
ieee80211_mesh_notify_scan_completed(local);
+ ieee80211_queue_work(&local->hw, &local->work_work);
}
EXPORT_SYMBOL(ieee80211_scan_completed);
static int ieee80211_start_sw_scan(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
-
/*
* Hardware/driver doesn't support hw_scan, so use software
* scanning instead. First send a nullfunc frame with power save
@@ -394,33 +319,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* disable beaconing */
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
-
- /*
- * only handle non-STA interfaces here, STA interfaces
- * are handled in the scan state machine
- */
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- netif_tx_stop_all_queues(sdata->dev);
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_stop_beaconing(local);
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
+ drv_flush(local, false);
+
ieee80211_configure_filter(local);
- /* TODO: start scan as soon as all nullfunc frames are ACKed */
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
IEEE80211_CHANNEL_TIME);
@@ -433,12 +340,18 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int rc;
if (local->scan_req)
return -EBUSY;
+ if (!list_empty(&local->work_list)) {
+ /* wait for the work to finish/time out */
+ local->scan_req = req;
+ local->scan_sdata = sdata;
+ return 0;
+ }
+
if (local->ops->hw_scan) {
u8 *ies;
@@ -458,32 +371,33 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_req->ie = ies;
local->hw_scan_band = 0;
+
+ /*
+ * After allocating local->hw_scan_req, we must
+ * go through until ieee80211_prep_hw_scan(), so
+ * any check that might change state and then leave
+ * this function early must not come after this
+ * allocation.
+ */
}
local->scan_req = req;
local->scan_sdata = sdata;
- if (req != local->int_scan_req &&
- sdata->vif.type == NL80211_IFTYPE_STATION &&
- !list_empty(&ifmgd->work_list)) {
- /* actually wait for the work it's doing to finish/time out */
- set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
- return 0;
- }
-
if (local->ops->hw_scan)
__set_bit(SCAN_HW_SCANNING, &local->scanning);
else
__set_bit(SCAN_SW_SCANNING, &local->scanning);
+
/*
* Kicking off the scan need not be protected,
* only the scan variable stuff, since now
* local->scan_req is assigned and other callers
* will abort their scan attempts.
*
- * This avoids getting a scan_mtx -> iflist_mtx
- * dependency, so that the scan completed calls
- * have more locking freedom.
+ * This avoids too many locking dependencies
+ * so that the scan completed calls have more
+ * locking freedom.
*/
ieee80211_recalc_idle(local);
@@ -526,7 +440,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
/* check if at least one STA interface is associated */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -564,56 +478,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- struct ieee80211_sub_if_data *sdata;
+ ieee80211_offchannel_stop_station(local);
+
+ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
/*
- * notify the AP about us leaving the channel and stop all STA interfaces
+ * What if the nullfunc frames didn't arrive?
*/
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- netif_tx_stop_all_queues(sdata->dev);
- if (sdata->u.mgd.associated)
- ieee80211_scan_ps_enable(sdata);
- }
- }
- mutex_unlock(&local->iflist_mtx);
-
- __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ drv_flush(local, false);
+ if (local->ops->flush)
+ *next_delay = 0;
+ else
+ *next_delay = HZ / 10;
/* advance to the next channel to be scanned */
- *next_delay = HZ / 10;
local->next_scan_state = SCAN_SET_CHANNEL;
}
static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
-
/* switch back to the operating channel */
local->scan_channel = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
- * notify the AP about us being back and restart all STA interfaces
+ * Only re-enable station mode interface now; beaconing will be
+ * re-enabled once the full scan has been completed.
*/
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.associated)
- ieee80211_scan_ps_disable(sdata);
- netif_tx_wake_all_queues(sdata->dev);
- }
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_return(local, false);
__clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
@@ -727,7 +620,7 @@ void ieee80211_scan_work(struct work_struct *work)
/*
* Avoid re-scheduling when the sdata is going away.
*/
- if (!netif_running(sdata->dev)) {
+ if (!ieee80211_sdata_running(sdata)) {
ieee80211_scan_completed(&local->hw, true);
return;
}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf..7733f66ee2c 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer for "
- "measurement report frame\n", sdata->dev->name);
+ "measurement report frame\n", sdata->name);
return;
}
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
memset(msr_report, 0, 24);
memcpy(msr_report->da, da, ETH_ALEN);
- memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
memcpy(msr_report->bssid, bssid, ETH_ALEN);
msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24b..211c475f73c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -32,49 +32,33 @@
* for faster lookup and a list for iteration. They are managed using
* RCU, i.e. access to the list and hash table is protected by RCU.
*
- * Upon allocating a STA info structure with sta_info_alloc(), the caller owns
- * that structure. It must then either destroy it using sta_info_destroy()
- * (which is pretty useless) or insert it into the hash table using
- * sta_info_insert() which demotes the reference from ownership to a regular
- * RCU-protected reference; if the function is called without protection by an
- * RCU critical section the reference is instantly invalidated. Note that the
- * caller may not do much with the STA info before inserting it, in particular,
- * it may not start any mesh peer link management or add encryption keys.
+ * Upon allocating a STA info structure with sta_info_alloc(), the caller
+ * owns that structure. It must then insert it into the hash table using
+ * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
+ * case (which acquires an rcu read section but must not be called from
+ * within one) will the pointer still be valid after the call. Note that
+ * the caller may not do much with the STA info before inserting it, in
+ * particular, it may not start any mesh peer link management or add
+ * encryption keys.
*
* When the insertion fails (sta_info_insert() returns non-zero), the
* structure will have been freed by sta_info_insert()!
*
- * sta entries are added by mac80211 when you establish a link with a
+ * Station entries are added by mac80211 when you establish a link with a
* peer. This means different things for the different types of interfaces
* we support. For a regular station this means we add the AP sta when we
* receive an association response from the AP. For IBSS this occurs when
- * we receive a probe response or a beacon from target IBSS network. For
- * WDS we add the sta for the peer imediately upon device open. When using
- * AP mode we add stations for each respective station upon request from
- * userspace through nl80211.
+ * we get to know about a peer on the same IBSS. For WDS we add the sta for
+ * the peer immediately upon device open. When using AP mode we add stations
+ * for each respective station upon request from userspace through nl80211.
*
- * Because there are debugfs entries for each station, and adding those
- * must be able to sleep, it is also possible to "pin" a station entry,
- * that means it can be removed from the hash table but not be freed.
- * See the comment in __sta_info_unlink() for more information, this is
- * an internal capability only.
+ * In order to remove a STA info structure, various sta_info_destroy_*()
+ * calls are available.
*
- * In order to remove a STA info structure, the caller needs to first
- * unlink it (sta_info_unlink()) from the list and hash tables and
- * then destroy it; sta_info_destroy() will wait for an RCU grace period
- * to elapse before actually freeing it. Due to the pinning and the
- * possibility of multiple callers trying to remove the same STA info at
- * the same time, sta_info_unlink() can clear the STA info pointer it is
- * passed to indicate that the STA info is owned by somebody else now.
- *
- * If sta_info_unlink() did not clear the pointer then the caller owns
- * the STA info structure now and is responsible of destroying it with
- * a call to sta_info_destroy().
- *
- * In all other cases, there is no concept of ownership on a STA entry,
- * each structure is owned by the global hash table/list until it is
- * removed. All users of the structure need to be RCU protected so that
- * the structure won't be freed before they are done using it.
+ * There is no concept of ownership on a STA entry, each structure is
+ * owned by the global hash table/list until it is removed. All users of
+ * the structure need to be RCU protected so that the structure won't be
+ * freed before they are done using it.
*/
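
Read together, the new comment describes a simple lifecycle: allocate, configure, insert, then rely on RCU lookups only and remove through the sta_info_destroy_*() helpers. A hedged sketch of the common sleeping path, assuming mac80211's internal headers; note that a failed sta_info_insert() has already freed the entry:

    /* sketch only: sdata and addr are assumed to come from the caller */
    static int add_peer_sta(struct ieee80211_sub_if_data *sdata, u8 *addr)
    {
            struct sta_info *sta;
            int err;

            sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
            if (!sta)
                    return -ENOMEM;

            /* configure rates/flags here; no keys or mesh plinks yet */

            err = sta_info_insert(sta);
            if (err)
                    return err;     /* the structure has already been freed */

            /* from here on, access only via RCU-protected lookups */
            return 0;
    }
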
/* Caller must hold local->sta_lock */
@@ -103,13 +87,37 @@ static int sta_info_hash_del(struct ieee80211_local *local,
}
/* protected by RCU */
-struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
+struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
{
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
while (sta) {
- if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ if (sta->sdata == sdata &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference(sta->hnext);
+ }
+ return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans
+ */
+struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ while (sta) {
+ if ((sta->sdata == sdata ||
+ sta->sdata->bss == sdata->bss) &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference(sta->hnext);
}
@@ -161,101 +169,6 @@ static void __sta_info_free(struct ieee80211_local *local,
kfree(sta);
}
-void sta_info_destroy(struct sta_info *sta)
-{
- struct ieee80211_local *local;
- struct sk_buff *skb;
- int i;
-
- might_sleep();
-
- if (!sta)
- return;
-
- local = sta->local;
-
- cancel_work_sync(&sta->drv_unblock_wk);
-
- rate_control_remove_sta_debugfs(sta);
- ieee80211_sta_debugfs_remove(sta);
-
-#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- mesh_plink_deactivate(sta);
-#endif
-
- /*
- * We have only unlinked the key, and actually destroying it
- * may mean it is removed from hardware which requires that
- * the key->sta pointer is still valid, so flush the key todo
- * list here.
- *
- * ieee80211_key_todo() will synchronize_rcu() so after this
- * nothing can reference this sta struct any more.
- */
- ieee80211_key_todo();
-
-#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- del_timer_sync(&sta->plink_timer);
-#endif
-
- while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
- local->total_ps_buffered--;
- dev_kfree_skb_any(skb);
- }
-
- while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
- dev_kfree_skb_any(skb);
-
- for (i = 0; i < STA_TID_NUM; i++) {
- struct tid_ampdu_rx *tid_rx;
- struct tid_ampdu_tx *tid_tx;
-
- spin_lock_bh(&sta->lock);
- tid_rx = sta->ampdu_mlme.tid_rx[i];
- /* Make sure timer won't free the tid_rx struct, see below */
- if (tid_rx)
- tid_rx->shutdown = true;
-
- spin_unlock_bh(&sta->lock);
-
- /*
- * Outside spinlock - shutdown is true now so that the timer
- * won't free tid_rx, we have to do that now. Can't let the
- * timer do it because we have to sync the timer outside the
- * lock that it takes itself.
- */
- if (tid_rx) {
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
- }
-
- /*
- * No need to do such complications for TX agg sessions, the
- * path leading to freeing the tid_tx struct goes via a call
- * from the driver, and thus needs to look up the sta struct
- * again, which cannot be found when we get here. Hence, we
- * just need to delete the timer and free the aggregation
- * info; we won't be telling the peer about it then but that
- * doesn't matter if we're not talking to it again anyway.
- */
- tid_tx = sta->ampdu_mlme.tid_tx[i];
- if (tid_tx) {
- del_timer_sync(&tid_tx->addba_resp_timer);
- /*
- * STA removed while aggregation session being
- * started? Bit odd, but purge frames anyway.
- */
- skb_queue_purge(&tid_tx->pending);
- kfree(tid_tx);
- }
- }
-
- __sta_info_free(local, sta);
-}
-
-
/* Caller must hold local->sta_lock */
static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
@@ -352,7 +265,93 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-int sta_info_insert(struct sta_info *sta)
+static int sta_info_finish_insert(struct sta_info *sta, bool async)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct station_info sinfo;
+ unsigned long flags;
+ int err = 0;
+
+ WARN_ON(!mutex_is_locked(&local->sta_mtx));
+
+ /* notify driver */
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (!async)
+ return err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
+ " - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else {
+ sta->uploaded = true;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (async)
+ printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif
+ }
+
+ sdata = sta->sdata;
+
+ if (!async) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+
+ /* make the station visible */
+ spin_lock_irqsave(&local->sta_lock, flags);
+ sta_info_hash_add(local, sta);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ }
+
+ list_add(&sta->list, &local->sta_list);
+
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+
+
+ return 0;
+}
+
+static void sta_info_finish_pending(struct ieee80211_local *local)
+{
+ struct sta_info *sta;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ while (!list_empty(&local->sta_pending_list)) {
+ sta = list_first_entry(&local->sta_pending_list,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+
+ sta_info_finish_insert(sta, true);
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ }
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+}
+
+static void sta_info_finish_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, sta_finish_work);
+
+ mutex_lock(&local->sta_mtx);
+ sta_info_finish_pending(local);
+ mutex_unlock(&local->sta_mtx);
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -364,38 +363,89 @@ int sta_info_insert(struct sta_info *sta)
* something inserts a STA (on one CPU) without holding the RTNL
* and another CPU turns off the net device.
*/
- if (unlikely(!netif_running(sdata->dev))) {
+ if (unlikely(!ieee80211_sdata_running(sdata))) {
err = -ENETDOWN;
+ rcu_read_lock();
goto out_free;
}
- if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 ||
+ if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
is_multicast_ether_addr(sta->sta.addr))) {
err = -EINVAL;
+ rcu_read_lock();
goto out_free;
}
+ /*
+ * In ad-hoc mode, we sometimes need to insert stations
+ * from tasklet context from the RX path. To avoid races,
+ * always do so in that case -- see the comment below.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ spin_lock_irqsave(&local->sta_lock, flags);
+ /* check if STA exists already */
+ if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
+ err = -EEXIST;
+ goto out_free;
+ }
+
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+ sta_info_hash_add(local, sta);
+
+ list_add_tail(&sta->list, &local->sta_pending_list);
+
+ rcu_read_lock();
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+
+ ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+
+ return 0;
+ }
+
+ /*
+ * On first glance, this will look racy, because the code
+ * below this point, which inserts a station with sleeping,
+ * unlocks the sta_lock between checking existence in the
+ * hash table and inserting into it.
+ *
+ * However, it is not racy against itself because it keeps
+ * the mutex locked. It still seems to race against the
+ * above code that atomically inserts the station... That,
+ * however, is not true because the above code can only
+ * be invoked for IBSS interfaces, and the below code will
+ * not be -- and the two do not race against each other as
+ * the hash table also keys off the interface.
+ */
+
+ might_sleep();
+
+ mutex_lock(&local->sta_mtx);
+
spin_lock_irqsave(&local->sta_lock, flags);
/* check if STA exists already */
- if (sta_info_get(local, sta->sta.addr)) {
+ if (sta_info_get_bss(sdata, sta->sta.addr)) {
spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
err = -EEXIST;
goto out_free;
}
- list_add(&sta->list, &local->sta_list);
- local->sta_generation++;
- local->num_sta++;
- sta_info_hash_add(local, sta);
- /* notify driver */
- if (local->ops->sta_notify) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta);
- sdata = sta->sdata;
+ err = sta_info_finish_insert(sta, false);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ goto out_free;
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -403,18 +453,9 @@ int sta_info_insert(struct sta_info *sta)
wiphy_name(local->hw.wiphy), sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- /*
- * Debugfs entry adding might sleep, so schedule process
- * context task for adding entry for STAs that do not yet
- * have one.
- * NOTE: due to auto-freeing semantics this may only be done
- * if the insertion is successful!
- */
- schedule_work(&local->sta_debugfs_add);
-#endif
+ /* move reference to rcu-protected */
+ rcu_read_lock();
+ mutex_unlock(&local->sta_mtx);
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
@@ -426,6 +467,15 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
+int sta_info_insert(struct sta_info *sta)
+{
+ int err = sta_info_insert_rcu(sta);
+
+ rcu_read_unlock();
+
+ return err;
+}
+
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -494,108 +544,6 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
spin_unlock_irqrestore(&sta->local->sta_lock, flags);
}
-static void __sta_info_unlink(struct sta_info **sta)
-{
- struct ieee80211_local *local = (*sta)->local;
- struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
- /*
- * pull caller's reference if we're already gone.
- */
- if (sta_info_hash_del(local, *sta)) {
- *sta = NULL;
- return;
- }
-
- if ((*sta)->key) {
- ieee80211_key_free((*sta)->key);
- WARN_ON((*sta)->key);
- }
-
- list_del(&(*sta)->list);
- (*sta)->dead = true;
-
- if (test_and_clear_sta_flags(*sta,
- WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
- BUG_ON(!sdata->bss);
-
- atomic_dec(&sdata->bss->num_sta_ps);
- __sta_info_clear_tim_bit(sdata->bss, *sta);
- }
-
- local->num_sta--;
- local->sta_generation++;
-
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
-
- if (local->ops->sta_notify) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
-
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
- &(*sta)->sta);
- sdata = (*sta)->sdata;
- }
-
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
- mesh_accept_plinks_update(sdata);
-#ifdef CONFIG_MAC80211_MESH
- del_timer(&(*sta)->plink_timer);
-#endif
- }
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Removed STA %pM\n",
- wiphy_name(local->hw.wiphy), (*sta)->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-
- /*
- * Finally, pull caller's reference if the STA is pinned by the
- * task that is adding the debugfs entries. In that case, we
- * leave the STA "to be freed".
- *
- * The rules are not trivial, but not too complex either:
- * (1) pin_status is only modified under the sta_lock
- * (2) STAs may only be pinned under the RTNL so that
- * sta_info_flush() is guaranteed to actually destroy
- * all STAs that are active for a given interface, this
- * is required for correctness because otherwise we
- * could notify a driver that an interface is going
- * away and only after that (!) notify it about a STA
- * on that interface going away.
- * (3) sta_info_debugfs_add_work() will set the status
- * to PINNED when it found an item that needs a new
- * debugfs directory created. In that case, that item
- * must not be freed although all *RCU* users are done
- * with it. Hence, we tell the caller of _unlink()
- * that the item is already gone (as can happen when
- * two tasks try to unlink/destroy at the same time)
- * (4) We set the pin_status to DESTROY here when we
- * find such an item.
- * (5) sta_info_debugfs_add_work() will reset the pin_status
- * from PINNED to NORMAL when it is done with the item,
- * but will check for DESTROY before resetting it in
- * which case it will free the item.
- */
- if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
- (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
- *sta = NULL;
- return;
- }
-}
-
-void sta_info_unlink(struct sta_info **sta)
-{
- struct ieee80211_local *local = (*sta)->local;
- unsigned long flags;
-
- spin_lock_irqsave(&local->sta_lock, flags);
- __sta_info_unlink(sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
-}
-
static int sta_info_buffer_expired(struct sta_info *sta,
struct sk_buff *skb)
{
@@ -652,109 +600,209 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
}
}
-
-static void sta_info_cleanup(unsigned long data)
+static int __must_check __sta_info_destroy(struct sta_info *sta)
{
- struct ieee80211_local *local = (struct ieee80211_local *) data;
- struct sta_info *sta;
+ struct ieee80211_local *local;
+ struct ieee80211_sub_if_data *sdata;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret, i;
- rcu_read_lock();
- list_for_each_entry_rcu(sta, &local->sta_list, list)
- sta_info_cleanup_expire_buffered(local, sta);
- rcu_read_unlock();
+ might_sleep();
- if (local->quiescing)
- return;
+ if (!sta)
+ return -ENOENT;
- local->sta_cleanup.expires =
- round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
- add_timer(&local->sta_cleanup);
-}
+ local = sta->local;
+ sdata = sta->sdata;
-#ifdef CONFIG_MAC80211_DEBUGFS
-/*
- * See comment in __sta_info_unlink,
- * caller must hold local->sta_lock.
- */
-static void __sta_info_pin(struct sta_info *sta)
-{
- WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL);
- sta->pin_status = STA_INFO_PIN_STAT_PINNED;
+ spin_lock_irqsave(&local->sta_lock, flags);
+ ret = sta_info_hash_del(local, sta);
+ /* this might still be the pending list ... which is fine */
+ if (!ret)
+ list_del(&sta->list);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ if (ret)
+ return ret;
+
+ if (sta->key) {
+ ieee80211_key_free(sta->key);
+ /*
+ * We have only unlinked the key, and actually destroying it
+ * may mean it is removed from hardware which requires that
+ * the key->sta pointer is still valid, so flush the key todo
+ * list here.
+ *
+ * ieee80211_key_todo() will synchronize_rcu() so after this
+ * nothing can reference this sta struct any more.
+ */
+ ieee80211_key_todo();
+
+ WARN_ON(sta->key);
+ }
+
+ sta->dead = true;
+
+ if (test_and_clear_sta_flags(sta,
+ WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+ BUG_ON(!sdata->bss);
+
+ atomic_dec(&sdata->bss->num_sta_ps);
+ __sta_info_clear_tim_bit(sdata->bss, sta);
+ }
+
+ local->num_sta--;
+ local->sta_generation++;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+
+ if (sta->uploaded) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ drv_sta_remove(local, sdata, &sta->sta);
+ sdata = sta->sdata;
+ }
+
+#ifdef CONFIG_MAC80211_MESH
+ if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ mesh_accept_plinks_update(sdata);
+ del_timer(&sta->plink_timer);
+ }
+#endif
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "%s: Removed STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ cancel_work_sync(&sta->drv_unblock_wk);
+
+ rate_control_remove_sta_debugfs(sta);
+ ieee80211_sta_debugfs_remove(sta);
+
+#ifdef CONFIG_MAC80211_MESH
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
+ mesh_plink_deactivate(sta);
+ del_timer_sync(&sta->plink_timer);
+ }
+#endif
+
+ while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
+ local->total_ps_buffered--;
+ dev_kfree_skb_any(skb);
+ }
+
+ while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
+ dev_kfree_skb_any(skb);
+
+ for (i = 0; i < STA_TID_NUM; i++) {
+ struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_tx *tid_tx;
+
+ spin_lock_bh(&sta->lock);
+ tid_rx = sta->ampdu_mlme.tid_rx[i];
+ /* Make sure timer won't free the tid_rx struct, see below */
+ if (tid_rx)
+ tid_rx->shutdown = true;
+
+ spin_unlock_bh(&sta->lock);
+
+ /*
+ * Outside spinlock - shutdown is true now so that the timer
+ * won't free tid_rx, we have to do that now. Can't let the
+ * timer do it because we have to sync the timer outside the
+ * lock that it takes itself.
+ */
+ if (tid_rx) {
+ del_timer_sync(&tid_rx->session_timer);
+ kfree(tid_rx);
+ }
+
+ /*
+ * No need to do such complications for TX agg sessions, the
+ * path leading to freeing the tid_tx struct goes via a call
+ * from the driver, and thus needs to look up the sta struct
+ * again, which cannot be found when we get here. Hence, we
+ * just need to delete the timer and free the aggregation
+ * info; we won't be telling the peer about it then but that
+ * doesn't matter if we're not talking to it again anyway.
+ */
+ tid_tx = sta->ampdu_mlme.tid_tx[i];
+ if (tid_tx) {
+ del_timer_sync(&tid_tx->addba_resp_timer);
+ /*
+ * STA removed while aggregation session being
+ * started? Bit odd, but purge frames anyway.
+ */
+ skb_queue_purge(&tid_tx->pending);
+ kfree(tid_tx);
+ }
+ }
+
+ __sta_info_free(local, sta);
+
+ return 0;
}
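
The tid_rx teardown above is a small pattern of its own: mark the session as shutting down under the spinlock so the timer callback will neither free nor rearm it, then call del_timer_sync() and free it outside that lock. Roughly, under invented names:

    #include <linux/timer.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct session {
            struct timer_list timer;
            bool shutdown;          /* checked by the timer callback */
    };

    static void session_stop(spinlock_t *lock, struct session **slot)
    {
            struct session *s;

            spin_lock_bh(lock);
            s = *slot;
            if (s)
                    s->shutdown = true;     /* callback must now leave 's' alone */
            *slot = NULL;
            spin_unlock_bh(lock);

            if (s) {
                    /* must run outside the lock the timer callback takes itself */
                    del_timer_sync(&s->timer);
                    kfree(s);
            }
    }
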
-/*
- * See comment in __sta_info_unlink, returns sta if it
- * needs to be destroyed.
- */
-static struct sta_info *__sta_info_unpin(struct sta_info *sta)
+int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
- struct sta_info *ret = NULL;
- unsigned long flags;
+ struct sta_info *sta;
+ int ret;
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY &&
- sta->pin_status != STA_INFO_PIN_STAT_PINNED);
- if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY)
- ret = sta;
- sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, addr);
+ ret = __sta_info_destroy(sta);
+ mutex_unlock(&sdata->local->sta_mtx);
return ret;
}
-static void sta_info_debugfs_add_work(struct work_struct *work)
+int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, sta_debugfs_add);
- struct sta_info *sta, *tmp;
- unsigned long flags;
+ struct sta_info *sta;
+ int ret;
- /* We need to keep the RTNL across the whole pinned status. */
- rtnl_lock();
- while (1) {
- sta = NULL;
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get_bss(sdata, addr);
+ ret = __sta_info_destroy(sta);
+ mutex_unlock(&sdata->local->sta_mtx);
- spin_lock_irqsave(&local->sta_lock, flags);
- list_for_each_entry(tmp, &local->sta_list, list) {
- /*
- * debugfs.add_has_run will be set by
- * ieee80211_sta_debugfs_add regardless
- * of what else it does.
- */
- if (!tmp->debugfs.add_has_run) {
- sta = tmp;
- __sta_info_pin(sta);
- break;
- }
- }
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ return ret;
+}
- if (!sta)
- break;
+static void sta_info_cleanup(unsigned long data)
+{
+ struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct sta_info *sta;
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list)
+ sta_info_cleanup_expire_buffered(local, sta);
+ rcu_read_unlock();
- sta = __sta_info_unpin(sta);
- sta_info_destroy(sta);
- }
- rtnl_unlock();
+ if (local->quiescing)
+ return;
+
+ local->sta_cleanup.expires =
+ round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
+ add_timer(&local->sta_cleanup);
}
-#endif
void sta_info_init(struct ieee80211_local *local)
{
spin_lock_init(&local->sta_lock);
+ mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
+ INIT_LIST_HEAD(&local->sta_pending_list);
+ INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
local->sta_cleanup.expires =
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
-#endif
}
int sta_info_start(struct ieee80211_local *local)
@@ -766,16 +814,6 @@ int sta_info_start(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
del_timer(&local->sta_cleanup);
-#ifdef CONFIG_MAC80211_DEBUGFS
- /*
- * Make sure the debugfs adding work isn't pending after this
- * because we're about to be destroyed. It doesn't matter
- * whether it ran or not since we're going to flush all STAs
- * anyway.
- */
- cancel_work_sync(&local->sta_debugfs_add);
-#endif
-
sta_info_flush(local, NULL);
}
@@ -791,26 +829,19 @@ int sta_info_flush(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta, *tmp;
- LIST_HEAD(tmp_list);
int ret = 0;
- unsigned long flags;
might_sleep();
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
+
+ sta_info_finish_pending(local);
+
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (!sdata || sdata == sta->sdata) {
- __sta_info_unlink(&sta);
- if (sta) {
- list_add_tail(&sta->list, &tmp_list);
- ret++;
- }
- }
+ if (!sdata || sdata == sta->sdata)
+ WARN_ON(__sta_info_destroy(sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- list_for_each_entry_safe(sta, tmp, &tmp_list, list)
- sta_info_destroy(sta);
+ mutex_unlock(&local->sta_mtx);
return ret;
}
@@ -820,34 +851,28 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
- LIST_HEAD(tmp_list);
- unsigned long flags;
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
if (time_after(jiffies, sta->last_rx + exp_time)) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
- sdata->dev->name, sta->sta.addr);
+ sdata->name, sta->sta.addr);
#endif
- __sta_info_unlink(&sta);
- if (sta)
- list_add(&sta->list, &tmp_list);
+ WARN_ON(__sta_info_destroy(sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- list_for_each_entry_safe(sta, tmp, &tmp_list, list)
- sta_info_destroy(sta);
+ mutex_unlock(&local->sta_mtx);
}
struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
const u8 *addr)
{
- struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
+ struct sta_info *sta, *nxt;
- if (!sta)
- return NULL;
- return &sta->sta;
+ /* Just return a random station ... first in list ... */
+ for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
+ return &sta->sta;
+ return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
@@ -872,7 +897,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
int sent, buffered;
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
+ drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (!skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
@@ -885,7 +910,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
- "since STA not sleeping anymore\n", sdata->dev->name,
+ "since STA not sleeping anymore\n", sdata->name,
sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
@@ -944,7 +969,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
*/
printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
"though there are no buffered frames for it\n",
- sdata->dev->name, sta->sta.addr);
+ sdata->name, sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
}
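As a usage note, the reworked teardown above now runs under local->sta_mtx and may sleep, so callers invoke it from process context. A minimal sketch, with a hypothetical example_remove_vif_stations() helper, of how the new sta_info_flush() is meant to be called:

	/* sketch only: destroy every station that belongs to one vif */
	static void example_remove_vif_stations(struct ieee80211_sub_if_data *sdata)
	{
		/* __sta_info_destroy() runs for each match under sta_mtx */
		int n = sta_info_flush(sdata->local, sdata);

		if (n)
			printk(KERN_DEBUG "%s: flushed %d station(s)\n",
			       sdata->name, n);
	}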
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94..822d8452293 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,6 +42,9 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
+ * @WLAN_STA_DISASSOC: Disassociation in progress.
+ * This is used to reject TX BA session requests when disassociation
+ * is in progress.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
@@ -57,6 +60,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_SUSPEND = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
+ WLAN_STA_DISASSOC = 1<<14,
};
#define STA_TID_NUM 16
@@ -162,11 +166,6 @@ struct sta_ampdu_mlme {
};
-/* see __sta_info_unlink */
-#define STA_INFO_PIN_STAT_NORMAL 0
-#define STA_INFO_PIN_STAT_PINNED 1
-#define STA_INFO_PIN_STAT_DESTROY 2
-
/**
* struct sta_info - STA information
*
@@ -187,7 +186,6 @@ struct sta_ampdu_mlme {
* @flaglock: spinlock for flags accesses
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
- * @pin_status: used internally for pinning a STA struct into memory
* @flags: STA flags, see &enum ieee80211_sta_info_flags
* @ps_tx_buf: buffer of frames to transmit to this station
* when it leaves power saving state
@@ -226,6 +224,7 @@ struct sta_ampdu_mlme {
* @debugfs: debug filesystem info
* @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
+ * @uploaded: set to true when sta is uploaded to the driver
*/
struct sta_info {
/* General information, mostly static */
@@ -245,11 +244,7 @@ struct sta_info {
bool dead;
- /*
- * for use by the internal lifetime management,
- * see __sta_info_unlink
- */
- u8 pin_status;
+ bool uploaded;
/*
* frequently updated, locked with own spinlock (flaglock),
@@ -403,9 +398,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
/*
- * Get a STA info, must have be under RCU read lock.
+ * Get a STA info, must be under RCU read lock.
*/
-struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
+struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
+struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
+static inline
+void for_each_sta_info_type_check(struct ieee80211_local *local,
+ const u8 *addr,
+ struct sta_info *sta,
+ struct sta_info *nxt)
+{
+}
+
+#define for_each_sta_info(local, _addr, sta, nxt) \
+ for ( /* initialise loop */ \
+ sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
+ /* typecheck */ \
+ for_each_sta_info_type_check(local, (_addr), sta, nxt), \
+ /* continue condition */ \
+ sta; \
+ /* advance loop */ \
+ sta = nxt, \
+ nxt = sta ? rcu_dereference(sta->hnext) : NULL \
+ ) \
+ /* compare address and run code only if it matches */ \
+ if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
+
/*
* Get STA info by index, BROKEN!
*/
@@ -421,18 +444,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* Insert STA info into hash table/list, returns zero or a
 * -EEXIST if the same MAC address is already present.
*
- * Calling this without RCU protection makes the caller
- * relinquish its reference to @sta.
+ * Calling the non-rcu version makes the caller relinquish its
+ * reference to @sta; the _rcu version takes the RCU read lock
+ * itself (rcu_read_lock()) and must be called without it held.
*/
int sta_info_insert(struct sta_info *sta);
-/*
- * Unlink a STA info from the hash table/list.
- * This can NULL the STA pointer if somebody else
- * has already unlinked it.
- */
-void sta_info_unlink(struct sta_info **sta);
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
+int sta_info_insert_atomic(struct sta_info *sta);
+
+int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
-void sta_info_destroy(struct sta_info *sta);
void sta_info_set_tim_bit(struct sta_info *sta);
void sta_info_clear_tim_bit(struct sta_info *sta);
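To make the new lookup API above concrete, a minimal sketch, assuming a hypothetical example_lookup() caller, of the per-interface sta_info_get() and the for_each_sta_info() iteration, both of which must run under the RCU read lock:

	static void example_lookup(struct ieee80211_sub_if_data *sdata,
				   struct ieee80211_local *local, const u8 *addr)
	{
		struct sta_info *sta, *nxt;

		rcu_read_lock();

		/* single lookup, now scoped to one virtual interface */
		sta = sta_info_get(sdata, addr);
		if (sta) {
			/* ... sta stays valid for as long as RCU is held ... */
		}

		/* visit every hash entry with this MAC address, across interfaces */
		for_each_sta_info(local, addr, sta, nxt) {
			/* body runs once per matching entry */
		}

		rcu_read_unlock();
	}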
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7..56d5b9a6ec5 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -2,7 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
- * Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -45,29 +45,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
/*
- * XXX: This is temporary!
- *
- * The problem here is that when we get here, the driver will
- * quite likely have pretty much overwritten info->control by
- * using info->driver_data or info->rate_driver_data. Thus,
- * when passing out the frame to the driver again, we would be
- * passing completely bogus data since the driver would then
- * expect a properly filled info->control. In mac80211 itself
- * the same problem occurs, since we need info->control.vif
- * internally.
- *
- * To fix this, we should send the frame through TX processing
- * again. However, it's not that simple, since the frame will
- * have been software-encrypted (if applicable) already, and
- * encrypting it again doesn't do much good. So to properly do
- * that, we not only have to skip the actual 'raw' encryption
- * (key selection etc. still has to be done!) but also the
- * sequence number assignment since that impacts the crypto
- * encapsulation, of course.
- *
- * Hence, for now, fix the bug by just dropping the frame.
+ * This skb 'survived' a round-trip through the driver, and
+ * hopefully the driver didn't mangle it too badly. However,
+ * we can definitely not rely on the control information
+ * being correct. Clear it so we don't get junk there, and
+ * indicate that it needs new processing, but must not be
+ * modified/encrypted again.
*/
- goto drop;
+ memset(&info->control, 0, sizeof(info->control));
+
+ info->control.jiffies = jiffies;
+ info->control.vif = &sta->sdata->vif;
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
+ IEEE80211_TX_INTFL_RETRANSMISSION;
sta->tx_filtered_count++;
@@ -122,7 +112,6 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
return;
}
- drop:
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: dropped TX filtered frame, "
@@ -134,6 +123,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
dev_kfree_skb(skb);
}
+static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *) skb->data;
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+ if (ieee80211_is_action(mgmt->frame_control) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ mgmt->u.action.category == WLAN_CATEGORY_HT &&
+ mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
+ /*
+ * This update looks racy, but isn't -- if we come
+ * here we've definitely got a station that we're
+ * talking to, and on a managed interface that can
+ * only be the AP. And the only other place updating
+ * this variable is before we're associated.
+ */
+ switch (mgmt->u.action.u.ht_smps.smps_control) {
+ case WLAN_HT_SMPS_CONTROL_DYNAMIC:
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
+ break;
+ case WLAN_HT_SMPS_CONTROL_STATIC:
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
+ break;
+ case WLAN_HT_SMPS_CONTROL_DISABLED:
+ default: /* shouldn't happen since we don't send that */
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
+ break;
+ }
+
+ ieee80211_queue_work(&local->hw, &local->recalc_smps);
+ }
+}
+
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct sk_buff *skb2;
@@ -146,7 +169,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_status_rtap_hdr *rthdr;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
- struct sta_info *sta;
+ struct sta_info *sta, *tmp;
int retry_count = -1, i;
bool injected;
@@ -165,10 +188,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_lock();
sband = local->hw.wiphy->bands[info->band];
+ fc = hdr->frame_control;
- sta = sta_info_get(local, hdr->addr1);
+ for_each_sta_info(local, hdr->addr1, sta, tmp) {
+ /* skip wrong virtual interface */
+ if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
+ continue;
- if (sta) {
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
test_sta_flags(sta, WLAN_STA_PS_STA)) {
/*
@@ -180,8 +206,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
- fc = hdr->frame_control;
-
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
u16 tid, ssn;
@@ -208,6 +232,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rate_control_tx_status(local, sband, sta, skb);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, skb);
+
+ if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ (info->flags & IEEE80211_TX_STAT_ACK))
+ ieee80211_frame_acked(sta, skb);
}
rcu_read_unlock();
@@ -246,6 +274,25 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
local->dot11FailedCount++;
}
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ local->ps_sdata && !(local->scanning)) {
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ local->ps_sdata->u.mgd.flags |=
+ IEEE80211_STA_NULLFUNC_ACKED;
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_enable_work);
+ } else
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(10));
+ }
+
+ if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX)
+ cfg80211_action_tx_status(
+ skb->dev, (unsigned long) skb, skb->data, skb->len,
+ !!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
+
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
@@ -311,7 +358,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
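The status handlers above only run once the driver hands the transmitted frame back. A minimal sketch of a driver-side completion path, where example_complete_tx() and the acked flag are hypothetical while the mac80211 helpers are existing API:

	static void example_complete_tx(struct ieee80211_hw *hw,
					struct sk_buff *skb, bool acked)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		/* reset the status area, keeping the rate indices intact */
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].count = 1;	/* one transmission attempt */

		if (acked)
			info->flags |= IEEE80211_TX_STAT_ACK;

		/* mac80211 consumes the skb and runs the handlers above */
		ieee80211_tx_status(hw, skb);
	}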
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c..7ef491e9d66 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
}
- ctx->initialized = 1;
+ ctx->state = TKIP_STATE_PHASE1_DONE;
}
static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
/* Update the p1k only when the iv16 in the packet wraps around, this
* might occur after the wrap around of iv16 in the key in case of
* fragmented packets. */
- if (iv16 == 0 || !ctx->initialized)
+ if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
if (type == IEEE80211_TKIP_P1_KEY) {
@@ -195,11 +195,13 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
}
EXPORT_SYMBOL(ieee80211_get_tkip_key);
-/* Encrypt packet payload with TKIP using @key. @pos is a pointer to the
+/*
+ * Encrypt packet payload with TKIP using @key. @pos is a pointer to the
* beginning of the buffer containing payload. This payload must include
- * headroom of eight octets for IV and Ext. IV and taildroom of four octets
- * for ICV. @payload_len is the length of payload (_not_ including extra
- * headroom and tailroom). @ta is the transmitter addresses. */
+ * the IV/Ext.IV and space (tailroom) for four octets for the ICV.
+ * @payload_len is the length of payload (_not_ including IV/ICV length).
+ * @ta is the transmitter address.
+ */
void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
struct ieee80211_key *key,
u8 *pos, size_t payload_len, u8 *ta)
@@ -209,12 +211,11 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
/* Calculate per-packet key */
- if (ctx->iv16 == 0 || !ctx->initialized)
+ if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
- pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
}
@@ -259,7 +260,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
- if (key->u.tkip.rx[queue].initialized &&
+ if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
(iv32 < key->u.tkip.rx[queue].iv32 ||
(iv32 == key->u.tkip.rx[queue].iv32 &&
iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +276,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
if (only_iv) {
res = TKIP_DECRYPT_OK;
- key->u.tkip.rx[queue].initialized = 1;
+ key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
goto done;
}
- if (!key->u.tkip.rx[queue].initialized ||
+ if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
key->u.tkip.rx[queue].iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +300,18 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
printk("\n");
}
#endif
- if (key->local->ops->update_tkip_key &&
- key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- static const u8 bcast[ETH_ALEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- const u8 *sta_addr = key->sta->sta.addr;
-
- if (is_multicast_ether_addr(ra))
- sta_addr = bcast;
-
- drv_update_tkip_key(key->local, &key->conf, sta_addr,
- iv32, key->u.tkip.rx[queue].p1k);
- }
+ }
+ if (key->local->ops->update_tkip_key &&
+ key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+ key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
+ struct ieee80211_sub_if_data *sdata = key->sdata;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(key->sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ drv_update_tkip_key(key->local, sdata, &key->conf, key->sta,
+ iv32, key->u.tkip.rx[queue].p1k);
+ key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
}
tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ac210b58670..cbe53ed4fb0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
}
/* tx handlers */
+static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
+{
+ struct ieee80211_local *local = tx->local;
+ struct ieee80211_if_managed *ifmgd;
+
+ /* driver doesn't support power save */
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ return TX_CONTINUE;
+
+ /* hardware does dynamic power save */
+ if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ return TX_CONTINUE;
+
+ /* dynamic power save disabled */
+ if (local->hw.conf.dynamic_ps_timeout <= 0)
+ return TX_CONTINUE;
+
+ /* we are scanning, don't enable power save */
+ if (local->scanning)
+ return TX_CONTINUE;
+
+ if (!local->ps_sdata)
+ return TX_CONTINUE;
+
+ /* No point if we're going to suspend */
+ if (local->quiescing)
+ return TX_CONTINUE;
+
+ /* dynamic ps is supported only in managed mode */
+ if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
+ return TX_CONTINUE;
+
+ ifmgd = &tx->sdata->u.mgd;
+
+ /*
+	 * Don't wake up from power save if u-apsd is enabled, the voip ac
+	 * has u-apsd enabled and the frame is in the voip class. This
+	 * effectively means that even if all access categories have u-apsd
+	 * enabled, in practice u-apsd is only used with the voip ac. This
+	 * is a workaround for the case when received voip class packets do
+	 * not have a correct qos tag for some reason, due to the network or
+	 * the peer application.
+	 *
+	 * Note: local->uapsd_queues access is racy here. If the value is
+	 * changed via debugfs, the user needs to reassociate manually to
+	 * have everything in sync.
+ */
+ if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
+ && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ && skb_get_queue_mapping(tx->skb) == 0)
+ return TX_CONTINUE;
+
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_QUEUE_STOP_REASON_PS);
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_disable_work);
+ }
+
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+
+ return TX_CONTINUE;
+}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: dropped data frame to not "
"associated station %pM\n",
- tx->dev->name, hdr->addr1);
+ tx->sdata->name, hdr->addr1);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
- tx->dev->name);
+ tx->sdata->name);
#endif
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: STA %pM TX "
"buffer full - dropping oldest frame\n",
- tx->dev->name, sta->sta.addr);
+ tx->sdata->name, sta->sta.addr);
}
#endif
dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
else if (unlikely(staflags & WLAN_STA_PS_STA)) {
printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
- "set -> send frame\n", tx->dev->name,
+ "set -> send frame\n", tx->sdata->name,
sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -464,6 +529,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = NULL;
if (tx->key) {
+ bool skip_hw = false;
+
tx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
@@ -480,16 +547,32 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
!ieee80211_use_mfp(hdr->frame_control, tx->sta,
tx->skb))
tx->key = NULL;
+ else
+ skip_hw = (tx->key->conf.flags &
+ IEEE80211_KEY_FLAG_SW_MGMT) &&
+ ieee80211_is_mgmt(hdr->frame_control);
break;
case ALG_AES_CMAC:
if (!ieee80211_is_mgmt(hdr->frame_control))
tx->key = NULL;
break;
}
+
+ if (!skip_hw && tx->key &&
+ tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+ info->control.hw_key = &tx->key->conf;
}
- if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ return TX_CONTINUE;
+}
+
+static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+
+ if (tx->sta && tx->sta->uploaded)
+ info->control.sta = &tx->sta->sta;
return TX_CONTINUE;
}
@@ -519,7 +602,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
- txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx;
+ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+ if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+ txrc.max_rate_idx = -1;
+ else
+ txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -549,7 +637,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
"%pM on %d GHz band\n",
- tx->dev->name, hdr->addr1,
+ tx->sdata->name, hdr->addr1,
tx->channel->band ? 5 : 2))
return TX_DROP;
@@ -664,17 +752,6 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
}
static ieee80211_tx_result debug_noinline
-ieee80211_tx_h_misc(struct ieee80211_tx_data *tx)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-
- if (tx->sta)
- info->control.sta = &tx->sta->sta;
-
- return TX_CONTINUE;
-}
-
-static ieee80211_tx_result debug_noinline
ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -933,7 +1010,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
+ int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ NULL);
sband = tx->local->hw.wiphy->bands[tx->channel->band];
@@ -969,7 +1047,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
* because it will be recomputed and added
* on transmission
*/
- if (skb->len < (iterator.max_length + FCS_LEN))
+ if (skb->len < (iterator._max_length + FCS_LEN))
return false;
skb_trim(skb, skb->len - FCS_LEN);
@@ -996,10 +1074,10 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
/*
* remove the radiotap header
- * iterator->max_length was sanity-checked against
+ * iterator->_max_length was sanity-checked against
* skb->len by iterator init
*/
- skb_pull(skb, iterator.max_length);
+ skb_pull(skb, iterator._max_length);
return true;
}
@@ -1021,7 +1099,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
- tx->dev = sdata->dev; /* use original interface */
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
@@ -1032,7 +1109,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->flags |= IEEE80211_TX_FRAGMENTED;
/* process and remove the injection radiotap header */
- if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
if (!__ieee80211_parse_tx_radiotap(tx, skb))
return TX_DROP;
@@ -1041,6 +1118,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
* the radiotap header that was present and pre-filled
* 'tx' with tx control information.
*/
+ info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
}
/*
@@ -1052,10 +1130,15 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
hdr = (struct ieee80211_hdr *) skb->data;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
tx->sta = rcu_dereference(sdata->u.vlan.sta);
+ if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
+ return TX_DROP;
+ } else if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ tx->sta = sta_info_get_bss(sdata, hdr->addr1);
+ }
if (!tx->sta)
- tx->sta = sta_info_get(local, hdr->addr1);
+ tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1207,6 +1290,7 @@ static int __ieee80211_tx(struct ieee80211_local *local,
static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb = tx->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
ieee80211_tx_result res = TX_DROP;
#define CALL_TXH(txh) \
@@ -1216,13 +1300,18 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
goto txh_done; \
} while (0)
+ CALL_TXH(ieee80211_tx_h_dynamic_ps);
CALL_TXH(ieee80211_tx_h_check_assoc);
CALL_TXH(ieee80211_tx_h_ps_buf);
CALL_TXH(ieee80211_tx_h_select_key);
- CALL_TXH(ieee80211_tx_h_michael_mic_add);
+ CALL_TXH(ieee80211_tx_h_sta);
if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
CALL_TXH(ieee80211_tx_h_rate_ctrl);
- CALL_TXH(ieee80211_tx_h_misc);
+
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION))
+ goto txh_done;
+
+ CALL_TXH(ieee80211_tx_h_michael_mic_add);
CALL_TXH(ieee80211_tx_h_sequence);
CALL_TXH(ieee80211_tx_h_fragment);
/* handlers after fragment must be aware of tx info fragmentation! */
@@ -1398,34 +1487,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
return 0;
}
-static bool need_dynamic_ps(struct ieee80211_local *local)
-{
- /* driver doesn't support power save */
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
- return false;
-
- /* hardware does dynamic power save */
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
- return false;
-
- /* dynamic power save disabled */
- if (local->hw.conf.dynamic_ps_timeout <= 0)
- return false;
-
- /* we are scanning, don't enable power save */
- if (local->scanning)
- return false;
-
- if (!local->ps_sdata)
- return false;
-
- /* No point if we're going to suspend */
- if (local->quiescing)
- return false;
-
- return true;
-}
-
static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -1436,25 +1497,14 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
int headroom;
bool may_encrypt;
- if (need_dynamic_ps(local)) {
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- ieee80211_stop_queues_by_reason(&local->hw,
- IEEE80211_QUEUE_STOP_REASON_PS);
- ieee80211_queue_work(&local->hw,
- &local->dynamic_ps_disable_work);
- }
-
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
- }
-
rcu_read_lock();
if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
int hdrlen;
u16 len_rthdr;
- info->flags |= IEEE80211_TX_CTL_INJECTED;
+ info->flags |= IEEE80211_TX_CTL_INJECTED |
+ IEEE80211_TX_INTFL_HAS_RADIOTAP;
len_rthdr = ieee80211_get_radiotap_len(skb->data);
hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
@@ -1474,11 +1524,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
list) {
- if (!netif_running(tmp_sdata->dev))
+ if (!ieee80211_sdata_running(tmp_sdata))
continue;
if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
continue;
- if (compare_ether_addr(tmp_sdata->dev->dev_addr,
+ if (compare_ether_addr(tmp_sdata->vif.addr,
hdr->addr2) == 0) {
sdata = tmp_sdata;
break;
@@ -1642,7 +1692,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1656,7 +1706,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 24;
break;
@@ -1664,7 +1714,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1678,8 +1728,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
goto fail;
}
- if (compare_ether_addr(dev->dev_addr,
- skb->data + ETH_ALEN) == 0) {
+ if (compare_ether_addr(sdata->vif.addr,
+ skb->data + ETH_ALEN) == 0) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1709,7 +1759,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
}
}
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
- mesh_da, dev->dev_addr);
+ mesh_da, sdata->vif.addr);
rcu_read_unlock();
if (is_mesh_mcast)
meshhdrlen =
@@ -1734,7 +1784,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1765,9 +1815,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
if (!is_multicast_ether_addr(hdr.addr1)) {
rcu_read_lock();
- sta = sta_info_get(local, hdr.addr1);
- /* XXX: in the future, use sdata to look up the sta */
- if (sta && sta->sdata == sdata)
+ sta = sta_info_get(sdata, hdr.addr1);
+ if (sta)
sta_flags = get_sta_flags(sta);
rcu_read_unlock();
}
@@ -1786,7 +1835,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
unlikely(!is_multicast_ether_addr(hdr.addr1) &&
!(sta_flags & WLAN_STA_AUTHORIZED) &&
!(ethertype == ETH_P_PAE &&
- compare_ether_addr(dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
skb->data + ETH_ALEN) == 0))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
@@ -1926,7 +1975,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
ieee80211_tx(sdata, skb, true);
} else {
hdr = (struct ieee80211_hdr *)skb->data;
- sta = sta_info_get(local, hdr->addr1);
+ sta = sta_info_get(sdata, hdr->addr1);
ret = __ieee80211_tx(local, &skb, sta, true);
if (ret != IEEE80211_TX_OK)
@@ -2062,6 +2111,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct beacon_data *beacon;
struct ieee80211_supported_band *sband;
enum ieee80211_band band = local->hw.conf.channel->band;
+ struct ieee80211_tx_rate_control txrc;
sband = local->hw.wiphy->bands[band];
@@ -2150,8 +2200,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
mgmt->frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2169,21 +2219,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->band = band;
- /*
- * XXX: For now, always use the lowest rate
- */
- info->control.rates[0].idx = 0;
- info->control.rates[0].count = 1;
- info->control.rates[1].idx = -1;
- info->control.rates[2].idx = -1;
- info->control.rates[3].idx = -1;
- info->control.rates[4].idx = -1;
- BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
+
+ memset(&txrc, 0, sizeof(txrc));
+ txrc.hw = hw;
+ txrc.sband = sband;
+ txrc.bss_conf = &sdata->vif.bss_conf;
+ txrc.skb = skb;
+ txrc.reported_rate.idx = -1;
+ txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
+ if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+ txrc.max_rate_idx = -1;
+ else
+ txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ txrc.ap = true;
+ rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
out:
@@ -2192,6 +2246,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
+struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_pspoll *pspoll;
+ struct ieee80211_local *local;
+ struct sk_buff *skb;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
+ ifmgd = &sdata->u.mgd;
+ local = sdata->local;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for "
+ "pspoll template\n", sdata->name);
+ return NULL;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
+ memset(pspoll, 0, sizeof(*pspoll));
+ pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_PSPOLL);
+ pspoll->aid = cpu_to_le16(ifmgd->aid);
+
+ /* aid in PS-Poll has its two MSBs each set to 1 */
+ pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
+
+ memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(pspoll->ta, vif->addr, ETH_ALEN);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_pspoll_get);
+
+struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_local *local;
+ struct sk_buff *skb;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
+ ifmgd = &sdata->u.mgd;
+ local = sdata->local;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
+ "template\n", sdata->name);
+ return NULL;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
+ sizeof(*nullfunc));
+ memset(nullfunc, 0, sizeof(*nullfunc));
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+ memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_nullfunc_get);
+
+struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct ieee80211_hdr_3addr *hdr;
+ struct sk_buff *skb;
+ size_t ie_ssid_len;
+ u8 *pos;
+
+ sdata = vif_to_sdata(vif);
+ local = sdata->local;
+ ie_ssid_len = 2 + ssid_len;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
+ ie_ssid_len + ie_len);
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
+ "request template\n", sdata->name);
+ return NULL;
+ }
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_REQ);
+ memset(hdr->addr1, 0xff, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
+ memset(hdr->addr3, 0xff, ETH_ALEN);
+
+ pos = skb_put(skb, ie_ssid_len);
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = ssid_len;
+ if (ssid)
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+
+ if (ie) {
+ pos = skb_put(skb, ie_len);
+ memcpy(pos, ie, ie_len);
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_probereq_get);
+
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const void *frame, size_t frame_len,
const struct ieee80211_tx_info *frame_txctl,
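The template getters added above are meant for drivers that offload PS-Poll/nullfunc handling. A minimal sketch of fetching the templates; example_fetch_templates() and the firmware hand-off are hypothetical:

	static void example_fetch_templates(struct ieee80211_hw *hw,
					    struct ieee80211_vif *vif)
	{
		struct sk_buff *pspoll, *nullfunc;

		pspoll = ieee80211_pspoll_get(hw, vif);	/* NULL on allocation failure */
		nullfunc = ieee80211_nullfunc_get(hw, vif);

		/* ... hand skb->data / skb->len to the device here ... */

		if (pspoll)
			dev_kfree_skb(pspoll);
		if (nullfunc)
			dev_kfree_skb(nullfunc);
	}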
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3848140313f..c453226f06b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
-#include <linux/wireless.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
@@ -480,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
case NL80211_IFTYPE_MESH_POINT:
break;
}
- if (netif_running(sdata->dev))
- iterator(data, sdata->dev->dev_addr,
+ if (ieee80211_sdata_running(sdata))
+ iterator(data, sdata->vif.addr,
&sdata->vif);
}
@@ -514,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
case NL80211_IFTYPE_MESH_POINT:
break;
}
- if (netif_running(sdata->dev))
- iterator(data, sdata->dev->dev_addr,
+ if (ieee80211_sdata_running(sdata))
+ iterator(data, sdata->vif.addr,
&sdata->vif);
}
@@ -793,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
break;
}
+ qparam.uapsd = false;
+
drv_conf_tx(local, queue, &qparam);
}
}
@@ -860,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
sizeof(*mgmt) + 6 + extra_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
- "frame\n", sdata->dev->name);
+ "frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -870,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_AUTH);
memcpy(mgmt->da, bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -893,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
enum ieee80211_band band)
{
struct ieee80211_supported_band *sband;
- u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
- int i;
+ u8 *pos;
+ size_t offset = 0, noffset;
+ int supp_rates_len, i;
sband = local->hw.wiphy->bands[band];
pos = buffer;
+ supp_rates_len = min_t(int, sband->n_bitrates, 8);
+
*pos++ = WLAN_EID_SUPP_RATES;
- supp_rates_len = pos;
- *pos++ = 0;
-
- for (i = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *rate = &sband->bitrates[i];
-
- if (esupp_rates_len) {
- *esupp_rates_len += 1;
- } else if (*supp_rates_len == 8) {
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- esupp_rates_len = pos;
- *pos++ = 1;
- } else
- *supp_rates_len += 1;
+ *pos++ = supp_rates_len;
+
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+
+ /* insert "request information" if in custom IEs */
+ if (ie && ie_len) {
+ static const u8 before_extrates[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_extrates,
+ ARRAY_SIZE(before_extrates),
+ offset);
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ offset = noffset;
+ }
+
+ if (sband->n_bitrates > i) {
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = sband->n_bitrates - i;
+
+ for (; i < sband->n_bitrates; i++) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
- *pos++ = rate->bitrate / 5;
+ /* insert custom IEs that go before HT */
+ if (ie && ie_len) {
+ static const u8 before_ht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_DS_PARAMS,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_ht, ARRAY_SIZE(before_ht),
+ offset);
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- __le16 tmp = cpu_to_le16(sband->ht_cap.cap);
+ u16 cap = sband->ht_cap.cap;
+ __le16 tmp;
+
+ if (ieee80211_disable_40mhz_24ghz &&
+ sband->band == IEEE80211_BAND_2GHZ) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+ tmp = cpu_to_le16(cap);
memcpy(pos, &tmp, sizeof(u16));
pos += sizeof(u16);
- /* TODO: needs a define here for << 2 */
*pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density << 2);
+ (sband->ht_cap.ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
pos += sizeof(sband->ht_cap.mcs);
pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
@@ -940,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
* that calculates local->scan_ies_len.
*/
- if (ie) {
- memcpy(pos, ie, ie_len);
- pos += ie_len;
+ /* add any remaining custom IEs */
+ if (ie && ie_len) {
+ noffset = ie_len;
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
}
return pos - buffer;
@@ -955,40 +1002,33 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
- ie_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
- "request\n", sdata->dev->name);
+ size_t buf_len;
+ u8 *buf;
+
+ /* FIXME: come up with a proper value */
+ buf = kmalloc(200 + ie_len, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_DEBUG "%s: failed to allocate temporary IE "
+ "buffer\n", sdata->name);
return;
}
- skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_REQ);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
+ local->hw.conf.channel->band);
+
+ skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
+ ssid, ssid_len,
+ buf, buf_len);
+
if (dst) {
+ mgmt = (struct ieee80211_mgmt *) skb->data;
memcpy(mgmt->da, dst, ETH_ALEN);
memcpy(mgmt->bssid, dst, ETH_ALEN);
- } else {
- memset(mgmt->da, 0xff, ETH_ALEN);
- memset(mgmt->bssid, 0xff, ETH_ALEN);
}
- pos = skb_put(skb, 2 + ssid_len);
- *pos++ = WLAN_EID_SSID;
- *pos++ = ssid_len;
- memcpy(pos, ssid, ssid_len);
- pos += ssid_len;
-
- skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
- local->hw.conf.channel->band));
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
+ kfree(buf);
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1032,18 +1072,16 @@ void ieee80211_stop_device(struct ieee80211_local *local)
ieee80211_led_radio(local, false);
cancel_work_sync(&local->reconfig_filter);
- drv_stop(local);
flush_workqueue(local->workqueue);
+ drv_stop(local);
}
int ieee80211_reconfig(struct ieee80211_local *local)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
- unsigned long flags;
int res;
if (local->suspended)
@@ -1061,7 +1099,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (res) {
WARN(local->suspended, "Hardware became unavailable "
"upon resume. This could be a software issue "
- "prior to suspend or a harware issue\n");
+ "prior to suspend or a hardware issue\n");
return res;
}
@@ -1072,29 +1110,24 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- netif_running(sdata->dev)) {
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = sdata->dev->dev_addr;
- res = drv_add_interface(local, &conf);
- }
+ ieee80211_sdata_running(sdata))
+ res = drv_add_interface(local, &sdata->vif);
}
/* add STAs back */
- if (local->ops->sta_notify) {
- spin_lock_irqsave(&local->sta_lock, flags);
- list_for_each_entry(sta, &local->sta_list, list) {
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD,
- &sta->sta);
+ WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
}
+ mutex_unlock(&local->sta_mtx);
/* Clear Suspend state so that ADDBA requests can be processed */
@@ -1119,7 +1152,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
u32 changed = ~0;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
@@ -1145,9 +1178,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ rcu_read_lock();
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ ieee80211_sta_tear_down_BA_sessions(sta);
+ }
+ }
+ rcu_read_unlock();
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);
ieee80211_wake_queues_by_reason(hw,
@@ -1184,13 +1225,143 @@ int ieee80211_reconfig(struct ieee80211_local *local)
add_timer(&local->sta_cleanup);
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list)
mesh_plink_restart(sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
#else
WARN_ON(1);
#endif
return 0;
}
+static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
+ enum ieee80211_smps_mode *smps_mode)
+{
+ if (ifmgd->associated) {
+ *smps_mode = ifmgd->ap_smps;
+
+ if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
+ if (ifmgd->powersave)
+ *smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ *smps_mode = IEEE80211_SMPS_OFF;
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* must hold iflist_mtx */
+void ieee80211_recalc_smps(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *forsdata)
+{
+ struct ieee80211_sub_if_data *sdata;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
+ int count = 0;
+
+ if (forsdata)
+ WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
+
+ WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+
+ /*
+ * This function could be improved to handle multiple
+ * interfaces better, but right now it makes any
+ * non-station interfaces force SM PS to be turned
+ * off. If there are multiple station interfaces it
+ * could also use the best possible mode, e.g. if
+ * one is in static and the other in dynamic then
+ * dynamic is ok.
+ */
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!netif_running(sdata->dev))
+ continue;
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ goto set;
+ if (sdata != forsdata) {
+ /*
+			 * This nesting is ok -- we are holding the iflist_mtx
+			 * so we can't get here twice. But it's required since
+			 * normally we acquire the managed-mode mutex first and
+			 * then the iflist_mtx.
+ */
+ mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
+ count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
+ mutex_unlock(&sdata->u.mgd.mtx);
+ } else
+ count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
+
+ if (count > 1) {
+ smps_mode = IEEE80211_SMPS_OFF;
+ break;
+ }
+ }
+
+ if (smps_mode == local->smps_mode)
+ return;
+
+ set:
+ local->smps_mode = smps_mode;
+ /* changed flag is auto-detected for this */
+ ieee80211_hw_config(local, 0);
+}
+
+static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
+{
+ int i;
+
+ for (i = 0; i < n_ids; i++)
+ if (ids[i] == id)
+ return true;
+ return false;
+}
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ *
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed; this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly; if they are not, the result of using this function
+ * will not be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset)
+{
+ size_t pos = offset;
+
+ while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
+ pos += 2 + ies[pos + 1];
+
+ return pos;
+}
+
+size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
+{
+ size_t pos = offset;
+
+ while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
+ pos += 2 + ies[pos + 1];
+
+ return pos;
+}
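To make the ieee80211_ie_split() kernel-doc above concrete, a small sketch with a hypothetical buffer; example_split() is illustrative only:

	static size_t example_split(void)
	{
		/* well-formed, correctly ordered: SSID, Supported Rates, DS Params */
		static const u8 ies[] = {
			WLAN_EID_SSID, 2, 'a', 'b',
			WLAN_EID_SUPP_RATES, 1, 0x82,
			WLAN_EID_DS_PARAMS, 1, 6,
		};
		static const u8 before[] = { WLAN_EID_SSID, WLAN_EID_SUPP_RATES };

		/* returns 7, the offset of the WLAN_EID_DS_PARAMS element */
		return ieee80211_ie_split(ies, sizeof(ies), before,
					  ARRAY_SIZE(before), 0);
	}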
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 247123fe1a7..5d745f2d723 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -305,20 +305,19 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
+ if (!info->control.hw_key) {
if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
tx->key->conf.keylen,
tx->key->conf.keyidx))
return -1;
- } else {
- info->control.hw_key = &tx->key->conf;
- if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
- if (!ieee80211_wep_add_iv(tx->local, skb,
- tx->key->conf.keylen,
- tx->key->conf.keyidx))
- return -1;
- }
+ } else if (info->control.hw_key->flags &
+ IEEE80211_KEY_FLAG_GENERATE_IV) {
+ if (!ieee80211_wep_add_iv(tx->local, skb,
+ tx->key->conf.keylen,
+ tx->key->conf.keyidx))
+ return -1;
}
+
return 0;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 79d887dae73..34e6d02da77 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -96,7 +96,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
}
if (!sta && ra && !is_multicast_ether_addr(ra)) {
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (sta)
sta_flags = get_sta_flags(sta);
}
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 00000000000..1e1ea3007b0
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1100 @@
+/*
+ * mac80211 work implementation
+ *
+ * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
+ * Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2005, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+
+#include "ieee80211_i.h"
+#include "rate.h"
+
+#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_MAX_TRIES 3
+#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_MAX_TRIES 3
+#define IEEE80211_MAX_PROBE_TRIES 5
+
+enum work_action {
+ WORK_ACT_NONE,
+ WORK_ACT_TIMEOUT,
+ WORK_ACT_DONE,
+};
+
+
+/* utils */
+static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
+{
+ WARN_ON(!mutex_is_locked(&local->work_mtx));
+}
+
+/*
+ * We can have multiple work items (and connection probing)
+ * scheduling this timer, but we need to take care to only
+ * reschedule it when it should fire _earlier_ than it was
+ * asked for before, or if it's not pending right now. This
+ * function ensures that. Note that it is then required to
+ * run this function for all timeouts after the first one
+ * has happened -- the work that runs from this timer will
+ * do that.
+ */
+static void run_again(struct ieee80211_local *local,
+ unsigned long timeout)
+{
+ ASSERT_WORK_MTX(local);
+
+ if (!timer_pending(&local->work_timer) ||
+ time_before(timeout, local->work_timer.expires))
+ mod_timer(&local->work_timer, timeout);
+}
+
+static void work_free_rcu(struct rcu_head *head)
+{
+ struct ieee80211_work *wk =
+ container_of(head, struct ieee80211_work, rcu_head);
+
+ kfree(wk);
+}
+
+void free_work(struct ieee80211_work *wk)
+{
+ call_rcu(&wk->rcu_head, work_free_rcu);
+}
+
+static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
+ struct ieee80211_supported_band *sband,
+ u32 *rates)
+{
+ int i, j, count;
+ *rates = 0;
+ count = 0;
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = (supp_rates[i] & 0x7F) * 5;
+
+ for (j = 0; j < sband->n_bitrates; j++)
+ if (sband->bitrates[j].bitrate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+
+ return count;
+}
+
+/* frame sending functions */
+
+static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_channel *channel,
+ enum ieee80211_smps_mode smps)
+{
+ struct ieee80211_ht_info *ht_info;
+ u8 *pos;
+ u32 flags = channel->flags;
+ u16 cap = sband->ht_cap.cap;
+ __le16 tmp;
+
+ if (!sband->ht_cap.ht_supported)
+ return;
+
+ if (!ht_info_ie)
+ return;
+
+ if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
+ return;
+
+ ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
+
+ /* determine capability flags */
+
+ if (ieee80211_disable_40mhz_24ghz &&
+ sband->band == IEEE80211_BAND_2GHZ) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+
+ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ }
+
+ /* set SM PS mode properly */
+ cap &= ~IEEE80211_HT_CAP_SM_PS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
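+		/* fall through */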
+ case IEEE80211_SMPS_OFF:
+ cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ cap |= WLAN_HT_CAP_SM_PS_STATIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ }
+
+ /* reserve and fill IE */
+
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+ /* capability flags */
+ tmp = cpu_to_le16(cap);
+ memcpy(pos, &tmp, sizeof(u16));
+ pos += sizeof(u16);
+
+ /* AMPDU parameters */
+ *pos++ = sband->ht_cap.ampdu_factor |
+ (sband->ht_cap.ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+ /* MCS set */
+ memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
+ pos += sizeof(sband->ht_cap.mcs);
+
+ /* extended capabilities */
+ pos += sizeof(__le16);
+
+ /* BF capabilities */
+ pos += sizeof(__le32);
+
+ /* antenna selection */
+ pos += sizeof(u8);
+}
+
+static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_work *wk)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos, qos_info;
+ const u8 *ies;
+ size_t offset = 0, noffset;
+ int i, len, count, rates_len, supp_rates_len;
+ u16 capab;
+ struct ieee80211_supported_band *sband;
+ u32 rates = 0;
+
+ sband = local->hw.wiphy->bands[wk->chan->band];
+
+ /*
+ * Get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode)...
+ */
+ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
+ wk->assoc.supp_rates_len,
+ sband, &rates);
+
+ skb = alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(*mgmt) + /* bit too much but doesn't matter */
+ 2 + wk->assoc.ssid_len + /* SSID */
+ 4 + rates_len + /* (extended) rates */
+ 4 + /* power capability */
+ 2 + 2 * sband->n_channels + /* supported channels */
+ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+ wk->ie_len + /* extra IEs */
+ 9, /* WMM */
+ GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
+ "frame\n", sdata->name);
+ return;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ capab = WLAN_CAPABILITY_ESS;
+
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ }
+
+ if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
+ capab |= WLAN_CAPABILITY_PRIVACY;
+
+ if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
+ (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+ capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
+
+ if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
+ skb_put(skb, 10);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_REASSOC_REQ);
+ mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.reassoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
+ ETH_ALEN);
+ } else {
+ skb_put(skb, 4);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ASSOC_REQ);
+ mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.assoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ }
+
+ /* SSID */
+ ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = wk->assoc.ssid_len;
+ memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
+
+ /* add all rates which were marked to be used above */
+ supp_rates_len = rates_len;
+ if (supp_rates_len > 8)
+ supp_rates_len = 8;
+
+ len = sband->n_bitrates;
+ pos = skb_put(skb, supp_rates_len + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_len;
+
+ count = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ if (++count == 8)
+ break;
+ }
+ }
+
+ if (rates_len > count) {
+ pos = skb_put(skb, rates_len - count + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = rates_len - count;
+
+ for (i++; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ }
+
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
+ /* 1. power capabilities */
+ pos = skb_put(skb, 4);
+ *pos++ = WLAN_EID_PWR_CAPABILITY;
+ *pos++ = 2;
+ *pos++ = 0; /* min tx power */
+ *pos++ = wk->chan->max_power; /* max tx power */
+
+ /* 2. supported channels */
+ /* TODO: get this in reg domain format */
+ pos = skb_put(skb, 2 * sband->n_channels + 2);
+ *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
+ *pos++ = 2 * sband->n_channels;
+ for (i = 0; i < sband->n_channels; i++) {
+ *pos++ = ieee80211_frequency_to_channel(
+ sband->channels[i].center_freq);
+ *pos++ = 1; /* one channel in the subband */
+ }
+ }
+
+ /* if present, add any custom IEs that go before HT */
+ if (wk->ie_len && wk->ie) {
+ static const u8 before_ht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_PWR_CAPABILITY,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ };
+ noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
+ before_ht, ARRAY_SIZE(before_ht),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (wk->assoc.use_11n && wk->assoc.wmm_used &&
+ local->hw.queues >= 4)
+ ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
+ sband, wk->chan, wk->assoc.smps);
+
+ /* if present, add any custom non-vendor IEs that go after HT */
+ if (wk->ie_len && wk->ie) {
+ noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (wk->assoc.wmm_used && local->hw.queues >= 4) {
+ if (wk->assoc.uapsd_used) {
+ qos_info = local->uapsd_queues;
+ qos_info |= (local->uapsd_max_sp_len <<
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
+ } else {
+ qos_info = 0;
+ }
+
+ pos = skb_put(skb, 9);
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ *pos++ = 7; /* len */
+ *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
+ *pos++ = 0x50;
+ *pos++ = 0xf2;
+ *pos++ = 2; /* WME */
+ *pos++ = 0; /* WME info */
+ *pos++ = 1; /* WME ver */
+ *pos++ = qos_info;
+ }
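As a worked example of the QoS info octet built above (the values are hypothetical): with U-APSD enabled on all four ACs (uapsd_queues = 0x0f) and a max SP length code of 3, qos_info = 0x0f | (3 << IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT) = 0x0f | 0x60 = 0x6f.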
+
+ /* add any remaining custom (i.e. vendor specific here) IEs */
+ if (wk->ie_len && wk->ie) {
+ noffset = wk->ie_len;
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ }
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
+ struct ieee80211_work *wk)
+{
+ struct cfg80211_bss *cbss;
+ u16 capa_val = WLAN_CAPABILITY_ESS;
+
+ if (wk->probe_auth.privacy)
+ capa_val |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
+ wk->probe_auth.ssid, wk->probe_auth.ssid_len,
+ WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
+ capa_val);
+ if (!cbss)
+ return;
+
+ cfg80211_unlink_bss(local->hw.wiphy, cbss);
+ cfg80211_put_bss(cbss);
+}
+
+static enum work_action __must_check
+ieee80211_direct_probe(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->probe_auth.tries++;
+ if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
+ sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely the AP is not in range, so remove the
+ * bss struct for that AP.
+ */
+ ieee80211_remove_auth_bss(local, wk);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->probe_auth.tries);
+
+ /*
+ * The direct probe is sent to the broadcast address as some APs
+ * will not answer a direct packet in the unassociated state.
+ */
+ ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
+ wk->probe_auth.ssid_len, NULL, 0);
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+
+static enum work_action __must_check
+ieee80211_authenticate(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->probe_auth.tries++;
+ if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: authentication with %pM"
+ " timed out\n", sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely the AP is not in range, so remove the
+ * bss struct for that AP.
+ */
+ ieee80211_remove_auth_bss(local, wk);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->probe_auth.tries);
+
+ ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
+ wk->ie_len, wk->filter_ta, NULL, 0, 0);
+ wk->probe_auth.transaction = 2;
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+static enum work_action __must_check
+ieee80211_associate(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->assoc.tries++;
+ if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: association with %pM"
+ " timed out\n",
+ sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely the AP is not in range, so remove the
+ * bss struct for that AP.
+ */
+ if (wk->assoc.bss)
+ cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->assoc.tries);
+ ieee80211_send_assoc(sdata, wk);
+
+ wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+static enum work_action __must_check
+ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
+{
+ /*
+ * First time we run, do nothing -- the generic code will
+ * have switched to the right channel etc.
+ */
+ if (!wk->started) {
+ wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
+
+ cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
+ wk->chan, wk->chan_type,
+ wk->remain.duration, GFP_KERNEL);
+
+ return WORK_ACT_NONE;
+ }
+
+ return WORK_ACT_TIMEOUT;
+}
+
+static void ieee80211_auth_challenge(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt,
+ size_t len)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ u8 *pos;
+ struct ieee802_11_elems elems;
+
+ pos = mgmt->u.auth.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+ if (!elems.challenge)
+ return;
+ ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
+ elems.challenge - 2, elems.challenge_len + 2,
+ wk->filter_ta, wk->probe_auth.key,
+ wk->probe_auth.key_len, wk->probe_auth.key_idx);
+ wk->probe_auth.transaction = 4;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u16 auth_alg, auth_transaction, status_code;
+
+ if (wk->type != IEEE80211_WORK_AUTH)
+ return WORK_ACT_NONE;
+
+ if (len < 24 + 6)
+ return WORK_ACT_NONE;
+
+ auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
+ auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
+ status_code = le16_to_cpu(mgmt->u.auth.status_code);
+
+ if (auth_alg != wk->probe_auth.algorithm ||
+ auth_transaction != wk->probe_auth.transaction)
+ return WORK_ACT_NONE;
+
+ if (status_code != WLAN_STATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
+ wk->sdata->name, mgmt->sa, status_code);
+ return WORK_ACT_DONE;
+ }
+
+ switch (wk->probe_auth.algorithm) {
+ case WLAN_AUTH_OPEN:
+ case WLAN_AUTH_LEAP:
+ case WLAN_AUTH_FT:
+ break;
+ case WLAN_AUTH_SHARED_KEY:
+ if (wk->probe_auth.transaction != 4) {
+ ieee80211_auth_challenge(wk, mgmt, len);
+ /* need another frame */
+ return WORK_ACT_NONE;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return WORK_ACT_NONE;
+ }
+
+ printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
+ return WORK_ACT_DONE;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ bool reassoc)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+ u16 capab_info, status_code, aid;
+ struct ieee802_11_elems elems;
+ u8 *pos;
+
+ /*
+ * AssocResp and ReassocResp have identical structure, so process both
+ * of them in this function.
+ */
+
+ if (len < 24 + 6)
+ return WORK_ACT_NONE;
+
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+
+ printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
+ "status=%d aid=%d)\n",
+ sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
+ capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
+
+ pos = mgmt->u.assoc_resp.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+
+ if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
+ elems.timeout_int && elems.timeout_int_len == 5 &&
+ elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
+ u32 tu, ms;
+ tu = get_unaligned_le32(elems.timeout_int + 1);
+ ms = tu * 1024 / 1000;
+ printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
+ "comeback duration %u TU (%u ms)\n",
+ sdata->name, mgmt->sa, tu, ms);
+ wk->timeout = jiffies + msecs_to_jiffies(ms);
+ if (ms > IEEE80211_ASSOC_TIMEOUT)
+ run_again(local, wk->timeout);
+ return WORK_ACT_NONE;
+ }
+
+ if (status_code != WLAN_STATUS_SUCCESS)
+ printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
+ sdata->name, mgmt->sa, status_code);
+ else
+ printk(KERN_DEBUG "%s: associated\n", sdata->name);
+
+ return WORK_ACT_DONE;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+ size_t baselen;
+
+ ASSERT_WORK_MTX(local);
+
+ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
+ if (baselen > len)
+ return WORK_ACT_NONE;
+
+ printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+ return WORK_ACT_DONE;
+}
+
+static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_work *wk;
+ enum work_action rma = WORK_ACT_NONE;
+ u16 fc;
+
+ rx_status = (struct ieee80211_rx_status *) skb->cb;
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ fc = le16_to_cpu(mgmt->frame_control);
+
+ mutex_lock(&local->work_mtx);
+
+ list_for_each_entry(wk, &local->work_list, list) {
+ const u8 *bssid = NULL;
+
+ switch (wk->type) {
+ case IEEE80211_WORK_DIRECT_PROBE:
+ case IEEE80211_WORK_AUTH:
+ case IEEE80211_WORK_ASSOC:
+ bssid = wk->filter_ta;
+ break;
+ default:
+ continue;
+ }
+
+ /*
+ * Before queuing, we already verified mgmt->sa,
+ * so this is needed just for matching.
+ */
+ if (compare_ether_addr(bssid, mgmt->bssid))
+ continue;
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_PROBE_RESP:
+ rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
+ rx_status);
+ break;
+ case IEEE80211_STYPE_AUTH:
+ rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_ASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
+ skb->len, false);
+ break;
+ case IEEE80211_STYPE_REASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
+ skb->len, true);
+ break;
+ default:
+ WARN_ON(1);
+ }
+ /*
+ * We've processed this frame for that work, so it can't
+ * belong to another work struct.
+ * NB: this is also required for correctness for 'rma'!
+ */
+ break;
+ }
+
+ switch (rma) {
+ case WORK_ACT_NONE:
+ break;
+ case WORK_ACT_DONE:
+ list_del_rcu(&wk->list);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+
+ mutex_unlock(&local->work_mtx);
+
+ if (rma != WORK_ACT_DONE)
+ goto out;
+
+ switch (wk->done(wk, skb)) {
+ case WORK_DONE_DESTROY:
+ free_work(wk);
+ break;
+ case WORK_DONE_REQUEUE:
+ synchronize_rcu();
+ wk->started = false; /* restart */
+ mutex_lock(&local->work_mtx);
+ list_add_tail(&wk->list, &local->work_list);
+ mutex_unlock(&local->work_mtx);
+ }
+
+ out:
+ kfree_skb(skb);
+}
+
+static void ieee80211_work_timer(unsigned long data)
+{
+ struct ieee80211_local *local = (void *) data;
+
+ if (local->quiescing)
+ return;
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+}
+
+static void ieee80211_work_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, work_work);
+ struct sk_buff *skb;
+ struct ieee80211_work *wk, *tmp;
+ LIST_HEAD(free_work);
+ enum work_action rma;
+ bool remain_off_channel = false;
+
+ if (local->scanning)
+ return;
+
+ /*
+ * ieee80211_queue_work() should have picked up most cases,
+ * here we'll pick up the rest.
+ */
+ if (WARN(local->suspended, "work scheduled while going to suspend\n"))
+ return;
+
+ /* first process frames to avoid timing out while a frame is pending */
+ while ((skb = skb_dequeue(&local->work_skb_queue)))
+ ieee80211_work_rx_queued_mgmt(local, skb);
+
+ ieee80211_recalc_idle(local);
+
+ mutex_lock(&local->work_mtx);
+
+ list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
+ bool started = wk->started;
+
+ /* mark work as started if it's on the current off-channel */
+ if (!started && local->tmp_channel &&
+ wk->chan == local->tmp_channel &&
+ wk->chan_type == local->tmp_channel_type) {
+ started = true;
+ wk->timeout = jiffies;
+ }
+
+ if (!started && !local->tmp_channel) {
+ /*
+ * TODO: could optimize this by leaving the
+ * station vifs in awake mode if they
+ * happen to be on the same channel as
+ * the requested channel
+ */
+ ieee80211_offchannel_stop_beaconing(local);
+ ieee80211_offchannel_stop_station(local);
+
+ local->tmp_channel = wk->chan;
+ local->tmp_channel_type = wk->chan_type;
+ ieee80211_hw_config(local, 0);
+ started = true;
+ wk->timeout = jiffies;
+ }
+
+ /* don't try to work with items that aren't started */
+ if (!started)
+ continue;
+
+ if (time_is_after_jiffies(wk->timeout)) {
+ /*
+ * This work item isn't supposed to be worked on
+ * right now, but take care to adjust the timer
+ * properly.
+ */
+ run_again(local, wk->timeout);
+ continue;
+ }
+
+ switch (wk->type) {
+ default:
+ WARN_ON(1);
+ /* nothing */
+ rma = WORK_ACT_NONE;
+ break;
+ case IEEE80211_WORK_ABORT:
+ rma = WORK_ACT_TIMEOUT;
+ break;
+ case IEEE80211_WORK_DIRECT_PROBE:
+ rma = ieee80211_direct_probe(wk);
+ break;
+ case IEEE80211_WORK_AUTH:
+ rma = ieee80211_authenticate(wk);
+ break;
+ case IEEE80211_WORK_ASSOC:
+ rma = ieee80211_associate(wk);
+ break;
+ case IEEE80211_WORK_REMAIN_ON_CHANNEL:
+ rma = ieee80211_remain_on_channel_timeout(wk);
+ break;
+ }
+
+ wk->started = started;
+
+ switch (rma) {
+ case WORK_ACT_NONE:
+ /* might have changed the timeout */
+ run_again(local, wk->timeout);
+ break;
+ case WORK_ACT_TIMEOUT:
+ list_del_rcu(&wk->list);
+ synchronize_rcu();
+ list_add(&wk->list, &free_work);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+ }
+
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (!wk->started)
+ continue;
+ if (wk->chan != local->tmp_channel)
+ continue;
+ if (wk->chan_type != local->tmp_channel_type)
+ continue;
+ remain_off_channel = true;
+ }
+
+ if (!remain_off_channel && local->tmp_channel) {
+ local->tmp_channel = NULL;
+ ieee80211_hw_config(local, 0);
+ ieee80211_offchannel_return(local, true);
+ /* give connection some time to breathe */
+ run_again(local, jiffies + HZ/2);
+ }
+
+ if (list_empty(&local->work_list) && local->scan_req)
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->scan_work,
+ round_jiffies_relative(0));
+
+ mutex_unlock(&local->work_mtx);
+
+ ieee80211_recalc_idle(local);
+
+ list_for_each_entry_safe(wk, tmp, &free_work, list) {
+ wk->done(wk, NULL);
+ list_del(&wk->list);
+ kfree(wk);
+ }
+}
+
+void ieee80211_add_work(struct ieee80211_work *wk)
+{
+ struct ieee80211_local *local;
+
+ if (WARN_ON(!wk->chan))
+ return;
+
+ if (WARN_ON(!wk->sdata))
+ return;
+
+ if (WARN_ON(!wk->done))
+ return;
+
+ if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
+ return;
+
+ wk->started = false;
+
+ local = wk->sdata->local;
+ mutex_lock(&local->work_mtx);
+ list_add_tail(&wk->list, &local->work_list);
+ mutex_unlock(&local->work_mtx);
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+}
+
+void ieee80211_work_init(struct ieee80211_local *local)
+{
+ mutex_init(&local->work_mtx);
+ INIT_LIST_HEAD(&local->work_list);
+ setup_timer(&local->work_timer, ieee80211_work_timer,
+ (unsigned long)local);
+ INIT_WORK(&local->work_work, ieee80211_work_work);
+ skb_queue_head_init(&local->work_skb_queue);
+}
+
+void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+ wk->type = IEEE80211_WORK_ABORT;
+ wk->started = true;
+ wk->timeout = jiffies;
+ }
+ mutex_unlock(&local->work_mtx);
+
+ /* run cleanups etc. */
+ ieee80211_work_work(&local->work_work);
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+ WARN_ON(1);
+ break;
+ }
+ mutex_unlock(&local->work_mtx);
+}
+
+ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_work *wk;
+ u16 fc;
+
+ if (skb->len < 24)
+ return RX_DROP_MONITOR;
+
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ fc = le16_to_cpu(mgmt->frame_control);
+
+ list_for_each_entry_rcu(wk, &local->work_list, list) {
+ if (sdata != wk->sdata)
+ continue;
+ if (compare_ether_addr(wk->filter_ta, mgmt->sa))
+ continue;
+ if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
+ continue;
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_AUTH:
+ case IEEE80211_STYPE_PROBE_RESP:
+ case IEEE80211_STYPE_ASSOC_RESP:
+ case IEEE80211_STYPE_REASSOC_RESP:
+ skb_queue_tail(&local->work_skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &local->work_work);
+ return RX_QUEUED;
+ }
+ }
+
+ return RX_CONTINUE;
+}
+
+static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
+{
+ /*
+ * We are done serving the remain-on-channel command.
+ */
+ cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
+ wk->chan, wk->chan_type,
+ GFP_KERNEL);
+
+ return WORK_DONE_DESTROY;
+}
+
+int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ struct ieee80211_work *wk;
+
+ wk = kzalloc(sizeof(*wk), GFP_KERNEL);
+ if (!wk)
+ return -ENOMEM;
+
+ wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
+ wk->chan = chan;
+ wk->chan_type = channel_type;
+ wk->sdata = sdata;
+ wk->done = ieee80211_remain_done;
+
+ wk->remain.duration = duration;
+
+ *cookie = (unsigned long) wk;
+
+ ieee80211_add_work(wk);
+
+ return 0;
+}
+
+int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ u64 cookie)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk, *tmp;
+ bool found = false;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
+ if ((unsigned long) wk == cookie) {
+ wk->timeout = jiffies;
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&local->work_mtx);
+
+ if (!found)
+ return -ENOENT;
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+
+ return 0;
+}
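The two exported helpers directly above are the entry points cfg80211's remain-on-channel operations are expected to call; a minimal caller sketch (illustrative only -- the wrapper names are assumptions, not part of this patch):

/* Illustrative only: start a 200 ms off-channel period on 'chan' and
 * later cancel it via the returned cookie. */
static int example_start_roc(struct ieee80211_sub_if_data *sdata,
			     struct ieee80211_channel *chan, u64 *cookie)
{
	return ieee80211_wk_remain_on_channel(sdata, chan, NL80211_CHAN_NO_HT,
					      200, cookie);
}

static int example_cancel_roc(struct ieee80211_sub_if_data *sdata, u64 cookie)
{
	return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}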
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 5332014cb22..f4971cd45c6 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -31,8 +31,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
unsigned int hdrlen;
struct ieee80211_hdr *hdr;
struct sk_buff *skb = tx->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int authenticator;
- int wpa_test = 0;
int tail;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -47,16 +47,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
data = skb->data + hdrlen;
data_len = skb->len - hdrlen;
- if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
+ if (info->control.hw_key &&
!(tx->flags & IEEE80211_TX_FRAGMENTED) &&
- !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) &&
- !wpa_test) {
- /* hwaccel - with no need for preallocated room for MMIC */
+ !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
+ /* hwaccel - with no need for SW-generated MMIC */
return TX_CONTINUE;
}
tail = MICHAEL_MIC_LEN;
- if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
+ if (!info->control.hw_key)
tail += TKIP_ICV_LEN;
if (WARN_ON(skb_tailroom(skb) < tail ||
@@ -147,17 +146,16 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
int len, tail;
u8 *pos;
- if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
- !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
- /* hwaccel - with no need for preallocated room for IV/ICV */
- info->control.hw_key = &tx->key->conf;
+ if (info->control.hw_key &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ /* hwaccel - with no need for software-generated IV */
return 0;
}
hdrlen = ieee80211_hdrlen(hdr->frame_control);
len = skb->len - hdrlen;
- if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+ if (info->control.hw_key)
tail = 0;
else
tail = TKIP_ICV_LEN;
@@ -175,13 +173,11 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
if (key->u.tkip.tx.iv16 == 0)
key->u.tkip.tx.iv32++;
- if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- /* hwaccel - with preallocated room for IV */
- ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
+ pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16);
- info->control.hw_key = &tx->key->conf;
+ /* hwaccel - with software IV */
+ if (info->control.hw_key)
return 0;
- }
/* Add room for ICV */
skb_put(skb, TKIP_ICV_LEN);
@@ -363,24 +359,20 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
int hdrlen, len, tail;
u8 *pos, *pn;
int i;
- bool skip_hw;
-
- skip_hw = (tx->key->conf.flags & IEEE80211_KEY_FLAG_SW_MGMT) &&
- ieee80211_is_mgmt(hdr->frame_control);
- if ((tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) &&
- !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) &&
- !skip_hw) {
- /* hwaccel - with no need for preallocated room for CCMP
- * header or MIC fields */
- info->control.hw_key = &tx->key->conf;
+ if (info->control.hw_key &&
+ !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
+ /*
+ * hwaccel has no need for preallocated room for CCMP
+ * header or MIC fields
+ */
return 0;
}
hdrlen = ieee80211_hdrlen(hdr->frame_control);
len = skb->len - hdrlen;
- if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+ if (info->control.hw_key)
tail = 0;
else
tail = CCMP_MIC_LEN;
@@ -405,11 +397,9 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
ccmp_pn2hdr(pos, pn, key->conf.keyidx);
- if ((key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) && !skip_hw) {
- /* hwaccel - with preallocated room for CCMP header */
- info->control.hw_key = &tx->key->conf;
+ /* hwaccel - with software CCMP header */
+ if (info->control.hw_key)
return 0;
- }
pos += CCMP_HDR_LEN;
ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0);
@@ -525,11 +515,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
u8 *pn, aad[20];
int i;
- if (tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- /* hwaccel */
- info->control.hw_key = &tx->key->conf;
+ if (info->control.hw_key)
return 0;
- }
if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie)))
return TX_DROP;
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 634d14affc8..18d77b5c351 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -83,6 +83,19 @@ config NF_CONNTRACK_SECMARK
If unsure, say 'N'.
+config NF_CONNTRACK_ZONES
+ bool 'Connection tracking zones'
+ depends on NETFILTER_ADVANCED
+ depends on NETFILTER_XT_TARGET_CT
+ help
+ This option enables support for connection tracking zones.
+ Normally, each connection needs to have a unique system-wide
+ identity. Connection tracking zones make it possible to have
+ multiple connections that use the same identity, as long as
+ they are contained in different zones.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_EVENTS
bool "Connection tracking events"
depends on NETFILTER_ADVANCED
@@ -341,6 +354,18 @@ config NETFILTER_XT_TARGET_CONNSECMARK
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_CT
+ tristate '"CT" target support'
+ depends on NF_CONNTRACK
+ depends on IP_NF_RAW || IP6_NF_RAW
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `CT' target, which allows you to specify
+ initial connection tracking parameters such as the events to be
+ delivered and the helper to be used.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" and "TOS" target support'
depends on IP_NF_MANGLE || IP6_NF_MANGLE
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 49f62ee4e9f..f873644f02f 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -44,6 +44,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index f2d76238b9b..712ccad1334 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -68,6 +68,10 @@ config IP_VS_TAB_BITS
each hash entry uses 8 bytes, so you can estimate how much memory is
needed for your box.
+ You can override this value by setting the conn_tab_bits module
+ parameter, or by appending ip_vs.conn_tab_bits=? to the kernel
+ command line if IPVS was compiled built-in.
+
comment "IPVS transport protocol load balancing support"
config IP_VS_PROTO_TCP
@@ -100,6 +104,13 @@ config IP_VS_PROTO_AH
This option enables support for load balancing AH (Authentication
Header) transport protocol. Say Y if unsure.
+config IP_VS_PROTO_SCTP
+ bool "SCTP load balancing support"
+ select LIBCRC32C
+ ---help---
+ This option enables support for load balancing the SCTP transport
+ protocol. Say Y if unsure.
+
comment "IPVS scheduler"
config IP_VS_RR
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 73a46fe1fe4..e3baefd7066 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -7,6 +7,7 @@ ip_vs_proto-objs-y :=
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH_ESP) += ip_vs_proto_ah_esp.o
+ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_SCTP) += ip_vs_proto_sctp.o
ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 27c30cf933d..60bb41a8d8d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -40,6 +40,21 @@
#include <net/ip_vs.h>
+#ifndef CONFIG_IP_VS_TAB_BITS
+#define CONFIG_IP_VS_TAB_BITS 12
+#endif
+
+/*
+ * Connection hash size. Default is what was selected at compile time.
+ */
+int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
+MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
+
+/* size and mask values */
+int ip_vs_conn_tab_size;
+int ip_vs_conn_tab_mask;
+
/*
* Connection hash table: for input and output packets lookups of IPVS
*/
@@ -125,11 +140,11 @@ static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
if (af == AF_INET6)
return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
(__force u32)port, proto, ip_vs_conn_rnd)
- & IP_VS_CONN_TAB_MASK;
+ & ip_vs_conn_tab_mask;
#endif
return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
ip_vs_conn_rnd)
- & IP_VS_CONN_TAB_MASK;
+ & ip_vs_conn_tab_mask;
}
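The bucket index is simply the jhash value masked with the table size derived from conn_tab_bits; a standalone illustration (plain C, illustrative only):

#include <stdio.h>

int main(void)
{
	int bits = 12;				/* CONFIG_IP_VS_TAB_BITS default */
	int size = 1 << bits;			/* 4096 buckets */
	int mask = size - 1;			/* 0xfff */
	unsigned int hash = 0xdeadbeefu;	/* pretend jhash output */

	printf("size=%d mask=0x%x bucket=%u\n", size, mask, hash & mask);
	return 0;
}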
@@ -760,7 +775,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
int idx;
struct ip_vs_conn *cp;
- for(idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
+ for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
if (pos-- == 0) {
@@ -797,7 +812,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
idx = l - ip_vs_conn_tab;
ct_read_unlock_bh(idx);
- while (++idx < IP_VS_CONN_TAB_SIZE) {
+ while (++idx < ip_vs_conn_tab_size) {
ct_read_lock_bh(idx);
list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
seq->private = &ip_vs_conn_tab[idx];
@@ -976,8 +991,8 @@ void ip_vs_random_dropentry(void)
/*
* Randomly scan 1/32 of the whole table every second
*/
- for (idx = 0; idx < (IP_VS_CONN_TAB_SIZE>>5); idx++) {
- unsigned hash = net_random() & IP_VS_CONN_TAB_MASK;
+ for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
+ unsigned hash = net_random() & ip_vs_conn_tab_mask;
/*
* Lock is actually needed in this loop.
@@ -1029,7 +1044,7 @@ static void ip_vs_conn_flush(void)
struct ip_vs_conn *cp;
flush_again:
- for (idx=0; idx<IP_VS_CONN_TAB_SIZE; idx++) {
+ for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
/*
* Lock is actually needed in this loop.
*/
@@ -1060,10 +1075,15 @@ int __init ip_vs_conn_init(void)
{
int idx;
+ /* Compute size and mask */
+ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
+ ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
+
/*
* Allocate the connection hash table and initialize its list heads
*/
- ip_vs_conn_tab = vmalloc(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head));
+ ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
+ sizeof(struct list_head));
if (!ip_vs_conn_tab)
return -ENOMEM;
@@ -1078,12 +1098,12 @@ int __init ip_vs_conn_init(void)
pr_info("Connection hash table configured "
"(size=%d, memory=%ldKbytes)\n",
- IP_VS_CONN_TAB_SIZE,
- (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024);
+ ip_vs_conn_tab_size,
+ (long)(ip_vs_conn_tab_size*sizeof(struct list_head))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
- for (idx = 0; idx < IP_VS_CONN_TAB_SIZE; idx++) {
+ for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 847ffca4018..44590887a92 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/icmp.h>
#include <net/ip.h>
@@ -81,6 +82,8 @@ const char *ip_vs_proto_name(unsigned proto)
return "UDP";
case IPPROTO_TCP:
return "TCP";
+ case IPPROTO_SCTP:
+ return "SCTP";
case IPPROTO_ICMP:
return "ICMP";
#ifdef CONFIG_IP_VS_IPV6
@@ -512,8 +515,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
*/
#ifdef CONFIG_IP_VS_IPV6
if (svc->af == AF_INET6)
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0,
- skb->dev);
+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
else
#endif
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -589,8 +591,9 @@ void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
ip_send_check(ciph);
}
- /* the TCP/UDP port */
- if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol) {
+ /* the TCP/UDP/SCTP port */
+ if (IPPROTO_TCP == ciph->protocol || IPPROTO_UDP == ciph->protocol ||
+ IPPROTO_SCTP == ciph->protocol) {
__be16 *ports = (void *)ciph + ciph->ihl*4;
if (inout)
@@ -630,8 +633,9 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
ciph->saddr = cp->daddr.in6;
}
- /* the TCP/UDP port */
- if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr) {
+ /* the TCP/UDP/SCTP port */
+ if (IPPROTO_TCP == ciph->nexthdr || IPPROTO_UDP == ciph->nexthdr ||
+ IPPROTO_SCTP == ciph->nexthdr) {
__be16 *ports = (void *)ciph + sizeof(struct ipv6hdr);
if (inout)
@@ -679,7 +683,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
goto out;
}
- if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol)
+ if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
+ IPPROTO_SCTP == protocol)
offset += 2 * sizeof(__u16);
if (!skb_make_writable(skb, offset))
goto out;
@@ -857,6 +862,21 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related)
}
#endif
+/*
+ * Check if the SCTP chunk is an ABORT chunk
+ */
+static inline int is_sctp_abort(const struct sk_buff *skb, int nh_len)
+{
+ sctp_chunkhdr_t *sch, schunk;
+ sch = skb_header_pointer(skb, nh_len + sizeof(sctp_sctphdr_t),
+ sizeof(schunk), &schunk);
+ if (sch == NULL)
+ return 0;
+ if (sch->type == SCTP_CID_ABORT)
+ return 1;
+ return 0;
+}
+
static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
{
struct tcphdr _tcph, *th;
@@ -999,7 +1019,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
if (unlikely(!cp)) {
if (sysctl_ip_vs_nat_icmp_send &&
(pp->protocol == IPPROTO_TCP ||
- pp->protocol == IPPROTO_UDP)) {
+ pp->protocol == IPPROTO_UDP ||
+ pp->protocol == IPPROTO_SCTP)) {
__be16 _ports[2], *pptr;
pptr = skb_header_pointer(skb, iph.len,
@@ -1014,14 +1035,19 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb,
* existing entry if it is not RST
* packet or not TCP packet.
*/
- if (iph.protocol != IPPROTO_TCP
- || !is_tcp_reset(skb, iph.len)) {
+ if ((iph.protocol != IPPROTO_TCP &&
+ iph.protocol != IPPROTO_SCTP)
+ || ((iph.protocol == IPPROTO_TCP
+ && !is_tcp_reset(skb, iph.len))
+ || (iph.protocol == IPPROTO_SCTP
+ && !is_sctp_abort(skb,
+ iph.len)))) {
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
icmpv6_send(skb,
ICMPV6_DEST_UNREACH,
ICMPV6_PORT_UNREACH,
- 0, skb->dev);
+ 0);
else
#endif
icmp_send(skb,
@@ -1235,7 +1261,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
/* do the statistics and put it back */
ip_vs_in_stats(cp, skb);
- if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr)
+ if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
+ IPPROTO_SCTP == cih->nexthdr)
offset += 2 * sizeof(__u16);
verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
/* do not touch skb anymore */
@@ -1358,6 +1385,21 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
 * encourage the standby servers to update the connections timeout
*/
pkts = atomic_add_return(1, &cp->in_pkts);
+ if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+ cp->protocol == IPPROTO_SCTP) {
+ if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
+ (atomic_read(&cp->in_pkts) %
+ sysctl_ip_vs_sync_threshold[1]
+ == sysctl_ip_vs_sync_threshold[0])) ||
+ (cp->old_state != cp->state &&
+ ((cp->state == IP_VS_SCTP_S_CLOSED) ||
+ (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
+ (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
+ ip_vs_sync_conn(cp);
+ goto out;
+ }
+ }
+
if (af == AF_INET &&
(ip_vs_sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
@@ -1370,6 +1412,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
(cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
(cp->state == IP_VS_TCP_S_TIME_WAIT)))))
ip_vs_sync_conn(cp);
+out:
cp->old_state = cp->state;
ip_vs_conn_put(cp);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c37ac2d7bec..7ee9c3426f4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1843,7 +1843,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"IP Virtual Server version %d.%d.%d (size=%d)\n",
- NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
+ NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
seq_puts(seq,
"Prot LocalAddress:Port Scheduler Flags\n");
seq_puts(seq,
@@ -2132,8 +2132,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
}
}
- /* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
- if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
+ /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
+ if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
+ usvc.protocol != IPPROTO_SCTP) {
pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
usvc.protocol, &usvc.addr.ip,
ntohs(usvc.port), usvc.sched_name);
@@ -2386,7 +2387,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
char buf[64];
sprintf(buf, "IP Virtual Server version %d.%d.%d (size=%d)",
- NVERSION(IP_VS_VERSION_CODE), IP_VS_CONN_TAB_SIZE);
+ NVERSION(IP_VS_VERSION_CODE), ip_vs_conn_tab_size);
if (copy_to_user(user, buf, strlen(buf)+1) != 0) {
ret = -EFAULT;
goto out;
@@ -2399,7 +2400,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
struct ip_vs_getinfo info;
info.version = IP_VS_VERSION_CODE;
- info.size = IP_VS_CONN_TAB_SIZE;
+ info.size = ip_vs_conn_tab_size;
info.num_services = ip_vs_num_services;
if (copy_to_user(user, &info, sizeof(info)) != 0)
ret = -EFAULT;
@@ -3243,7 +3244,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
case IPVS_CMD_GET_INFO:
NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE);
NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
- IP_VS_CONN_TAB_SIZE);
+ ip_vs_conn_tab_size);
break;
}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 33e2c799cba..73f38ea98f2 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -208,7 +208,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
*/
from.ip = n_cp->vaddr.ip;
port = n_cp->vport;
- sprintf(buf, "%d,%d,%d,%d,%d,%d", NIPQUAD(from.ip),
+ sprintf(buf, "%u,%u,%u,%u,%u,%u", NIPQUAD(from.ip),
(ntohs(port)>>8)&255, ntohs(port)&255);
buf_len = strlen(buf);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index f7476b95ab4..caa58fa1438 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -45,6 +45,7 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
+#include <linux/list.h>
/* for sysctl */
#include <linux/fs.h>
@@ -85,25 +86,25 @@ static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
/*
* IPVS destination set structure and operations
*/
-struct ip_vs_dest_list {
- struct ip_vs_dest_list *next; /* list link */
+struct ip_vs_dest_set_elem {
+ struct list_head list; /* list link */
struct ip_vs_dest *dest; /* destination server */
};
struct ip_vs_dest_set {
atomic_t size; /* set size */
unsigned long lastmod; /* last modified time */
- struct ip_vs_dest_list *list; /* destination list */
+ struct list_head list; /* destination list */
rwlock_t lock; /* lock for this list */
};
-static struct ip_vs_dest_list *
+static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
- struct ip_vs_dest_list *e;
+ struct ip_vs_dest_set_elem *e;
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
if (e->dest == dest)
/* already existed */
return NULL;
@@ -118,9 +119,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
atomic_inc(&dest->refcnt);
e->dest = dest;
- /* link it to the list */
- e->next = set->list;
- set->list = e;
+ list_add(&e->list, &set->list);
atomic_inc(&set->size);
set->lastmod = jiffies;
@@ -130,34 +129,33 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
- struct ip_vs_dest_list *e, **ep;
+ struct ip_vs_dest_set_elem *e;
- for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
+ list_for_each_entry(e, &set->list, list) {
if (e->dest == dest) {
/* HIT */
- *ep = e->next;
atomic_dec(&set->size);
set->lastmod = jiffies;
atomic_dec(&e->dest->refcnt);
+ list_del(&e->list);
kfree(e);
break;
}
- ep = &e->next;
}
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
- struct ip_vs_dest_list *e, **ep;
+ struct ip_vs_dest_set_elem *e, *ep;
write_lock(&set->lock);
- for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
- *ep = e->next;
+ list_for_each_entry_safe(e, ep, &set->list, list) {
/*
 * We don't kfree dest because it is referred to either
* by its service or by the trash dest list.
*/
atomic_dec(&e->dest->refcnt);
+ list_del(&e->list);
kfree(e);
}
write_unlock(&set->lock);
@@ -166,7 +164,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
- register struct ip_vs_dest_list *e;
+ register struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *least;
int loh, doh;
@@ -174,7 +172,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
return NULL;
/* select the first destination server, whose weight > 0 */
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
least = e->dest;
if (least->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -190,7 +188,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
/* find the destination with the weighted least load */
nextstage:
- for (e=e->next; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
dest = e->dest;
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -220,7 +218,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
- register struct ip_vs_dest_list *e;
+ register struct ip_vs_dest_set_elem *e;
struct ip_vs_dest *dest, *most;
int moh, doh;
@@ -228,7 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
return NULL;
/* select the first destination server, whose weight > 0 */
- for (e=set->list; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
most = e->dest;
if (atomic_read(&most->weight) > 0) {
moh = atomic_read(&most->activeconns) * 50
@@ -240,7 +238,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
/* find the destination with the weighted most load */
nextstage:
- for (e=e->next; e!=NULL; e=e->next) {
+ list_for_each_entry(e, &set->list, list) {
dest = e->dest;
doh = atomic_read(&dest->activeconns) * 50
+ atomic_read(&dest->inactconns);
@@ -389,7 +387,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
 /* initialize its dest set */
atomic_set(&(en->set.size), 0);
- en->set.list = NULL;
+ INIT_LIST_HEAD(&en->set.list);
rwlock_init(&en->set.lock);
ip_vs_lblcr_hash(tbl, en);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 3e767167454..0e584553819 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -257,6 +257,9 @@ int __init ip_vs_protocol_init(void)
#ifdef CONFIG_IP_VS_PROTO_UDP
REGISTER_PROTOCOL(&ip_vs_protocol_udp);
#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+ REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
+#endif
#ifdef CONFIG_IP_VS_PROTO_AH
REGISTER_PROTOCOL(&ip_vs_protocol_ah);
#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
new file mode 100644
index 00000000000..c9a3f7a21d5
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -0,0 +1,1183 @@
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/sctp.h>
+#include <net/ip.h>
+#include <net/ip6_checksum.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/sctp/checksum.h>
+#include <net/ip_vs.h>
+
+
+static struct ip_vs_conn *
+sctp_conn_in_get(int af,
+ const struct sk_buff *skb,
+ struct ip_vs_protocol *pp,
+ const struct ip_vs_iphdr *iph,
+ unsigned int proto_off,
+ int inverse)
+{
+ __be16 _ports[2], *pptr;
+
+ pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+ if (pptr == NULL)
+ return NULL;
+
+ if (likely(!inverse))
+ return ip_vs_conn_in_get(af, iph->protocol,
+ &iph->saddr, pptr[0],
+ &iph->daddr, pptr[1]);
+ else
+ return ip_vs_conn_in_get(af, iph->protocol,
+ &iph->daddr, pptr[1],
+ &iph->saddr, pptr[0]);
+}
+
+static struct ip_vs_conn *
+sctp_conn_out_get(int af,
+ const struct sk_buff *skb,
+ struct ip_vs_protocol *pp,
+ const struct ip_vs_iphdr *iph,
+ unsigned int proto_off,
+ int inverse)
+{
+ __be16 _ports[2], *pptr;
+
+ pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
+ if (pptr == NULL)
+ return NULL;
+
+ if (likely(!inverse))
+ return ip_vs_conn_out_get(af, iph->protocol,
+ &iph->saddr, pptr[0],
+ &iph->daddr, pptr[1]);
+ else
+ return ip_vs_conn_out_get(af, iph->protocol,
+ &iph->daddr, pptr[1],
+ &iph->saddr, pptr[0]);
+}
+
+static int
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ int *verdict, struct ip_vs_conn **cpp)
+{
+ struct ip_vs_service *svc;
+ sctp_chunkhdr_t _schunkh, *sch;
+ sctp_sctphdr_t *sh, _sctph;
+ struct ip_vs_iphdr iph;
+
+ ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+
+ sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ return 0;
+
+ sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t),
+ sizeof(_schunkh), &_schunkh);
+ if (sch == NULL)
+ return 0;
+
+ if ((sch->type == SCTP_CID_INIT) &&
+ (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+ &iph.daddr, sh->dest))) {
+ if (ip_vs_todrop()) {
+ /*
+ * It seems that we are very loaded.
+ * We have to drop this packet :(
+ */
+ ip_vs_service_put(svc);
+ *verdict = NF_DROP;
+ return 0;
+ }
+ /*
+ * Let the virtual server select a real server for the
+ * incoming connection, and create a connection entry.
+ */
+ *cpp = ip_vs_schedule(svc, skb);
+ if (!*cpp) {
+ *verdict = ip_vs_leave(svc, skb, pp);
+ return 0;
+ }
+ ip_vs_service_put(svc);
+ }
+
+ return 1;
+}
+
+static int
+sctp_snat_handler(struct sk_buff *skb,
+ struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+{
+ sctp_sctphdr_t *sctph;
+ unsigned int sctphoff;
+ __be32 crc32;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ sctphoff = sizeof(struct ipv6hdr);
+ else
+#endif
+ sctphoff = ip_hdrlen(skb);
+
+ /* csum_check requires unshared skb */
+ if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
+ return 0;
+
+ if (unlikely(cp->app != NULL)) {
+ /* Some checks before mangling */
+ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ return 0;
+
+ /* Call application helper if needed */
+ if (!ip_vs_app_pkt_out(cp, skb))
+ return 0;
+ }
+
+ sctph = (void *) skb_network_header(skb) + sctphoff;
+ sctph->source = cp->vport;
+
+ /* Calculate the checksum */
+ crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
+ crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
+ crc32);
+ crc32 = sctp_end_cksum(crc32);
+ sctph->checksum = crc32;
+
+ return 1;
+}
+
+static int
+sctp_dnat_handler(struct sk_buff *skb,
+ struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
+{
+
+ sctp_sctphdr_t *sctph;
+ unsigned int sctphoff;
+ __be32 crc32;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->af == AF_INET6)
+ sctphoff = sizeof(struct ipv6hdr);
+ else
+#endif
+ sctphoff = ip_hdrlen(skb);
+
+ /* csum_check requires unshared skb */
+ if (!skb_make_writable(skb, sctphoff + sizeof(*sctph)))
+ return 0;
+
+ if (unlikely(cp->app != NULL)) {
+ /* Some checks before mangling */
+ if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
+ return 0;
+
+ /* Call application helper if needed */
+ if (!ip_vs_app_pkt_out(cp, skb))
+ return 0;
+ }
+
+ sctph = (void *) skb_network_header(skb) + sctphoff;
+ sctph->dest = cp->dport;
+
+ /* Calculate the checksum */
+ crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
+ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
+ crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
+ crc32);
+ crc32 = sctp_end_cksum(crc32);
+ sctph->checksum = crc32;
+
+ return 1;
+}
+
+static int
+sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
+{
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+ unsigned int sctphoff;
+ struct sctphdr *sh, _sctph;
+ __le32 cmp;
+ __le32 val;
+ __u32 tmp;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6)
+ sctphoff = sizeof(struct ipv6hdr);
+ else
+#endif
+ sctphoff = ip_hdrlen(skb);
+
+ sh = skb_header_pointer(skb, sctphoff, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ return 0;
+
+ cmp = sh->checksum;
+
+ tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
+ for (; list; list = list->next)
+ tmp = sctp_update_cksum((__u8 *) list->data,
+ skb_headlen(list), tmp);
+
+ val = sctp_end_cksum(tmp);
+
+ if (val != cmp) {
+ /* CRC failure, dump it. */
+ IP_VS_DBG_RL_PKT(0, pp, skb, 0,
+ "Failed checksum for");
+ return 0;
+ }
+ return 1;
+}
+
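SCTP protects the whole packet with CRC-32c, which is what the sctp_start/update/end_cksum() calls above compute over the linear head and any frag_list segments. A standalone bit-by-bit reference (illustrative only) that reproduces the well-known CRC-32c check value 0xE3069283 for the ASCII string "123456789":

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* reflected CRC-32c: init 0xFFFFFFFF, polynomial 0x82F63B78, final XOR */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1u)));
	}
	return ~crc;
}

int main(void)
{
	const char *msg = "123456789";

	printf("crc32c = 0x%08X\n", crc32c((const uint8_t *)msg, strlen(msg)));
	return 0;
}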
+struct ipvs_sctp_nextstate {
+ int next_state;
+};
+enum ipvs_sctp_event_t {
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_DATA_SER,
+ IP_VS_SCTP_EVE_INIT_CLI,
+ IP_VS_SCTP_EVE_INIT_SER,
+ IP_VS_SCTP_EVE_INIT_ACK_CLI,
+ IP_VS_SCTP_EVE_INIT_ACK_SER,
+ IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
+ IP_VS_SCTP_EVE_COOKIE_ECHO_SER,
+ IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
+ IP_VS_SCTP_EVE_COOKIE_ACK_SER,
+ IP_VS_SCTP_EVE_ABORT_CLI,
+ IP_VS_SCTP_EVE__ABORT_SER,
+ IP_VS_SCTP_EVE_SHUT_CLI,
+ IP_VS_SCTP_EVE_SHUT_SER,
+ IP_VS_SCTP_EVE_SHUT_ACK_CLI,
+ IP_VS_SCTP_EVE_SHUT_ACK_SER,
+ IP_VS_SCTP_EVE_SHUT_COM_CLI,
+ IP_VS_SCTP_EVE_SHUT_COM_SER,
+ IP_VS_SCTP_EVE_LAST
+};
+
+static enum ipvs_sctp_event_t sctp_events[255] = {
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_INIT_CLI,
+ IP_VS_SCTP_EVE_INIT_ACK_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_ABORT_CLI,
+ IP_VS_SCTP_EVE_SHUT_CLI,
+ IP_VS_SCTP_EVE_SHUT_ACK_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_COOKIE_ECHO_CLI,
+ IP_VS_SCTP_EVE_COOKIE_ACK_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_DATA_CLI,
+ IP_VS_SCTP_EVE_SHUT_COM_CLI,
+};
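The sctp_events[] array above is indexed by the SCTP chunk type (DATA = 0, INIT = 1, ABORT = 6, and so on), and the per-state tables that follow map a (state, event) pair to the next connection state. A standalone sketch of that table-driven pattern (the names here are placeholders, not the kernel's):

#include <stdio.h>

enum state { S_NONE, S_INIT_CLI, S_NR_STATES };
enum event { EVE_DATA_CLI, EVE_INIT_CLI, EVE_NR_EVENTS };

/* next_state[current state][event] */
static const enum state next_state[S_NR_STATES][EVE_NR_EVENTS] = {
	[S_NONE]     = { [EVE_DATA_CLI] = S_NONE, [EVE_INIT_CLI] = S_INIT_CLI },
	[S_INIT_CLI] = { [EVE_DATA_CLI] = S_NONE, [EVE_INIT_CLI] = S_INIT_CLI },
};

int main(void)
{
	enum state s = S_NONE;

	s = next_state[s][EVE_INIT_CLI];	/* INIT from the client */
	printf("state after client INIT: %s\n",
	       s == S_INIT_CLI ? "S_INIT_CLI" : "S_NONE");
	return 0;
}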
+
+static struct ipvs_sctp_nextstate
+ sctp_states_table[IP_VS_SCTP_S_LAST][IP_VS_SCTP_EVE_LAST] = {
+ /*
+ * STATE : IP_VS_SCTP_S_NONE
+ */
+ /*next state *//*event */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ },
+ },
+ /*
+ * STATE : IP_VS_SCTP_S_INIT_CLI
+ * Client sent INIT and is waiting for a reply from the server (in ECHO_WAIT)
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_ECHO_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_INIT_SER
+ * Server sent INIT and is waiting for an INIT ACK from the client
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_INIT_ACK_CLI
+ * Client sent INIT ACK and waiting for ECHO from the server
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * INIT_ACK has been resent by the client, let us stay in
+ * the same state
+ */
+ {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ /*
+ * INIT_ACK sent by the server, close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * ECHO by client, it should not happen, close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ /*
+ * ECHO by server, this is what we are expecting, move to ECHO_SER
+ */
+ {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from client, it should not happen, close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ /*
+ * Unexpected COOKIE ACK from the server, stay in the same state
+ */
+ {IP_VS_SCTP_S_INIT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_INIT_ACK_SER
+ * Server sent INIT ACK and waiting for ECHO from the client
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * Unexpected INIT_ACK by the client, let us close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ /*
+ * INIT_ACK resent by the server, let us stay in the same state
+ */
+ {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent the ECHO, this is what we are expecting,
+ * move to ECHO_CLI
+ */
+ {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ /*
+ * ECHO received from the server, Not sure what to do,
+ * let us close it
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from client, let us stay in the same state
+ */
+ {IP_VS_SCTP_S_INIT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ /*
+ * COOKIE ACK from the server, hmm... this should not happen, let's close
+ * the connection.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_ECHO_CLI
+ * Client sent ECHO and is waiting for COOKIE ACK from the server
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * INIT_ACK has been sent by the client, let us close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ /*
+ * Unexpected INIT_ACK sent by the server; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client resent the ECHO, let us stay in the same state
+ */
+ {IP_VS_SCTP_S_ECHO_CLI /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ /*
+ * ECHO received from the server, Not sure what to do,
+ * let us close it
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ /*
+ * COOKIE ACK from the server, this is what we are awaiting; let's move to
+ * ESTABLISHED.
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_ECHO_SER
+ * Server sent ECHO and is waiting for COOKIE ACK from the client
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * Unexpected INIT_ACK sent by the client; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ /*
+ * INIT_ACK has been sent by the server, let us close the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent the ECHO, not sure what to do, let's close the
+ * connection.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ /*
+ * ECHO resent by the server, stay in the same state
+ */
+ {IP_VS_SCTP_S_ECHO_SER /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from client, this is what we are expecting, let's move
+ * to ESTABLISHED.
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ /*
+ * COOKIE ACK from the server, this should not happen, let's close the
+ * connection.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_ESTABLISHED
+ * Association established
+ */
+ {{IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * An unexpected INIT ACK was received; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent ECHO. The spec (sec 5.2.4) says it may be handled by the
+ * peer, and the peer shall move to ESTABLISHED. If it doesn't handle
+ * it, it will send an ERROR chunk. So, stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, not sure what to do; stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ /*
+ * SHUTDOWN from the client, move to SHUTDOWN_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ /*
+ * SHUTDOWN from the server, move to SHUTDOWN_SER
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ /*
+ * client sent SHUTDOWN_ACK, this should not happen, let's close
+ * the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_SHUT_CLI
+ * SHUTDOWN sent from the client, waiting for SHUTDOWN ACK from the server
+ */
+ /*
+ * We received a data chunk, keep the state unchanged. I assume
+ * that data chunks can still be received by both peers in the
+ * SHUTDOWN state
+ */
+
+ {{IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * An unexpected INIT ACK was received; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent ECHO. The spec (sec 5.2.4) says it may be handled by the
+ * peer, and the peer shall move to ESTABLISHED. If it doesn't handle
+ * it, it will send an ERROR chunk. So, stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, not sure what to do; stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ /*
+ * SHUTDOWN resent from the client, move to SHUTDOWN_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ /*
+ * SHUTDOWN from the server, move to SHUTDOWN_SER
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ /*
+ * client sent SHUTDOWN_ACK, this should not happen, let's close
+ * the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ /*
+ * Server sent SHUTDOWN ACK, this is what we are expecting, let's move
+ * to SHUTDOWN_ACK_SER
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ /*
+ * SHUTDOWN COMPLETE from the client, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_SHUT_SER
+ * SHUTDOWN sent from the server, waiting for SHUTDOWN ACK from the client
+ */
+ /*
+ * We received a data chunk, keep the state unchanged. I assume
+ * that data chunks can still be received by both peers in the
+ * SHUTDOWN state
+ */
+
+ {{IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * An unexpected INIT ACK was received; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent ECHO. The spec (sec 5.2.4) says it may be handled by the
+ * peer, and the peer shall move to ESTABLISHED. If it doesn't handle
+ * it, it will send an ERROR chunk. So, stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, not sure what to do; stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ /*
+ * SHUTDOWN resent from the client, move to SHUTDOWN_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ /*
+ * SHUTDOWN resent from the server, move to SHUTDOWN_SER
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ /*
+ * client sent SHUTDOWN_ACK, this is what we are expecting, let's
+ * move to SHUT_ACK_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ /*
+ * Server sent SHUTDOWN ACK, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ /*
+ * SHUTDOWN COMPLETE from the client, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+
+ /*
+ * State : IP_VS_SCTP_S_SHUT_ACK_CLI
+ * SHUTDOWN ACK from the client, awaiting SHUTDOWN COMPLETE from the server
+ */
+ /*
+ * We received a data chunk, keep the state unchanged. I assume
+ * that data chunks can still be received by both peers in the
+ * SHUTDOWN state
+ */
+
+ {{IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * An unexpected INIT ACK was received; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent ECHO. The spec (sec 5.2.4) says it may be handled by the
+ * peer, and the peer shall move to ESTABLISHED. If it doesn't handle
+ * it, it will send an ERROR chunk. So, stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, not sure what to do; stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ /*
+ * SHUTDOWN sent from the client, move to SHUTDOWN_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ /*
+ * SHUTDOWN sent from the server, move to SHUTDOWN_SER
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ /*
+ * client resent SHUTDOWN_ACK, let's stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_CLI /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ /*
+ * Server sent SHUTDOWN ACK, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ /*
+ * SHUTDOWN COMPLETE from the client, this should not happen, let's close the
+ * connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ /*
+ * SHUTDOWN COMPLETE from the server, this is what we are expecting.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+
+ /*
+ * State : IP_VS_SCTP_S_SHUT_ACK_SER
+ * SHUTDOWN ACK from the server, awaiting SHUTDOWN COMPLETE from the client
+ */
+ /*
+ * We received a data chunk, keep the state unchanged. I assume
+ * that data chunks can still be received by both peers in the
+ * SHUTDOWN state
+ */
+
+ {{IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_DATA_SER */ },
+ /*
+ * We have got an INIT from the client. From the spec: “Upon receipt of
+ * an INIT in the COOKIE-WAIT state, an endpoint MUST respond with
+ * an INIT ACK using the same parameters it sent in its original
+ * INIT chunk (including its Initiate Tag, unchanged)”.
+ */
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ /*
+ * An unexpected INIT ACK was received; the spec says,
+ * “If an INIT ACK is received by an endpoint in any state other
+ * than the COOKIE-WAIT state, the endpoint should discard the
+ * INIT ACK chunk”. Stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ /*
+ * Client sent ECHO. The spec (sec 5.2.4) says it may be handled by the
+ * peer, and the peer shall move to ESTABLISHED. If it doesn't handle
+ * it, it will send an ERROR chunk. So, stay in the same state
+ */
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_ESTABLISHED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ /*
+ * COOKIE ACK from the client, not sure what to do; stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ /*
+ * SHUTDOWN sent from the client, move to SHUTDOWN_CLI
+ */
+ {IP_VS_SCTP_S_SHUT_CLI /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ /*
+ * SHUTDOWN sent from the server, move to SHUTDOWN_SER
+ */
+ {IP_VS_SCTP_S_SHUT_SER /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ /*
+ * client sent SHUTDOWN_ACK, this should not happen; let's close
+ * the connection.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ /*
+ * Server resent SHUTDOWN ACK, stay in the same state
+ */
+ {IP_VS_SCTP_S_SHUT_ACK_SER /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ /*
+ * SHUTDOWN COMPLETE from the client, this is what we are expecting, let's close
+ * the connection
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ /*
+ * SHUTDOWN COMPLETE from the server, this should not happen.
+ */
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ },
+ /*
+ * State : IP_VS_SCTP_S_CLOSED
+ */
+ {{IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_DATA_SER */ },
+ {IP_VS_SCTP_S_INIT_CLI /* IP_VS_SCTP_EVE_INIT_CLI */ },
+ {IP_VS_SCTP_S_INIT_SER /* IP_VS_SCTP_EVE_INIT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_INIT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ECHO_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_COOKIE_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_ABORT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_ACK_SER */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_CLI */ },
+ {IP_VS_SCTP_S_CLOSED /* IP_VS_SCTP_EVE_SHUT_COM_SER */ }
+ }
+};
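
For reference, the table above is indexed as [current state][event], and set_sctp_state() further below reads the .next_state member of the selected entry. A minimal sketch of the lookup, assuming only the enums and struct already declared in this file (illustration, not part of the patch):

	static inline int sctp_next_state(int cur_state, int event)
	{
		/* bounds are guaranteed by the callers in this file */
		return sctp_states_table[cur_state][event].next_state;
	}
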
+
+/*
+ * Timeout table[state]
+ */
+static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+ [IP_VS_SCTP_S_NONE] = 2 * HZ,
+ [IP_VS_SCTP_S_INIT_CLI] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_INIT_SER] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_INIT_ACK_CLI] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_INIT_ACK_SER] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_ECHO_CLI] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_ECHO_SER] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_ESTABLISHED] = 15 * 60 * HZ,
+ [IP_VS_SCTP_S_SHUT_CLI] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_SHUT_SER] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_SHUT_ACK_CLI] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_SHUT_ACK_SER] = 1 * 60 * HZ,
+ [IP_VS_SCTP_S_CLOSED] = 10 * HZ,
+ [IP_VS_SCTP_S_LAST] = 2 * HZ,
+};
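
The entries are stored in jiffies, so each value is a multiple of HZ; the ESTABLISHED entry, 15 * 60 * HZ, is 15 minutes. A small sketch of reading a timeout back in seconds (illustration only):

	static inline unsigned int sctp_timeout_secs(int state)
	{
		return sctp_timeouts[state] / HZ;	/* ESTABLISHED -> 900 */
	}
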
+
+static const char *sctp_state_name_table[IP_VS_SCTP_S_LAST + 1] = {
+ [IP_VS_SCTP_S_NONE] = "NONE",
+ [IP_VS_SCTP_S_INIT_CLI] = "INIT_CLI",
+ [IP_VS_SCTP_S_INIT_SER] = "INIT_SER",
+ [IP_VS_SCTP_S_INIT_ACK_CLI] = "INIT_ACK_CLI",
+ [IP_VS_SCTP_S_INIT_ACK_SER] = "INIT_ACK_SER",
+ [IP_VS_SCTP_S_ECHO_CLI] = "COOKIE_ECHO_CLI",
+ [IP_VS_SCTP_S_ECHO_SER] = "COOKIE_ECHO_SER",
+ [IP_VS_SCTP_S_ESTABLISHED] = "ESTABISHED",
+ [IP_VS_SCTP_S_SHUT_CLI] = "SHUTDOWN_CLI",
+ [IP_VS_SCTP_S_SHUT_SER] = "SHUTDOWN_SER",
+ [IP_VS_SCTP_S_SHUT_ACK_CLI] = "SHUTDOWN_ACK_CLI",
+ [IP_VS_SCTP_S_SHUT_ACK_SER] = "SHUTDOWN_ACK_SER",
+ [IP_VS_SCTP_S_CLOSED] = "CLOSED",
+ [IP_VS_SCTP_S_LAST] = "BUG!"
+};
+
+
+static const char *sctp_state_name(int state)
+{
+ if (state >= IP_VS_SCTP_S_LAST)
+ return "ERR!";
+ if (sctp_state_name_table[state])
+ return sctp_state_name_table[state];
+ return "?";
+}
+
+static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
+{
+}
+
+static int
+sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
+{
+
+return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
+ sctp_state_name_table, sname, to);
+}
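
ip_vs_set_state_timeout() is the existing helper in ip_vs_proto.c; it matches sname against sctp_state_name_table and stores the value, converted from seconds to jiffies. A hypothetical call, assuming that behaviour:

	/* e.g. raise the CLOSED timeout to 30 seconds */
	err = sctp_set_state_timeout(&ip_vs_protocol_sctp, "CLOSED", 30);
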
+
+static inline int
+set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+ int direction, const struct sk_buff *skb)
+{
+ sctp_chunkhdr_t _sctpch, *sch;
+ unsigned char chunk_type;
+ int event, next_state;
+ int ihl;
+
+#ifdef CONFIG_IP_VS_IPV6
+ ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
+#else
+ ihl = ip_hdrlen(skb);
+#endif
+
+ sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
+ sizeof(_sctpch), &_sctpch);
+ if (sch == NULL)
+ return 0;
+
+ chunk_type = sch->type;
+ /*
+ * Section 3: Multiple chunks can be bundled into one SCTP packet
+ * up to the MTU size, except for the INIT, INIT ACK, and
+ * SHUTDOWN COMPLETE chunks. These chunks MUST NOT be bundled with
+ * any other chunk in a packet.
+ *
+ * Section 3.3.7: DATA chunks MUST NOT be bundled with ABORT. Control
+ * chunks (except for INIT, INIT ACK, and SHUTDOWN COMPLETE) MAY be
+ * bundled with an ABORT, but they MUST be placed before the ABORT
+ * in the SCTP packet or they will be ignored by the receiver.
+ */
+ if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
+ (sch->type == SCTP_CID_COOKIE_ACK)) {
+ sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
+ sch->length), sizeof(_sctpch), &_sctpch);
+ if (sch) {
+ if (sch->type == SCTP_CID_ABORT)
+ chunk_type = sch->type;
+ }
+ }
+
+ event = sctp_events[chunk_type];
+
+ /*
+ * If the direction is IP_VS_DIR_OUTPUT, this event is from the server
+ */
+ if (direction == IP_VS_DIR_OUTPUT)
+ event++;
+ /*
+ * get next state
+ */
+ next_state = sctp_states_table[cp->state][event].next_state;
+
+ if (next_state != cp->state) {
+ struct ip_vs_dest *dest = cp->dest;
+
+ IP_VS_DBG_BUF(8, "%s %s %s:%d->"
+ "%s:%d state: %s->%s conn->refcnt:%d\n",
+ pp->name,
+ ((direction == IP_VS_DIR_OUTPUT) ?
+ "output " : "input "),
+ IP_VS_DBG_ADDR(cp->af, &cp->daddr),
+ ntohs(cp->dport),
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ sctp_state_name(cp->state),
+ sctp_state_name(next_state),
+ atomic_read(&cp->refcnt));
+ if (dest) {
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags |= IP_VS_CONN_F_INACTIVE;
+ } else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (next_state == IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_inc(&dest->activeconns);
+ atomic_dec(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
+ }
+ }
+
+ cp->timeout = pp->timeout_table[cp->state = next_state];
+
+ return 1;
+}
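
The "event++" above relies on the event enum placing each _CLI value directly before its _SER counterpart. A worked example (not part of the patch), using the SCTP_CID_* chunk codes already referenced in this function:

	/* COOKIE ECHO answered by the server while we are in INIT_ACK_CLI */
	int event = sctp_events[SCTP_CID_COOKIE_ECHO];	/* ..._COOKIE_ECHO_CLI */
	if (direction == IP_VS_DIR_OUTPUT)
		event++;				/* ..._COOKIE_ECHO_SER */
	next_state = sctp_states_table[IP_VS_SCTP_S_INIT_ACK_CLI][event].next_state;
	/* next_state == IP_VS_SCTP_S_ECHO_SER, per the table above */
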
+
+static int
+sctp_state_transition(struct ip_vs_conn *cp, int direction,
+ const struct sk_buff *skb, struct ip_vs_protocol *pp)
+{
+ int ret = 0;
+
+ spin_lock(&cp->lock);
+ ret = set_sctp_state(pp, cp, direction, skb);
+ spin_unlock(&cp->lock);
+
+ return ret;
+}
+
+/*
+ * Hash table for SCTP application incarnations
+ */
+#define SCTP_APP_TAB_BITS 4
+#define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS)
+#define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
+
+static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
+static DEFINE_SPINLOCK(sctp_app_lock);
+
+static inline __u16 sctp_app_hashkey(__be16 port)
+{
+ return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
+ & SCTP_APP_TAB_MASK;
+}
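
The key folds the upper bits of the port into the low SCTP_APP_TAB_BITS bits; the port is used in network byte order, as in the TCP/UDP equivalents. Worked example with an illustrative value:

	/* a port whose 16-bit on-wire value is 0x0807 */
	__u16 key = ((0x0807 >> 4) ^ 0x0807) & SCTP_APP_TAB_MASK;	/* == 7 */
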
+
+static int sctp_register_app(struct ip_vs_app *inc)
+{
+ struct ip_vs_app *i;
+ __u16 hash;
+ __be16 port = inc->port;
+ int ret = 0;
+
+ hash = sctp_app_hashkey(port);
+
+ spin_lock_bh(&sctp_app_lock);
+ list_for_each_entry(i, &sctp_apps[hash], p_list) {
+ if (i->port == port) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+ list_add(&inc->p_list, &sctp_apps[hash]);
+ atomic_inc(&ip_vs_protocol_sctp.appcnt);
+out:
+ spin_unlock_bh(&sctp_app_lock);
+
+ return ret;
+}
+
+static void sctp_unregister_app(struct ip_vs_app *inc)
+{
+ spin_lock_bh(&sctp_app_lock);
+ atomic_dec(&ip_vs_protocol_sctp.appcnt);
+ list_del(&inc->p_list);
+ spin_unlock_bh(&sctp_app_lock);
+}
+
+static int sctp_app_conn_bind(struct ip_vs_conn *cp)
+{
+ int hash;
+ struct ip_vs_app *inc;
+ int result = 0;
+
+ /* Default binding: bind app only for NAT */
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ return 0;
+ /* Lookup application incarnations and bind the right one */
+ hash = sctp_app_hashkey(cp->vport);
+
+ spin_lock(&sctp_app_lock);
+ list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+ if (inc->port == cp->vport) {
+ if (unlikely(!ip_vs_app_inc_get(inc)))
+ break;
+ spin_unlock(&sctp_app_lock);
+
+ IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
+ "%s:%u to app %s on port %u\n",
+ __func__,
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr),
+ ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
+ ntohs(cp->vport),
+ inc->name, ntohs(inc->port));
+ cp->app = inc;
+ if (inc->init_conn)
+ result = inc->init_conn(inc, cp);
+ goto out;
+ }
+ }
+ spin_unlock(&sctp_app_lock);
+out:
+ return result;
+}
+
+static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+{
+ IP_VS_INIT_HASH_TABLE(sctp_apps);
+ pp->timeout_table = sctp_timeouts;
+}
+
+
+static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+{
+
+}
+
+struct ip_vs_protocol ip_vs_protocol_sctp = {
+ .name = "SCTP",
+ .protocol = IPPROTO_SCTP,
+ .num_states = IP_VS_SCTP_S_LAST,
+ .dont_defrag = 0,
+ .appcnt = ATOMIC_INIT(0),
+ .init = ip_vs_sctp_init,
+ .exit = ip_vs_sctp_exit,
+ .register_app = sctp_register_app,
+ .unregister_app = sctp_unregister_app,
+ .conn_schedule = sctp_conn_schedule,
+ .conn_in_get = sctp_conn_in_get,
+ .conn_out_get = sctp_conn_out_get,
+ .snat_handler = sctp_snat_handler,
+ .dnat_handler = sctp_dnat_handler,
+ .csum_check = sctp_csum_check,
+ .state_name = sctp_state_name,
+ .state_transition = sctp_state_transition,
+ .app_conn_bind = sctp_app_conn_bind,
+ .debug_packet = ip_vs_tcpudp_debug_packet,
+ .timeout_change = sctp_timeout_change,
+ .set_state_timeout = sctp_set_state_timeout,
+};
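
To take effect, the new handler still has to be hooked into the IPVS protocol table from ip_vs_proto.c, alongside the TCP and UDP entries. A sketch of what that registration amounts to, assuming the REGISTER_PROTOCOL() helper already used there and the CONFIG_IP_VS_PROTO_SCTP option added by this patch:

	#ifdef CONFIG_IP_VS_PROTO_SCTP
		REGISTER_PROTOCOL(&ip_vs_protocol_sctp);
	#endif
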
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e177f0dc208..8fb0ae61676 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -400,6 +400,11 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
flags |= IP_VS_CONN_F_INACTIVE;
else
flags &= ~IP_VS_CONN_F_INACTIVE;
+ } else if (s->protocol == IPPROTO_SCTP) {
+ if (state != IP_VS_SCTP_S_ESTABLISHED)
+ flags |= IP_VS_CONN_F_INACTIVE;
+ else
+ flags &= ~IP_VS_CONN_F_INACTIVE;
}
cp = ip_vs_conn_new(AF_INET, s->protocol,
(union nf_inet_addr *)&s->caddr,
@@ -434,6 +439,15 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
atomic_dec(&dest->inactconns);
cp->flags &= ~IP_VS_CONN_F_INACTIVE;
}
+ } else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+ (cp->state != state)) {
+ dest = cp->dest;
+ if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+ (state != IP_VS_SCTP_S_ESTABLISHED)) {
+ atomic_dec(&dest->activeconns);
+ atomic_inc(&dest->inactconns);
+ cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+ }
}
if (opt)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 30b3189bd29..223b5018c7d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -311,7 +311,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -454,7 +454,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL_PKT(0, pp, skb, 0,
"ip_vs_nat_xmit_v6(): frag needed for");
goto tx_error;
@@ -672,7 +672,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
dst_release(&rt->u.dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -814,7 +814,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* MTU checking */
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
dst_release(&rt->u.dst);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -965,7 +965,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if (skb->len > mtu) {
dst_release(&rt->u.dst);
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d4..0c9bbe93cc1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
+#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
@@ -41,6 +42,7 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -63,13 +65,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
-
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
- unsigned int size, unsigned int rnd)
+ u16 zone, unsigned int size, unsigned int rnd)
{
unsigned int n;
u_int32_t h;
@@ -80,15 +80,16 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
*/
n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
h = jhash2((u32 *)tuple, n,
- rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
- tuple->dst.protonum));
+ zone ^ rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
+ tuple->dst.protonum));
return ((u64)h * size) >> 32;
}
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
- return __hash_conntrack(tuple, nf_conntrack_htable_size,
+ return __hash_conntrack(tuple, zone, net->ct.htable_size,
nf_conntrack_hash_rnd);
}
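
The zone id is simply folded into the jhash seed, so equal tuples in different zones hash independently. The nf_ct_zone() accessor used throughout these hunks comes from the new <net/netfilter/nf_conntrack_zones.h>; a sketch of what it is assumed to do:

	static inline u16 nf_ct_zone(const struct nf_conn *ct)
	{
	#ifdef CONFIG_NF_CONNTRACK_ZONES
		struct nf_conntrack_zone *nf_ct_zone;

		nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
		if (nf_ct_zone)
			return nf_ct_zone->id;
	#endif
		return NF_CT_DEFAULT_ZONE;	/* zone 0 */
	}
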
@@ -292,11 +293,12 @@ static void death_by_timeout(unsigned long ul_conntrack)
* - Caller must lock nf_conntrack_lock before calling this function
*/
struct nf_conntrack_tuple_hash *
-__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
+__nf_conntrack_find(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(tuple);
+ unsigned int hash = hash_conntrack(net, zone, tuple);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
@@ -304,7 +306,8 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
local_bh_disable();
begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
- if (nf_ct_tuple_equal(tuple, &h->tuple)) {
+ if (nf_ct_tuple_equal(tuple, &h->tuple) &&
+ nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
return h;
@@ -326,21 +329,23 @@ EXPORT_SYMBOL_GPL(__nf_conntrack_find);
/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
+nf_conntrack_find_get(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
rcu_read_lock();
begin:
- h = __nf_conntrack_find(net, tuple);
+ h = __nf_conntrack_find(net, zone, tuple);
if (h) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (unlikely(nf_ct_is_dying(ct) ||
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
+ if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
+ nf_ct_zone(ct) != zone)) {
nf_ct_put(ct);
goto begin;
}
@@ -366,10 +371,13 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
void nf_conntrack_hash_insert(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned int hash, repl_hash;
+ u16 zone;
- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ zone = nf_ct_zone(ct);
+ hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
@@ -386,6 +394,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
+ u16 zone;
ct = nf_ct_get(skb, &ctinfo);
net = nf_ct_net(ct);
@@ -397,8 +406,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ zone = nf_ct_zone(ct);
+ hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(net, zone, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
/* We're not in hash table, and we refuse to set up related
connections for unconfirmed conns. But packet copies and
@@ -417,11 +427,13 @@ __nf_conntrack_confirm(struct sk_buff *skb)
not in the hash. If there is, we lost race. */
hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- &h->tuple))
+ &h->tuple) &&
+ zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
- &h->tuple))
+ &h->tuple) &&
+ zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
/* Remove from unconfirmed list */
@@ -468,15 +480,19 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(tuple);
+ struct nf_conn *ct;
+ u16 zone = nf_ct_zone(ignored_conntrack);
+ unsigned int hash = hash_conntrack(net, zone, tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
*/
rcu_read_lock_bh();
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
- if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
- nf_ct_tuple_equal(tuple, &h->tuple)) {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (ct != ignored_conntrack &&
+ nf_ct_tuple_equal(tuple, &h->tuple) &&
+ nf_ct_zone(ct) == zone) {
NF_CT_STAT_INC(net, found);
rcu_read_unlock_bh();
return 1;
@@ -503,7 +519,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
int dropped = 0;
rcu_read_lock();
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < net->ct.htable_size; i++) {
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +539,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
if (cnt >= NF_CT_EVICTION_RANGE)
break;
- hash = (hash + 1) % nf_conntrack_htable_size;
+ hash = (hash + 1) % net->ct.htable_size;
}
rcu_read_unlock();
@@ -539,7 +555,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
}
-struct nf_conn *nf_conntrack_alloc(struct net *net,
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
const struct nf_conntrack_tuple *orig,
const struct nf_conntrack_tuple *repl,
gfp_t gfp)
@@ -557,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
if (nf_conntrack_max &&
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
- unsigned int hash = hash_conntrack(orig);
+ unsigned int hash = hash_conntrack(net, zone, orig);
if (!early_drop(net, hash)) {
atomic_dec(&net->ct.count);
if (net_ratelimit())
@@ -572,7 +588,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_DESTROY_BY_RCU.
*/
- ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
if (ct == NULL) {
pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
atomic_dec(&net->ct.count);
@@ -594,13 +610,28 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
#ifdef CONFIG_NET_NS
ct->ct_net = net;
#endif
-
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (zone) {
+ struct nf_conntrack_zone *nf_ct_zone;
+
+ nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
+ if (!nf_ct_zone)
+ goto out_free;
+ nf_ct_zone->id = zone;
+ }
+#endif
/*
* changes to lookup keys must be done before setting refcnt to 1
*/
smp_wmb();
atomic_set(&ct->ct_general.use, 1);
return ct;
+
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+out_free:
+ kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+ return ERR_PTR(-ENOMEM);
+#endif
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
@@ -611,14 +642,14 @@ void nf_conntrack_free(struct nf_conn *ct)
nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count);
nf_ct_ext_free(ct);
- kmem_cache_free(nf_conntrack_cachep, ct);
+ kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
/* Allocate a new conntrack: we return -ENOMEM if classification
failed due to stress. Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
-init_conntrack(struct net *net,
+init_conntrack(struct net *net, struct nf_conn *tmpl,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
@@ -628,14 +659,16 @@ init_conntrack(struct net *net,
struct nf_conn *ct;
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
+ struct nf_conntrack_ecache *ecache;
struct nf_conntrack_expect *exp;
+ u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
pr_debug("Can't invert tuple.\n");
return NULL;
}
- ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
+ ct = nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC);
if (IS_ERR(ct)) {
pr_debug("Can't allocate conntrack.\n");
return (struct nf_conntrack_tuple_hash *)ct;
@@ -648,10 +681,14 @@ init_conntrack(struct net *net,
}
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
- nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
+
+ ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
+ nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
+ ecache ? ecache->expmask : 0,
+ GFP_ATOMIC);
spin_lock_bh(&nf_conntrack_lock);
- exp = nf_ct_find_expectation(net, tuple);
+ exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
ct, exp);
@@ -673,7 +710,7 @@ init_conntrack(struct net *net,
nf_conntrack_get(&ct->master->ct_general);
NF_CT_STAT_INC(net, expect_new);
} else {
- __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
+ __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
NF_CT_STAT_INC(net, new);
}
@@ -694,7 +731,7 @@ init_conntrack(struct net *net,
/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
-resolve_normal_ct(struct net *net,
+resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
u_int16_t l3num,
@@ -707,6 +744,7 @@ resolve_normal_ct(struct net *net,
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
+ u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, l3num, protonum, &tuple, l3proto,
@@ -716,9 +754,10 @@ resolve_normal_ct(struct net *net,
}
/* look for tuple match */
- h = nf_conntrack_find_get(net, &tuple);
+ h = nf_conntrack_find_get(net, zone, &tuple);
if (!h) {
- h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
+ h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
+ skb, dataoff);
if (!h)
return NULL;
if (IS_ERR(h))
@@ -755,7 +794,7 @@ unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
struct sk_buff *skb)
{
- struct nf_conn *ct;
+ struct nf_conn *ct, *tmpl = NULL;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
@@ -764,10 +803,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
int set_reply = 0;
int ret;
- /* Previously seen (loopback or untracked)? Ignore. */
if (skb->nfct) {
- NF_CT_STAT_INC_ATOMIC(net, ignore);
- return NF_ACCEPT;
+ /* Previously seen (loopback or untracked)? Ignore. */
+ tmpl = (struct nf_conn *)skb->nfct;
+ if (!nf_ct_is_template(tmpl)) {
+ NF_CT_STAT_INC_ATOMIC(net, ignore);
+ return NF_ACCEPT;
+ }
+ skb->nfct = NULL;
}
/* rcu_read_lock()ed by nf_hook_slow */
@@ -778,7 +821,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
pr_debug("not prepared to track yet or error occured\n");
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
- return -ret;
+ ret = -ret;
+ goto out;
}
l4proto = __nf_ct_l4proto_find(pf, protonum);
@@ -787,26 +831,30 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
* inverse of the return code tells to the netfilter
* core what to do with the packet. */
if (l4proto->error != NULL) {
- ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
+ ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
+ pf, hooknum);
if (ret <= 0) {
NF_CT_STAT_INC_ATOMIC(net, error);
NF_CT_STAT_INC_ATOMIC(net, invalid);
- return -ret;
+ ret = -ret;
+ goto out;
}
}
- ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
+ ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
l3proto, l4proto, &set_reply, &ctinfo);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
- return NF_ACCEPT;
+ ret = NF_ACCEPT;
+ goto out;
}
if (IS_ERR(ct)) {
/* Too stressed to deal. */
NF_CT_STAT_INC_ATOMIC(net, drop);
- return NF_DROP;
+ ret = NF_DROP;
+ goto out;
}
NF_CT_ASSERT(skb->nfct);
@@ -821,11 +869,15 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
NF_CT_STAT_INC_ATOMIC(net, invalid);
if (ret == -NF_DROP)
NF_CT_STAT_INC_ATOMIC(net, drop);
- return -ret;
+ ret = -ret;
+ goto out;
}
if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_REPLY, ct);
+out:
+ if (tmpl)
+ nf_ct_put(tmpl);
return ret;
}
@@ -864,7 +916,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
return;
rcu_read_lock();
- __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
+ __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -938,6 +990,14 @@ bool __nf_ct_kill_acct(struct nf_conn *ct,
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
+ .len = sizeof(struct nf_conntrack_zone),
+ .align = __alignof__(struct nf_conntrack_zone),
+ .id = NF_CT_EXT_ZONE,
+};
+#endif
+
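
The extension type registered above only reserves room in the conntrack extension area; the payload it describes is a single 16-bit zone id, roughly:

	/* sketch of the payload (nf_conntrack_zones.h) */
	struct nf_conntrack_zone {
		u16	id;
	};

	#define NF_CT_DEFAULT_ZONE	0
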
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
@@ -1014,7 +1074,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
- for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ for (; *bucket < net->ct.htable_size; (*bucket)++) {
hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
@@ -1113,9 +1173,15 @@ static void nf_ct_release_dying_list(struct net *net)
static void nf_conntrack_cleanup_init_net(void)
{
+ /* wait until all references to nf_conntrack_untracked are dropped */
+ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+ schedule();
+
nf_conntrack_helper_fini();
nf_conntrack_proto_fini();
- kmem_cache_destroy(nf_conntrack_cachep);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ nf_ct_extend_unregister(&nf_ct_zone_extend);
+#endif
}
static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1193,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
schedule();
goto i_see_dead_people;
}
- /* wait until all references to nf_conntrack_untracked are dropped */
- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
- schedule();
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- nf_conntrack_htable_size);
+ net->ct.htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
+ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+ kfree(net->ct.slabname);
free_percpu(net->ct.stat);
}
@@ -1190,9 +1255,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
int i, bucket, vmalloced, old_vmalloced;
unsigned int hashsize, old_size;
- int rnd;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
+
+ if (current->nsproxy->net_ns != &init_net)
+ return -EOPNOTSUPP;
/* On boot, we can set this without any fancy locking. */
if (!nf_conntrack_htable_size)
@@ -1206,33 +1274,31 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hash)
return -ENOMEM;
- /* We have to rehahs for the new table anyway, so we also can
- * use a newrandom seed */
- get_random_bytes(&rnd, sizeof(rnd));
-
/* Lookups in the old hash might happen in parallel, which means we
* might get false negatives during connection lookup. New connections
* created because of a false negative won't make it into the hash
* though since that required taking the lock.
*/
spin_lock_bh(&nf_conntrack_lock);
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < init_net.ct.htable_size; i++) {
while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
h = hlist_nulls_entry(init_net.ct.hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
+ ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
- bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+ bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
+ hashsize,
+ nf_conntrack_hash_rnd);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
- old_size = nf_conntrack_htable_size;
+ old_size = init_net.ct.htable_size;
old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
- nf_conntrack_htable_size = hashsize;
+ init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
- nf_conntrack_hash_rnd = rnd;
spin_unlock_bh(&nf_conntrack_lock);
nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1337,6 @@ static int nf_conntrack_init_init_net(void)
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
- nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
- sizeof(struct nf_conn),
- 0, SLAB_DESTROY_BY_RCU, NULL);
- if (!nf_conntrack_cachep) {
- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
- ret = -ENOMEM;
- goto err_cache;
- }
-
ret = nf_conntrack_proto_init();
if (ret < 0)
goto err_proto;
@@ -1288,13 +1345,28 @@ static int nf_conntrack_init_init_net(void)
if (ret < 0)
goto err_helper;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ ret = nf_ct_extend_register(&nf_ct_zone_extend);
+ if (ret < 0)
+ goto err_extend;
+#endif
+ /* Set up fake conntrack: to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+ nf_conntrack_untracked.ct_net = &init_net;
+#endif
+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+ /* - and look it like as a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
return 0;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+err_extend:
+ nf_conntrack_helper_fini();
+#endif
err_helper:
nf_conntrack_proto_fini();
err_proto:
- kmem_cache_destroy(nf_conntrack_cachep);
-err_cache:
return ret;
}
@@ -1316,7 +1388,24 @@ static int nf_conntrack_init_net(struct net *net)
ret = -ENOMEM;
goto err_stat;
}
- net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+ if (!net->ct.slabname) {
+ ret = -ENOMEM;
+ goto err_slabname;
+ }
+
+ net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
+ sizeof(struct nf_conn), 0,
+ SLAB_DESTROY_BY_RCU, NULL);
+ if (!net->ct.nf_conntrack_cachep) {
+ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ net->ct.htable_size = nf_conntrack_htable_size;
+ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
&net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
@@ -1333,15 +1422,6 @@ static int nf_conntrack_init_net(struct net *net)
if (ret < 0)
goto err_ecache;
- /* Set up fake conntrack:
- - to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
- nf_conntrack_untracked.ct_net = &init_net;
-#endif
- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
- /* - and look it like as a confirmed connection */
- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
return 0;
err_ecache:
@@ -1350,8 +1430,12 @@ err_acct:
nf_conntrack_expect_fini(net);
err_expect:
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- nf_conntrack_htable_size);
+ net->ct.htable_size);
err_hash:
+ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+err_cache:
+ kfree(net->ct.slabname);
+err_slabname:
free_percpu(net->ct.stat);
err_stat:
return ret;
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b..acb29ccaa41 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
@@ -84,7 +85,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
}
struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
+__nf_ct_expect_find(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
struct hlist_node *n;
@@ -95,7 +97,8 @@ __nf_ct_expect_find(struct net *net, const struct nf_conntrack_tuple *tuple)
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
- if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask))
+ if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
+ nf_ct_zone(i->master) == zone)
return i;
}
return NULL;
@@ -104,12 +107,13 @@ EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
/* Just find a expectation corresponding to a tuple. */
struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
+nf_ct_expect_find_get(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i;
rcu_read_lock();
- i = __nf_ct_expect_find(net, tuple);
+ i = __nf_ct_expect_find(net, zone, tuple);
if (i && !atomic_inc_not_zero(&i->use))
i = NULL;
rcu_read_unlock();
@@ -121,7 +125,8 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
/* If an expectation for this connection is found, it gets delete from
* global list then returned. */
struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
+nf_ct_find_expectation(struct net *net, u16 zone,
+ const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_expect *i, *exp = NULL;
struct hlist_node *n;
@@ -133,7 +138,8 @@ nf_ct_find_expectation(struct net *net, const struct nf_conntrack_tuple *tuple)
h = nf_ct_expect_dst_hash(tuple);
hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
- nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)) {
+ nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
+ nf_ct_zone(i->master) == zone) {
exp = i;
break;
}
@@ -204,7 +210,8 @@ static inline int expect_matches(const struct nf_conntrack_expect *a,
{
return a->master == b->master && a->class == b->class &&
nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
- nf_ct_tuple_mask_equal(&a->mask, &b->mask);
+ nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+ nf_ct_zone(a->master) == nf_ct_zone(b->master);
}
/* Generally a bad idea to call this: could have matched already. */
@@ -232,7 +239,6 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
new->master = me;
atomic_set(&new->use, 1);
- INIT_RCU_HEAD(&new->rcu);
return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -500,6 +506,7 @@ static void exp_seq_stop(struct seq_file *seq, void *v)
static int exp_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_expect *expect;
+ struct nf_conntrack_helper *helper;
struct hlist_node *n = v;
char *delim = "";
@@ -525,6 +532,14 @@ static int exp_seq_show(struct seq_file *s, void *v)
if (expect->flags & NF_CT_EXPECT_INACTIVE)
seq_printf(s, "%sINACTIVE", delim);
+ helper = rcu_dereference(nfct_help(expect->master)->helper);
+ if (helper) {
+ seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
+ if (helper->expect_policy[expect->class].name)
+ seq_printf(s, "/%s",
+ helper->expect_policy[expect->class].name);
+ }
+
return seq_putc(s, '\n');
}
@@ -569,7 +584,7 @@ static void exp_proc_remove(struct net *net)
#endif /* CONFIG_PROC_FS */
}
-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
int nf_conntrack_expect_init(struct net *net)
{
@@ -577,7 +592,7 @@ int nf_conntrack_expect_init(struct net *net)
if (net_eq(net, &init_net)) {
if (!nf_ct_expect_hsize) {
- nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
+ nf_ct_expect_hsize = net->ct.htable_size / 256;
if (!nf_ct_expect_hsize)
nf_ct_expect_hsize = 1;
}
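
For illustration: the expectation lookups in this file now take a u16 zone argument and only return an expectation whose master conntrack lives in the same zone. The following self-contained sketch models just that matching rule, using simplified stand-in types (tuple, expect, master_zone) rather than the real nf_conntrack structures or hash buckets.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for nf_conntrack_tuple / nf_conntrack_expect. */
struct tuple { uint32_t dst_ip; uint16_t dst_port; };
struct expect {
	struct tuple tuple;
	struct tuple mask;
	uint16_t master_zone;   /* models nf_ct_zone(exp->master) */
};

static bool tuple_mask_cmp(const struct tuple *t, const struct tuple *e,
			   const struct tuple *m)
{
	return ((t->dst_ip ^ e->dst_ip) & m->dst_ip) == 0 &&
	       ((t->dst_port ^ e->dst_port) & m->dst_port) == 0;
}

/* Mirrors the new condition in __nf_ct_expect_find(): the tuple must
 * match under the mask *and* the master conntrack's zone must equal
 * the zone the packet belongs to. */
static const struct expect *
expect_find(const struct expect *tbl, size_t n, uint16_t zone,
	    const struct tuple *t)
{
	for (size_t i = 0; i < n; i++)
		if (tuple_mask_cmp(t, &tbl[i].tuple, &tbl[i].mask) &&
		    tbl[i].master_zone == zone)
			return &tbl[i];
	return NULL;
}

int main(void)
{
	const struct expect tbl[] = {
		{ { 0x0a000001, 21 }, { 0xffffffff, 0xffff }, 1 },
		{ { 0x0a000001, 21 }, { 0xffffffff, 0xffff }, 2 },
	};
	const struct tuple t = { 0x0a000001, 21 };

	printf("zone 2 match hits second entry: %s\n",
	       expect_find(tbl, 2, 2, &t) == &tbl[1] ? "yes" : "no");
	return 0;
}
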
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fef95be334b..fdc8fb4ae10 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -59,7 +59,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
if (!*ext)
return NULL;
- INIT_RCU_HEAD(&(*ext)->rcu);
(*ext)->offset[id] = off;
(*ext)->len = len;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 66369490230..a1c8dd917e1 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_h323.h>
/* Parameters */
@@ -1216,7 +1217,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
tuple.dst.u.tcp.port = port;
tuple.dst.protonum = IPPROTO_TCP;
- exp = __nf_ct_expect_find(net, &tuple);
+ exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
if (exp && exp->master == ct)
return exp;
return NULL;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3af..4509fa6726f 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -65,7 +65,7 @@ __nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
}
struct nf_conntrack_helper *
-__nf_conntrack_helper_find_byname(const char *name)
+__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
struct nf_conntrack_helper *h;
struct hlist_node *n;
@@ -73,13 +73,34 @@ __nf_conntrack_helper_find_byname(const char *name)
for (i = 0; i < nf_ct_helper_hsize; i++) {
hlist_for_each_entry_rcu(h, n, &nf_ct_helper_hash[i], hnode) {
- if (!strcmp(h->name, name))
+ if (!strcmp(h->name, name) &&
+ h->tuple.src.l3num == l3num &&
+ h->tuple.dst.protonum == protonum)
return h;
}
}
return NULL;
}
-EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find_byname);
+EXPORT_SYMBOL_GPL(__nf_conntrack_helper_find);
+
+struct nf_conntrack_helper *
+nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
+{
+ struct nf_conntrack_helper *h;
+
+ h = __nf_conntrack_helper_find(name, l3num, protonum);
+#ifdef CONFIG_MODULES
+ if (h == NULL) {
+ if (request_module("nfct-helper-%s", name) == 0)
+ h = __nf_conntrack_helper_find(name, l3num, protonum);
+ }
+#endif
+ if (h != NULL && !try_module_get(h->me))
+ h = NULL;
+
+ return h;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
{
@@ -94,13 +115,22 @@ struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
-int __nf_ct_try_assign_helper(struct nf_conn *ct, gfp_t flags)
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+ gfp_t flags)
{
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conn_help *help;
int ret = 0;
- struct nf_conntrack_helper *helper;
- struct nf_conn_help *help = nfct_help(ct);
- helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ if (tmpl != NULL) {
+ help = nfct_help(tmpl);
+ if (help != NULL)
+ helper = help->helper;
+ }
+
+ help = nfct_help(ct);
+ if (helper == NULL)
+ helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
if (helper == NULL) {
if (help)
rcu_assign_pointer(help->helper, NULL);
@@ -192,7 +222,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
/* Get rid of expecteds, set helpers to NULL. */
hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
unhelp(h, me);
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < net->ct.htable_size; i++) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
unhelp(h, me);
}
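
For illustration: __nf_conntrack_helper_find() now keys on (name, l3num, protonum) instead of the name alone, and the new nf_conntrack_helper_try_module_get() adds a request_module("nfct-helper-%s")/try_module_get() fallback. The sketch below models only the three-way match; module loading and reference counting are kernel-specific and not reproduced, and the helpers table and numeric constants are purely illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for struct nf_conntrack_helper. */
struct helper {
	const char *name;
	uint16_t l3num;     /* e.g. AF_INET (2) / AF_INET6 (10) */
	uint8_t  protonum;  /* e.g. IPPROTO_TCP (6) / IPPROTO_UDP (17) */
};

static const struct helper helpers[] = {
	{ "sip", 2, 17 },
	{ "sip", 2,  6 },
	{ "sip", 10, 17 },
};

/* Mirrors __nf_conntrack_helper_find(): the name alone is no longer
 * enough, the layer-3 family and layer-4 protocol must match too. */
static const struct helper *
helper_find(const char *name, uint16_t l3num, uint8_t protonum)
{
	for (size_t i = 0; i < sizeof(helpers) / sizeof(helpers[0]); i++)
		if (!strcmp(helpers[i].name, name) &&
		    helpers[i].l3num == l3num &&
		    helpers[i].protonum == protonum)
			return &helpers[i];
	return NULL;
}

int main(void)
{
	const struct helper *tcp = helper_find("sip", 2, 6);
	const struct helper *udp = helper_find("sip", 2, 17);

	printf("\"sip\" over TCP and UDP resolve to %s helpers\n",
	       tcp == udp ? "the same" : "different");
	printf("unregistered combination found: %s\n",
	       helper_find("sip", 10, 6) ? "yes" : "no");
	return 0;
}
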
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 59d8064eb52..2b2af631d2b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -30,6 +30,7 @@
#include <linux/netfilter.h>
#include <net/netlink.h>
+#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
@@ -38,6 +39,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
@@ -378,6 +380,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
+ if (nf_ct_zone(ct))
+ NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_timeout(skb, ct) < 0 ||
ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
@@ -456,6 +461,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
+ struct net *net;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
@@ -482,7 +488,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
} else
return 0;
- if (!item->report && !nfnetlink_has_listeners(group))
+ net = nf_ct_net(ct);
+ if (!item->report && !nfnetlink_has_listeners(net, group))
return 0;
skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
@@ -514,6 +521,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
+ if (nf_ct_zone(ct))
+ NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct)));
+
if (ctnetlink_dump_id(skb, ct) < 0)
goto nla_put_failure;
@@ -559,7 +569,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
rcu_read_unlock();
nlmsg_end(skb, nlh);
- err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC);
+ err = nfnetlink_send(skb, net, item->pid, group, item->report,
+ GFP_ATOMIC);
if (err == -ENOBUFS || err == -EAGAIN)
return -ENOBUFS;
@@ -571,7 +582,7 @@ nla_put_failure:
nlmsg_failure:
kfree_skb(skb);
errout:
- nfnetlink_set_err(0, group, -ENOBUFS);
+ nfnetlink_set_err(net, 0, group, -ENOBUFS);
return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@@ -586,6 +597,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
@@ -594,9 +606,9 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
last = (struct nf_conn *)cb->args[1];
- for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
+ for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
- hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
hnnode) {
if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
continue;
@@ -703,6 +715,11 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
return ret;
}
+static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
+ [CTA_TUPLE_IP] = { .type = NLA_NESTED },
+ [CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
+};
+
static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
struct nf_conntrack_tuple *tuple,
@@ -713,7 +730,7 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
memset(tuple, 0, sizeof(*tuple));
- nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL);
+ nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy);
if (!tb[CTA_TUPLE_IP])
return -EINVAL;
@@ -740,12 +757,31 @@ ctnetlink_parse_tuple(const struct nlattr * const cda[],
return 0;
}
+static int
+ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
+{
+ if (attr)
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ *zone = ntohs(nla_get_be16(attr));
+#else
+ return -EOPNOTSUPP;
+#endif
+ else
+ *zone = 0;
+
+ return 0;
+}
+
+static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
+ [CTA_HELP_NAME] = { .type = NLA_NUL_STRING },
+};
+
static inline int
ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
{
struct nlattr *tb[CTA_HELP_MAX+1];
- nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL);
+ nla_parse_nested(tb, CTA_HELP_MAX, attr, help_nla_policy);
if (!tb[CTA_HELP_NAME])
return -EINVAL;
@@ -756,11 +792,18 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
}
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
+ [CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
+ [CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
[CTA_STATUS] = { .type = NLA_U32 },
+ [CTA_PROTOINFO] = { .type = NLA_NESTED },
+ [CTA_HELP] = { .type = NLA_NESTED },
+ [CTA_NAT_SRC] = { .type = NLA_NESTED },
[CTA_TIMEOUT] = { .type = NLA_U32 },
[CTA_MARK] = { .type = NLA_U32 },
- [CTA_USE] = { .type = NLA_U32 },
[CTA_ID] = { .type = NLA_U32 },
+ [CTA_NAT_DST] = { .type = NLA_NESTED },
+ [CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
+ [CTA_ZONE] = { .type = NLA_U16 },
};
static int
@@ -768,12 +811,18 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conn *ct;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- int err = 0;
+ u16 zone;
+ int err;
+
+ err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
+ if (err < 0)
+ return err;
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
@@ -781,7 +830,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
else {
/* Flush the whole table */
- nf_conntrack_flush_report(&init_net,
+ nf_conntrack_flush_report(net,
NETLINK_CB(skb).pid,
nlmsg_report(nlh));
return 0;
@@ -790,7 +839,7 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- h = nf_conntrack_find_get(&init_net, &tuple);
+ h = nf_conntrack_find_get(net, zone, &tuple);
if (!h)
return -ENOENT;
@@ -828,18 +877,24 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
struct nf_conn *ct;
struct sk_buff *skb2 = NULL;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- int err = 0;
+ u16 zone;
+ int err;
if (nlh->nlmsg_flags & NLM_F_DUMP)
return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
ctnetlink_done);
+ err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
+ if (err < 0)
+ return err;
+
if (cda[CTA_TUPLE_ORIG])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
else if (cda[CTA_TUPLE_REPLY])
@@ -850,7 +905,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- h = nf_conntrack_find_get(&init_net, &tuple);
+ h = nf_conntrack_find_get(net, zone, &tuple);
if (!h)
return -ENOENT;
@@ -994,7 +1049,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
return 0;
}
- helper = __nf_conntrack_helper_find_byname(helpname);
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
if (helper == NULL) {
#ifdef CONFIG_MODULES
spin_unlock_bh(&nf_conntrack_lock);
@@ -1005,7 +1061,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
}
spin_lock_bh(&nf_conntrack_lock);
- helper = __nf_conntrack_helper_find_byname(helpname);
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
if (helper)
return -EAGAIN;
#endif
@@ -1020,9 +1077,8 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
/* need to zero data of old helper */
memset(&help->help, 0, sizeof(help->help));
} else {
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
- if (help == NULL)
- return -ENOMEM;
+ /* we cannot set a helper for an existing conntrack */
+ return -EOPNOTSUPP;
}
rcu_assign_pointer(help->helper, helper);
@@ -1044,6 +1100,12 @@ ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
return 0;
}
+static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
+ [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
+ [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
+ [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
+};
+
static inline int
ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
@@ -1052,7 +1114,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
struct nf_conntrack_l4proto *l4proto;
int err = 0;
- nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
+ nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy);
rcu_read_lock();
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
@@ -1064,12 +1126,18 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
}
#ifdef CONFIG_NF_NAT_NEEDED
+static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
+ [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
+ [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
+ [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
+};
+
static inline int
change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
- nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL);
+ nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
return -EINVAL;
@@ -1175,7 +1243,8 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
}
static struct nf_conn *
-ctnetlink_create_conntrack(const struct nlattr * const cda[],
+ctnetlink_create_conntrack(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
struct nf_conntrack_tuple *otuple,
struct nf_conntrack_tuple *rtuple,
u8 u3)
@@ -1184,7 +1253,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
int err = -EINVAL;
struct nf_conntrack_helper *helper;
- ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
+ ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
if (IS_ERR(ct))
return ERR_PTR(-ENOMEM);
@@ -1193,7 +1262,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
- ct->status |= IPS_CONFIRMED;
rcu_read_lock();
if (cda[CTA_HELP]) {
@@ -1203,7 +1271,8 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
if (err < 0)
goto err2;
- helper = __nf_conntrack_helper_find_byname(helpname);
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
if (helper == NULL) {
rcu_read_unlock();
#ifdef CONFIG_MODULES
@@ -1213,7 +1282,9 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
}
rcu_read_lock();
- helper = __nf_conntrack_helper_find_byname(helpname);
+ helper = __nf_conntrack_helper_find(helpname,
+ nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
if (helper) {
err = -EAGAIN;
goto err2;
@@ -1236,19 +1307,24 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
}
} else {
/* try an implicit helper assignation */
- err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
+ err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
if (err < 0)
goto err2;
}
- if (cda[CTA_STATUS]) {
- err = ctnetlink_change_status(ct, cda);
+ if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
+ err = ctnetlink_change_nat(ct, cda);
if (err < 0)
goto err2;
}
- if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
- err = ctnetlink_change_nat(ct, cda);
+ nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+ nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
+ /* we must add conntrack extensions before confirmation. */
+ ct->status |= IPS_CONFIRMED;
+
+ if (cda[CTA_STATUS]) {
+ err = ctnetlink_change_status(ct, cda);
if (err < 0)
goto err2;
}
@@ -1267,9 +1343,6 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
goto err2;
}
- nf_ct_acct_ext_add(ct, GFP_ATOMIC);
- nf_ct_ecache_ext_add(ct, GFP_ATOMIC);
-
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
@@ -1285,7 +1358,7 @@ ctnetlink_create_conntrack(const struct nlattr * const cda[],
if (err < 0)
goto err2;
- master_h = nf_conntrack_find_get(&init_net, &master);
+ master_h = nf_conntrack_find_get(net, zone, &master);
if (master_h == NULL) {
err = -ENOENT;
goto err2;
@@ -1313,11 +1386,17 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_tuple otuple, rtuple;
struct nf_conntrack_tuple_hash *h = NULL;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- int err = 0;
+ u16 zone;
+ int err;
+
+ err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
+ if (err < 0)
+ return err;
if (cda[CTA_TUPLE_ORIG]) {
err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
@@ -1333,9 +1412,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
spin_lock_bh(&nf_conntrack_lock);
if (cda[CTA_TUPLE_ORIG])
- h = __nf_conntrack_find(&init_net, &otuple);
+ h = __nf_conntrack_find(net, zone, &otuple);
else if (cda[CTA_TUPLE_REPLY])
- h = __nf_conntrack_find(&init_net, &rtuple);
+ h = __nf_conntrack_find(net, zone, &rtuple);
if (h == NULL) {
err = -ENOENT;
@@ -1343,7 +1422,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
struct nf_conn *ct;
enum ip_conntrack_events events;
- ct = ctnetlink_create_conntrack(cda, &otuple,
+ ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
&rtuple, u3);
if (IS_ERR(ct)) {
err = PTR_ERR(ct);
@@ -1357,7 +1436,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
else
events = IPCT_NEW;
- nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
+ nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
+ (1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_PROTOINFO) |
(1 << IPCT_NATSEQADJ) |
@@ -1382,7 +1462,8 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
if (err == 0) {
nf_conntrack_get(&ct->ct_general);
spin_unlock_bh(&nf_conntrack_lock);
- nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
+ nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
+ (1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_PROTOINFO) |
(1 << IPCT_NATSEQADJ) |
@@ -1437,8 +1518,9 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
struct nlattr *nest_parms;
memset(&m, 0xFF, sizeof(m));
- m.src.u.all = mask->src.u.all;
memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
+ m.src.u.all = mask->src.u.all;
+ m.dst.protonum = tuple->dst.protonum;
nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
if (!nest_parms)
@@ -1468,6 +1550,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
const struct nf_conntrack_expect *exp)
{
struct nf_conn *master = exp->master;
+ struct nf_conntrack_helper *helper;
long timeout = (exp->timeout.expires - jiffies) / HZ;
if (timeout < 0)
@@ -1484,6 +1567,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
+ helper = rcu_dereference(nfct_help(master)->helper);
+ if (helper)
+ NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
return 0;
@@ -1525,9 +1611,10 @@ nla_put_failure:
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
+ struct nf_conntrack_expect *exp = item->exp;
+ struct net *net = nf_ct_exp_net(exp);
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
- struct nf_conntrack_expect *exp = item->exp;
struct sk_buff *skb;
unsigned int type;
int flags = 0;
@@ -1539,7 +1626,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
return 0;
if (!item->report &&
- !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
+ !nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_EXP_NEW))
return 0;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -1562,7 +1649,7 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
rcu_read_unlock();
nlmsg_end(skb, nlh);
- nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
+ nfnetlink_send(skb, net, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
item->report, GFP_ATOMIC);
return 0;
@@ -1572,7 +1659,7 @@ nla_put_failure:
nlmsg_failure:
kfree_skb(skb);
errout:
- nfnetlink_set_err(0, 0, -ENOBUFS);
+ nfnetlink_set_err(net, 0, 0, -ENOBUFS);
return 0;
}
#endif
@@ -1586,7 +1673,7 @@ static int ctnetlink_exp_done(struct netlink_callback *cb)
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net *net = &init_net;
+ struct net *net = sock_net(skb->sk);
struct nf_conntrack_expect *exp, *last;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
struct hlist_node *n;
@@ -1630,8 +1717,12 @@ out:
}
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+ [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
+ [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
[CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
[CTA_EXPECT_ID] = { .type = NLA_U32 },
+ [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
};
static int
@@ -1639,12 +1730,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct sk_buff *skb2;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- int err = 0;
+ u16 zone;
+ int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
return netlink_dump_start(ctnl, skb, nlh,
@@ -1652,6 +1745,10 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
ctnetlink_exp_done);
}
+ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+ if (err < 0)
+ return err;
+
if (cda[CTA_EXPECT_MASTER])
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
else
@@ -1660,7 +1757,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
if (err < 0)
return err;
- exp = nf_ct_expect_find_get(&init_net, &tuple);
+ exp = nf_ct_expect_find_get(net, zone, &tuple);
if (!exp)
return -ENOENT;
@@ -1700,23 +1797,28 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
- struct nf_conntrack_helper *h;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct hlist_node *n, *next;
u_int8_t u3 = nfmsg->nfgen_family;
unsigned int i;
+ u16 zone;
int err;
if (cda[CTA_EXPECT_TUPLE]) {
/* delete a single expect by tuple */
+ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+ if (err < 0)
+ return err;
+
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
if (err < 0)
return err;
/* bump usage count to 2 */
- exp = nf_ct_expect_find_get(&init_net, &tuple);
+ exp = nf_ct_expect_find_get(net, zone, &tuple);
if (!exp)
return -ENOENT;
@@ -1739,18 +1841,13 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
/* delete all expectations for this helper */
spin_lock_bh(&nf_conntrack_lock);
- h = __nf_conntrack_helper_find_byname(name);
- if (!h) {
- spin_unlock_bh(&nf_conntrack_lock);
- return -EOPNOTSUPP;
- }
for (i = 0; i < nf_ct_expect_hsize; i++) {
hlist_for_each_entry_safe(exp, n, next,
- &init_net.ct.expect_hash[i],
+ &net->ct.expect_hash[i],
hnode) {
m_help = nfct_help(exp->master);
- if (m_help->helper == h
- && del_timer(&exp->timeout)) {
+ if (!strcmp(m_help->helper->name, name) &&
+ del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
nf_ct_expect_put(exp);
}
@@ -1762,7 +1859,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
spin_lock_bh(&nf_conntrack_lock);
for (i = 0; i < nf_ct_expect_hsize; i++) {
hlist_for_each_entry_safe(exp, n, next,
- &init_net.ct.expect_hash[i],
+ &net->ct.expect_hash[i],
hnode) {
if (del_timer(&exp->timeout)) {
nf_ct_unlink_expect(exp);
@@ -1783,7 +1880,9 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
}
static int
-ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
+ctnetlink_create_expect(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
+ u_int8_t u3,
u32 pid, int report)
{
struct nf_conntrack_tuple tuple, mask, master_tuple;
@@ -1805,7 +1904,7 @@ ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
return err;
/* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(&init_net, &master_tuple);
+ h = nf_conntrack_find_get(net, zone, &master_tuple);
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1845,29 +1944,35 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const cda[])
{
+ struct net *net = sock_net(ctnl);
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int8_t u3 = nfmsg->nfgen_family;
- int err = 0;
+ u16 zone;
+ int err;
if (!cda[CTA_EXPECT_TUPLE]
|| !cda[CTA_EXPECT_MASK]
|| !cda[CTA_EXPECT_MASTER])
return -EINVAL;
+ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+ if (err < 0)
+ return err;
+
err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
if (err < 0)
return err;
spin_lock_bh(&nf_conntrack_lock);
- exp = __nf_ct_expect_find(&init_net, &tuple);
+ exp = __nf_ct_expect_find(net, zone, &tuple);
if (!exp) {
spin_unlock_bh(&nf_conntrack_lock);
err = -ENOENT;
if (nlh->nlmsg_flags & NLM_F_CREATE) {
- err = ctnetlink_create_expect(cda,
+ err = ctnetlink_create_expect(net, zone, cda,
u3,
NETLINK_CB(skb).pid,
nlmsg_report(nlh));
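
For illustration: the new ctnetlink_parse_zone() treats a missing CTA_ZONE attribute as the default zone 0, decodes a present attribute with ntohs() when zones are compiled in, and returns -EOPNOTSUPP otherwise. A small user-space model of that decision follows; MODEL_NF_CONNTRACK_ZONES stands in for CONFIG_NF_CONNTRACK_ZONES and a plain big-endian pointer stands in for the netlink attribute (the real code uses nla_get_be16()).

#include <arpa/inet.h>   /* ntohs()/htons() */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Toggle to model a kernel built without CONFIG_NF_CONNTRACK_ZONES. */
#define MODEL_NF_CONNTRACK_ZONES 1

/* Mirrors ctnetlink_parse_zone(): absent attribute -> zone 0; present
 * attribute -> decoded if zones are supported, -EOPNOTSUPP if not. */
static int parse_zone(const uint16_t *attr_be16, uint16_t *zone)
{
	if (attr_be16) {
#if MODEL_NF_CONNTRACK_ZONES
		*zone = ntohs(*attr_be16);
#else
		return -EOPNOTSUPP;
#endif
	} else {
		*zone = 0;
	}
	return 0;
}

int main(void)
{
	uint16_t zone;
	uint16_t wire = htons(7);
	int rc;

	rc = parse_zone(NULL, &zone);
	printf("absent : rc=%d zone=%u\n", rc, zone);
	rc = parse_zone(&wire, &zone);
	printf("present: rc=%d zone=%u\n", rc, zone);
	return 0;
}
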
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 3807ac7faf4..088944824e1 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
@@ -123,7 +124,7 @@ static void pptp_expectfn(struct nf_conn *ct,
pr_debug("trying to unexpect other dir: ");
nf_ct_dump_tuple(&inv_t);
- exp_other = nf_ct_expect_find_get(net, &inv_t);
+ exp_other = nf_ct_expect_find_get(net, nf_ct_zone(ct), &inv_t);
if (exp_other) {
/* delete other expectation. */
pr_debug("found\n");
@@ -136,17 +137,18 @@ static void pptp_expectfn(struct nf_conn *ct,
rcu_read_unlock();
}
-static int destroy_sibling_or_exp(struct net *net,
+static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
const struct nf_conntrack_tuple *t)
{
const struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_expect *exp;
struct nf_conn *sibling;
+ u16 zone = nf_ct_zone(ct);
pr_debug("trying to timeout ct or exp for tuple ");
nf_ct_dump_tuple(t);
- h = nf_conntrack_find_get(net, t);
+ h = nf_conntrack_find_get(net, zone, t);
if (h) {
sibling = nf_ct_tuplehash_to_ctrack(h);
pr_debug("setting timeout of conntrack %p to 0\n", sibling);
@@ -157,7 +159,7 @@ static int destroy_sibling_or_exp(struct net *net,
nf_ct_put(sibling);
return 1;
} else {
- exp = nf_ct_expect_find_get(net, t);
+ exp = nf_ct_expect_find_get(net, zone, t);
if (exp) {
pr_debug("unexpect_related of expect %p\n", exp);
nf_ct_unexpect_related(exp);
@@ -182,7 +184,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
t.dst.protonum = IPPROTO_GRE;
t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
- if (!destroy_sibling_or_exp(net, &t))
+ if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout original pns->pac ct/exp\n");
/* try reply (pac->pns) tuple */
@@ -190,7 +192,7 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
t.dst.protonum = IPPROTO_GRE;
t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
- if (!destroy_sibling_or_exp(net, &t))
+ if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout reply pac->pns ct/exp\n");
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index dd375500dcc..9a281554937 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -561,8 +561,9 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
return NF_ACCEPT;
}
-static int dccp_error(struct net *net, struct sk_buff *skb,
- unsigned int dataoff, enum ip_conntrack_info *ctinfo,
+static int dccp_error(struct net *net, struct nf_conn *tmpl,
+ struct sk_buff *skb, unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
u_int8_t pf, unsigned int hooknum)
{
struct dccp_hdr _dh, *dh;
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index c99cfba64dd..d899b1a6994 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -241,7 +241,7 @@ static int gre_packet(struct nf_conn *ct,
ct->proto.gre.stream_timeout);
/* Also, more likely to be important, and not a probe. */
set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb,
ct->proto.gre.timeout);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index f9d930f8027..b68ff15ed97 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -377,7 +377,7 @@ static int sctp_packet(struct nf_conn *ct,
new_state == SCTP_CONNTRACK_ESTABLISHED) {
pr_debug("Setting assured bit\n");
set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
return NF_ACCEPT;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3c96437b45a..9dd8cd4fb6e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -760,7 +760,7 @@ static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] =
};
/* Protect conntrack agaist broken packets. Code taken from ipt_unclean.c. */
-static int tcp_error(struct net *net,
+static int tcp_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
@@ -1045,7 +1045,7 @@ static int tcp_packet(struct nf_conn *ct,
after SYN_RECV or a valid answer for a picked up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
}
nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5c5518bedb4..8289088b821 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -77,7 +77,7 @@ static int udp_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
@@ -91,8 +91,8 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
-static int udp_error(struct net *net, struct sk_buff *skb, unsigned int dataoff,
- enum ip_conntrack_info *ctinfo,
+static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff, enum ip_conntrack_info *ctinfo,
u_int8_t pf,
unsigned int hooknum)
{
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 458655bb210..263b5a72588 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -75,7 +75,7 @@ static int udplite_packet(struct nf_conn *ct,
nf_ct_udplite_timeout_stream);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
- nf_conntrack_event_cache(IPCT_STATUS, ct);
+ nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else
nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
@@ -89,7 +89,7 @@ static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
return true;
}
-static int udplite_error(struct net *net,
+static int udplite_error(struct net *net, struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info *ctinfo,
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 4b572163784..8dd75d90efc 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -16,12 +16,14 @@
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/udp.h>
+#include <linux/tcp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_conntrack_sip.h>
MODULE_LICENSE("GPL");
@@ -50,12 +52,16 @@ module_param(sip_direct_media, int, 0600);
MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
"endpoints only (default 1)");
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
+unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr,
unsigned int *datalen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
+void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, s16 off) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
+
unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
+ unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *exp,
@@ -63,17 +69,17 @@ unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
unsigned int matchlen) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
+unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr,
- unsigned int dataoff,
unsigned int *datalen,
+ unsigned int sdpoff,
enum sdp_header_types type,
enum sdp_header_types term,
const union nf_inet_addr *addr)
__read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
+unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
unsigned int matchoff,
@@ -82,14 +88,15 @@ unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
- const char **dptr,
unsigned int dataoff,
+ const char **dptr,
unsigned int *datalen,
+ unsigned int sdpoff,
const union nf_inet_addr *addr)
__read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
+unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int dataoff,
const char **dptr,
unsigned int *datalen,
struct nf_conntrack_expect *rtp_exp,
@@ -236,12 +243,13 @@ int ct_sip_parse_request(const struct nf_conn *ct,
return 0;
/* Find SIP URI */
- limit -= strlen("sip:");
- for (; dptr < limit; dptr++) {
+ for (; dptr < limit - strlen("sip:"); dptr++) {
if (*dptr == '\r' || *dptr == '\n')
return -1;
- if (strnicmp(dptr, "sip:", strlen("sip:")) == 0)
+ if (strnicmp(dptr, "sip:", strlen("sip:")) == 0) {
+ dptr += strlen("sip:");
break;
+ }
}
if (!skp_epaddr_len(ct, dptr, limit, &shift))
return 0;
@@ -284,7 +292,8 @@ static const struct sip_header ct_sip_hdrs[] = {
[SIP_HDR_FROM] = SIP_HDR("From", "f", "sip:", skp_epaddr_len),
[SIP_HDR_TO] = SIP_HDR("To", "t", "sip:", skp_epaddr_len),
[SIP_HDR_CONTACT] = SIP_HDR("Contact", "m", "sip:", skp_epaddr_len),
- [SIP_HDR_VIA] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
+ [SIP_HDR_VIA_UDP] = SIP_HDR("Via", "v", "UDP ", epaddr_len),
+ [SIP_HDR_VIA_TCP] = SIP_HDR("Via", "v", "TCP ", epaddr_len),
[SIP_HDR_EXPIRES] = SIP_HDR("Expires", NULL, NULL, digits_len),
[SIP_HDR_CONTENT_LENGTH] = SIP_HDR("Content-Length", "l", NULL, digits_len),
};
@@ -376,7 +385,7 @@ int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
dptr += hdr->len;
else if (hdr->cname && limit - dptr >= hdr->clen + 1 &&
strnicmp(dptr, hdr->cname, hdr->clen) == 0 &&
- !isalpha(*(dptr + hdr->clen + 1)))
+ !isalpha(*(dptr + hdr->clen)))
dptr += hdr->clen;
else
continue;
@@ -516,6 +525,33 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
}
EXPORT_SYMBOL_GPL(ct_sip_parse_header_uri);
+static int ct_sip_parse_param(const struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ const char *name,
+ unsigned int *matchoff, unsigned int *matchlen)
+{
+ const char *limit = dptr + datalen;
+ const char *start;
+ const char *end;
+
+ limit = ct_sip_header_search(dptr + dataoff, limit, ",", strlen(","));
+ if (!limit)
+ limit = dptr + datalen;
+
+ start = ct_sip_header_search(dptr + dataoff, limit, name, strlen(name));
+ if (!start)
+ return 0;
+ start += strlen(name);
+
+ end = ct_sip_header_search(start, limit, ";", strlen(";"));
+ if (!end)
+ end = limit;
+
+ *matchoff = start - dptr;
+ *matchlen = end - start;
+ return 1;
+}
+
/* Parse address from header parameter and return address, offset and length */
int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
unsigned int dataoff, unsigned int datalen,
@@ -574,6 +610,29 @@ int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
}
EXPORT_SYMBOL_GPL(ct_sip_parse_numerical_param);
+static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr,
+ unsigned int dataoff, unsigned int datalen,
+ u8 *proto)
+{
+ unsigned int matchoff, matchlen;
+
+ if (ct_sip_parse_param(ct, dptr, dataoff, datalen, "transport=",
+ &matchoff, &matchlen)) {
+ if (!strnicmp(dptr + matchoff, "TCP", strlen("TCP")))
+ *proto = IPPROTO_TCP;
+ else if (!strnicmp(dptr + matchoff, "UDP", strlen("UDP")))
+ *proto = IPPROTO_UDP;
+ else
+ return 0;
+
+ if (*proto != nf_ct_protonum(ct))
+ return 0;
+ } else
+ *proto = nf_ct_protonum(ct);
+
+ return 1;
+}
+
/* SDP header parsing: a SDP session description contains an ordered set of
* headers, starting with a section containing general session parameters,
* optionally followed by multiple media descriptions.
@@ -682,7 +741,7 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr,
static int refresh_signalling_expectation(struct nf_conn *ct,
union nf_inet_addr *addr,
- __be16 port,
+ u8 proto, __be16 port,
unsigned int expires)
{
struct nf_conn_help *help = nfct_help(ct);
@@ -694,6 +753,7 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
if (exp->class != SIP_EXPECT_SIGNALLING ||
!nf_inet_addr_cmp(&exp->tuple.dst.u3, addr) ||
+ exp->tuple.dst.protonum != proto ||
exp->tuple.dst.u.udp.port != port)
continue;
if (!del_timer(&exp->timeout))
@@ -728,7 +788,7 @@ static void flush_expectations(struct nf_conn *ct, bool media)
spin_unlock_bh(&nf_conntrack_lock);
}
-static int set_expected_rtp_rtcp(struct sk_buff *skb,
+static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
union nf_inet_addr *daddr, __be16 port,
enum sip_expectation_classes class,
@@ -777,7 +837,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
rcu_read_lock();
do {
- exp = __nf_ct_expect_find(net, &tuple);
+ exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple);
if (!exp || exp->master == ct ||
nfct_help(exp->master)->helper != nfct_help(ct)->helper ||
@@ -805,7 +865,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
if (direct_rtp) {
nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
if (nf_nat_sdp_port &&
- !nf_nat_sdp_port(skb, dptr, datalen,
+ !nf_nat_sdp_port(skb, dataoff, dptr, datalen,
mediaoff, medialen, ntohs(rtp_port)))
goto err1;
}
@@ -827,7 +887,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb,
nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
- ret = nf_nat_sdp_media(skb, dptr, datalen, rtp_exp, rtcp_exp,
+ ret = nf_nat_sdp_media(skb, dataoff, dptr, datalen,
+ rtp_exp, rtcp_exp,
mediaoff, medialen, daddr);
else {
if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -847,6 +908,7 @@ err1:
static const struct sdp_media_type sdp_media_types[] = {
SDP_MEDIA_TYPE("audio ", SIP_EXPECT_AUDIO),
SDP_MEDIA_TYPE("video ", SIP_EXPECT_VIDEO),
+ SDP_MEDIA_TYPE("image ", SIP_EXPECT_IMAGE),
};
static const struct sdp_media_type *sdp_media_type(const char *dptr,
@@ -866,13 +928,12 @@ static const struct sdp_media_type *sdp_media_type(const char *dptr,
return NULL;
}
-static int process_sdp(struct sk_buff *skb,
+static int process_sdp(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
unsigned int matchoff, matchlen;
unsigned int mediaoff, medialen;
unsigned int sdpoff;
@@ -941,7 +1002,7 @@ static int process_sdp(struct sk_buff *skb,
else
return NF_DROP;
- ret = set_expected_rtp_rtcp(skb, dptr, datalen,
+ ret = set_expected_rtp_rtcp(skb, dataoff, dptr, datalen,
&rtp_addr, htons(port), t->class,
mediaoff, medialen);
if (ret != NF_ACCEPT)
@@ -949,8 +1010,9 @@ static int process_sdp(struct sk_buff *skb,
/* Update media connection address if present */
if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
- ret = nf_nat_sdp_addr(skb, dptr, mediaoff, datalen,
- c_hdr, SDP_HDR_MEDIA, &rtp_addr);
+ ret = nf_nat_sdp_addr(skb, dataoff, dptr, datalen,
+ mediaoff, c_hdr, SDP_HDR_MEDIA,
+ &rtp_addr);
if (ret != NF_ACCEPT)
return ret;
}
@@ -960,14 +1022,12 @@ static int process_sdp(struct sk_buff *skb,
/* Update session connection and owner addresses */
nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
- ret = nf_nat_sdp_session(skb, dptr, sdpoff, datalen, &rtp_addr);
-
- if (ret == NF_ACCEPT && i > 0)
- help->help.ct_sip_info.invite_cseq = cseq;
+ ret = nf_nat_sdp_session(skb, dataoff, dptr, datalen, sdpoff,
+ &rtp_addr);
return ret;
}
-static int process_invite_response(struct sk_buff *skb,
+static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
@@ -977,13 +1037,13 @@ static int process_invite_response(struct sk_buff *skb,
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
- return process_sdp(skb, dptr, datalen, cseq);
+ return process_sdp(skb, dataoff, dptr, datalen, cseq);
else if (help->help.ct_sip_info.invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
-static int process_update_response(struct sk_buff *skb,
+static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
@@ -993,13 +1053,13 @@ static int process_update_response(struct sk_buff *skb,
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
- return process_sdp(skb, dptr, datalen, cseq);
+ return process_sdp(skb, dataoff, dptr, datalen, cseq);
else if (help->help.ct_sip_info.invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
-static int process_prack_response(struct sk_buff *skb,
+static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
@@ -1009,13 +1069,29 @@ static int process_prack_response(struct sk_buff *skb,
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
- return process_sdp(skb, dptr, datalen, cseq);
+ return process_sdp(skb, dataoff, dptr, datalen, cseq);
else if (help->help.ct_sip_info.invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
-static int process_bye_request(struct sk_buff *skb,
+static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
+ const char **dptr, unsigned int *datalen,
+ unsigned int cseq)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+ struct nf_conn_help *help = nfct_help(ct);
+ unsigned int ret;
+
+ flush_expectations(ct, true);
+ ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
+ if (ret == NF_ACCEPT)
+ help->help.ct_sip_info.invite_cseq = cseq;
+ return ret;
+}
+
+static int process_bye_request(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
@@ -1030,7 +1106,7 @@ static int process_bye_request(struct sk_buff *skb,
* signalling connections. The expectation is marked inactive and is activated
* when receiving a response indicating success from the registrar.
*/
-static int process_register_request(struct sk_buff *skb,
+static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq)
{
@@ -1042,6 +1118,7 @@ static int process_register_request(struct sk_buff *skb,
struct nf_conntrack_expect *exp;
union nf_inet_addr *saddr, daddr;
__be16 port;
+ u8 proto;
unsigned int expires = 0;
int ret;
typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
@@ -1074,6 +1151,10 @@ static int process_register_request(struct sk_buff *skb,
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &daddr))
return NF_ACCEPT;
+ if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen, *datalen,
+ &proto) == 0)
+ return NF_ACCEPT;
+
if (ct_sip_parse_numerical_param(ct, *dptr,
matchoff + matchlen, *datalen,
"expires=", NULL, NULL, &expires) < 0)
@@ -1093,14 +1174,14 @@ static int process_register_request(struct sk_buff *skb,
saddr = &ct->tuplehash[!dir].tuple.src.u3;
nf_ct_expect_init(exp, SIP_EXPECT_SIGNALLING, nf_ct_l3num(ct),
- saddr, &daddr, IPPROTO_UDP, NULL, &port);
+ saddr, &daddr, proto, NULL, &port);
exp->timeout.expires = sip_timeout * HZ;
exp->helper = nfct_help(ct)->helper;
exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
- ret = nf_nat_sip_expect(skb, dptr, datalen, exp,
+ ret = nf_nat_sip_expect(skb, dataoff, dptr, datalen, exp,
matchoff, matchlen);
else {
if (nf_ct_expect_related(exp) != 0)
@@ -1116,7 +1197,7 @@ store_cseq:
return ret;
}
-static int process_register_response(struct sk_buff *skb,
+static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen,
unsigned int cseq, unsigned int code)
{
@@ -1126,7 +1207,8 @@ static int process_register_response(struct sk_buff *skb,
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
union nf_inet_addr addr;
__be16 port;
- unsigned int matchoff, matchlen, dataoff = 0;
+ u8 proto;
+ unsigned int matchoff, matchlen, coff = 0;
unsigned int expires = 0;
int in_contact = 0, ret;
@@ -1153,7 +1235,7 @@ static int process_register_response(struct sk_buff *skb,
while (1) {
unsigned int c_expires = expires;
- ret = ct_sip_parse_header_uri(ct, *dptr, &dataoff, *datalen,
+ ret = ct_sip_parse_header_uri(ct, *dptr, &coff, *datalen,
SIP_HDR_CONTACT, &in_contact,
&matchoff, &matchlen,
&addr, &port);
@@ -1166,6 +1248,10 @@ static int process_register_response(struct sk_buff *skb,
if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &addr))
continue;
+ if (ct_sip_parse_transport(ct, *dptr, matchoff + matchlen,
+ *datalen, &proto) == 0)
+ continue;
+
ret = ct_sip_parse_numerical_param(ct, *dptr,
matchoff + matchlen,
*datalen, "expires=",
@@ -1174,7 +1260,8 @@ static int process_register_response(struct sk_buff *skb,
return NF_DROP;
if (c_expires == 0)
break;
- if (refresh_signalling_expectation(ct, &addr, port, c_expires))
+ if (refresh_signalling_expectation(ct, &addr, proto, port,
+ c_expires))
return NF_ACCEPT;
}
@@ -1184,7 +1271,7 @@ flush:
}
static const struct sip_handler sip_handlers[] = {
- SIP_HANDLER("INVITE", process_sdp, process_invite_response),
+ SIP_HANDLER("INVITE", process_invite_request, process_invite_response),
SIP_HANDLER("UPDATE", process_sdp, process_update_response),
SIP_HANDLER("ACK", process_sdp, NULL),
SIP_HANDLER("PRACK", process_sdp, process_prack_response),
@@ -1192,13 +1279,13 @@ static const struct sip_handler sip_handlers[] = {
SIP_HANDLER("REGISTER", process_register_request, process_register_response),
};
-static int process_sip_response(struct sk_buff *skb,
+static int process_sip_response(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- unsigned int matchoff, matchlen;
- unsigned int code, cseq, dataoff, i;
+ unsigned int matchoff, matchlen, matchend;
+ unsigned int code, cseq, i;
if (*datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
@@ -1212,7 +1299,7 @@ static int process_sip_response(struct sk_buff *skb,
cseq = simple_strtoul(*dptr + matchoff, NULL, 10);
if (!cseq)
return NF_DROP;
- dataoff = matchoff + matchlen + 1;
+ matchend = matchoff + matchlen + 1;
for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
const struct sip_handler *handler;
@@ -1220,15 +1307,16 @@ static int process_sip_response(struct sk_buff *skb,
handler = &sip_handlers[i];
if (handler->response == NULL)
continue;
- if (*datalen < dataoff + handler->len ||
- strnicmp(*dptr + dataoff, handler->method, handler->len))
+ if (*datalen < matchend + handler->len ||
+ strnicmp(*dptr + matchend, handler->method, handler->len))
continue;
- return handler->response(skb, dptr, datalen, cseq, code);
+ return handler->response(skb, dataoff, dptr, datalen,
+ cseq, code);
}
return NF_ACCEPT;
}
-static int process_sip_request(struct sk_buff *skb,
+static int process_sip_request(struct sk_buff *skb, unsigned int dataoff,
const char **dptr, unsigned int *datalen)
{
enum ip_conntrack_info ctinfo;
@@ -1253,69 +1341,157 @@ static int process_sip_request(struct sk_buff *skb,
if (!cseq)
return NF_DROP;
- return handler->request(skb, dptr, datalen, cseq);
+ return handler->request(skb, dataoff, dptr, datalen, cseq);
}
return NF_ACCEPT;
}
-static int sip_help(struct sk_buff *skb,
- unsigned int protoff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
+static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
+ unsigned int dataoff, const char **dptr,
+ unsigned int *datalen)
+{
+ typeof(nf_nat_sip_hook) nf_nat_sip;
+ int ret;
+
+ if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
+ ret = process_sip_request(skb, dataoff, dptr, datalen);
+ else
+ ret = process_sip_response(skb, dataoff, dptr, datalen);
+
+ if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
+ nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
+ if (nf_nat_sip && !nf_nat_sip(skb, dataoff, dptr, datalen))
+ ret = NF_DROP;
+ }
+
+ return ret;
+}
+
+static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
+ struct tcphdr *th, _tcph;
unsigned int dataoff, datalen;
- const char *dptr;
+ unsigned int matchoff, matchlen, clen;
+ unsigned int msglen, origlen;
+ const char *dptr, *end;
+ s16 diff, tdiff = 0;
int ret;
- typeof(nf_nat_sip_hook) nf_nat_sip;
+ typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
+
+ if (ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
+ return NF_ACCEPT;
/* No Data ? */
- dataoff = protoff + sizeof(struct udphdr);
+ th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return NF_ACCEPT;
+ dataoff = protoff + th->doff * 4;
if (dataoff >= skb->len)
return NF_ACCEPT;
nf_ct_refresh(ct, skb, sip_timeout * HZ);
- if (!skb_is_nonlinear(skb))
- dptr = skb->data + dataoff;
- else {
+ if (skb_is_nonlinear(skb)) {
pr_debug("Copy of skbuff not supported yet.\n");
return NF_ACCEPT;
}
+ dptr = skb->data + dataoff;
datalen = skb->len - dataoff;
if (datalen < strlen("SIP/2.0 200"))
return NF_ACCEPT;
- if (strnicmp(dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
- ret = process_sip_request(skb, &dptr, &datalen);
- else
- ret = process_sip_response(skb, &dptr, &datalen);
+ while (1) {
+ if (ct_sip_get_header(ct, dptr, 0, datalen,
+ SIP_HDR_CONTENT_LENGTH,
+ &matchoff, &matchlen) <= 0)
+ break;
+
+ clen = simple_strtoul(dptr + matchoff, (char **)&end, 10);
+ if (dptr + matchoff == end)
+ break;
+
+ if (end + strlen("\r\n\r\n") > dptr + datalen)
+ break;
+ if (end[0] != '\r' || end[1] != '\n' ||
+ end[2] != '\r' || end[3] != '\n')
+ break;
+ end += strlen("\r\n\r\n") + clen;
+
+ msglen = origlen = end - dptr;
+
+ ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
+ if (ret != NF_ACCEPT)
+ break;
+ diff = msglen - origlen;
+ tdiff += diff;
+
+ dataoff += msglen;
+ dptr += msglen;
+ datalen = datalen + diff - msglen;
+ }
if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
- nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
- if (nf_nat_sip && !nf_nat_sip(skb, &dptr, &datalen))
- ret = NF_DROP;
+ nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
+ if (nf_nat_sip_seq_adjust)
+ nf_nat_sip_seq_adjust(skb, tdiff);
}
return ret;
}
-static struct nf_conntrack_helper sip[MAX_PORTS][2] __read_mostly;
-static char sip_names[MAX_PORTS][2][sizeof("sip-65535")] __read_mostly;
+static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ unsigned int dataoff, datalen;
+ const char *dptr;
+
+ /* No Data ? */
+ dataoff = protoff + sizeof(struct udphdr);
+ if (dataoff >= skb->len)
+ return NF_ACCEPT;
+
+ nf_ct_refresh(ct, skb, sip_timeout * HZ);
+
+ if (skb_is_nonlinear(skb)) {
+ pr_debug("Copy of skbuff not supported yet.\n");
+ return NF_ACCEPT;
+ }
+
+ dptr = skb->data + dataoff;
+ datalen = skb->len - dataoff;
+ if (datalen < strlen("SIP/2.0 200"))
+ return NF_ACCEPT;
+
+ return process_sip_msg(skb, ct, dataoff, &dptr, &datalen);
+}
+
+static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly;
+static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = {
[SIP_EXPECT_SIGNALLING] = {
+ .name = "signalling",
.max_expected = 1,
.timeout = 3 * 60,
},
[SIP_EXPECT_AUDIO] = {
+ .name = "audio",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
[SIP_EXPECT_VIDEO] = {
+ .name = "video",
.max_expected = 2 * IP_CT_DIR_MAX,
.timeout = 3 * 60,
},
+ [SIP_EXPECT_IMAGE] = {
+ .name = "image",
+ .max_expected = IP_CT_DIR_MAX,
+ .timeout = 3 * 60,
+ },
};
static void nf_conntrack_sip_fini(void)
@@ -1323,7 +1499,7 @@ static void nf_conntrack_sip_fini(void)
int i, j;
for (i = 0; i < ports_c; i++) {
- for (j = 0; j < 2; j++) {
+ for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
if (sip[i][j].me == NULL)
continue;
nf_conntrack_helper_unregister(&sip[i][j]);
@@ -1343,14 +1519,24 @@ static int __init nf_conntrack_sip_init(void)
memset(&sip[i], 0, sizeof(sip[i]));
sip[i][0].tuple.src.l3num = AF_INET;
- sip[i][1].tuple.src.l3num = AF_INET6;
- for (j = 0; j < 2; j++) {
- sip[i][j].tuple.dst.protonum = IPPROTO_UDP;
+ sip[i][0].tuple.dst.protonum = IPPROTO_UDP;
+ sip[i][0].help = sip_help_udp;
+ sip[i][1].tuple.src.l3num = AF_INET;
+ sip[i][1].tuple.dst.protonum = IPPROTO_TCP;
+ sip[i][1].help = sip_help_tcp;
+
+ sip[i][2].tuple.src.l3num = AF_INET6;
+ sip[i][2].tuple.dst.protonum = IPPROTO_UDP;
+ sip[i][2].help = sip_help_udp;
+ sip[i][3].tuple.src.l3num = AF_INET6;
+ sip[i][3].tuple.dst.protonum = IPPROTO_TCP;
+ sip[i][3].help = sip_help_tcp;
+
+ for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
sip[i][j].expect_policy = sip_exp_policy;
sip[i][j].expect_class_max = SIP_EXPECT_MAX;
sip[i][j].me = THIS_MODULE;
- sip[i][j].help = sip_help;
tmpname = &sip_names[i][j][0];
if (ports[i] == SIP_PORT)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef..24a42efe62e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -26,6 +26,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
+#include <net/netfilter/nf_conntrack_zones.h>
MODULE_LICENSE("GPL");
@@ -51,7 +52,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < nf_conntrack_htable_size;
+ st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
@@ -69,7 +70,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= nf_conntrack_htable_size)
+ if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -171,6 +172,11 @@ static int ct_seq_show(struct seq_file *s, void *v)
goto release;
#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
+ goto release;
+#endif
+
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
@@ -355,7 +361,7 @@ static ctl_table nf_ct_sysctl_table[] = {
},
{
.procname = "nf_conntrack_buckets",
- .data = &nf_conntrack_htable_size,
+ .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
@@ -421,6 +427,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
goto out_kmemdup;
table[1].data = &net->ct.count;
+ table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 3a6fd77f776..ba095fd014e 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -265,7 +265,6 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
local_bh_disable();
entry->okfn(skb);
local_bh_enable();
- case NF_STOLEN:
break;
case NF_QUEUE:
if (!__nf_queue(skb, elem, entry->pf, entry->hook,
@@ -273,6 +272,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
verdict >> NF_VERDICT_BITS))
goto next_hook;
break;
+ case NF_STOLEN:
default:
kfree_skb(skb);
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index eedc0c1ac7a..8eb0cc23ada 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -40,7 +40,6 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
static char __initdata nfversion[] = "0.30";
-static struct sock *nfnl = NULL;
static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT];
static DEFINE_MUTEX(nfnl_mutex);
@@ -101,34 +100,35 @@ nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
return &ss->cb[cb_id];
}
-int nfnetlink_has_listeners(unsigned int group)
+int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
- return netlink_has_listeners(nfnl, group);
+ return netlink_has_listeners(net->nfnl, group);
}
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
-int nfnetlink_send(struct sk_buff *skb, u32 pid,
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
unsigned group, int echo, gfp_t flags)
{
- return nlmsg_notify(nfnl, skb, pid, group, echo, flags);
+ return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_send);
-void nfnetlink_set_err(u32 pid, u32 group, int error)
+void nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error)
{
- netlink_set_err(nfnl, pid, group, error);
+ netlink_set_err(net->nfnl, pid, group, error);
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
-int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags)
{
- return netlink_unicast(nfnl, skb, pid, flags);
+ return netlink_unicast(net->nfnl, skb, pid, flags);
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
+ struct net *net = sock_net(skb->sk);
const struct nfnl_callback *nc;
const struct nfnetlink_subsystem *ss;
int type, err;
@@ -170,7 +170,7 @@ replay:
if (err < 0)
return err;
- err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda);
+ err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda);
if (err == -EAGAIN)
goto replay;
return err;
@@ -184,26 +184,45 @@ static void nfnetlink_rcv(struct sk_buff *skb)
nfnl_unlock();
}
-static void __exit nfnetlink_exit(void)
+static int __net_init nfnetlink_net_init(struct net *net)
{
- printk("Removing netfilter NETLINK layer.\n");
- netlink_kernel_release(nfnl);
- return;
+ struct sock *nfnl;
+
+ nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
+ nfnetlink_rcv, NULL, THIS_MODULE);
+ if (!nfnl)
+ return -ENOMEM;
+ net->nfnl_stash = nfnl;
+ rcu_assign_pointer(net->nfnl, nfnl);
+ return 0;
}
-static int __init nfnetlink_init(void)
+static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
- printk("Netfilter messages via NETLINK v%s.\n", nfversion);
+ struct net *net;
- nfnl = netlink_kernel_create(&init_net, NETLINK_NETFILTER, NFNLGRP_MAX,
- nfnetlink_rcv, NULL, THIS_MODULE);
- if (!nfnl) {
- printk(KERN_ERR "cannot initialize nfnetlink!\n");
- return -ENOMEM;
- }
+ list_for_each_entry(net, net_exit_list, exit_list)
+ rcu_assign_pointer(net->nfnl, NULL);
+ synchronize_net();
+ list_for_each_entry(net, net_exit_list, exit_list)
+ netlink_kernel_release(net->nfnl_stash);
+}
- return 0;
+static struct pernet_operations nfnetlink_net_ops = {
+ .init = nfnetlink_net_init,
+ .exit_batch = nfnetlink_net_exit_batch,
+};
+
+static int __init nfnetlink_init(void)
+{
+ printk("Netfilter messages via NETLINK v%s.\n", nfversion);
+ return register_pernet_subsys(&nfnetlink_net_ops);
}
+static void __exit nfnetlink_exit(void)
+{
+ printk("Removing netfilter NETLINK layer.\n");
+ unregister_pernet_subsys(&nfnetlink_net_ops);
+}
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
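
A hedged caller-side sketch of the per-namespace nfnetlink API introduced above, not part of the patch: example_notify() is a made-up caller, and NFNLGRP_CONNTRACK_NEW is merely an existing multicast group picked for illustration. It shows the point of the signature change: users now pass the struct net explicitly instead of relying on a single global socket.

#include <net/net_namespace.h>
#include <linux/netfilter/nfnetlink.h>

/* Illustrative only: send a notification within the given namespace. */
static void example_notify(struct net *net, struct sk_buff *skb, u32 pid)
{
	/* only send if someone in this netns is actually listening */
	if (!nfnetlink_has_listeners(net, NFNLGRP_CONNTRACK_NEW))
		return;
	nfnetlink_send(skb, net, pid, NFNLGRP_CONNTRACK_NEW, 0, GFP_ATOMIC);
}
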
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 9de0470d557..d9b8fb8ab34 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -323,7 +323,8 @@ __nfulnl_send(struct nfulnl_instance *inst)
NLMSG_DONE,
sizeof(struct nfgenmsg));
- status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
+ status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
+ MSG_DONTWAIT);
inst->qlen = 0;
inst->skb = NULL;
@@ -767,7 +768,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
}
instance_destroy(inst);
- goto out;
+ goto out_put;
default:
ret = -ENOTSUPP;
break;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7e3fa410641..7ba4abc405c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -112,7 +112,6 @@ instance_create(u_int16_t queue_num, int pid)
inst->copy_mode = NFQNL_COPY_NONE;
spin_lock_init(&inst->lock);
INIT_LIST_HEAD(&inst->queue_list);
- INIT_RCU_HEAD(&inst->rcu);
if (!try_module_get(THIS_MODULE)) {
err = -EAGAIN;
@@ -414,13 +413,13 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
queue->queue_dropped++;
if (net_ratelimit())
printk(KERN_WARNING "nf_queue: full at %d entries, "
- "dropping packets(s). Dropped: %d\n",
- queue->queue_total, queue->queue_dropped);
+ "dropping packets(s).\n",
+ queue->queue_total);
goto err_out_free_nskb;
}
/* nfnetlink_unicast will either free the nskb or add it to a socket */
- err = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
+ err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
if (err < 0) {
queue->queue_user_dropped++;
goto err_out_unlock;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index f01955cce31..0a12cedfe9e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -26,7 +26,9 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
-
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_arp/arp_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -37,7 +39,7 @@ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
struct compat_delta {
struct compat_delta *next;
unsigned int offset;
- short delta;
+ int delta;
};
struct xt_af {
@@ -364,8 +366,10 @@ int xt_check_match(struct xt_mtchk_param *par,
* ebt_among is exempt from centralized matchsize checking
* because it uses a dynamic-size data set.
*/
- pr_err("%s_tables: %s match: invalid size %Zu != %u\n",
+ pr_err("%s_tables: %s.%u match: invalid size "
+ "%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->match->name,
+ par->match->revision,
XT_ALIGN(par->match->matchsize), size);
return -EINVAL;
}
@@ -435,10 +439,10 @@ void xt_compat_flush_offsets(u_int8_t af)
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
-short xt_compat_calc_jump(u_int8_t af, unsigned int offset)
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
struct compat_delta *tmp;
- short delta;
+ int delta;
for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
if (tmp->offset < offset)
@@ -481,8 +485,8 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
-int xt_compat_match_to_user(struct xt_entry_match *m, void __user **dstptr,
- unsigned int *size)
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+ void __user **dstptr, unsigned int *size)
{
const struct xt_match *match = m->u.kernel.match;
struct compat_xt_entry_match __user *cm = *dstptr;
@@ -514,8 +518,10 @@ int xt_check_target(struct xt_tgchk_param *par,
unsigned int size, u_int8_t proto, bool inv_proto)
{
if (XT_ALIGN(par->target->targetsize) != size) {
- pr_err("%s_tables: %s target: invalid size %Zu != %u\n",
+ pr_err("%s_tables: %s.%u target: invalid size "
+ "%u (kernel) != (user) %u\n",
xt_prefix[par->family], par->target->name,
+ par->target->revision,
XT_ALIGN(par->target->targetsize), size);
return -EINVAL;
}
@@ -582,8 +588,8 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
-int xt_compat_target_to_user(struct xt_entry_target *t, void __user **dstptr,
- unsigned int *size)
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+ void __user **dstptr, unsigned int *size)
{
const struct xt_target *target = t->u.kernel.target;
struct compat_xt_entry_target __user *ct = *dstptr;
@@ -1091,6 +1097,60 @@ static const struct file_operations xt_target_ops = {
#endif /* CONFIG_PROC_FS */
+/**
+ * xt_hook_link - set up hooks for a new table
+ * @table: table with metadata needed to set up hooks
+ * @fn: Hook function
+ *
+ * This function will take care of creating and registering the necessary
+ * Netfilter hooks for XT tables.
+ */
+struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
+{
+ unsigned int hook_mask = table->valid_hooks;
+ uint8_t i, num_hooks = hweight32(hook_mask);
+ uint8_t hooknum;
+ struct nf_hook_ops *ops;
+ int ret;
+
+ ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
+ if (ops == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
+ hook_mask >>= 1, ++hooknum) {
+ if (!(hook_mask & 1))
+ continue;
+ ops[i].hook = fn;
+ ops[i].owner = table->me;
+ ops[i].pf = table->af;
+ ops[i].hooknum = hooknum;
+ ops[i].priority = table->priority;
+ ++i;
+ }
+
+ ret = nf_register_hooks(ops, num_hooks);
+ if (ret < 0) {
+ kfree(ops);
+ return ERR_PTR(ret);
+ }
+
+ return ops;
+}
+EXPORT_SYMBOL_GPL(xt_hook_link);
+
+/**
+ * xt_hook_unlink - remove hooks for a table
+ * @table: table whose hooks were set up by xt_hook_link
+ * @ops: nf_hook_ops array as returned by xt_hook_link
+ */
+void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
+{
+ nf_unregister_hooks(ops, hweight32(table->valid_hooks));
+ kfree(ops);
+}
+EXPORT_SYMBOL_GPL(xt_hook_unlink);
+
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
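
A hedged usage sketch for the xt_hook_link()/xt_hook_unlink() helpers added above. The names example_table, example_hook, example_init and example_exit, and the particular hook mask and priority chosen, are assumptions for illustration only, not code from this patch; a real table would dispatch into its ruleset instead of returning NF_ACCEPT.

#include <linux/module.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/x_tables.h>

static struct nf_hook_ops *example_ops;

static struct xt_table example_table = {
	.name		= "example",		/* illustrative */
	.valid_hooks	= 1 << NF_INET_LOCAL_OUT,
	.af		= NFPROTO_IPV4,
	.priority	= NF_IP_PRI_FILTER,
	.me		= THIS_MODULE,
};

static unsigned int example_hook(unsigned int hooknum, struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* a real table module would call its *_do_table() here */
	return NF_ACCEPT;
}

static int __init example_init(void)
{
	/* one nf_hook_ops is registered per bit set in valid_hooks */
	example_ops = xt_hook_link(&example_table, example_hook);
	return IS_ERR(example_ops) ? PTR_ERR(example_ops) : 0;
}

static void __exit example_exit(void)
{
	xt_hook_unlink(&example_table, example_ops);
}
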
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
new file mode 100644
index 00000000000..61c50fa8470
--- /dev/null
+++ b/net/netfilter/xt_CT.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2010 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/selinux.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CT.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+
+static unsigned int xt_ct_target(struct sk_buff *skb,
+ const struct xt_target_param *par)
+{
+ const struct xt_ct_target_info *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+
+ /* Previously seen (loopback)? Ignore. */
+ if (skb->nfct != NULL)
+ return XT_CONTINUE;
+
+ atomic_inc(&ct->ct_general.use);
+ skb->nfct = &ct->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+
+ return XT_CONTINUE;
+}
+
+static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
+{
+ if (par->family == AF_INET) {
+ const struct ipt_entry *e = par->entryinfo;
+
+ if (e->ip.invflags & IPT_INV_PROTO)
+ return 0;
+ return e->ip.proto;
+ } else if (par->family == AF_INET6) {
+ const struct ip6t_entry *e = par->entryinfo;
+
+ if (e->ipv6.invflags & IP6T_INV_PROTO)
+ return 0;
+ return e->ipv6.proto;
+ } else
+ return 0;
+}
+
+static bool xt_ct_tg_check(const struct xt_tgchk_param *par)
+{
+ struct xt_ct_target_info *info = par->targinfo;
+ struct nf_conntrack_tuple t;
+ struct nf_conn_help *help;
+ struct nf_conn *ct;
+ u8 proto;
+
+ if (info->flags & ~XT_CT_NOTRACK)
+ return false;
+
+ if (info->flags & XT_CT_NOTRACK) {
+ ct = &nf_conntrack_untracked;
+ atomic_inc(&ct->ct_general.use);
+ goto out;
+ }
+
+#ifndef CONFIG_NF_CONNTRACK_ZONES
+ if (info->zone)
+ goto err1;
+#endif
+
+ if (nf_ct_l3proto_try_module_get(par->family) < 0)
+ goto err1;
+
+ memset(&t, 0, sizeof(t));
+ ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+ if (IS_ERR(ct))
+ goto err2;
+
+ if ((info->ct_events || info->exp_events) &&
+ !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
+ GFP_KERNEL))
+ goto err3;
+
+ if (info->helper[0]) {
+ proto = xt_ct_find_proto(par);
+ if (!proto)
+ goto err3;
+
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ help->helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (help->helper == NULL)
+ goto err3;
+ }
+
+ __set_bit(IPS_TEMPLATE_BIT, &ct->status);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+out:
+ info->ct = ct;
+ return true;
+
+err3:
+ nf_conntrack_free(ct);
+err2:
+ nf_ct_l3proto_module_put(par->family);
+err1:
+ return false;
+}
+
+static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ struct xt_ct_target_info *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+ struct nf_conn_help *help;
+
+ if (ct != &nf_conntrack_untracked) {
+ help = nfct_help(ct);
+ if (help)
+ module_put(help->helper->me);
+
+ nf_ct_l3proto_module_put(par->family);
+ }
+ nf_ct_put(info->ct);
+}
+
+static struct xt_target xt_ct_tg __read_mostly = {
+ .name = "CT",
+ .family = NFPROTO_UNSPEC,
+ .targetsize = XT_ALIGN(sizeof(struct xt_ct_target_info)),
+ .checkentry = xt_ct_tg_check,
+ .destroy = xt_ct_tg_destroy,
+ .target = xt_ct_target,
+ .table = "raw",
+ .me = THIS_MODULE,
+};
+
+static int __init xt_ct_tg_init(void)
+{
+ return xt_register_target(&xt_ct_tg);
+}
+
+static void __exit xt_ct_tg_exit(void)
+{
+ xt_unregister_target(&xt_ct_tg);
+}
+
+module_init(xt_ct_tg_init);
+module_exit(xt_ct_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: connection tracking target");
+MODULE_ALIAS("ipt_CT");
+MODULE_ALIAS("ip6t_CT");
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index f28f6a5fc02..12dcd7007c3 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -28,6 +28,7 @@ MODULE_ALIAS("ip6t_NFQUEUE");
MODULE_ALIAS("arpt_NFQUEUE");
static u32 jhash_initval __read_mostly;
+static bool rnd_inited __read_mostly;
static unsigned int
nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
@@ -90,6 +91,10 @@ static bool nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
const struct xt_NFQ_info_v1 *info = par->targinfo;
u32 maxid;
+ if (unlikely(!rnd_inited)) {
+ get_random_bytes(&jhash_initval, sizeof(jhash_initval));
+ rnd_inited = true;
+ }
if (info->queues_total == 0) {
pr_err("NFQUEUE: number of total queues is 0\n");
return false;
@@ -135,7 +140,6 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
static int __init nfqueue_tg_init(void)
{
- get_random_bytes(&jhash_initval, sizeof(jhash_initval));
return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg));
}
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index d80b8192e0d..87ae97e5516 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -23,6 +23,7 @@ static DEFINE_MUTEX(xt_rateest_mutex);
#define RATEEST_HSIZE 16
static struct hlist_head rateest_hash[RATEEST_HSIZE] __read_mostly;
static unsigned int jhash_rnd __read_mostly;
+static bool rnd_inited __read_mostly;
static unsigned int xt_rateest_hash(const char *name)
{
@@ -93,6 +94,11 @@ static bool xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
struct gnet_estimator est;
} cfg;
+ if (unlikely(!rnd_inited)) {
+ get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
+ rnd_inited = true;
+ }
+
est = xt_rateest_lookup(info->name);
if (est) {
/*
@@ -164,7 +170,6 @@ static int __init xt_rateest_tg_init(void)
for (i = 0; i < ARRAY_SIZE(rateest_hash); i++)
INIT_HLIST_HEAD(&rateest_hash[i]);
- get_random_bytes(&jhash_rnd, sizeof(jhash_rnd));
return xt_register_target(&xt_rateest_tg_reg);
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index eda64c1cb1e..0e357ac9a2a 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -60,17 +60,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
tcplen = skb->len - tcphoff;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- /* Since it passed flags test in tcp match, we know it is is
- not a fragment, and has data >= tcp header length. SYN
- packets should not contain data: if they did, then we risk
- running over MTU, sending Frag Needed and breaking things
- badly. --RR */
- if (tcplen != tcph->doff*4) {
- if (net_ratelimit())
- printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
- skb->len);
+ /* Header cannot be larger than the packet */
+ if (tcplen < tcph->doff*4)
return -1;
- }
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
if (dst_mtu(skb_dst(skb)) <= minlen) {
@@ -115,6 +107,12 @@ tcpmss_mangle_packet(struct sk_buff *skb,
}
}
+ /* There is data after the header so the option can't be added
+ without moving it, and doing so may make the SYN packet
+ itself too large. Accept the packet unmodified instead. */
+ if (tcplen > tcph->doff*4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
@@ -241,6 +239,7 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ipt_entry *e = par->entryinfo;
+ const struct xt_entry_match *ematch;
if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -250,8 +249,9 @@ static bool tcpmss_tg4_check(const struct xt_tgchk_param *par)
"FORWARD, OUTPUT and POSTROUTING hooks\n");
return false;
}
- if (IPT_MATCH_ITERATE(e, find_syn_match))
- return true;
+ xt_ematch_foreach(ematch, e)
+ if (find_syn_match(ematch))
+ return true;
printk("xt_TCPMSS: Only works on TCP SYN packets\n");
return false;
}
@@ -261,6 +261,7 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
const struct xt_tcpmss_info *info = par->targinfo;
const struct ip6t_entry *e = par->entryinfo;
+ const struct xt_entry_match *ematch;
if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
@@ -270,8 +271,9 @@ static bool tcpmss_tg6_check(const struct xt_tgchk_param *par)
"FORWARD, OUTPUT and POSTROUTING hooks\n");
return false;
}
- if (IP6T_MATCH_ITERATE(e, find_syn_match))
- return true;
+ xt_ematch_foreach(ematch, e)
+ if (find_syn_match(ematch))
+ return true;
printk("xt_TCPMSS: Only works on TCP SYN packets\n");
return false;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 38f03f75a63..26997ce90e4 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_zones.h>
/* we will save the tuples of all connections we care about */
struct xt_connlimit_conn {
@@ -40,15 +41,11 @@ struct xt_connlimit_data {
spinlock_t lock;
};
-static u_int32_t connlimit_rnd;
-static bool connlimit_rnd_inited;
+static u_int32_t connlimit_rnd __read_mostly;
+static bool connlimit_rnd_inited __read_mostly;
static inline unsigned int connlimit_iphash(__be32 addr)
{
- if (unlikely(!connlimit_rnd_inited)) {
- get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
- connlimit_rnd_inited = true;
- }
return jhash_1word((__force __u32)addr, connlimit_rnd) & 0xFF;
}
@@ -59,11 +56,6 @@ connlimit_iphash6(const union nf_inet_addr *addr,
union nf_inet_addr res;
unsigned int i;
- if (unlikely(!connlimit_rnd_inited)) {
- get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
- connlimit_rnd_inited = true;
- }
-
for (i = 0; i < ARRAY_SIZE(addr->ip6); ++i)
res.ip6[i] = addr->ip6[i] & mask->ip6[i];
@@ -99,7 +91,8 @@ same_source_net(const union nf_inet_addr *addr,
}
}
-static int count_them(struct xt_connlimit_data *data,
+static int count_them(struct net *net,
+ struct xt_connlimit_data *data,
const struct nf_conntrack_tuple *tuple,
const union nf_inet_addr *addr,
const union nf_inet_addr *mask,
@@ -122,7 +115,8 @@ static int count_them(struct xt_connlimit_data *data,
/* check the saved connections */
list_for_each_entry_safe(conn, tmp, hash, list) {
- found = nf_conntrack_find_get(&init_net, &conn->tuple);
+ found = nf_conntrack_find_get(net, NF_CT_DEFAULT_ZONE,
+ &conn->tuple);
found_ct = NULL;
if (found != NULL)
@@ -180,6 +174,7 @@ static int count_them(struct xt_connlimit_data *data,
static bool
connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
+ struct net *net = dev_net(par->in ? par->in : par->out);
const struct xt_connlimit_info *info = par->matchinfo;
union nf_inet_addr addr;
struct nf_conntrack_tuple tuple;
@@ -204,7 +199,7 @@ connlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
}
spin_lock_bh(&info->data->lock);
- connections = count_them(info->data, tuple_ptr, &addr,
+ connections = count_them(net, info->data, tuple_ptr, &addr,
&info->mask, par->family);
spin_unlock_bh(&info->data->lock);
@@ -226,6 +221,10 @@ static bool connlimit_mt_check(const struct xt_mtchk_param *par)
struct xt_connlimit_info *info = par->matchinfo;
unsigned int i;
+ if (unlikely(!connlimit_rnd_inited)) {
+ get_random_bytes(&connlimit_rnd, sizeof(connlimit_rnd));
+ connlimit_rnd_inited = true;
+ }
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
printk(KERN_WARNING "cannot load conntrack support for "
"address family %u\n", par->family);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index dd16e404424..d952806b646 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -26,6 +26,7 @@
#endif
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,9 +41,19 @@ MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");
+struct hashlimit_net {
+ struct hlist_head htables;
+ struct proc_dir_entry *ipt_hashlimit;
+ struct proc_dir_entry *ip6t_hashlimit;
+};
+
+static int hashlimit_net_id;
+static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
+{
+ return net_generic(net, hashlimit_net_id);
+}
+
/* need to declare this at the top */
-static struct proc_dir_entry *hashlimit_procdir4;
-static struct proc_dir_entry *hashlimit_procdir6;
static const struct file_operations dl_file_ops;
/* hash table crap */
@@ -79,27 +90,26 @@ struct dsthash_ent {
struct xt_hashlimit_htable {
struct hlist_node node; /* global list of all htables */
- atomic_t use;
+ int use;
u_int8_t family;
+ bool rnd_initialized;
struct hashlimit_cfg1 cfg; /* config */
/* used internally */
spinlock_t lock; /* lock for list_head */
u_int32_t rnd; /* random seed for hash */
- int rnd_initialized;
unsigned int count; /* number entries in table */
struct timer_list timer; /* timer for gc */
/* seq_file stuff */
struct proc_dir_entry *pde;
+ struct net *net;
struct hlist_head hash[0]; /* hashtable itself */
};
-static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
-static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */
-static HLIST_HEAD(hashlimit_htables);
+static DEFINE_MUTEX(hashlimit_mutex); /* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;
static inline bool dst_cmp(const struct dsthash_ent *ent,
@@ -150,7 +160,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
* the first hashtable entry */
if (!ht->rnd_initialized) {
get_random_bytes(&ht->rnd, sizeof(ht->rnd));
- ht->rnd_initialized = 1;
+ ht->rnd_initialized = true;
}
if (ht->cfg.max && ht->count >= ht->cfg.max) {
@@ -185,8 +195,9 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
}
static void htable_gc(unsigned long htlong);
-static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
+static int htable_create_v0(struct net *net, struct xt_hashlimit_info *minfo, u_int8_t family)
{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
unsigned int size;
unsigned int i;
@@ -232,33 +243,34 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
for (i = 0; i < hinfo->cfg.size; i++)
INIT_HLIST_HEAD(&hinfo->hash[i]);
- atomic_set(&hinfo->use, 1);
+ hinfo->use = 1;
hinfo->count = 0;
hinfo->family = family;
- hinfo->rnd_initialized = 0;
+ hinfo->rnd_initialized = false;
spin_lock_init(&hinfo->lock);
hinfo->pde = proc_create_data(minfo->name, 0,
(family == NFPROTO_IPV4) ?
- hashlimit_procdir4 : hashlimit_procdir6,
+ hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
&dl_file_ops, hinfo);
if (!hinfo->pde) {
vfree(hinfo);
return -1;
}
+ hinfo->net = net;
setup_timer(&hinfo->timer, htable_gc, (unsigned long )hinfo);
hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
add_timer(&hinfo->timer);
- spin_lock_bh(&hashlimit_lock);
- hlist_add_head(&hinfo->node, &hashlimit_htables);
- spin_unlock_bh(&hashlimit_lock);
+ hlist_add_head(&hinfo->node, &hashlimit_net->htables);
return 0;
}
-static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
+static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
+ u_int8_t family)
{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
unsigned int size;
unsigned int i;
@@ -293,28 +305,27 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
for (i = 0; i < hinfo->cfg.size; i++)
INIT_HLIST_HEAD(&hinfo->hash[i]);
- atomic_set(&hinfo->use, 1);
+ hinfo->use = 1;
hinfo->count = 0;
hinfo->family = family;
- hinfo->rnd_initialized = 0;
+ hinfo->rnd_initialized = false;
spin_lock_init(&hinfo->lock);
hinfo->pde = proc_create_data(minfo->name, 0,
(family == NFPROTO_IPV4) ?
- hashlimit_procdir4 : hashlimit_procdir6,
+ hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
&dl_file_ops, hinfo);
if (hinfo->pde == NULL) {
vfree(hinfo);
return -1;
}
+ hinfo->net = net;
setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
add_timer(&hinfo->timer);
- spin_lock_bh(&hashlimit_lock);
- hlist_add_head(&hinfo->node, &hashlimit_htables);
- spin_unlock_bh(&hashlimit_lock);
+ hlist_add_head(&hinfo->node, &hashlimit_net->htables);
return 0;
}
@@ -364,43 +375,46 @@ static void htable_gc(unsigned long htlong)
static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
+ struct proc_dir_entry *parent;
+
del_timer_sync(&hinfo->timer);
- /* remove proc entry */
- remove_proc_entry(hinfo->pde->name,
- hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 :
- hashlimit_procdir6);
+ if (hinfo->family == NFPROTO_IPV4)
+ parent = hashlimit_net->ipt_hashlimit;
+ else
+ parent = hashlimit_net->ip6t_hashlimit;
+ remove_proc_entry(hinfo->pde->name, parent);
htable_selective_cleanup(hinfo, select_all);
vfree(hinfo);
}
-static struct xt_hashlimit_htable *htable_find_get(const char *name,
+static struct xt_hashlimit_htable *htable_find_get(struct net *net,
+ const char *name,
u_int8_t family)
{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
struct xt_hashlimit_htable *hinfo;
struct hlist_node *pos;
- spin_lock_bh(&hashlimit_lock);
- hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
+ hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
if (!strcmp(name, hinfo->pde->name) &&
hinfo->family == family) {
- atomic_inc(&hinfo->use);
- spin_unlock_bh(&hashlimit_lock);
+ hinfo->use++;
return hinfo;
}
}
- spin_unlock_bh(&hashlimit_lock);
return NULL;
}
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
- if (atomic_dec_and_test(&hinfo->use)) {
- spin_lock_bh(&hashlimit_lock);
+ mutex_lock(&hashlimit_mutex);
+ if (--hinfo->use == 0) {
hlist_del(&hinfo->node);
- spin_unlock_bh(&hashlimit_lock);
htable_destroy(hinfo);
}
+ mutex_unlock(&hashlimit_mutex);
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -665,6 +679,7 @@ hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
{
+ struct net *net = par->net;
struct xt_hashlimit_info *r = par->matchinfo;
/* Check for overflow. */
@@ -687,25 +702,20 @@ static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
if (r->name[sizeof(r->name) - 1] != '\0')
return false;
- /* This is the best we've got: We cannot release and re-grab lock,
- * since checkentry() is called before x_tables.c grabs xt_mutex.
- * We also cannot grab the hashtable spinlock, since htable_create will
- * call vmalloc, and that can sleep. And we cannot just re-search
- * the list of htable's in htable_create(), since then we would
- * create duplicate proc files. -HW */
- mutex_lock(&hlimit_mutex);
- r->hinfo = htable_find_get(r->name, par->match->family);
- if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
- mutex_unlock(&hlimit_mutex);
+ mutex_lock(&hashlimit_mutex);
+ r->hinfo = htable_find_get(net, r->name, par->match->family);
+ if (!r->hinfo && htable_create_v0(net, r, par->match->family) != 0) {
+ mutex_unlock(&hashlimit_mutex);
return false;
}
- mutex_unlock(&hlimit_mutex);
+ mutex_unlock(&hashlimit_mutex);
return true;
}
static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
{
+ struct net *net = par->net;
struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
/* Check for overflow. */
@@ -728,19 +738,13 @@ static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
return false;
}
- /* This is the best we've got: We cannot release and re-grab lock,
- * since checkentry() is called before x_tables.c grabs xt_mutex.
- * We also cannot grab the hashtable spinlock, since htable_create will
- * call vmalloc, and that can sleep. And we cannot just re-search
- * the list of htable's in htable_create(), since then we would
- * create duplicate proc files. -HW */
- mutex_lock(&hlimit_mutex);
- info->hinfo = htable_find_get(info->name, par->match->family);
- if (!info->hinfo && htable_create(info, par->match->family) != 0) {
- mutex_unlock(&hlimit_mutex);
+ mutex_lock(&hashlimit_mutex);
+ info->hinfo = htable_find_get(net, info->name, par->match->family);
+ if (!info->hinfo && htable_create(net, info, par->match->family) != 0) {
+ mutex_unlock(&hashlimit_mutex);
return false;
}
- mutex_unlock(&hlimit_mutex);
+ mutex_unlock(&hashlimit_mutex);
return true;
}
@@ -767,7 +771,7 @@ struct compat_xt_hashlimit_info {
compat_uptr_t master;
};
-static void hashlimit_mt_compat_from_user(void *dst, void *src)
+static void hashlimit_mt_compat_from_user(void *dst, const void *src)
{
int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
@@ -775,7 +779,7 @@ static void hashlimit_mt_compat_from_user(void *dst, void *src)
memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
}
-static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
+static int hashlimit_mt_compat_to_user(void __user *dst, const void *src)
{
int off = offsetof(struct compat_xt_hashlimit_info, hinfo);
@@ -841,8 +845,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
__acquires(htable->lock)
{
- struct proc_dir_entry *pde = s->private;
- struct xt_hashlimit_htable *htable = pde->data;
+ struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket;
spin_lock_bh(&htable->lock);
@@ -859,8 +862,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct proc_dir_entry *pde = s->private;
- struct xt_hashlimit_htable *htable = pde->data;
+ struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket = (unsigned int *)v;
*pos = ++(*bucket);
@@ -874,8 +876,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void dl_seq_stop(struct seq_file *s, void *v)
__releases(htable->lock)
{
- struct proc_dir_entry *pde = s->private;
- struct xt_hashlimit_htable *htable = pde->data;
+ struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket = (unsigned int *)v;
kfree(bucket);
@@ -917,8 +918,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_show(struct seq_file *s, void *v)
{
- struct proc_dir_entry *pde = s->private;
- struct xt_hashlimit_htable *htable = pde->data;
+ struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
struct hlist_node *pos;
@@ -944,7 +944,7 @@ static int dl_proc_open(struct inode *inode, struct file *file)
if (!ret) {
struct seq_file *sf = file->private_data;
- sf->private = PDE(inode);
+ sf->private = PDE(inode)->data;
}
return ret;
}
@@ -957,10 +957,61 @@ static const struct file_operations dl_file_ops = {
.release = seq_release
};
+static int __net_init hashlimit_proc_net_init(struct net *net)
+{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
+
+ hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
+ if (!hashlimit_net->ipt_hashlimit)
+ return -ENOMEM;
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
+ if (!hashlimit_net->ip6t_hashlimit) {
+ proc_net_remove(net, "ipt_hashlimit");
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+static void __net_exit hashlimit_proc_net_exit(struct net *net)
+{
+ proc_net_remove(net, "ipt_hashlimit");
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ proc_net_remove(net, "ip6t_hashlimit");
+#endif
+}
+
+static int __net_init hashlimit_net_init(struct net *net)
+{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
+
+ INIT_HLIST_HEAD(&hashlimit_net->htables);
+ return hashlimit_proc_net_init(net);
+}
+
+static void __net_exit hashlimit_net_exit(struct net *net)
+{
+ struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
+
+ BUG_ON(!hlist_empty(&hashlimit_net->htables));
+ hashlimit_proc_net_exit(net);
+}
+
+static struct pernet_operations hashlimit_net_ops = {
+ .init = hashlimit_net_init,
+ .exit = hashlimit_net_exit,
+ .id = &hashlimit_net_id,
+ .size = sizeof(struct hashlimit_net),
+};
+
static int __init hashlimit_mt_init(void)
{
int err;
+ err = register_pernet_subsys(&hashlimit_net_ops);
+ if (err < 0)
+ return err;
err = xt_register_matches(hashlimit_mt_reg,
ARRAY_SIZE(hashlimit_mt_reg));
if (err < 0)
@@ -974,41 +1025,21 @@ static int __init hashlimit_mt_init(void)
printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
goto err2;
}
- hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net);
- if (!hashlimit_procdir4) {
- printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
- "entry\n");
- goto err3;
- }
- err = 0;
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
- hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
- if (!hashlimit_procdir6) {
- printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
- "entry\n");
- err = -ENOMEM;
- }
-#endif
- if (!err)
- return 0;
- remove_proc_entry("ipt_hashlimit", init_net.proc_net);
-err3:
- kmem_cache_destroy(hashlimit_cachep);
+ return 0;
+
err2:
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
+ unregister_pernet_subsys(&hashlimit_net_ops);
return err;
}
static void __exit hashlimit_mt_exit(void)
{
- remove_proc_entry("ipt_hashlimit", init_net.proc_net);
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
- remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
-#endif
kmem_cache_destroy(hashlimit_cachep);
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
+ unregister_pernet_subsys(&hashlimit_net_ops);
}
module_init(hashlimit_mt_init);
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
index 2773be6a71d..a0ca5339af4 100644
--- a/net/netfilter/xt_limit.c
+++ b/net/netfilter/xt_limit.c
@@ -148,7 +148,7 @@ struct compat_xt_rateinfo {
/* To keep the full "prev" timestamp, the upper 32 bits are stored in the
* master pointer, which does not need to be preserved. */
-static void limit_mt_compat_from_user(void *dst, void *src)
+static void limit_mt_compat_from_user(void *dst, const void *src)
{
const struct compat_xt_rateinfo *cm = src;
struct xt_rateinfo m = {
@@ -162,7 +162,7 @@ static void limit_mt_compat_from_user(void *dst, void *src)
memcpy(dst, &m, sizeof(m));
}
-static int limit_mt_compat_to_user(void __user *dst, void *src)
+static int limit_mt_compat_to_user(void __user *dst, const void *src)
{
const struct xt_rateinfo *m = src;
struct compat_xt_rateinfo cm = {
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4d1a41bbd5d..4169e200588 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -334,7 +334,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
if (info->flags & XT_OSF_LOG)
nf_log_packet(p->family, p->hooknum, skb,
p->in, p->out, NULL,
- "%s [%s:%s] : %pi4:%d -> %pi4:%d hops=%d\n",
+ "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
f->genre, f->version, f->subtype,
&ip->saddr, ntohs(tcp->source),
&ip->daddr, ntohs(tcp->dest),
@@ -349,7 +349,7 @@ static bool xt_osf_match_packet(const struct sk_buff *skb,
if (!fcount && (info->flags & XT_OSF_LOG))
nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
- "Remote OS is not known: %pi4:%u -> %pi4:%u\n",
+ "Remote OS is not known: %pI4:%u -> %pI4:%u\n",
&ip->saddr, ntohs(tcp->source),
&ip->daddr, ntohs(tcp->dest));
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc70a49c0af..7073dbb8100 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -28,6 +28,7 @@
#include <linux/skbuff.h>
#include <linux/inet.h>
#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_recent.h>
@@ -52,7 +53,7 @@ module_param(ip_list_perms, uint, 0400);
module_param(ip_list_uid, uint, 0400);
module_param(ip_list_gid, uint, 0400);
MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list");
-MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP to remember (max. 255)");
+MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)");
MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs");
MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files");
MODULE_PARM_DESC(ip_list_uid,"owner of /proc/net/xt_recent/* files");
@@ -78,37 +79,40 @@ struct recent_table {
struct list_head iphash[0];
};
-static LIST_HEAD(tables);
+struct recent_net {
+ struct list_head tables;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *xt_recent;
+#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
+ struct proc_dir_entry *ipt_recent;
+#endif
+#endif
+};
+
+static int recent_net_id;
+static inline struct recent_net *recent_pernet(struct net *net)
+{
+ return net_generic(net, recent_net_id);
+}
+
static DEFINE_SPINLOCK(recent_lock);
static DEFINE_MUTEX(recent_mutex);
#ifdef CONFIG_PROC_FS
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
-static struct proc_dir_entry *proc_old_dir;
-#endif
-static struct proc_dir_entry *recent_proc_dir;
static const struct file_operations recent_old_fops, recent_mt_fops;
#endif
-static u_int32_t hash_rnd;
-static bool hash_rnd_initted;
+static u_int32_t hash_rnd __read_mostly;
+static bool hash_rnd_inited __read_mostly;
-static unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
+static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr)
{
- if (!hash_rnd_initted) {
- get_random_bytes(&hash_rnd, sizeof(hash_rnd));
- hash_rnd_initted = true;
- }
return jhash_1word((__force u32)addr->ip, hash_rnd) &
(ip_list_hash_size - 1);
}
-static unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
+static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr)
{
- if (!hash_rnd_initted) {
- get_random_bytes(&hash_rnd, sizeof(hash_rnd));
- hash_rnd_initted = true;
- }
return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) &
(ip_list_hash_size - 1);
}
@@ -173,18 +177,19 @@ recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
+ e->index %= ip_pkt_list_tot;
e->stamps[e->index++] = jiffies;
if (e->index > e->nstamps)
e->nstamps = e->index;
- e->index %= ip_pkt_list_tot;
list_move_tail(&e->lru_list, &t->lru_list);
}
-static struct recent_table *recent_table_lookup(const char *name)
+static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
+ const char *name)
{
struct recent_table *t;
- list_for_each_entry(t, &tables, list)
+ list_for_each_entry(t, &recent_net->tables, list)
if (!strcmp(t->name, name))
return t;
return NULL;
@@ -203,6 +208,8 @@ static void recent_table_flush(struct recent_table *t)
static bool
recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
+ struct net *net = dev_net(par->in ? par->in : par->out);
+ struct recent_net *recent_net = recent_pernet(net);
const struct xt_recent_mtinfo *info = par->matchinfo;
struct recent_table *t;
struct recent_entry *e;
@@ -235,7 +242,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
ttl++;
spin_lock_bh(&recent_lock);
- t = recent_table_lookup(info->name);
+ t = recent_table_lookup(recent_net, info->name);
e = recent_entry_lookup(t, &addr, par->match->family,
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
@@ -260,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
for (i = 0; i < e->nstamps; i++) {
if (info->seconds && time_after(time, e->stamps[i]))
continue;
- if (++hits >= info->hit_count) {
+ if (info->hit_count && ++hits >= info->hit_count) {
ret = !ret;
break;
}
@@ -279,6 +286,7 @@ out:
static bool recent_mt_check(const struct xt_mtchk_param *par)
{
+ struct recent_net *recent_net = recent_pernet(par->net);
const struct xt_recent_mtinfo *info = par->matchinfo;
struct recent_table *t;
#ifdef CONFIG_PROC_FS
@@ -287,6 +295,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
unsigned i;
bool ret = false;
+ if (unlikely(!hash_rnd_inited)) {
+ get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+ hash_rnd_inited = true;
+ }
if (hweight8(info->check_set &
(XT_RECENT_SET | XT_RECENT_REMOVE |
XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
@@ -294,14 +306,18 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
(info->seconds || info->hit_count))
return false;
- if (info->hit_count > ip_pkt_list_tot)
+ if (info->hit_count > ip_pkt_list_tot) {
+ pr_info(KBUILD_MODNAME ": hitcount (%u) is larger than "
+ "packets to be remembered (%u)\n",
+ info->hit_count, ip_pkt_list_tot);
return false;
+ }
if (info->name[0] == '\0' ||
strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
return false;
mutex_lock(&recent_mutex);
- t = recent_table_lookup(info->name);
+ t = recent_table_lookup(recent_net, info->name);
if (t != NULL) {
t->refcnt++;
ret = true;
@@ -318,7 +334,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
for (i = 0; i < ip_list_hash_size; i++)
INIT_LIST_HEAD(&t->iphash[i]);
#ifdef CONFIG_PROC_FS
- pde = proc_create_data(t->name, ip_list_perms, recent_proc_dir,
+ pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
&recent_mt_fops, t);
if (pde == NULL) {
kfree(t);
@@ -327,10 +343,10 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
pde->uid = ip_list_uid;
pde->gid = ip_list_gid;
#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- pde = proc_create_data(t->name, ip_list_perms, proc_old_dir,
+ pde = proc_create_data(t->name, ip_list_perms, recent_net->ipt_recent,
&recent_old_fops, t);
if (pde == NULL) {
- remove_proc_entry(t->name, proc_old_dir);
+ remove_proc_entry(t->name, recent_net->xt_recent);
kfree(t);
goto out;
}
@@ -339,7 +355,7 @@ static bool recent_mt_check(const struct xt_mtchk_param *par)
#endif
#endif
spin_lock_bh(&recent_lock);
- list_add_tail(&t->list, &tables);
+ list_add_tail(&t->list, &recent_net->tables);
spin_unlock_bh(&recent_lock);
ret = true;
out:
@@ -349,20 +365,21 @@ out:
static void recent_mt_destroy(const struct xt_mtdtor_param *par)
{
+ struct recent_net *recent_net = recent_pernet(par->net);
const struct xt_recent_mtinfo *info = par->matchinfo;
struct recent_table *t;
mutex_lock(&recent_mutex);
- t = recent_table_lookup(info->name);
+ t = recent_table_lookup(recent_net, info->name);
if (--t->refcnt == 0) {
spin_lock_bh(&recent_lock);
list_del(&t->list);
spin_unlock_bh(&recent_lock);
#ifdef CONFIG_PROC_FS
#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- remove_proc_entry(t->name, proc_old_dir);
+ remove_proc_entry(t->name, recent_net->ipt_recent);
#endif
- remove_proc_entry(t->name, recent_proc_dir);
+ remove_proc_entry(t->name, recent_net->xt_recent);
#endif
recent_table_flush(t);
kfree(t);
@@ -611,8 +628,65 @@ static const struct file_operations recent_mt_fops = {
.release = seq_release_private,
.owner = THIS_MODULE,
};
+
+static int __net_init recent_proc_net_init(struct net *net)
+{
+ struct recent_net *recent_net = recent_pernet(net);
+
+ recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
+ if (!recent_net->xt_recent)
+ return -ENOMEM;
+#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
+ recent_net->ipt_recent = proc_mkdir("ipt_recent", net->proc_net);
+ if (!recent_net->ipt_recent) {
+ proc_net_remove(net, "xt_recent");
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+static void __net_exit recent_proc_net_exit(struct net *net)
+{
+#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
+ proc_net_remove(net, "ipt_recent");
+#endif
+ proc_net_remove(net, "xt_recent");
+}
+#else
+static inline int recent_proc_net_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void recent_proc_net_exit(struct net *net)
+{
+}
#endif /* CONFIG_PROC_FS */
+static int __net_init recent_net_init(struct net *net)
+{
+ struct recent_net *recent_net = recent_pernet(net);
+
+ INIT_LIST_HEAD(&recent_net->tables);
+ return recent_proc_net_init(net);
+}
+
+static void __net_exit recent_net_exit(struct net *net)
+{
+ struct recent_net *recent_net = recent_pernet(net);
+
+ BUG_ON(!list_empty(&recent_net->tables));
+ recent_proc_net_exit(net);
+}
+
+static struct pernet_operations recent_net_ops = {
+ .init = recent_net_init,
+ .exit = recent_net_exit,
+ .id = &recent_net_id,
+ .size = sizeof(struct recent_net),
+};
+
static struct xt_match recent_mt_reg[] __read_mostly = {
{
.name = "recent",
@@ -644,39 +718,19 @@ static int __init recent_mt_init(void)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
- err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
-#ifdef CONFIG_PROC_FS
+ err = register_pernet_subsys(&recent_net_ops);
if (err)
return err;
- recent_proc_dir = proc_mkdir("xt_recent", init_net.proc_net);
- if (recent_proc_dir == NULL) {
- xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
- err = -ENOMEM;
- }
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- if (err < 0)
- return err;
- proc_old_dir = proc_mkdir("ipt_recent", init_net.proc_net);
- if (proc_old_dir == NULL) {
- remove_proc_entry("xt_recent", init_net.proc_net);
- xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
- err = -ENOMEM;
- }
-#endif
-#endif
+ err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
+ if (err)
+ unregister_pernet_subsys(&recent_net_ops);
return err;
}
static void __exit recent_mt_exit(void)
{
- BUG_ON(!list_empty(&tables));
xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
-#ifdef CONFIG_PROC_FS
-#ifdef CONFIG_NETFILTER_XT_MATCH_RECENT_PROC_COMPAT
- remove_proc_entry("ipt_recent", init_net.proc_net);
-#endif
- remove_proc_entry("xt_recent", init_net.proc_net);
-#endif
+ unregister_pernet_subsys(&recent_net_ops);
}
module_init(recent_mt_init);
diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h
new file mode 100644
index 00000000000..6efe4e5a81c
--- /dev/null
+++ b/net/netfilter/xt_repldata.h
@@ -0,0 +1,35 @@
+/*
+ * Today's hack: quantum tunneling in structs
+ *
+ * 'entries' and 'term' are never referenced by name anywhere in the code;
+ * they serve as the hanging-off data accessed through repl.data[].
+ */
+
+#define xt_alloc_initial_table(type, typ2) ({ \
+ unsigned int hook_mask = info->valid_hooks; \
+ unsigned int nhooks = hweight32(hook_mask); \
+ unsigned int bytes = 0, hooknum = 0, i = 0; \
+ struct { \
+ struct type##_replace repl; \
+ struct type##_standard entries[nhooks]; \
+ struct type##_error term; \
+ } *tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); \
+ if (tbl == NULL) \
+ return NULL; \
+ strncpy(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
+ tbl->term = (struct type##_error)typ2##_ERROR_INIT; \
+ tbl->repl.valid_hooks = hook_mask; \
+ tbl->repl.num_entries = nhooks + 1; \
+ tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
+ sizeof(struct type##_error); \
+ for (; hook_mask != 0; hook_mask >>= 1, ++hooknum) { \
+ if (!(hook_mask & 1)) \
+ continue; \
+ tbl->repl.hook_entry[hooknum] = bytes; \
+ tbl->repl.underflow[hooknum] = bytes; \
+ tbl->entries[i++] = (struct type##_standard) \
+ typ2##_STANDARD_INIT(NF_ACCEPT); \
+ bytes += sizeof(struct type##_standard); \
+ } \
+ tbl; \
+})
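
A hedged sketch of how a table family is expected to instantiate this macro. It mirrors the iptables wrapper; the IPT_STANDARD_INIT and IPT_ERROR_INIT initializer macros are assumed to come from companion ip_tables header changes outside this hunk, and the note about freeing is the usual calling convention rather than something shown here.

/* Sketch: expanding xt_alloc_initial_table() for iptables.  The macro body
 * reads info->valid_hooks and info->name, so the parameter must be named
 * 'info' and describe the table being created; the returned replace blob is
 * typically freed by the caller once the table has been registered.
 */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ipt, IPT);
}
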
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index c5d9f97ef21..0bfeaab88ef 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -315,7 +315,6 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
entry_old = netlbl_domhsh_search_def(entry->domain);
if (entry_old == NULL) {
entry->valid = 1;
- INIT_RCU_HEAD(&entry->rcu);
if (entry->domain != NULL) {
u32 bkt = netlbl_domhsh_hash(entry->domain);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 98ed22ee2ff..852d9d7976b 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -327,7 +327,6 @@ static int netlbl_unlhsh_add_addr4(struct netlbl_unlhsh_iface *iface,
entry->list.addr = addr->s_addr & mask->s_addr;
entry->list.mask = mask->s_addr;
entry->list.valid = 1;
- INIT_RCU_HEAD(&entry->rcu);
entry->secid = secid;
spin_lock(&netlbl_unlhsh_lock);
@@ -373,7 +372,6 @@ static int netlbl_unlhsh_add_addr6(struct netlbl_unlhsh_iface *iface,
entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
ipv6_addr_copy(&entry->list.mask, mask);
entry->list.valid = 1;
- INIT_RCU_HEAD(&entry->rcu);
entry->secid = secid;
spin_lock(&netlbl_unlhsh_lock);
@@ -410,7 +408,6 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
INIT_LIST_HEAD(&iface->addr4_list);
INIT_LIST_HEAD(&iface->addr6_list);
iface->valid = 1;
- INIT_RCU_HEAD(&iface->rcu);
spin_lock(&netlbl_unlhsh_lock);
if (ifindex > 0) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca6..320d0423a24 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
+ else
+ err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
netlink_unlock_table();
+ if (err < 0)
+ goto out;
+
err = __netlink_create(net, sock, cb_mutex, protocol);
if (err < 0)
goto out_module;
@@ -1973,12 +1978,12 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"sk Eth Pid Groups "
- "Rmem Wmem Dump Locks Drops\n");
+ "Rmem Wmem Dump Locks Drops Inode\n");
else {
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
+ seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->pid,
@@ -1987,7 +1992,8 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(s),
nlk->cb,
atomic_read(&s->sk_refcnt),
- atomic_read(&s->sk_drops)
+ atomic_read(&s->sk_drops),
+ sock_i_ino(s)
);
}
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d07ecda0a92..a4b6e148c5d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -681,9 +681,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
int chains_to_skip = cb->args[0];
int fams_to_skip = cb->args[1];
- for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
- if (i < chains_to_skip)
- continue;
+ for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
n = 0;
list_for_each_entry(rt, genl_family_chain(i), family_list) {
if (!rt->netnsok && !net_eq(net, &init_net))
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71604c6613b..a249127020a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1267,28 +1267,13 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
- struct sock *s;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- sk_for_each(s, node, &nr_list) {
- if (i == *pos)
- return s;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&nr_list, *pos);
}
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
- : sk_next((struct sock *)v);
+ return seq_hlist_next(v, &nr_list, pos);
}
static void nr_info_stop(struct seq_file *seq, void *v)
@@ -1298,7 +1283,7 @@ static void nr_info_stop(struct seq_file *seq, void *v)
static int nr_info_show(struct seq_file *seq, void *v)
{
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
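The netrom (and, further down, rose) /proc iterators are converted to the generic seq_hlist_start_head()/seq_hlist_next() helpers, so ->show() now receives a struct hlist_node * (or SEQ_START_TOKEN) rather than the object itself, hence the sk_entry()/hlist_entry() conversions. A minimal sketch of the resulting pattern, with a hypothetical list and lock:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/seq_file.h>
    #include <net/sock.h>

    static HLIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_list_lock);

    static void *example_seq_start(struct seq_file *seq, loff_t *pos)
    {
            spin_lock_bh(&example_list_lock);
            return seq_hlist_start_head(&example_list, *pos);
    }

    static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            return seq_hlist_next(v, &example_list, pos);
    }

    static void example_seq_stop(struct seq_file *seq, void *v)
    {
            spin_unlock_bh(&example_list_lock);
    }

    static int example_seq_show(struct seq_file *seq, void *v)
    {
            if (v == SEQ_START_TOKEN)
                    seq_puts(seq, "header line\n");
            else
                    seq_printf(seq, "%p\n", sk_entry(v));   /* v is an hlist_node */
            return 0;
    }
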
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e2e2d33cafd..5cc648012f5 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -863,33 +863,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
- struct nr_node *nr_node;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_node_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- nr_node_for_each(nr_node, node, &nr_node_list) {
- if (i == *pos)
- return nr_node;
- ++i;
- }
-
- return NULL;
+ return seq_hlist_start_head(&nr_node_list, *pos);
}
static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct hlist_node *node;
- ++*pos;
-
- node = (v == SEQ_START_TOKEN)
- ? nr_node_list.first
- : ((struct nr_node *)v)->node_node.next;
-
- return hlist_entry(node, struct nr_node, node_node);
+ return seq_hlist_next(v, &nr_node_list, pos);
}
static void nr_node_stop(struct seq_file *seq, void *v)
@@ -906,7 +886,9 @@ static int nr_node_show(struct seq_file *seq, void *v)
seq_puts(seq,
"callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
else {
- struct nr_node *nr_node = v;
+ struct nr_node *nr_node = hlist_entry(v, struct nr_node,
+ node_node);
+
nr_node_lock(nr_node);
seq_printf(seq, "%-9s %-7s %d %d",
ax2asc(buf, &nr_node->callsign),
@@ -949,31 +931,13 @@ const struct file_operations nr_nodes_fops = {
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
- struct nr_neigh *nr_neigh;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_neigh_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
- if (i == *pos)
- return nr_neigh;
- }
- return NULL;
+ return seq_hlist_start_head(&nr_neigh_list, *pos);
}
static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct hlist_node *node;
- ++*pos;
-
- node = (v == SEQ_START_TOKEN)
- ? nr_neigh_list.first
- : ((struct nr_neigh *)v)->neigh_node.next;
-
- return hlist_entry(node, struct nr_neigh, neigh_node);
+ return seq_hlist_next(v, &nr_neigh_list, pos);
}
static void nr_neigh_stop(struct seq_file *seq, void *v)
@@ -989,8 +953,9 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
else {
- struct nr_neigh *nr_neigh = v;
+ struct nr_neigh *nr_neigh;
+ nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
nr_neigh->number,
ax2asc(buf, &nr_neigh->callsign),
diff --git a/net/packet/Kconfig b/net/packet/Kconfig
index 34ff93ff894..0060e3b396b 100644
--- a/net/packet/Kconfig
+++ b/net/packet/Kconfig
@@ -14,13 +14,3 @@ config PACKET
be called af_packet.
If unsure, say Y.
-
-config PACKET_MMAP
- bool "Packet socket: mmapped IO"
- depends on PACKET
- help
- If you say Y here, the Packet protocol driver will use an IO
- mechanism that results in faster communication.
-
- If unsure, say N.
-
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f126d18dbdc..031a5e6fb4a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -80,6 +80,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
+#include <linux/virtio_net.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
@@ -156,7 +157,6 @@ struct packet_mreq_max {
unsigned char mr_address[MAX_ADDR_LEN];
};
-#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
int closing, int tx_ring);
@@ -176,7 +176,6 @@ struct packet_ring_buffer {
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
-#endif
static void packet_flush_mclist(struct sock *sk);
@@ -184,26 +183,23 @@ struct packet_sock {
/* struct sock has to be the first member of packet_sock */
struct sock sk;
struct tpacket_stats stats;
-#ifdef CONFIG_PACKET_MMAP
struct packet_ring_buffer rx_ring;
struct packet_ring_buffer tx_ring;
int copy_thresh;
-#endif
spinlock_t bind_lock;
struct mutex pg_vec_lock;
unsigned int running:1, /* prot_hook is attached*/
auxdata:1,
- origdev:1;
+ origdev:1,
+ has_vnet_hdr:1;
int ifindex; /* bound device */
__be16 num;
struct packet_mclist *mclist;
-#ifdef CONFIG_PACKET_MMAP
atomic_t mapped;
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
unsigned int tp_loss:1;
-#endif
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};
@@ -217,8 +213,6 @@ struct packet_skb_cb {
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
-#ifdef CONFIG_PACKET_MMAP
-
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
union {
@@ -313,8 +307,6 @@ static inline void packet_increment_head(struct packet_ring_buffer *buff)
buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
-#endif
-
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
return (struct packet_sock *)sk;
@@ -508,7 +500,7 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
struct sk_filter *filter;
rcu_read_lock_bh();
- filter = rcu_dereference(sk->sk_filter);
+ filter = rcu_dereference_bh(sk->sk_filter);
if (filter != NULL)
res = sk_run_filter(skb, filter->insns, filter->len);
rcu_read_unlock_bh();
@@ -638,7 +630,6 @@ drop:
return 0;
}
-#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -1054,7 +1045,30 @@ out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
-#endif
+
+static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
+ size_t reserve, size_t len,
+ size_t linear, int noblock,
+ int *err)
+{
+ struct sk_buff *skb;
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
+
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ err);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, reserve);
+ skb_put(skb, linear);
+ skb->data_len = len - linear;
+ skb->len += len - linear;
+
+ return skb;
+}
static int packet_snd(struct socket *sock,
struct msghdr *msg, size_t len)
@@ -1066,14 +1080,17 @@ static int packet_snd(struct socket *sock,
__be16 proto;
unsigned char *addr;
int ifindex, err, reserve = 0;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int offset = 0;
+ int vnet_hdr_len;
+ struct packet_sock *po = pkt_sk(sk);
+ unsigned short gso_type = 0;
/*
* Get and verify the address.
*/
if (saddr == NULL) {
- struct packet_sock *po = pkt_sk(sk);
-
ifindex = po->ifindex;
proto = po->num;
addr = NULL;
@@ -1100,25 +1117,74 @@ static int packet_snd(struct socket *sock,
if (!(dev->flags & IFF_UP))
goto out_unlock;
+ if (po->has_vnet_hdr) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+ err = -EINVAL;
+ if (len < vnet_hdr_len)
+ goto out_unlock;
+
+ len -= vnet_hdr_len;
+
+ err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
+ vnet_hdr_len);
+ if (err < 0)
+ goto out_unlock;
+
+ if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
+ vnet_hdr.hdr_len))
+ vnet_hdr.hdr_len = vnet_hdr.csum_start +
+ vnet_hdr.csum_offset + 2;
+
+ err = -EINVAL;
+ if (vnet_hdr.hdr_len > len)
+ goto out_unlock;
+
+ if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ goto out_unlock;
+ }
+
+ if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ gso_type |= SKB_GSO_TCP_ECN;
+
+ if (vnet_hdr.gso_size == 0)
+ goto out_unlock;
+
+ }
+ }
+
err = -EMSGSIZE;
- if (len > dev->mtu+reserve)
+ if (!gso_type && (len > dev->mtu+reserve))
goto out_unlock;
- skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
- msg->msg_flags & MSG_DONTWAIT, &err);
+ err = -ENOBUFS;
+ skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
+ LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
+ msg->msg_flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto out_unlock;
- skb_reserve(skb, LL_RESERVED_SPACE(dev));
- skb_reset_network_header(skb);
+ skb_set_network_header(skb, reserve);
err = -EINVAL;
if (sock->type == SOCK_DGRAM &&
- dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0)
+ (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
goto out_free;
/* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+ err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
@@ -1127,6 +1193,25 @@ static int packet_snd(struct socket *sock,
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ if (po->has_vnet_hdr) {
+ if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
+ vnet_hdr.csum_offset)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
+ skb_shinfo(skb)->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+
+ len += vnet_hdr_len;
+ }
+
/*
* Now send it
*/
@@ -1151,13 +1236,11 @@ out:
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
-#ifdef CONFIG_PACKET_MMAP
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
if (po->tx_ring.pg_vec)
return tpacket_snd(po, msg);
else
-#endif
return packet_snd(sock, msg, len);
}
@@ -1171,9 +1254,7 @@ static int packet_release(struct socket *sock)
struct sock *sk = sock->sk;
struct packet_sock *po;
struct net *net;
-#ifdef CONFIG_PACKET_MMAP
struct tpacket_req req;
-#endif
if (!sk)
return 0;
@@ -1181,28 +1262,25 @@ static int packet_release(struct socket *sock)
net = sock_net(sk);
po = pkt_sk(sk);
- write_lock_bh(&net->packet.sklist_lock);
- sk_del_node_init(sk);
+ spin_lock_bh(&net->packet.sklist_lock);
+ sk_del_node_init_rcu(sk);
sock_prot_inuse_add(net, sk->sk_prot, -1);
- write_unlock_bh(&net->packet.sklist_lock);
-
- /*
- * Unhook packet receive handler.
- */
+ spin_unlock_bh(&net->packet.sklist_lock);
+ spin_lock(&po->bind_lock);
if (po->running) {
/*
- * Remove the protocol hook
+ * Remove from protocol table
*/
- dev_remove_pack(&po->prot_hook);
po->running = 0;
po->num = 0;
+ __dev_remove_pack(&po->prot_hook);
__sock_put(sk);
}
+ spin_unlock(&po->bind_lock);
packet_flush_mclist(sk);
-#ifdef CONFIG_PACKET_MMAP
memset(&req, 0, sizeof(req));
if (po->rx_ring.pg_vec)
@@ -1210,12 +1288,11 @@ static int packet_release(struct socket *sock)
if (po->tx_ring.pg_vec)
packet_set_ring(sk, &req, 1, 1);
-#endif
+ synchronize_net();
/*
* Now the socket is dead. No more input will appear.
*/
-
sock_orphan(sk);
sock->sk = NULL;
@@ -1399,10 +1476,11 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po->running = 1;
}
- write_lock_bh(&net->packet.sklist_lock);
- sk_add_node(sk, &net->packet.sklist);
+ spin_lock_bh(&net->packet.sklist_lock);
+ sk_add_node_rcu(sk, &net->packet.sklist);
sock_prot_inuse_add(net, &packet_proto, 1);
- write_unlock_bh(&net->packet.sklist_lock);
+ spin_unlock_bh(&net->packet.sklist_lock);
+
return 0;
out:
return err;
@@ -1420,6 +1498,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sk_buff *skb;
int copied, err;
struct sockaddr_ll *sll;
+ int vnet_hdr_len = 0;
err = -EINVAL;
if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1451,6 +1530,48 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb == NULL)
goto out;
+ if (pkt_sk(sk)->has_vnet_hdr) {
+ struct virtio_net_hdr vnet_hdr = { 0 };
+
+ err = -EINVAL;
+ vnet_hdr_len = sizeof(vnet_hdr);
+ if ((len -= vnet_hdr_len) < 0)
+ goto out_free;
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ /* This is a hint as to how much should be linear. */
+ vnet_hdr.hdr_len = skb_headlen(skb);
+ vnet_hdr.gso_size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else if (sinfo->gso_type & SKB_GSO_FCOE)
+ goto out_free;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ } else
+ vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vnet_hdr.csum_start = skb->csum_start -
+ skb_headroom(skb);
+ vnet_hdr.csum_offset = skb->csum_offset;
+ } /* else everything is zero */
+
+ err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
+ vnet_hdr_len);
+ if (err < 0)
+ goto out_free;
+ }
+
/*
* If the address length field is there to be filled in, we fill
* it in now.
@@ -1502,7 +1623,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
- err = (flags&MSG_TRUNC) ? skb->len : copied;
+ err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
out_free:
skb_free_datagram(sk, skb);
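On the receive side, once the PACKET_VNET_HDR option (added further down) is enabled, every message delivered by recvmsg() is prefixed with a struct virtio_net_hdr describing checksum and GSO state, and the return value counts that prefix, which is what the vnet_hdr_len adjustment above is for. An illustrative userspace sketch, assuming fd is an AF_PACKET/SOCK_RAW socket with the option set:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/virtio_net.h>

    /* illustrative only, not from the patch */
    static ssize_t recv_with_vnet_hdr(int fd, struct virtio_net_hdr *vnh,
                                      unsigned char *frame, size_t frame_len)
    {
            struct iovec iov[2] = {
                    { .iov_base = vnh,   .iov_len = sizeof(*vnh) },
                    { .iov_base = frame, .iov_len = frame_len    },
            };
            struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };

            /* the return value includes sizeof(*vnh), per the hunk above */
            return recvmsg(fd, &mh, 0);
    }
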
@@ -1613,7 +1734,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
goto done;
err = -EINVAL;
- if (mreq->mr_alen > dev->addr_len)
+ if (mreq->mr_alen != dev->addr_len)
goto done;
err = -ENOBUFS;
@@ -1732,7 +1853,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return ret;
}
-#ifdef CONFIG_PACKET_MMAP
case PACKET_RX_RING:
case PACKET_TX_RING:
{
@@ -1740,6 +1860,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen < sizeof(req))
return -EINVAL;
+ if (pkt_sk(sk)->has_vnet_hdr)
+ return -EINVAL;
if (copy_from_user(&req, optval, sizeof(req)))
return -EFAULT;
return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
@@ -1801,7 +1923,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
po->tp_loss = !!val;
return 0;
}
-#endif
case PACKET_AUXDATA:
{
int val;
@@ -1826,6 +1947,22 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
po->origdev = !!val;
return 0;
}
+ case PACKET_VNET_HDR:
+ {
+ int val;
+
+ if (sock->type != SOCK_RAW)
+ return -EINVAL;
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+ return -EBUSY;
+ if (optlen < sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ po->has_vnet_hdr = !!val;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
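The new PACKET_VNET_HDR option is restricted to SOCK_RAW sockets and refused while a rx/tx ring is mapped; it lets virtualization front-ends pass checksum-offload and GSO metadata with each frame, as parsed in packet_snd() above. An illustrative userspace sketch of enabling it and sending one frame with no offloads (function name and error handling are hypothetical):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/if_packet.h>
    #include <linux/virtio_net.h>

    static int send_plain_frame(int fd, unsigned char *frame, size_t len)
    {
            int one = 1;
            struct virtio_net_hdr vnh;
            struct iovec iov[2];
            struct msghdr mh;

            if (setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one)) < 0)
                    return -1;

            memset(&vnh, 0, sizeof(vnh));   /* GSO_NONE, no checksum request */
            iov[0].iov_base = &vnh;
            iov[0].iov_len  = sizeof(vnh);
            iov[1].iov_base = frame;
            iov[1].iov_len  = len;

            memset(&mh, 0, sizeof(mh));
            mh.msg_iov = iov;
            mh.msg_iovlen = 2;

            return sendmsg(fd, &mh, 0) < 0 ? -1 : 0;
    }
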
@@ -1876,7 +2013,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
data = &val;
break;
-#ifdef CONFIG_PACKET_MMAP
+ case PACKET_VNET_HDR:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = po->has_vnet_hdr;
+
+ data = &val;
+ break;
case PACKET_VERSION:
if (len > sizeof(int))
len = sizeof(int);
@@ -1912,7 +2055,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
val = po->tp_loss;
data = &val;
break;
-#endif
default:
return -ENOPROTOOPT;
}
@@ -1932,8 +2074,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
struct net_device *dev = data;
struct net *net = dev_net(dev);
- read_lock(&net->packet.sklist_lock);
- sk_for_each(sk, node, &net->packet.sklist) {
+ rcu_read_lock();
+ sk_for_each_rcu(sk, node, &net->packet.sklist) {
struct packet_sock *po = pkt_sk(sk);
switch (msg) {
@@ -1961,18 +2103,19 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
}
break;
case NETDEV_UP:
- spin_lock(&po->bind_lock);
- if (dev->ifindex == po->ifindex && po->num &&
- !po->running) {
- dev_add_pack(&po->prot_hook);
- sock_hold(sk);
- po->running = 1;
+ if (dev->ifindex == po->ifindex) {
+ spin_lock(&po->bind_lock);
+ if (po->num && !po->running) {
+ dev_add_pack(&po->prot_hook);
+ sock_hold(sk);
+ po->running = 1;
+ }
+ spin_unlock(&po->bind_lock);
}
- spin_unlock(&po->bind_lock);
break;
}
}
- read_unlock(&net->packet.sklist_lock);
+ rcu_read_unlock();
return NOTIFY_DONE;
}
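net->packet.sklist is converted from an rwlock-protected list to a spinlock for writers plus RCU for readers: insertion and removal use sk_add_node_rcu()/sk_del_node_init_rcu() under sklist_lock, readers such as this notifier and the /proc iterator below walk the list under rcu_read_lock(), and packet_release() calls synchronize_net() before tearing the socket down. A condensed sketch of the split, with hypothetical list, lock and function names:

    #include <linux/spinlock.h>
    #include <linux/rcupdate.h>
    #include <net/sock.h>

    static HLIST_HEAD(example_sklist);
    static DEFINE_SPINLOCK(example_sklist_lock);

    static void example_insert(struct sock *sk)
    {
            spin_lock_bh(&example_sklist_lock);
            sk_add_node_rcu(sk, &example_sklist);
            spin_unlock_bh(&example_sklist_lock);
    }

    static void example_walk(void)
    {
            struct sock *sk;
            struct hlist_node *node;

            rcu_read_lock();
            sk_for_each_rcu(sk, node, &example_sklist) {
                    /* read-only access; must not sleep here */
            }
            rcu_read_unlock();
    }
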
@@ -2032,11 +2175,6 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
return 0;
}
-#ifndef CONFIG_PACKET_MMAP
-#define packet_mmap sock_no_mmap
-#define packet_poll datagram_poll
-#else
-
static unsigned int packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
@@ -2318,8 +2456,6 @@ out:
mutex_unlock(&po->pg_vec_lock);
return err;
}
-#endif
-
static const struct proto_ops packet_ops_spkt = {
.family = PF_PACKET,
@@ -2374,40 +2510,26 @@ static struct notifier_block packet_netdev_notifier = {
};
#ifdef CONFIG_PROC_FS
-static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &net->packet.sklist) {
- if (!off--)
- return s;
- }
- return NULL;
-}
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(seq_file_net(seq)->packet.sklist_lock)
+ __acquires(RCU)
{
struct net *net = seq_file_net(seq);
- read_lock(&net->packet.sklist_lock);
- return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
+
+ rcu_read_lock();
+ return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
- ++*pos;
- return (v == SEQ_START_TOKEN)
- ? sk_head(&net->packet.sklist)
- : sk_next((struct sock *)v) ;
+ return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
- __releases(seq_file_net(seq)->packet.sklist_lock)
+ __releases(RCU)
{
- struct net *net = seq_file_net(seq);
- read_unlock(&net->packet.sklist_lock);
+ rcu_read_unlock();
}
static int packet_seq_show(struct seq_file *seq, void *v)
@@ -2415,7 +2537,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
else {
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
const struct packet_sock *po = pkt_sk(s);
seq_printf(seq,
@@ -2457,9 +2579,9 @@ static const struct file_operations packet_seq_fops = {
#endif
-static int packet_net_init(struct net *net)
+static int __net_init packet_net_init(struct net *net)
{
- rwlock_init(&net->packet.sklist_lock);
+ spin_lock_init(&net->packet.sklist_lock);
INIT_HLIST_HEAD(&net->packet.sklist);
if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
@@ -2468,7 +2590,7 @@ static int packet_net_init(struct net *net)
return 0;
}
-static void packet_net_exit(struct net *net)
+static void __net_exit packet_net_exit(struct net *net)
{
proc_net_remove(net, "packet");
}
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d0..387197b579b 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -75,7 +75,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
struct sk_buff *skb;
int err;
- if (msg->msg_flags & MSG_OOB)
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
return -EOPNOTSUPP;
if (msg->msg_name == NULL)
@@ -119,7 +120,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
int rval = -EOPNOTSUPP;
int copylen;
- if (flags & MSG_OOB)
+ if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
goto out_nofree;
if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa..d01208968c8 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
goto drop;
}
- if (likely(skb_headroom(skb) & 3)) {
+ if (skb_headroom(skb) & 3) {
struct sk_buff *rskb, *fs;
int flen = 0;
- /* Phonet Pipe data header is misaligned (3 bytes),
+ /* Phonet Pipe data header may be misaligned (3 bytes),
* so wrap the IP packet as a single fragment of an head-less
* socket buffer. The network stack will pull what it needs,
* but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f..360cf377693 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -354,6 +354,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
queue = &pn->ctrlreq_queue;
goto queue;
+ case PNS_PIPE_ALIGNED_DATA:
+ __skb_pull(skb, 1);
+ /* fall through */
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +444,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
struct sockaddr_pn dst;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
+ u8 aligned = 0;
if (!pskb_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
@@ -479,6 +483,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
peer_type = (peer_type & 0xff00) | data[0];
break;
+ case PN_PIPE_SB_ALIGNED_DATA:
+ aligned = data[0] != 0;
+ break;
}
n_sb--;
}
@@ -510,6 +517,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
newpn->rx_credits = 0;
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
+ newpn->aligned = aligned;
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +837,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
return -ENOBUFS;
}
- skb_push(skb, 3);
+ skb_push(skb, 3 + pn->aligned);
skb_reset_transport_header(skb);
ph = pnp_hdr(skb);
ph->utid = 0;
- ph->message_id = PNS_PIPE_DATA;
+ if (pn->aligned) {
+ ph->message_id = PNS_PIPE_ALIGNED_DATA;
+ ph->data[0] = 0; /* padding */
+ } else
+ ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +860,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
int flags = msg->msg_flags;
int err, done;
- if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
+ if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT)) ||
+ !(msg->msg_flags & MSG_EOR))
return -EOPNOTSUPP;
skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +941,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
struct sk_buff *rskb, *fs;
int flen = 0;
+ if (pep_sk(sk)->aligned)
+ return pipe_skb_send(sk, skb);
+
rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
if (!rskb) {
kfree_skb(skb);
@@ -966,6 +983,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
struct sk_buff *skb;
int err;
+ if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
+ MSG_NOSIGNAL|MSG_CMSG_COMPAT))
+ return -EOPNOTSUPP;
+
if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
return -ENOTCONN;
@@ -973,6 +994,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
/* Dequeue and acknowledge control request */
struct pep_sock *pn = pep_sk(sk);
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
skb = skb_dequeue(&pn->ctrlreq_queue);
if (skb) {
pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bc4a33bf2d3..c597cc53a6f 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -311,7 +311,7 @@ static struct notifier_block phonet_device_notifier = {
};
/* Per-namespace Phonet devices handling */
-static int phonet_init_net(struct net *net)
+static int __net_init phonet_init_net(struct net *net)
{
struct phonet_net *pnn = net_generic(net, phonet_net_id);
@@ -324,7 +324,7 @@ static int phonet_init_net(struct net *net)
return 0;
}
-static void phonet_exit_net(struct net *net)
+static void __net_exit phonet_exit_net(struct net *net)
{
struct phonet_net *pnn = net_generic(net, phonet_net_id);
struct net_device *dev;
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 211522f9a9a..05625628598 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -90,8 +90,8 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
if (ret) {
- rdsdebug("bind failed with %d at address %u.%u.%u.%u\n",
- ret, NIPQUAD(conn->c_laddr));
+ rdsdebug("bind failed with %d at address %pI4\n",
+ ret, &conn->c_laddr);
goto out;
}
@@ -108,8 +108,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
O_NONBLOCK);
sock = NULL;
- rdsdebug("connect to address %u.%u.%u.%u returned %d\n",
- NIPQUAD(conn->c_faddr), ret);
+ rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
ret = 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 45474a43686..53cb1b54165 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -66,9 +66,9 @@ static int rds_tcp_accept_one(struct socket *sock)
inet = inet_sk(new_sock->sk);
- rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n",
- NIPQUAD(inet->inet_saddr), ntohs(inet->inet_sport),
- NIPQUAD(inet->inet_daddr), ntohs(inet->inet_dport));
+ rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
+ &inet->inet_saddr, ntohs(inet->inet_sport),
+ &inet->inet_daddr, ntohs(inet->inet_dport));
conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
&rds_tcp_transport, GFP_KERNEL);
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index ab545e0cd5d..34fdcc059e5 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -193,9 +193,9 @@ out:
rds_tcp_stats_inc(s_tcp_sndbuf_full);
ret = 0;
} else {
- printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u "
+ printk(KERN_WARNING "RDS/tcp: send to %pI4 "
"returned %d, disconnecting and reconnecting\n",
- NIPQUAD(conn->c_faddr), ret);
+ &conn->c_faddr, ret);
rds_conn_drop(conn);
}
}
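The RDS/TCP messages move from the NIPQUAD()/"%u.%u.%u.%u" pair to the "%pI4" printk extension, which takes a pointer to the address in network byte order. A small sketch of the change, with a hypothetical helper:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void example_log_peer(__be32 faddr)
    {
            printk(KERN_DEBUG "peer %pI4\n", &faddr);
            /* previously: printk("peer %u.%u.%u.%u\n", NIPQUAD(faddr)); */
    }
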
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8feb9e5d662..e90b9b6c16a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1404,29 +1404,13 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
- int i;
- struct sock *s;
- struct hlist_node *node;
-
spin_lock_bh(&rose_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- i = 1;
- sk_for_each(s, node, &rose_list) {
- if (i == *pos)
- return s;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
- : sk_next((struct sock *)v);
+ return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
@@ -1444,7 +1428,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a4762..21f9c7678aa 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
module.
To compile this code as a module, choose M here: the
- module will be called police.
+ module will be called act_police.
config NET_ACT_GACT
tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
accepting packets.
To compile this code as a module, choose M here: the
- module will be called gact.
+ module will be called act_gact.
config GACT_PROB
bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
other devices.
To compile this code as a module, choose M here: the
- module will be called mirred.
+ module will be called act_mirred.
config NET_ACT_IPT
tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
classification.
To compile this code as a module, choose M here: the
- module will be called ipt.
+ module will be called act_ipt.
config NET_ACT_NAT
tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
netfilter for NAT unless you know what you are doing.
To compile this code as a module, choose M here: the
- module will be called nat.
+ module will be called act_nat.
config NET_ACT_PEDIT
tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
Say Y here if you want to mangle the content of packets.
To compile this code as a module, choose M here: the
- module will be called pedit.
+ module will be called act_pedit.
config NET_ACT_SIMP
tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
If unsure, say N.
To compile this code as a module, choose M here: the
- module will be called simple.
+ module will be called act_simple.
config NET_ACT_SKBEDIT
tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
If unsure, say N.
To compile this code as a module, choose M here: the
- module will be called skbedit.
+ module will be called act_skbedit.
config NET_CLS_IND
bool "Incoming device classification"
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 75fd1c672c6..6cd491013b5 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1707,6 +1707,7 @@ static int __init pktsched_init(void)
{
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
+ register_qdisc(&pfifo_head_drop_qdisc_ops);
register_qdisc(&mq_qdisc_ops);
proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 69188e8358b..4b0a6cc44c7 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -43,6 +43,26 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
}
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct sk_buff *skb_head;
+ struct fifo_sched_data *q = qdisc_priv(sch);
+
+ if (likely(skb_queue_len(&sch->q) < q->limit))
+ return qdisc_enqueue_tail(skb, sch);
+
+ /* queue full, remove one skb to fulfill the limit */
+ skb_head = qdisc_dequeue_head(sch);
+ sch->bstats.bytes -= qdisc_pkt_len(skb_head);
+ sch->bstats.packets--;
+ sch->qstats.drops++;
+ kfree_skb(skb_head);
+
+ qdisc_enqueue_tail(skb, sch);
+
+ return NET_XMIT_CN;
+}
+
static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
{
struct fifo_sched_data *q = qdisc_priv(sch);
@@ -108,6 +128,20 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
+struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
+ .id = "pfifo_head_drop",
+ .priv_size = sizeof(struct fifo_sched_data),
+ .enqueue = pfifo_tail_enqueue,
+ .dequeue = qdisc_dequeue_head,
+ .peek = qdisc_peek_head,
+ .drop = qdisc_queue_drop_head,
+ .init = fifo_init,
+ .reset = qdisc_reset_queue,
+ .change = fifo_init,
+ .dump = fifo_dump,
+ .owner = THIS_MODULE,
+};
+
/* Pass size change message down to embedded FIFO */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 13a6fba4107..bef13373168 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -186,7 +186,6 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
- INIT_RCU_HEAD(&addr->rcu);
/* We always hold a socket lock when calling this function,
* and that acts as a writer synchronizing lock.
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index cc50fbe9929..1d7ac70ba39 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -381,7 +381,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
- INIT_RCU_HEAD(&addr->rcu);
list_add_tail(&addr->list, addrlist);
}
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index d093cbfeaac..784bcc9a979 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -40,7 +40,7 @@
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */
-static struct snmp_mib sctp_snmp_list[] = {
+static const struct snmp_mib sctp_snmp_list[] = {
SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
@@ -83,7 +83,7 @@ static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
for (i = 0; sctp_snmp_list[i].name != NULL; i++)
seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
- snmp_fold_field((void **)sctp_statistics,
+ snmp_fold_field((void __percpu **)sctp_statistics,
sctp_snmp_list[i].entry));
return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a3c8988758b..e771690f6d5 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -188,7 +188,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
- INIT_RCU_HEAD(&addr->rcu);
list_add_tail(&addr->list, addrlist);
}
}
@@ -996,12 +995,13 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
static inline int init_sctp_mibs(void)
{
- return snmp_mib_init((void**)sctp_statistics, sizeof(struct sctp_mib));
+ return snmp_mib_init((void __percpu **)sctp_statistics,
+ sizeof(struct sctp_mib));
}
static inline void cleanup_sctp_mibs(void)
{
- snmp_mib_free((void**)sctp_statistics);
+ snmp_mib_free((void __percpu **)sctp_statistics);
}
static void sctp_v4_pf_init(void)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 67fdac9d2d3..f6d1e59c415 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -6359,7 +6359,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc)
{
struct inet_sock *inet = inet_sk(sk);
- struct inet_sock *newinet = inet_sk(newsk);
+ struct inet_sock *newinet;
newsk->sk_type = sk->sk_type;
newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f83036..8d63f8fd29b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -78,7 +78,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
}
/**
- * rpc_queue_upcall
+ * rpc_queue_upcall - queue an upcall message to userspace
* @inode: inode of upcall pipe on which to queue given message
* @msg: message to queue
*
@@ -999,19 +999,14 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
inode = rpc_get_inode(sb, S_IFDIR | 0755);
if (!inode)
return -ENOMEM;
- root = d_alloc_root(inode);
+ sb->s_root = root = d_alloc_root(inode);
if (!root) {
iput(inode);
return -ENOMEM;
}
if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
- goto out;
- sb->s_root = root;
+ return -ENOMEM;
return 0;
-out:
- d_genocide(root);
- dput(root);
- return -ENOMEM;
}
static int
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 0b15d7250c4..53196009160 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -71,7 +71,7 @@ static struct ctl_table_root net_sysctl_ro_root = {
.permissions = net_ctl_ro_header_perms,
};
-static int sysctl_net_init(struct net *net)
+static int __net_init sysctl_net_init(struct net *net)
{
setup_sysctl_set(&net->sysctls,
&net_sysctl_ro_root.default_set,
@@ -79,7 +79,7 @@ static int sysctl_net_init(struct net *net)
return 0;
}
-static void sysctl_net_exit(struct net *net)
+static void __net_exit sysctl_net_exit(struct net *net)
{
WARN_ON(!list_empty(&net->sysctls.list));
return;
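The __net_init/__net_exit annotations seen here and in several files above place the per-namespace setup and teardown code in the init/exit sections when CONFIG_NET_NS is disabled, since it then only runs once at boot or module unload and can be discarded afterwards. A minimal sketch of the annotated pernet pattern, with a hypothetical subsystem:

    #include <net/net_namespace.h>

    static int __net_init example_net_init(struct net *net)
    {
            /* allocate per-namespace state, create /proc entries, ... */
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
            /* tear down whatever example_net_init() created */
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };

    /* registered from module init: register_pernet_subsys(&example_net_ops); */
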
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b6..b74f78d0c03 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -10,7 +10,7 @@ menuconfig TIPC
specially designed for intra cluster communication. This protocol
originates from Ericsson where it has been used in carrier grade
cluster applications for many years.
-
+
For more information about TIPC, see http://tipc.sourceforge.net.
This protocol support is also available as a module ( = code which
@@ -23,91 +23,76 @@ menuconfig TIPC
if TIPC
config TIPC_ADVANCED
- bool "TIPC: Advanced configuration"
+ bool "Advanced TIPC configuration"
default n
help
- Saying Y here will open some advanced configuration
- for TIPC. Most users do not need to bother, so if
- unsure, just say N.
+ Saying Y here will open some advanced configuration for TIPC.
+ Most users do not need to bother; if unsure, just say N.
config TIPC_ZONES
- int "Maximum number of zones in network"
+ int "Maximum number of zones in a network"
depends on TIPC_ADVANCED
+ range 1 255
default "3"
help
- Max number of zones inside TIPC network. Max supported value
- is 255 zones, minimum is 1
+ Specifies how many zones can be supported in a TIPC network.
+ Can range from 1 to 255 zones; default is 3.
- Default is 3 zones in a network; setting this to higher
- allows more zones but might use more memory.
+ Setting this to a smaller value saves some memory;
+ setting it to a higher value allows for more zones.
config TIPC_CLUSTERS
int "Maximum number of clusters in a zone"
depends on TIPC_ADVANCED
+ range 1 1
default "1"
help
- ***Only 1 (one cluster in a zone) is supported by current code.
- Any value set here will be overridden.***
-
- (Max number of clusters inside TIPC zone. Max supported
- value is 4095 clusters, minimum is 1.
+ Specifies how many clusters can be supported in a TIPC zone.
- Default is 1; setting this to smaller value might save
- some memory, setting it to higher
- allows more clusters and might consume more memory.)
+ *** Currently TIPC only supports a single cluster per zone. ***
config TIPC_NODES
- int "Maximum number of nodes in cluster"
+ int "Maximum number of nodes in a cluster"
depends on TIPC_ADVANCED
+ range 8 2047
default "255"
help
- Maximum number of nodes inside a TIPC cluster. Maximum
- supported value is 2047 nodes, minimum is 8.
-
- Setting this to a smaller value saves some memory,
- setting it to higher allows more nodes.
-
-config TIPC_SLAVE_NODES
- int "Maximum number of slave nodes in cluster"
- depends on TIPC_ADVANCED
- default "0"
- help
- ***This capability is not supported by current code.***
-
- Maximum number of slave nodes inside a TIPC cluster. Maximum
- supported value is 2047 nodes, minimum is 0.
+ Specifies how many nodes can be supported in a TIPC cluster.
+ Can range from 8 to 2047 nodes; default is 255.
- Setting this to a smaller value saves some memory,
- setting it to higher allows more nodes.
+ Setting this to a smaller value saves some memory;
+ setting it to higher allows for more nodes.
config TIPC_PORTS
int "Maximum number of ports in a node"
depends on TIPC_ADVANCED
+ range 127 65535
default "8191"
help
- Maximum number of ports within a node. Maximum
- supported value is 64535 nodes, minimum is 127.
+ Specifies how many ports can be supported by a node.
+ Can range from 127 to 65535 ports; default is 8191.
Setting this to a smaller value saves some memory,
- setting it to higher allows more ports.
+ setting it to higher allows for more ports.
config TIPC_LOG
int "Size of log buffer"
depends on TIPC_ADVANCED
- default 0
+ range 0 32768
+ default "0"
help
- Size (in bytes) of TIPC's internal log buffer, which records the
- occurrence of significant events. Maximum supported value
- is 32768 bytes, minimum is 0.
+ Size (in bytes) of TIPC's internal log buffer, which records the
+ occurrence of significant events. Can range from 0 to 32768 bytes;
+ default is 0.
There is no need to enable the log buffer unless the node will be
managed remotely via TIPC.
config TIPC_DEBUG
- bool "Enable debugging support"
+ bool "Enable debug messages"
default n
help
- This will enable debugging of TIPC.
+ This enables debugging of TIPC.
Only say Y here if you are having trouble with TIPC. It will
enable the display of detailed information about what is going on.
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398..52c571fedbe 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
tipc_remote_management = 1;
tipc_max_publications = 10000;
tipc_max_subscriptions = 2000;
- tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
- tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
- tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
- tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
- tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+ tipc_max_ports = CONFIG_TIPC_PORTS;
+ tipc_max_zones = CONFIG_TIPC_ZONES;
+ tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
+ tipc_max_nodes = CONFIG_TIPC_NODES;
+ tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
tipc_net_id = 4711;
if ((res = tipc_core_start()))
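With the limits expressed as Kconfig "range" statements, the runtime clamping of the CONFIG_TIPC_* values is no longer needed and the options are assigned directly. Roughly, the removed calls were applying a clamp along these lines (sketch only, not the verbatim helper):

    static int delimit(int val, int min, int max)
    {
            if (val > max)
                    return max;
            if (val < min)
                    return min;
            return val;
    }
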
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index f2551190311..3d9122e78f4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -144,7 +144,7 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
/*
* SMP locking strategy:
* hash table is protected with spinlock unix_table_lock
- * each socket state is protected by separate rwlock.
+ * each socket state is protected by separate spin lock.
*/
static inline unsigned unix_hash_fold(__wsum n)
@@ -2224,7 +2224,7 @@ static const struct net_proto_family unix_family_ops = {
};
-static int unix_net_init(struct net *net)
+static int __net_init unix_net_init(struct net *net)
{
int error = -ENOMEM;
@@ -2243,7 +2243,7 @@ out:
return error;
}
-static void unix_net_exit(struct net *net)
+static void __net_exit unix_net_exit(struct net *net)
{
unix_sysctl_unregister(net);
proc_net_remove(net, "unix");
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 708f5df6b7f..d095c7be10d 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -31,7 +31,7 @@ static struct ctl_path unix_path[] = {
{ },
};
-int unix_sysctl_register(struct net *net)
+int __net_init unix_sysctl_register(struct net *net)
{
struct ctl_table *table;
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d3bfb6ef13a..7718657e93d 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -320,8 +320,7 @@ int wimax_msg(struct wimax_dev *wimax_dev, const char *pipe_name,
EXPORT_SYMBOL_GPL(wimax_msg);
-static const
-struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_msg_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_MSG_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 35f370091f4..4dc82a54ba3 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -91,8 +91,7 @@ int wimax_reset(struct wimax_dev *wimax_dev)
EXPORT_SYMBOL(wimax_reset);
-static const
-struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_reset_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_RESET_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index ae752a64d92..e978c7136c9 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -410,8 +410,7 @@ void wimax_rfkill_rm(struct wimax_dev *wimax_dev)
* just query).
*/
-static const
-struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_rfkill_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_RFKILL_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index a76b8fcb056..11ad3356eb5 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -33,8 +33,7 @@
#include "debug-levels.h"
-static const
-struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_state_get_policy[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STGET_IFIDX] = {
.type = NLA_U32,
},
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index c8866412f83..813e1eaea29 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -75,8 +75,7 @@ MODULE_PARM_DESC(debug,
* close to where the data is generated.
*/
/*
-static const
-struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
+static const struct nla_policy wimax_gnl_re_status_change[WIMAX_GNL_ATTR_MAX + 1] = {
[WIMAX_GNL_STCH_STATE_OLD] = { .type = NLA_U8 },
[WIMAX_GNL_STCH_STATE_NEW] = { .type = NLA_U8 },
};
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 00000000000..c33451b896d
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
+regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701a..d0ee29063e5 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
If unsure, say N.
-config WIRELESS_OLD_REGULATORY
- bool "Old wireless static regulatory definitions"
+config CFG80211_INTERNAL_REGDB
+ bool "use statically compiled regulatory rules database" if EMBEDDED
default n
depends on CFG80211
---help---
- This option enables the old static regulatory information
- and uses it within the new framework. This option is available
- for historical reasons and it is advised to leave it off.
+ This option generates an internal data structure representing
+ the wireless regulatory rules described in net/wireless/db.txt
+ and includes code to query that database. This is an alternative
+ to using CRDA for defining regulatory rules for the kernel.
For details see:
http://wireless.kernel.org/en/developers/Regulatory
- Say N and if you say Y, please tell us why. The default is N.
+ Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab..e77e508126f 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
+cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
ccflags-y += -D__CHECK_ENDIAN__
+
+$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
+ @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
+
+clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b36..bf1737fc9a7 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
return result;
}
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
+struct ieee80211_channel *
+rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type)
{
struct ieee80211_channel *chan;
struct ieee80211_sta_ht_cap *ht_cap;
- int result;
-
- if (rdev_fixed_channel(rdev, for_wdev))
- return -EBUSY;
-
- if (!rdev->ops->set_channel)
- return -EOPNOTSUPP;
chan = ieee80211_get_channel(&rdev->wiphy, freq);
/* Primary channel not allowed */
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
- return -EINVAL;
+ return NULL;
if (channel_type == NL80211_CHAN_HT40MINUS &&
chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
- return -EINVAL;
+ return NULL;
else if (channel_type == NL80211_CHAN_HT40PLUS &&
chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
- return -EINVAL;
+ return NULL;
ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
if (channel_type != NL80211_CHAN_NO_HT) {
if (!ht_cap->ht_supported)
- return -EINVAL;
+ return NULL;
if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
- return -EINVAL;
+ return NULL;
}
+ return chan;
+}
+
+int rdev_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev,
+ int freq, enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_channel *chan;
+ int result;
+
+ if (rdev_fixed_channel(rdev, for_wdev))
+ return -EBUSY;
+
+ if (!rdev->ops->set_channel)
+ return -EOPNOTSUPP;
+
+ chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!chan)
+ return -EINVAL;
+
result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
if (result)
return result;
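The channel validation is split out of rdev_set_freq() into rdev_freq_to_chan(), so other paths can map a frequency and HT mode to a validated ieee80211_channel without programming the hardware. An illustrative caller sketch (hypothetical function, assumes the declarations added to net/wireless/core.h below):

    static int example_check_freq(struct cfg80211_registered_device *rdev,
                                  int freq)
    {
            struct ieee80211_channel *chan;

            chan = rdev_freq_to_chan(rdev, freq, NL80211_CHAN_NO_HT);
            if (!chan)
                    return -EINVAL;  /* disabled channel or unusable HT mode */
            return 0;
    }
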
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 92b81244248..7fdb9409ad2 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1,7 +1,7 @@
/*
* This is the linux wireless configuration interface.
*
- * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
*/
#include <linux/if.h>
@@ -31,15 +31,10 @@ MODULE_AUTHOR("Johannes Berg");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("wireless configuration support");
-/* RCU might be appropriate here since we usually
- * only read the list, and that can happen quite
- * often because we need to do it for each command */
+/* RCU-protected (and cfg80211_mutex for writers) */
LIST_HEAD(cfg80211_rdev_list);
int cfg80211_rdev_list_generation;
-/*
- * This is used to protect the cfg80211_rdev_list
- */
DEFINE_MUTEX(cfg80211_mutex);
/* for debugfs */
@@ -402,6 +397,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
rdev->wiphy.retry_long = 4;
rdev->wiphy.frag_threshold = (u32) -1;
rdev->wiphy.rts_threshold = (u32) -1;
+ rdev->wiphy.coverage_class = 0;
return &rdev->wiphy;
}
@@ -417,6 +413,18 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
+ if (WARN_ON(wiphy->addresses && !wiphy->n_addresses))
+ return -EINVAL;
+
+ if (WARN_ON(wiphy->addresses &&
+ !is_zero_ether_addr(wiphy->perm_addr) &&
+ memcmp(wiphy->perm_addr, wiphy->addresses[0].addr,
+ ETH_ALEN)))
+ return -EINVAL;
+
+ if (wiphy->addresses)
+ memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
+
/* sanity check ifmodes */
WARN_ON(!ifmodes);
ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -476,7 +484,7 @@ int wiphy_register(struct wiphy *wiphy)
/* set up regulatory info */
wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
- list_add(&rdev->list, &cfg80211_rdev_list);
+ list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
mutex_unlock(&cfg80211_mutex);
@@ -553,7 +561,8 @@ void wiphy_unregister(struct wiphy *wiphy)
* it impossible to find from userspace.
*/
debugfs_remove_recursive(rdev->wiphy.debugfsdir);
- list_del(&rdev->list);
+ list_del_rcu(&rdev->list);
+ synchronize_rcu();
/*
* Try to grab rdev->mtx. If a command is still in progress,
@@ -668,8 +677,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
INIT_LIST_HEAD(&wdev->event_list);
spin_lock_init(&wdev->event_lock);
+ INIT_LIST_HEAD(&wdev->action_registrations);
+ spin_lock_init(&wdev->action_registrations_lock);
+
mutex_lock(&rdev->devlist_mtx);
- list_add(&wdev->list, &rdev->netdev_list);
+ list_add_rcu(&wdev->list, &rdev->netdev_list);
rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -686,19 +698,21 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
wdev->wext.default_key = -1;
wdev->wext.default_mgmt_key = -1;
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+#endif
+
if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
- wdev->wext.ps = true;
+ wdev->ps = true;
else
- wdev->wext.ps = false;
- wdev->wext.ps_timeout = 100;
+ wdev->ps = false;
+ wdev->ps_timeout = 100;
if (rdev->ops->set_power_mgmt)
if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
- wdev->wext.ps,
- wdev->wext.ps_timeout)) {
+ wdev->ps,
+ wdev->ps_timeout)) {
/* assume this means it's off */
- wdev->wext.ps = false;
+ wdev->ps = false;
}
-#endif
+
if (!dev->ethtool_ops)
dev->ethtool_ops = &cfg80211_ethtool_ops;
@@ -781,13 +795,22 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
*/
if (!list_empty(&wdev->list)) {
sysfs_remove_link(&dev->dev.kobj, "phy80211");
- list_del_init(&wdev->list);
+ list_del_rcu(&wdev->list);
rdev->devlist_generation++;
+ cfg80211_mlme_purge_actions(wdev);
#ifdef CONFIG_CFG80211_WEXT
kfree(wdev->wext.keys);
#endif
}
mutex_unlock(&rdev->devlist_mtx);
+ /*
+ * synchronise (so that we won't find this netdev
+ * from other code any more) and then clear the list
+ * head so that the above code can safely check for
+ * !list_empty() to avoid double-cleanup.
+ */
+ synchronize_rcu();
+ INIT_LIST_HEAD(&wdev->list);
break;
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc9410..d52da913145 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -1,7 +1,7 @@
/*
* Wireless configuration interface internals.
*
- * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -48,6 +48,7 @@ struct cfg80211_registered_device {
/* associate netdev list */
struct mutex devlist_mtx;
+ /* protected by devlist_mtx or RCU */
struct list_head netdev_list;
int devlist_generation;
int opencount; /* also protected by devlist_mtx */
@@ -111,7 +112,8 @@ struct cfg80211_internal_bss {
unsigned long ts;
struct kref ref;
atomic_t hold;
- bool ies_allocated;
+ bool beacon_ies_allocated;
+ bool proberesp_ies_allocated;
/* must be last because of priv member */
struct cfg80211_bss pub;
@@ -327,6 +329,15 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *resp_ie, size_t resp_ie_len,
u16 status, bool wextev,
struct cfg80211_bss *bss);
+int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
+ const u8 *match_data, int match_len);
+void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid);
+void cfg80211_mlme_purge_actions(struct wireless_dev *wdev);
+int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ const u8 *buf, size_t len, u64 *cookie);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -374,10 +385,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
struct ieee80211_channel *
rdev_fixed_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *for_wdev);
+struct ieee80211_channel *
+rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type channel_type);
int rdev_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *for_wdev,
int freq, enum nl80211_channel_type channel_type);
+u16 cfg80211_calculate_bitrate(struct rate_info *rate);
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 00000000000..a2fc3a09ccd
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
+#
+# This file is a placeholder to prevent accidental build breakage if someone
+# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
+# enable that build option.
+#
+# You should be using CRDA instead. It is even better if you use the CRDA
+# package provided by your distribution, since they will probably keep it
+# up-to-date on your behalf.
+#
+# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
+# need to replace this file with one containing appropriately formatted
+# regulatory rules that cover the regulatory domains you will be using. Your
+# best option is to extract the db.txt file from the wireless-regdb git
+# repository:
+#
+# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+#
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 00000000000..3cc9e69880a
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
+#!/usr/bin/awk -f
+#
+# genregdb.awk -- generate regdb.c from db.txt
+#
+# Actually, it reads from stdin (presumed to be db.txt) and writes
+# to stdout (presumed to be regdb.c), but close enough...
+#
+# Copyright 2009 John W. Linville <linville@tuxdriver.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+BEGIN {
+ active = 0
+ rules = 0;
+ print "/*"
+ print " * DO NOT EDIT -- file generated from data in db.txt"
+ print " */"
+ print ""
+ print "#include <linux/nl80211.h>"
+ print "#include <net/cfg80211.h>"
+ print ""
+ regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
+}
+
+/^[ \t]*#/ {
+ # Ignore
+}
+
+!active && /^[ \t]*$/ {
+ # Ignore
+}
+
+!active && /country/ {
+ country=$2
+ sub(/:/, "", country)
+ printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
+ printf "\t.alpha2 = \"%s\",\n", country
+ printf "\t.reg_rules = {\n"
+ active = 1
+ regdb = regdb "\t&regdom_" country ",\n"
+}
+
+active && /^[ \t]*\(/ {
+ start = $1
+ sub(/\(/, "", start)
+ end = $3
+ bw = $5
+ sub(/\),/, "", bw)
+ gain = $6
+ sub(/\(/, "", gain)
+ sub(/,/, "", gain)
+ power = $7
+ sub(/\)/, "", power)
+ sub(/,/, "", power)
+ # power might be in mW...
+ units = $8
+ sub(/\)/, "", units)
+ sub(/,/, "", units)
+ if (units == "mW") {
+ if (power == 100) {
+ power = 20
+ } else if (power == 200) {
+ power = 23
+ } else if (power == 500) {
+ power = 27
+ } else if (power == 1000) {
+ power = 30
+ } else {
+ print "Unknown power value in database!"
+ }
+ }
+ flagstr = ""
+ for (i=8; i<=NF; i++)
+ flagstr = flagstr $i
+ split(flagstr, flagarray, ",")
+ flags = ""
+ for (arg in flagarray) {
+ if (flagarray[arg] == "NO-OFDM") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
+ } else if (flagarray[arg] == "NO-CCK") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
+ } else if (flagarray[arg] == "NO-INDOOR") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
+ } else if (flagarray[arg] == "NO-OUTDOOR") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
+ } else if (flagarray[arg] == "DFS") {
+ flags = flags "\n\t\t\tNL80211_RRF_DFS | "
+ } else if (flagarray[arg] == "PTP-ONLY") {
+ flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
+ } else if (flagarray[arg] == "PTMP-ONLY") {
+ flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
+ } else if (flagarray[arg] == "PASSIVE-SCAN") {
+ flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
+ } else if (flagarray[arg] == "NO-IBSS") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
+ }
+ }
+ flags = flags "0"
+ printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
+ rules++
+}
+
+active && /^[ \t]*$/ {
+ active = 0
+ printf "\t},\n"
+ printf "\t.n_reg_rules = %d\n", rules
+ printf "};\n\n"
+ rules = 0;
+}
+
+END {
+ print regdb "};"
+ print ""
+ print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
+}
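/*
 * Editor's sketch (not part of this patch): for the sample "country US:"
 * entry shown above in db.txt, the awk script generates a regdb.c along
 * these lines; reg.c then consumes it through reg_regdb[] and reg_regdb_size.
 */

/*
 * DO NOT EDIT -- file generated from data in db.txt
 */

#include <linux/nl80211.h>
#include <net/cfg80211.h>

static const struct ieee80211_regdomain regdom_US = {
	.alpha2 = "US",
	.reg_rules = {
		REG_RULE(2402, 2472, 40, 3, 27, 0),
		REG_RULE(5735, 5835, 40, 3, 30, 0),
	},
	.n_reg_rules = 2
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_US,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);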
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 2301dc1edc4..b7fa31d5fd1 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -237,7 +237,6 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
pos = skb->data + hdr_len + CCMP_HDR_LEN;
- mic = skb_put(skb, CCMP_MIC_LEN);
hdr = (struct ieee80211_hdr *)skb->data;
ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
@@ -257,6 +256,7 @@ static int lib80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += len;
}
+ mic = skb_put(skb, CCMP_MIC_LEN);
for (i = 0; i < CCMP_MIC_LEN; i++)
mic[i] = b[i] ^ s0[i];
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index c36287399d7..8cbdb32ff31 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -36,6 +36,8 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("lib80211 crypt: TKIP");
MODULE_LICENSE("GPL");
+#define TKIP_HDR_LEN 8
+
struct lib80211_tkip_data {
#define TKIP_KEY_LEN 32
u8 key[TKIP_KEY_LEN];
@@ -314,13 +316,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
u8 * rc4key, int keylen, void *priv)
{
struct lib80211_tkip_data *tkey = priv;
- int len;
u8 *pos;
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)skb->data;
- if (skb_headroom(skb) < 8 || skb->len < hdr_len)
+ if (skb_headroom(skb) < TKIP_HDR_LEN || skb->len < hdr_len)
return -1;
if (rc4key == NULL || keylen < 16)
@@ -333,9 +334,8 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
}
tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
- len = skb->len - hdr_len;
- pos = skb_push(skb, 8);
- memmove(pos, pos + 8, hdr_len);
+ pos = skb_push(skb, TKIP_HDR_LEN);
+ memmove(pos, pos + TKIP_HDR_LEN, hdr_len);
pos += hdr_len;
*pos++ = *rc4key;
@@ -353,7 +353,7 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
tkey->tx_iv32++;
}
- return 8;
+ return TKIP_HDR_LEN;
}
static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
@@ -384,9 +384,8 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
if ((lib80211_tkip_hdr(skb, hdr_len, rc4key, 16, priv)) < 0)
return -1;
- icv = skb_put(skb, 4);
-
crc = ~crc32_le(~0, pos, len);
+ icv = skb_put(skb, 4);
icv[0] = crc;
icv[1] = crc >> 8;
icv[2] = crc >> 16;
@@ -434,7 +433,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return -1;
}
- if (skb->len < hdr_len + 8 + 4)
+ if (skb->len < hdr_len + TKIP_HDR_LEN + 4)
return -1;
pos = skb->data + hdr_len;
@@ -462,7 +461,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
}
iv16 = (pos[0] << 8) | pos[2];
iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
- pos += 8;
+ pos += TKIP_HDR_LEN;
if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
#ifdef CONFIG_LIB80211_DEBUG
@@ -523,8 +522,8 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
tkey->rx_iv16_new = iv16;
/* Remove IV and ICV */
- memmove(skb->data + 8, skb->data, hdr_len);
- skb_pull(skb, 8);
+ memmove(skb->data + TKIP_HDR_LEN, skb->data, hdr_len);
+ skb_pull(skb, TKIP_HDR_LEN);
skb_trim(skb, skb->len - 4);
return keyidx;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 82e6002c8d6..62bc8855e12 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -148,22 +148,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
const u8 *bssid = mgmt->bssid;
int i;
+ bool found = false;
ASSERT_WDEV_LOCK(wdev);
- nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
-
if (wdev->current_bss &&
memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
+ found = true;
} else for (i = 0; i < MAX_AUTH_BSSES; i++) {
if (wdev->auth_bsses[i] &&
memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
cfg80211_unhold_bss(wdev->auth_bsses[i]);
cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
wdev->auth_bsses[i] = NULL;
+ found = true;
break;
}
if (wdev->authtry_bsses[i] &&
@@ -171,10 +172,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
cfg80211_unhold_bss(wdev->authtry_bsses[i]);
cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
wdev->authtry_bsses[i] = NULL;
+ found = true;
break;
}
}
+ if (!found)
+ return;
+
+ nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
+
if (wdev->sme_state == CFG80211_SME_CONNECTED) {
u16 reason_code;
bool from_ap;
@@ -684,3 +691,206 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
}
}
}
+
+void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
+ duration, gfp);
+}
+EXPORT_SYMBOL(cfg80211_ready_on_channel);
+
+void cfg80211_remain_on_channel_expired(struct net_device *dev,
+ u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
+ channel_type, gfp);
+}
+EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
+
+void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
+}
+EXPORT_SYMBOL(cfg80211_new_sta);
+
+struct cfg80211_action_registration {
+ struct list_head list;
+
+ u32 nlpid;
+
+ int match_len;
+
+ u8 match[];
+};
+
+int cfg80211_mlme_register_action(struct wireless_dev *wdev, u32 snd_pid,
+ const u8 *match_data, int match_len)
+{
+ struct cfg80211_action_registration *reg, *nreg;
+ int err = 0;
+
+ nreg = kzalloc(sizeof(*reg) + match_len, GFP_KERNEL);
+ if (!nreg)
+ return -ENOMEM;
+
+ spin_lock_bh(&wdev->action_registrations_lock);
+
+ list_for_each_entry(reg, &wdev->action_registrations, list) {
+ int mlen = min(match_len, reg->match_len);
+
+ if (memcmp(reg->match, match_data, mlen) == 0) {
+ err = -EALREADY;
+ break;
+ }
+ }
+
+ if (err) {
+ kfree(nreg);
+ goto out;
+ }
+
+ memcpy(nreg->match, match_data, match_len);
+ nreg->match_len = match_len;
+ nreg->nlpid = snd_pid;
+ list_add(&nreg->list, &wdev->action_registrations);
+
+ out:
+ spin_unlock_bh(&wdev->action_registrations_lock);
+ return err;
+}
+
+void cfg80211_mlme_unregister_actions(struct wireless_dev *wdev, u32 nlpid)
+{
+ struct cfg80211_action_registration *reg, *tmp;
+
+ spin_lock_bh(&wdev->action_registrations_lock);
+
+ list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+ if (reg->nlpid == nlpid) {
+ list_del(&reg->list);
+ kfree(reg);
+ }
+ }
+
+ spin_unlock_bh(&wdev->action_registrations_lock);
+}
+
+void cfg80211_mlme_purge_actions(struct wireless_dev *wdev)
+{
+ struct cfg80211_action_registration *reg, *tmp;
+
+ spin_lock_bh(&wdev->action_registrations_lock);
+
+ list_for_each_entry_safe(reg, tmp, &wdev->action_registrations, list) {
+ list_del(&reg->list);
+ kfree(reg);
+ }
+
+ spin_unlock_bh(&wdev->action_registrations_lock);
+}
+
+int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ const u8 *buf, size_t len, u64 *cookie)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const struct ieee80211_mgmt *mgmt;
+
+ if (rdev->ops->action == NULL)
+ return -EOPNOTSUPP;
+ if (len < 24 + 1)
+ return -EINVAL;
+
+ mgmt = (const struct ieee80211_mgmt *) buf;
+ if (!ieee80211_is_action(mgmt->frame_control))
+ return -EINVAL;
+ if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
+ /* Verify that we are associated with the destination AP */
+ if (!wdev->current_bss ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
+ ETH_ALEN) != 0 ||
+ memcmp(wdev->current_bss->pub.bssid, mgmt->da,
+ ETH_ALEN) != 0)
+ return -ENOTCONN;
+ }
+
+ if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
+ return -EINVAL;
+
+ /* Transmit the Action frame as requested by user space */
+ return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
+ buf, len, cookie);
+}
+
+bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
+ size_t len, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct cfg80211_action_registration *reg;
+ const u8 *action_data;
+ int action_data_len;
+ bool result = false;
+
+ /* frame length - min size excluding category */
+ action_data_len = len - (IEEE80211_MIN_ACTION_SIZE - 1);
+
+ /* action data starts with category */
+ action_data = buf + IEEE80211_MIN_ACTION_SIZE - 1;
+
+ spin_lock_bh(&wdev->action_registrations_lock);
+
+ list_for_each_entry(reg, &wdev->action_registrations, list) {
+ if (reg->match_len > action_data_len)
+ continue;
+
+ if (memcmp(reg->match, action_data, reg->match_len))
+ continue;
+
+ /* found match! */
+
+ /* Indicate the received Action frame to user space */
+ if (nl80211_send_action(rdev, dev, reg->nlpid, freq,
+ buf, len, gfp))
+ continue;
+
+ result = true;
+ break;
+ }
+
+ spin_unlock_bh(&wdev->action_registrations_lock);
+
+ return result;
+}
+EXPORT_SYMBOL(cfg80211_rx_action);
+
+void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
+ const u8 *buf, size_t len, bool ack, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ /* Indicate TX status of the Action frame to user space */
+ nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+}
+EXPORT_SYMBOL(cfg80211_action_tx_status);
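/*
 * Editor's sketch (not part of this patch): how the Action frame plumbing
 * above fits together. Userspace registers a match prefix -- the leading
 * bytes of the Action frame body, starting with the category octet -- via
 * NL80211_CMD_REGISTER_ACTION; cfg80211_mlme_register_action() keeps one
 * cfg80211_action_registration per prefix and netlink socket. When a driver
 * passes a received Action frame to cfg80211_rx_action(), the frame body is
 * compared against each registered prefix and the first match is unicast to
 * the owning socket with nl80211_send_action(). A hypothetical driver-side
 * caller (assuming the usual cfg80211 includes) could look like:
 */
static void example_rx_action(struct net_device *dev, int freq,
			      const u8 *frame, size_t len)
{
	/* true if some registered listener matched and was notified */
	if (!cfg80211_rx_action(dev, freq, frame, len, GFP_ATOMIC))
		pr_debug("unhandled Action frame\n");
}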
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a..e447db04cf7 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1,7 +1,7 @@
/*
* This is the new netlink-based wireless configuration interface.
*
- * Copyright 2006-2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
*/
#include <linux/if.h>
@@ -58,7 +58,7 @@ static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
}
/* policy for the attributes */
-static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
+static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
.len = 20-1 },
@@ -69,6 +69,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,11 +142,17 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
.len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
+ [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_FRAME] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
+ [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
};
/* policy for the attributes */
-static struct nla_policy
-nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
+static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = {
[NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
[NL80211_KEY_IDX] = { .type = NLA_U8 },
[NL80211_KEY_CIPHER] = { .type = NLA_U32 },
@@ -442,6 +449,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.frag_threshold);
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
dev->wiphy.rts_threshold);
+ NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+ dev->wiphy.coverage_class);
NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
dev->wiphy.max_scan_ssids);
@@ -569,6 +578,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(set_pmksa, SET_PMKSA);
CMD(del_pmksa, DEL_PMKSA);
CMD(flush_pmksa, FLUSH_PMKSA);
+ CMD(remain_on_channel, REMAIN_ON_CHANNEL);
+ CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
+ CMD(action, ACTION);
if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +693,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
u32 changed;
u8 retry_short = 0, retry_long = 0;
u32 frag_threshold = 0, rts_threshold = 0;
+ u8 coverage_class = 0;
rtnl_lock();
@@ -803,9 +816,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
changed |= WIPHY_PARAM_RTS_THRESHOLD;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
+ coverage_class = nla_get_u8(
+ info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
+ changed |= WIPHY_PARAM_COVERAGE_CLASS;
+ }
+
if (changed) {
u8 old_retry_short, old_retry_long;
u32 old_frag_threshold, old_rts_threshold;
+ u8 old_coverage_class;
if (!rdev->ops->set_wiphy_params) {
result = -EOPNOTSUPP;
@@ -816,6 +836,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
old_retry_long = rdev->wiphy.retry_long;
old_frag_threshold = rdev->wiphy.frag_threshold;
old_rts_threshold = rdev->wiphy.rts_threshold;
+ old_coverage_class = rdev->wiphy.coverage_class;
if (changed & WIPHY_PARAM_RETRY_SHORT)
rdev->wiphy.retry_short = retry_short;
@@ -825,6 +846,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.frag_threshold = frag_threshold;
if (changed & WIPHY_PARAM_RTS_THRESHOLD)
rdev->wiphy.rts_threshold = rts_threshold;
+ if (changed & WIPHY_PARAM_COVERAGE_CLASS)
+ rdev->wiphy.coverage_class = coverage_class;
result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
if (result) {
@@ -832,6 +855,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.retry_long = old_retry_long;
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
+ rdev->wiphy.coverage_class = old_coverage_class;
}
}
@@ -1637,42 +1661,9 @@ static int parse_station_flags(struct genl_info *info,
return 0;
}
-static u16 nl80211_calculate_bitrate(struct rate_info *rate)
-{
- int modulation, streams, bitrate;
-
- if (!(rate->flags & RATE_INFO_FLAGS_MCS))
- return rate->legacy;
-
- /* the formula below does only work for MCS values smaller than 32 */
- if (rate->mcs >= 32)
- return 0;
-
- modulation = rate->mcs & 7;
- streams = (rate->mcs >> 3) + 1;
-
- bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
- 13500000 : 6500000;
-
- if (modulation < 4)
- bitrate *= (modulation + 1);
- else if (modulation == 4)
- bitrate *= (modulation + 2);
- else
- bitrate *= (modulation + 3);
-
- bitrate *= streams;
-
- if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
- bitrate = (bitrate / 9) * 10;
-
- /* do NOT round down here */
- return (bitrate + 50000) / 100000;
-}
-
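/*
 * Editor's note (not part of this patch): the helper removed here is not
 * dropped -- it becomes cfg80211_calculate_bitrate(), declared in core.h
 * earlier in this patch, so other cfg80211 code can share it. Worked example
 * of the formula: MCS 7, one spatial stream, 40 MHz, short GI gives
 * modulation = 7, streams = 1, base = 13500000;
 * 13500000 * (7 + 3) = 135000000; short GI: 135000000 / 9 * 10 = 150000000;
 * (150000000 + 50000) / 100000 = 1500, i.e. 150.0 Mb/s in units of 100 kb/s.
 */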
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
int flags, struct net_device *dev,
- u8 *mac_addr, struct station_info *sinfo)
+ const u8 *mac_addr, struct station_info *sinfo)
{
void *hdr;
struct nlattr *sinfoattr, *txrate;
@@ -1716,8 +1707,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (!txrate)
goto nla_put_failure;
- /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */
- bitrate = nl80211_calculate_bitrate(&sinfo->txrate);
+ /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+ bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
if (bitrate > 0)
NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
@@ -2023,6 +2014,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES])
return -EINVAL;
+ if (!info->attrs[NL80211_ATTR_STA_AID])
+ return -EINVAL;
+
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
params.supported_rates =
nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
@@ -2031,11 +2025,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
- if (info->attrs[NL80211_ATTR_STA_AID]) {
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
- }
+ params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (!params.aid || params.aid > IEEE80211_MAX_AID)
+ return -EINVAL;
if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
params.ht_capa =
@@ -2050,6 +2042,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_rtnl;
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ err = -EINVAL;
+ goto out;
+ }
+
err = get_vlan(info, rdev, &params.vlan);
if (err)
goto out;
@@ -2057,35 +2055,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
/* validate settings */
err = 0;
- switch (dev->ieee80211_ptr->iftype) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- /* all ok but must have AID */
- if (!params.aid)
- err = -EINVAL;
- break;
- case NL80211_IFTYPE_MESH_POINT:
- /* disallow things mesh doesn't support */
- if (params.vlan)
- err = -EINVAL;
- if (params.aid)
- err = -EINVAL;
- if (params.ht_capa)
- err = -EINVAL;
- if (params.listen_interval >= 0)
- err = -EINVAL;
- if (params.supported_rates)
- err = -EINVAL;
- if (params.sta_flags_mask)
- err = -EINVAL;
- break;
- default:
- err = -EINVAL;
- }
-
- if (err)
- goto out;
-
if (!rdev->ops->add_station) {
err = -EOPNOTSUPP;
goto out;
@@ -2126,8 +2095,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
err = -EINVAL;
goto out;
}
@@ -2514,8 +2482,7 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static const struct nla_policy
- reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
+static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
[NL80211_ATTR_REG_RULE_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_FREQ_RANGE_START] = { .type = NLA_U32 },
[NL80211_ATTR_FREQ_RANGE_END] = { .type = NLA_U32 },
@@ -2583,12 +2550,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
- /* We ignore world regdom requests with the old regdom setup */
- if (is_world_regdom(data))
- return -EINVAL;
-#endif
-
r = regulatory_hint_user(data);
return r;
@@ -2690,8 +2651,7 @@ do {\
} \
} while (0);\
-static struct nla_policy
-nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] __read_mostly = {
+static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
[NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
[NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
@@ -3182,6 +3142,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
res->len_information_elements,
res->information_elements);
+ if (res->beacon_ies && res->len_beacon_ies &&
+ res->beacon_ies != res->information_elements)
+ NLA_PUT(msg, NL80211_BSS_BEACON_IES,
+ res->len_beacon_ies, res->beacon_ies);
if (res->tsf)
NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
if (res->beacon_interval)
@@ -3586,6 +3550,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
struct net_device *dev;
+ struct wireless_dev *wdev;
struct cfg80211_crypto_settings crypto;
struct ieee80211_channel *chan, *fixedchan;
const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
@@ -3631,7 +3596,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
}
mutex_lock(&rdev->devlist_mtx);
- fixedchan = rdev_fixed_channel(rdev, NULL);
+ wdev = dev->ieee80211_ptr;
+ fixedchan = rdev_fixed_channel(rdev, wdev);
if (fixedchan && chan != fixedchan) {
err = -EBUSY;
mutex_unlock(&rdev->devlist_mtx);
@@ -4322,6 +4288,496 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
}
+static int nl80211_remain_on_channel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ struct ieee80211_channel *chan;
+ struct sk_buff *msg;
+ void *hdr;
+ u64 cookie;
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ u32 freq, duration;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_DURATION])
+ return -EINVAL;
+
+ duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+
+ /*
+ * We should be on that channel for at least one jiffy,
+ * and more than 5 seconds seems excessive.
+ */
+ if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->remain_on_channel) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ channel_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40PLUS &&
+ channel_type != NL80211_CHAN_HT40MINUS) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (chan == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_REMAIN_ON_CHANNEL);
+
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
+
+ err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+ channel_type, duration, &cookie);
+
+ if (err)
+ goto free_msg;
+
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ genlmsg_end(msg, hdr);
+ err = genlmsg_reply(msg, info);
+ goto out;
+
+ nla_put_failure:
+ err = -ENOBUFS;
+ free_msg:
+ nlmsg_free(msg);
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
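/*
 * Editor's sketch (not part of this patch): the cookie returned in the reply
 * above identifies this remain-on-channel request from then on. A driver
 * reports progress with the same cookie through the new mlme.c helpers, and
 * NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL below takes the cookie to abort the
 * request early. Hypothetical driver-side calls:
 */
static void example_roc_events(struct net_device *dev, u64 cookie,
			       struct ieee80211_channel *chan,
			       enum nl80211_channel_type channel_type,
			       unsigned int duration)
{
	/* the hardware has reached the requested channel */
	cfg80211_ready_on_channel(dev, cookie, chan, channel_type,
				  duration, GFP_KERNEL);
	/* ... later, when the requested duration has elapsed ... */
	cfg80211_remain_on_channel_expired(dev, cookie, chan, channel_type,
					   GFP_KERNEL);
}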
+
+static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ u64 cookie;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_COOKIE])
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->cancel_remain_on_channel) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
+
+ err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len)
+{
+ u8 i;
+ u32 mask = 0;
+
+ for (i = 0; i < rates_len; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ int ridx;
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate =
+ &sband->bitrates[ridx];
+ if (rate == srate->bitrate) {
+ mask |= 1 << ridx;
+ break;
+ }
+ }
+ if (ridx == sband->n_bitrates)
+ return 0; /* rate not found */
+ }
+
+ return mask;
+}
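/*
 * Editor's note (not part of this patch): rateset_to_mask() maps the 802.11
 * supported-rates encoding (units of 500 kb/s, basic-rate flag in bit 7) onto
 * indices of the band's bitrate table. For example the byte 0x96 (basic
 * 11 Mb/s) gives (0x96 & 0x7f) * 5 = 22 * 5 = 110, which matches an
 * ieee80211_rate with .bitrate = 110 (units of 100 kb/s), so that rate's bit
 * is set in the returned mask; an unknown rate makes the whole set invalid.
 */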
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+ [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_RATES },
+};
+
+static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+ struct cfg80211_registered_device *rdev;
+ struct cfg80211_bitrate_mask mask;
+ int err, rem, i;
+ struct net_device *dev;
+ struct nlattr *tx_rates;
+ struct ieee80211_supported_band *sband;
+
+ if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->set_bitrate_mask) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ memset(&mask, 0, sizeof(mask));
+ /* Default to all rates enabled */
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = rdev->wiphy.bands[i];
+ mask.control[i].legacy =
+ sband ? (1 << sband->n_bitrates) - 1 : 0;
+ }
+
+ /*
+ * The nested attribute uses enum nl80211_band as the index. This maps
+ * directly to the enum ieee80211_band values used in cfg80211.
+ */
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
+ {
+ enum ieee80211_band band = nla_type(tx_rates);
+ if (band < 0 || band >= IEEE80211_NUM_BANDS) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ sband = rdev->wiphy.bands[band];
+ if (sband == NULL) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (tb[NL80211_TXRATE_LEGACY]) {
+ mask.control[band].legacy = rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_LEGACY]),
+ nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if (mask.control[band].legacy == 0) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ }
+ }
+
+ err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
+
+ unlock:
+ dev_put(dev);
+ cfg80211_unlock_rdev(rdev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
+ return -EINVAL;
+
+ if (nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]) < 1)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ /* not much point in registering if we can't reply */
+ if (!rdev->ops->action) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = cfg80211_mlme_register_action(dev->ieee80211_ptr, info->snd_pid,
+ nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
+ nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ struct ieee80211_channel *chan;
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ u32 freq;
+ int err;
+ void *hdr;
+ u64 cookie;
+ struct sk_buff *msg;
+
+ if (!info->attrs[NL80211_ATTR_FRAME] ||
+ !info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->action) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ channel_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40PLUS &&
+ channel_type != NL80211_CHAN_HT40MINUS) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (chan == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_ACTION);
+
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
+ err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
+ nla_data(info->attrs[NL80211_ATTR_FRAME]),
+ nla_len(info->attrs[NL80211_ATTR_FRAME]),
+ &cookie);
+ if (err)
+ goto free_msg;
+
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ genlmsg_end(msg, hdr);
+ err = genlmsg_reply(msg, info);
+ goto out;
+
+ nla_put_failure:
+ err = -ENOBUFS;
+ free_msg:
+ nlmsg_free(msg);
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ struct net_device *dev;
+ u8 ps_state;
+ bool state;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_PS_STATE]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);
+
+ if (ps_state != NL80211_PS_DISABLED && ps_state != NL80211_PS_ENABLED) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_power_mgmt) {
+ err = -EOPNOTSUPP;
+ goto unlock_rdev;
+ }
+
+ state = (ps_state == NL80211_PS_ENABLED);
+
+ if (state == wdev->ps)
+ goto unlock_rdev;
+
+ wdev->ps = state;
+
+ if (rdev->ops->set_power_mgmt(wdev->wiphy, dev, wdev->ps,
+ wdev->ps_timeout))
+ /* assume this means it's off */
+ wdev->ps = false;
+
+unlock_rdev:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+
+out:
+ return err;
+}
+
+static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ enum nl80211_ps_state ps_state;
+ struct wireless_dev *wdev;
+ struct net_device *dev;
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_power_mgmt) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_GET_POWER_SAVE);
+ if (!hdr) {
+ err = -ENOMEM;
+ goto free_msg;
+ }
+
+ if (wdev->ps)
+ ps_state = NL80211_PS_ENABLED;
+ else
+ ps_state = NL80211_PS_DISABLED;
+
+ NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);
+
+ genlmsg_end(msg, hdr);
+ err = genlmsg_reply(msg, info);
+ goto out;
+
+nla_put_failure:
+ err = -ENOBUFS;
+
+free_msg:
+ nlmsg_free(msg);
+
+out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+
+unlock_rtnl:
+ rtnl_unlock();
+
+ return err;
+}
+
static struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +5040,50 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
-
+ {
+ .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
+ .doit = nl80211_remain_on_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
+ .doit = nl80211_cancel_remain_on_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
+ .doit = nl80211_set_tx_bitrate_mask,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_REGISTER_ACTION,
+ .doit = nl80211_register_action,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_ACTION,
+ .doit = nl80211_action,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_POWER_SAVE,
+ .doit = nl80211_set_power_save,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_GET_POWER_SAVE,
+ .doit = nl80211_get_power_save,
+ .policy = nl80211_policy,
+ /* can be retrieved by unprivileged users */
+ },
};
+
static struct genl_multicast_group nl80211_mlme_mcgrp = {
.name = "mlme",
};
@@ -5173,6 +5671,193 @@ nla_put_failure:
nlmsg_free(msg);
}
+static void nl80211_send_remain_on_chan_event(
+ int cmd, struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
+ NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
+ rdev, netdev, cookie, chan,
+ channel_type, duration, gfp);
+}
+
+void nl80211_send_remain_on_channel_cancel(
+ struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ u64 cookie, struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type, gfp_t gfp)
+{
+ nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
+ rdev, netdev, cookie, chan,
+ channel_type, 0, gfp);
+}
+
+void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+}
+
+int nl80211_send_action(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u32 nlpid,
+ int freq, const u8 *buf, size_t len, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return -ENOMEM;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
+ NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+
+ err = genlmsg_end(msg, hdr);
+ if (err < 0) {
+ nlmsg_free(msg);
+ return err;
+ }
+
+ err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
+ if (err < 0)
+ return err;
+ return 0;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ACTION_TX_STATUS);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+ if (ack)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+static int nl80211_netlink_notify(struct notifier_block * nb,
+ unsigned long state,
+ void *_notify)
+{
+ struct netlink_notify *notify = _notify;
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+
+ if (state != NETLINK_URELEASE)
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
+ list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
+ cfg80211_mlme_unregister_actions(wdev, notify->pid);
+
+ rcu_read_unlock();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nl80211_netlink_notifier = {
+ .notifier_call = nl80211_netlink_notify,
+};
+
/* initialisation/exit functions */
int nl80211_init(void)
@@ -5206,6 +5891,10 @@ int nl80211_init(void)
goto err_out;
#endif
+ err = netlink_register_notifier(&nl80211_netlink_notifier);
+ if (err)
+ goto err_out;
+
return 0;
err_out:
genl_unregister_family(&nl80211_fam);
@@ -5214,5 +5903,6 @@ int nl80211_init(void)
void nl80211_exit(void)
{
+ netlink_unregister_notifier(&nl80211_netlink_notifier);
genl_unregister_family(&nl80211_fam);
}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b..4ca511102c6 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,27 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
gfp_t gfp);
+void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp);
+void nl80211_send_remain_on_channel_cancel(
+ struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ u64 cookie, struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type, gfp_t gfp);
+
+void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
+int nl80211_send_action(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u32 nlpid, int freq,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ const u8 *buf, size_t len, bool ack,
+ gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index f591871a7b4..1332c445d1c 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -2,6 +2,16 @@
* Radiotap parser
*
* Copyright 2007 Andy Green <andy@warmcat.com>
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ *
+ * See COPYING for more details.
*/
#include <net/cfg80211.h>
@@ -10,6 +20,35 @@
/* function prototypes and related defs are in include/net/cfg80211.h */
+static const struct radiotap_align_size rtap_namespace_sizes[] = {
+ [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, },
+ [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, },
+ [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+ /*
+ * add more here as they are defined in radiotap.h
+ */
+};
+
+static const struct ieee80211_radiotap_namespace radiotap_ns = {
+ .n_bits = ARRAY_SIZE(rtap_namespace_sizes),
+ .align_size = rtap_namespace_sizes,
+};
+
/**
* ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
* @iterator: radiotap_iterator to initialize
@@ -50,9 +89,9 @@
*/
int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length)
+ struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
{
/* Linux only supports version 0 radiotap format */
if (radiotap_header->it_version)
@@ -62,19 +101,24 @@ int ieee80211_radiotap_iterator_init(
if (max_length < get_unaligned_le16(&radiotap_header->it_len))
return -EINVAL;
- iterator->rtheader = radiotap_header;
- iterator->max_length = get_unaligned_le16(&radiotap_header->it_len);
- iterator->arg_index = 0;
- iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
- iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
- iterator->this_arg = NULL;
+ iterator->_rtheader = radiotap_header;
+ iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
+ iterator->_arg_index = 0;
+ iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
+ iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
+ iterator->_reset_on_ext = 0;
+ iterator->_next_bitmap = &radiotap_header->it_present;
+ iterator->_next_bitmap++;
+ iterator->_vns = vns;
+ iterator->current_namespace = &radiotap_ns;
+ iterator->is_radiotap_ns = 1;
/* find payload start allowing for extended bitmap(s) */
- if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
- while (get_unaligned_le32(iterator->arg) &
- (1 << IEEE80211_RADIOTAP_EXT)) {
- iterator->arg += sizeof(u32);
+ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+ while (get_unaligned_le32(iterator->_arg) &
+ (1 << IEEE80211_RADIOTAP_EXT)) {
+ iterator->_arg += sizeof(uint32_t);
/*
* check for insanity where the present bitmaps
@@ -82,12 +126,13 @@ int ieee80211_radiotap_iterator_init(
* stated radiotap header length
*/
- if (((ulong)iterator->arg -
- (ulong)iterator->rtheader) > iterator->max_length)
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_max_length)
return -EINVAL;
}
- iterator->arg += sizeof(u32);
+ iterator->_arg += sizeof(uint32_t);
/*
* no need to check again for blowing past stated radiotap
@@ -96,12 +141,36 @@ int ieee80211_radiotap_iterator_init(
*/
}
+ iterator->this_arg = iterator->_arg;
+
/* we are all initialized happily */
return 0;
}
EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
+static void find_ns(struct ieee80211_radiotap_iterator *iterator,
+ uint32_t oui, uint8_t subns)
+{
+ int i;
+
+ iterator->current_namespace = NULL;
+
+ if (!iterator->_vns)
+ return;
+
+ for (i = 0; i < iterator->_vns->n_ns; i++) {
+ if (iterator->_vns->ns[i].oui != oui)
+ continue;
+ if (iterator->_vns->ns[i].subns != subns)
+ continue;
+
+ iterator->current_namespace = &iterator->_vns->ns[i];
+ break;
+ }
+}
+
+
/**
* ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
@@ -127,99 +196,80 @@ EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
*/
int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator)
+ struct ieee80211_radiotap_iterator *iterator)
{
-
- /*
- * small length lookup table for all radiotap types we heard of
- * starting from b0 in the bitmap, so we can walk the payload
- * area of the radiotap header
- *
- * There is a requirement to pad args, so that args
- * of a given length must begin at a boundary of that length
- * -- but note that compound args are allowed (eg, 2 x u16
- * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
- * a reliable indicator of alignment requirement.
- *
- * upper nybble: content alignment for arg
- * lower nybble: content length for arg
- */
-
- static const u8 rt_sizes[] = {
- [IEEE80211_RADIOTAP_TSFT] = 0x88,
- [IEEE80211_RADIOTAP_FLAGS] = 0x11,
- [IEEE80211_RADIOTAP_RATE] = 0x11,
- [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
- [IEEE80211_RADIOTAP_FHSS] = 0x22,
- [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
- [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
- [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
- [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
- [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
- [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
- [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
- [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
- [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11,
- [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22,
- [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22,
- [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11,
- [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11,
- /*
- * add more here as they are defined in
- * include/net/ieee80211_radiotap.h
- */
- };
-
- /*
- * for every radiotap entry we can at
- * least skip (by knowing the length)...
- */
-
- while (iterator->arg_index < sizeof(rt_sizes)) {
+ while (1) {
int hit = 0;
- int pad;
+ int pad, align, size, subns, vnslen;
+ uint32_t oui;
- if (!(iterator->bitmap_shifter & 1))
+ /* if no more EXT bits, that's it */
+ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT &&
+ !(iterator->_bitmap_shifter & 1))
+ return -ENOENT;
+
+ if (!(iterator->_bitmap_shifter & 1))
goto next_entry; /* arg not present */
+ /* get alignment/size of data */
+ switch (iterator->_arg_index % 32) {
+ case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
+ case IEEE80211_RADIOTAP_EXT:
+ align = 1;
+ size = 0;
+ break;
+ case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
+ align = 2;
+ size = 6;
+ break;
+ default:
+ if (!iterator->current_namespace ||
+ iterator->_arg_index >= iterator->current_namespace->n_bits) {
+ if (iterator->current_namespace == &radiotap_ns)
+ return -ENOENT;
+ align = 0;
+ } else {
+ align = iterator->current_namespace->align_size[iterator->_arg_index].align;
+ size = iterator->current_namespace->align_size[iterator->_arg_index].size;
+ }
+ if (!align) {
+ /* skip all subsequent data */
+ iterator->_arg = iterator->_next_ns_data;
+ /* give up on this namespace */
+ iterator->current_namespace = NULL;
+ goto next_entry;
+ }
+ break;
+ }
+
/*
* arg is present, account for alignment padding
- * 8-bit args can be at any alignment
- * 16-bit args must start on 16-bit boundary
- * 32-bit args must start on 32-bit boundary
- * 64-bit args must start on 64-bit boundary
*
- * note that total arg size can differ from alignment of
- * elements inside arg, so we use upper nybble of length
- * table to base alignment on
- *
- * also note: these alignments are ** relative to the
- * start of the radiotap header **. There is no guarantee
+ * Note that these alignments are relative to the start
+ * of the radiotap header. There is no guarantee
* that the radiotap header itself is aligned on any
* kind of boundary.
*
- * the above is why get_unaligned() is used to dereference
- * multibyte elements from the radiotap area
+ * The above is why get_unaligned() is used to dereference
+ * multibyte elements from the radiotap area.
*/
- pad = (((ulong)iterator->arg) -
- ((ulong)iterator->rtheader)) &
- ((rt_sizes[iterator->arg_index] >> 4) - 1);
+ pad = ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader) & (align - 1);
if (pad)
- iterator->arg +=
- (rt_sizes[iterator->arg_index] >> 4) - pad;
+ iterator->_arg += align - pad;
/*
* this is what we will return to user, but we need to
* move on first so next call has something fresh to test
*/
- iterator->this_arg_index = iterator->arg_index;
- iterator->this_arg = iterator->arg;
- hit = 1;
+ iterator->this_arg_index = iterator->_arg_index;
+ iterator->this_arg = iterator->_arg;
+ iterator->this_arg_size = size;
/* internally move on the size of this arg */
- iterator->arg += rt_sizes[iterator->arg_index] & 0x0f;
+ iterator->_arg += size;
/*
* check for insanity where we are given a bitmap that
@@ -228,32 +278,73 @@ int ieee80211_radiotap_iterator_next(
* max_length on the last arg, never exceeding it.
*/
- if (((ulong)iterator->arg - (ulong)iterator->rtheader) >
- iterator->max_length)
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_max_length)
return -EINVAL;
- next_entry:
- iterator->arg_index++;
- if (unlikely((iterator->arg_index & 31) == 0)) {
- /* completed current u32 bitmap */
- if (iterator->bitmap_shifter & 1) {
- /* b31 was set, there is more */
- /* move to next u32 bitmap */
- iterator->bitmap_shifter =
- get_unaligned_le32(iterator->next_bitmap);
- iterator->next_bitmap++;
- } else
- /* no more bitmaps: end */
- iterator->arg_index = sizeof(rt_sizes);
- } else /* just try the next bit */
- iterator->bitmap_shifter >>= 1;
+ /* these special ones are valid in each bitmap word */
+ switch (iterator->_arg_index % 32) {
+ case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+
+ iterator->_reset_on_ext = 1;
+
+ vnslen = get_unaligned_le16(iterator->this_arg + 4);
+ iterator->_next_ns_data = iterator->_arg + vnslen;
+ oui = (*iterator->this_arg << 16) |
+ (*(iterator->this_arg + 1) << 8) |
+ *(iterator->this_arg + 2);
+ subns = *(iterator->this_arg + 3);
+
+ find_ns(iterator, oui, subns);
+
+ iterator->is_radiotap_ns = 0;
+ /* allow parsers to show this information */
+ iterator->this_arg_index =
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
+ iterator->this_arg_size += vnslen;
+ if ((unsigned long)iterator->this_arg +
+ iterator->this_arg_size -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
+ hit = 1;
+ break;
+ case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+
+ iterator->_reset_on_ext = 1;
+ iterator->current_namespace = &radiotap_ns;
+ iterator->is_radiotap_ns = 1;
+ break;
+ case IEEE80211_RADIOTAP_EXT:
+ /*
+ * bit 31 was set, there is more
+ * -- move to next u32 bitmap
+ */
+ iterator->_bitmap_shifter =
+ get_unaligned_le32(iterator->_next_bitmap);
+ iterator->_next_bitmap++;
+ if (iterator->_reset_on_ext)
+ iterator->_arg_index = 0;
+ else
+ iterator->_arg_index++;
+ iterator->_reset_on_ext = 0;
+ break;
+ default:
+ /* we've got a hit! */
+ hit = 1;
+ next_entry:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+ }
/* if we found a valid arg earlier, return it now */
if (hit)
return 0;
}
-
- /* we don't know how to handle any more args, we're done */
- return -ENOENT;
}
EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
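/*
 * Editor's sketch (not part of this patch): typical use of the reworked
 * iterator API. The new fourth argument may be NULL when the caller only
 * cares about the standard radiotap namespace; unknown vendor namespaces are
 * then skipped and is_radiotap_ns tells the caller which namespace this_arg
 * belongs to. Assumes the usual radiotap/cfg80211 headers.
 */
static void example_parse_radiotap(struct ieee80211_radiotap_header *hdr,
				   int len)
{
	struct ieee80211_radiotap_iterator it;
	int ret = ieee80211_radiotap_iterator_init(&it, hdr, len, NULL);

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&it);
		if (ret)
			break;
		if (!it.is_radiotap_ns)
			continue;	/* field from an unknown vendor namespace */
		switch (it.this_arg_index) {
		case IEEE80211_RADIOTAP_RATE:
			/* one byte, units of 500 kb/s */
			pr_debug("rate: %u * 500 kb/s\n", *it.this_arg);
			break;
		case IEEE80211_RADIOTAP_FLAGS:
			pr_debug("flags: 0x%02x\n", *it.this_arg);
			break;
		default:
			break;
		}
	}
}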
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7a0754c92df..ed89c59bb43 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -40,8 +40,18 @@
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
+#include "regdb.h"
#include "nl80211.h"
+#ifdef CONFIG_CFG80211_REG_DEBUG
+#define REG_DBG_PRINT(format, args...) \
+ do { \
+ printk(KERN_DEBUG format , ## args); \
+ } while (0)
+#else
+#define REG_DBG_PRINT(args...)
+#endif
+
/* Receipt of information from last regulatory request */
static struct regulatory_request *last_request;
@@ -124,82 +134,11 @@ static const struct ieee80211_regdomain *cfg80211_world_regdom =
&world_regdom;
static char *ieee80211_regdom = "00";
+static char user_alpha2[2];
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
-/*
- * We assume 40 MHz bandwidth for the old regulatory work.
- * We make emphasis we are using the exact same frequencies
- * as before
- */
-
-static const struct ieee80211_regdomain us_regdom = {
- .n_reg_rules = 6,
- .alpha2 = "US",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..11 */
- REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
- /* IEEE 802.11a, channel 36..48 */
- REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
- /* IEEE 802.11a, channels 48..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..124 */
- REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 132..144 */
- REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 149..165, outdoor */
- REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
- }
-};
-
-static const struct ieee80211_regdomain jp_regdom = {
- .n_reg_rules = 6,
- .alpha2 = "JP",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..11 */
- REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
- /* IEEE 802.11b/g, channels 12..13 */
- REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
- /* IEEE 802.11b/g, channel 14 */
- REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
- /* IEEE 802.11a, channels 36..48 */
- REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channels 52..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..144 */
- REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
- }
-};
-
-static const struct ieee80211_regdomain *static_regdom(char *alpha2)
-{
- if (alpha2[0] == 'U' && alpha2[1] == 'S')
- return &us_regdom;
- if (alpha2[0] == 'J' && alpha2[1] == 'P')
- return &jp_regdom;
- /* Use world roaming rules for "EU", since it was a pseudo
- domain anyway... */
- if (alpha2[0] == 'E' && alpha2[1] == 'U')
- return &world_regdom;
- /* Default, world roaming rules */
- return &world_regdom;
-}
-
-static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
-{
- if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
- return true;
- return false;
-}
-#else
-static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
-{
- return false;
-}
-#endif
-
static void reset_regdomains(void)
{
/* avoid freeing static information or freeing something twice */
@@ -209,8 +148,6 @@ static void reset_regdomains(void)
cfg80211_world_regdom = NULL;
if (cfg80211_regdomain == &world_regdom)
cfg80211_regdomain = NULL;
- if (is_old_static_regdom(cfg80211_regdomain))
- cfg80211_regdomain = NULL;
kfree(cfg80211_regdomain);
kfree(cfg80211_world_regdom);
@@ -316,6 +253,27 @@ static bool regdom_changes(const char *alpha2)
return true;
}
+/*
+ * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached; this lets
+ * you know whether a valid regulatory hint with NL80211_REGDOM_SET_BY_USER
+ * has ever been issued.
+ */
+static bool is_user_regdom_saved(void)
+{
+ if (user_alpha2[0] == '9' && user_alpha2[1] == '7')
+ return false;
+
+ /* This would indicate a mistake in the design */
+ if (WARN((!is_world_regdom(user_alpha2) &&
+ !is_an_alpha2(user_alpha2)),
+ "Unexpected user alpha2: %c%c\n",
+ user_alpha2[0],
+ user_alpha2[1]))
+ return false;
+
+ return true;
+}
+
/**
* country_ie_integrity_changes - tells us if the country IE has changed
* @checksum: checksum of country IE of fields we are interested in
@@ -335,6 +293,98 @@ static bool country_ie_integrity_changes(u32 checksum)
return false;
}
+static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
+ const struct ieee80211_regdomain *src_regd)
+{
+ struct ieee80211_regdomain *regd;
+ int size_of_regd = 0;
+ unsigned int i;
+
+ size_of_regd = sizeof(struct ieee80211_regdomain) +
+ ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
+
+ regd = kzalloc(size_of_regd, GFP_KERNEL);
+ if (!regd)
+ return -ENOMEM;
+
+ memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
+
+ for (i = 0; i < src_regd->n_reg_rules; i++)
+ memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+
+ *dst_regd = regd;
+ return 0;
+}
+
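reg_copy_regd() deep-copies a regdomain whose reg_rules[] live in a flexible array at the end of the structure, which is why the allocation size is computed from n_reg_rules rather than from sizeof() alone. A standalone sketch of the same pattern, with made-up structure names (not kernel code):

#include <stdlib.h>
#include <string.h>

/* Made-up structures mirroring the regdomain layout: a fixed header
 * followed by a flexible array of rules. */
struct demo_rule { int start_khz, end_khz; };
struct demo_regd {
	unsigned int n_rules;
	struct demo_rule rules[];
};

/* Deep-copy a demo_regd including its trailing rule array. */
static struct demo_regd *demo_copy_regd(const struct demo_regd *src)
{
	size_t size = sizeof(*src) + src->n_rules * sizeof(struct demo_rule);
	struct demo_regd *dst = malloc(size);

	if (!dst)
		return NULL;
	memcpy(dst, src, size);
	return dst;
}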
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+struct reg_regdb_search_request {
+ char alpha2[2];
+ struct list_head list;
+};
+
+static LIST_HEAD(reg_regdb_search_list);
+static DEFINE_SPINLOCK(reg_regdb_search_lock);
+
+static void reg_regdb_search(struct work_struct *work)
+{
+ struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *curdom, *regdom;
+ int i, r;
+
+ spin_lock(&reg_regdb_search_lock);
+ while (!list_empty(&reg_regdb_search_list)) {
+ request = list_first_entry(&reg_regdb_search_list,
+ struct reg_regdb_search_request,
+ list);
+ list_del(&request->list);
+
+ for (i = 0; i < reg_regdb_size; i++) {
+ curdom = reg_regdb[i];
+
+ if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
+ r = reg_copy_regd(&regdom, curdom);
+ if (r)
+ break;
+ spin_unlock(&reg_regdb_search_lock);
+ mutex_lock(&cfg80211_mutex);
+ set_regdom(regdom);
+ mutex_unlock(&cfg80211_mutex);
+ spin_lock(&reg_regdb_search_lock);
+ break;
+ }
+ }
+
+ kfree(request);
+ }
+ spin_unlock(&reg_regdb_search_lock);
+}
+
+static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+
+static void reg_regdb_query(const char *alpha2)
+{
+ struct reg_regdb_search_request *request;
+
+ if (!alpha2)
+ return;
+
+ request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+ if (!request)
+ return;
+
+ memcpy(request->alpha2, alpha2, 2);
+
+ spin_lock(&reg_regdb_search_lock);
+ list_add_tail(&request->list, &reg_regdb_search_list);
+ spin_unlock(&reg_regdb_search_lock);
+
+ schedule_work(&reg_regdb_work);
+}
+#else
+static inline void reg_regdb_query(const char *alpha2) {}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace.
@@ -354,6 +404,9 @@ static int call_crda(const char *alpha2)
printk(KERN_INFO "cfg80211: Calling CRDA to update world "
"regulatory domain\n");
+ /* query internal regulatory database (if it exists) */
+ reg_regdb_query(alpha2);
+
country_env[8] = alpha2[0];
country_env[9] = alpha2[1];
@@ -454,12 +507,212 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
}
/*
+ * This is a workaround to sanity check ieee80211_channel_to_frequency()'s
+ * result. ieee80211_channel_to_frequency() can currently provide a 2 GHz
+ * channel when in fact a 5 GHz channel was desired. An example would be an
+ * AP providing channel 8 in a country IE triplet it sent on the 5 GHz band;
+ * that channel is meant to be channel 8 on 5 GHz, not a 2 GHz channel.
+ *
+ * This can be removed once ieee80211_channel_to_frequency() takes in a band.
+ */
+static bool chan_in_band(int chan, enum ieee80211_band band)
+{
+ int center_freq = ieee80211_channel_to_frequency(chan);
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ if (center_freq <= 2484)
+ return true;
+ return false;
+ case IEEE80211_BAND_5GHZ:
+ if (center_freq >= 5005)
+ return true;
+ return false;
+ default:
+ return false;
+ }
+}
+
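To make the ambiguity concrete: with the single-argument ieee80211_channel_to_frequency() of this era, channel 8 always resolves to a 2.4 GHz frequency (2447 MHz, assuming the 2407 + chan * 5 mapping in util.c), so a channel 8 triplet received on the 5 GHz band fails the chan_in_band() check. A small illustration:

static void chan_in_band_example(void)
{
	/* Channel 8 advertised by a 5 GHz AP: maps to 2447 MHz, which is
	 * below 5005 MHz, so the triplet is treated as bogus. */
	WARN_ON(chan_in_band(8, IEEE80211_BAND_5GHZ));

	/* Channel 36 resolves to 5180 MHz and is accepted on 5 GHz. */
	WARN_ON(!chan_in_band(36, IEEE80211_BAND_5GHZ));
}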
+/*
+ * Some APs may send a country IE triplet for each channel they
+ * support and while this is completely overkill and silly we still
+ * need to support it. We avoid making a single rule for each channel
+ * though, and to help us with this we use this helper to find the
+ * actual subband end channel. These types of country IE triplet
+ * scenarios are then handled, each yielding two regulatory rules from
+ * parsing a country IE:
+ *
+ * [1]
+ * [2]
+ * [36]
+ * [40]
+ *
+ * [1]
+ * [2-4]
+ * [5-12]
+ * [36]
+ * [40-44]
+ *
+ * [1-4]
+ * [5-7]
+ * [36-44]
+ * [48-64]
+ *
+ * [36-36]
+ * [40-40]
+ * [44-44]
+ * [48-48]
+ * [52-52]
+ * [56-56]
+ * [60-60]
+ * [64-64]
+ * [100-100]
+ * [104-104]
+ * [108-108]
+ * [112-112]
+ * [116-116]
+ * [120-120]
+ * [124-124]
+ * [128-128]
+ * [132-132]
+ * [136-136]
+ * [140-140]
+ *
+ * Returns 0 if the IE is found to be invalid somewhere in the
+ * middle.
+ */
+static int max_subband_chan(enum ieee80211_band band,
+ int orig_cur_chan,
+ int orig_end_channel,
+ s8 orig_max_power,
+ u8 **country_ie,
+ u8 *country_ie_len)
+{
+ u8 *triplets_start = *country_ie;
+ u8 len_at_triplet = *country_ie_len;
+ int end_subband_chan = orig_end_channel;
+
+ /*
+ * We'll deal with padding for the caller unless
+ * it's not immediate and we don't process any channels
+ */
+ if (*country_ie_len == 1) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ return orig_end_channel;
+ }
+
+ /* Move to the next triplet and then start search */
+ *country_ie += 3;
+ *country_ie_len -= 3;
+
+ if (!chan_in_band(orig_cur_chan, band))
+ return 0;
+
+ while (*country_ie_len >= 3) {
+ int end_channel = 0;
+ struct ieee80211_country_ie_triplet *triplet =
+ (struct ieee80211_country_ie_triplet *) *country_ie;
+ int cur_channel = 0, next_expected_chan;
+
+ /* means last triplet is completely unrelated to this one */
+ if (triplet->ext.reg_extension_id >=
+ IEEE80211_COUNTRY_EXTENSION_ID) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ if (triplet->chans.first_channel == 0) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ if (*country_ie_len != 0)
+ return 0;
+ break;
+ }
+
+ if (triplet->chans.num_channels == 0)
+ return 0;
+
+ /* Monotonically increasing channel order */
+ if (triplet->chans.first_channel <= end_subband_chan)
+ return 0;
+
+ if (!chan_in_band(triplet->chans.first_channel, band))
+ return 0;
+
+ /* 2 GHz */
+ if (triplet->chans.first_channel <= 14) {
+ end_channel = triplet->chans.first_channel +
+ triplet->chans.num_channels - 1;
+ }
+ else {
+ end_channel = triplet->chans.first_channel +
+ (4 * (triplet->chans.num_channels - 1));
+ }
+
+ if (!chan_in_band(end_channel, band))
+ return 0;
+
+ if (orig_max_power != triplet->chans.max_power) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ cur_channel = triplet->chans.first_channel;
+
+ /* The key is finding the right next expected channel */
+ if (band == IEEE80211_BAND_2GHZ)
+ next_expected_chan = end_subband_chan + 1;
+ else
+ next_expected_chan = end_subband_chan + 4;
+
+ if (cur_channel != next_expected_chan) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ end_subband_chan = end_channel;
+
+ /* Move to the next one */
+ *country_ie += 3;
+ *country_ie_len -= 3;
+
+ /*
+ * Padding needs to be dealt with if we processed
+ * some channels.
+ */
+ if (*country_ie_len == 1) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ break;
+ }
+
+ /* If seen, the IE is invalid */
+ if (*country_ie_len == 2)
+ return 0;
+ }
+
+ if (end_subband_chan == orig_end_channel) {
+ *country_ie = triplets_start;
+ *country_ie_len = len_at_triplet;
+ return orig_end_channel;
+ }
+
+ return end_subband_chan;
+}
+
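A worked example of the arithmetic this helper relies on, under the same assumptions as the code above (5 GHz channels spaced 4 apart, equal max_power across the merged triplets):

/* 5 GHz end-channel arithmetic used by the country IE parser. */
static inline int ie_5ghz_end_chan(int first_channel, int num_channels)
{
	/* e.g. first_channel = 36, num_channels = 4 -> 36,40,44,48 -> 48 */
	return first_channel + 4 * (num_channels - 1);
}

/*
 * Merging example: per-channel triplets [36,1] [40,1] [44,1] [48,1] with
 * equal max_power start with end_subband_chan = 36; each following
 * triplet is accepted because its first channel equals the expected
 * end_subband_chan + 4, leaving end_subband_chan = 48 -- one regulatory
 * rule covering 36..48 instead of four.
 */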
+/*
* Converts a country IE to a regulatory domain. A regulatory domain
* structure has a lot of information which the IE doesn't yet have,
* so for the other values we use upper max values as we will intersect
* with our userspace regulatory agent to get lower bounds.
*/
static struct ieee80211_regdomain *country_ie_2_rd(
+ enum ieee80211_band band,
u8 *country_ie,
u8 country_ie_len,
u32 *checksum)
@@ -521,10 +774,29 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+ /*
+ * APs can add padding to make the length divisible
+ * by two, as required by the spec.
+ */
+ if (triplet->chans.first_channel == 0) {
+ country_ie++;
+ country_ie_len--;
+ /* This is expected to be at the very end only */
+ if (country_ie_len != 0)
+ return NULL;
+ break;
+ }
+
+ if (triplet->chans.num_channels == 0)
+ return NULL;
+
+ if (!chan_in_band(triplet->chans.first_channel, band))
+ return NULL;
+
/* 2 GHz */
- if (triplet->chans.first_channel <= 14)
+ if (band == IEEE80211_BAND_2GHZ)
end_channel = triplet->chans.first_channel +
- triplet->chans.num_channels;
+ triplet->chans.num_channels - 1;
else
/*
* 5 GHz -- For example in country IEs if the first
@@ -539,6 +811,24 @@ static struct ieee80211_regdomain *country_ie_2_rd(
(4 * (triplet->chans.num_channels - 1));
cur_channel = triplet->chans.first_channel;
+
+ /*
+ * Enhancement for APs that send a triplet for every channel
+ * or for whatever reason send triplets with multiple channels
+ * separated when in fact they should be together.
+ */
+ end_channel = max_subband_chan(band,
+ cur_channel,
+ end_channel,
+ triplet->chans.max_power,
+ &country_ie,
+ &country_ie_len);
+ if (!end_channel)
+ return NULL;
+
+ if (!chan_in_band(end_channel, band))
+ return NULL;
+
cur_sub_max_channel = end_channel;
/* Basic sanity check */
@@ -569,10 +859,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
last_sub_max_channel = cur_sub_max_channel;
- country_ie += 3;
- country_ie_len -= 3;
num_rules++;
+ if (country_ie_len >= 3) {
+ country_ie += 3;
+ country_ie_len -= 3;
+ }
+
/*
* Note: this is not a IEEE requirement but
* simply a memory requirement
@@ -615,6 +908,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+ if (triplet->chans.first_channel == 0) {
+ country_ie++;
+ country_ie_len--;
+ break;
+ }
+
reg_rule = &rd->reg_rules[i];
freq_range = &reg_rule->freq_range;
power_rule = &reg_rule->power_rule;
@@ -622,13 +921,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
reg_rule->flags = flags;
/* 2 GHz */
- if (triplet->chans.first_channel <= 14)
+ if (band == IEEE80211_BAND_2GHZ)
end_channel = triplet->chans.first_channel +
- triplet->chans.num_channels;
+ triplet->chans.num_channels - 1;
else
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
+ end_channel = max_subband_chan(band,
+ triplet->chans.first_channel,
+ end_channel,
+ triplet->chans.max_power,
+ &country_ie,
+ &country_ie_len);
+
/*
* The +10 is since the regulatory domain expects
* the actual band edge, not the center of freq for
@@ -649,12 +955,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
*/
freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
power_rule->max_antenna_gain = DBI_TO_MBI(100);
- power_rule->max_eirp = DBM_TO_MBM(100);
+ power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
- country_ie += 3;
- country_ie_len -= 3;
i++;
+ if (country_ie_len >= 3) {
+ country_ie += 3;
+ country_ie_len -= 3;
+ }
+
BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
}
@@ -950,25 +1259,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
if (r == -ERANGE &&
last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-#ifdef CONFIG_CFG80211_REG_DEBUG
- printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
+ REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
"intact on %s - no rule found in band on "
"Country IE\n",
- chan->center_freq, wiphy_name(wiphy));
-#endif
+ chan->center_freq, wiphy_name(wiphy));
} else {
/*
* In this case we know the country IE has at least one reg rule
* for the band so we respect its band definitions
*/
-#ifdef CONFIG_CFG80211_REG_DEBUG
if (last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE)
- printk(KERN_DEBUG "cfg80211: Disabling "
+ REG_DBG_PRINT("cfg80211: Disabling "
"channel %d MHz on %s due to "
"Country IE\n",
chan->center_freq, wiphy_name(wiphy));
-#endif
flags |= IEEE80211_CHAN_DISABLED;
chan->flags = flags;
}
@@ -1342,30 +1647,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
-static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
- const struct ieee80211_regdomain *src_regd)
-{
- struct ieee80211_regdomain *regd;
- int size_of_regd = 0;
- unsigned int i;
-
- size_of_regd = sizeof(struct ieee80211_regdomain) +
- ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
-
- regd = kzalloc(size_of_regd, GFP_KERNEL);
- if (!regd)
- return -ENOMEM;
-
- memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
-
- for (i = 0; i < src_regd->n_reg_rules; i++)
- memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
- sizeof(struct ieee80211_reg_rule));
-
- *dst_regd = regd;
- return 0;
-}
-
/*
* Return value which can be used by ignore_request() to indicate
* it has been determined we should intersect two regulatory domains
@@ -1387,7 +1668,7 @@ static int ignore_request(struct wiphy *wiphy,
switch (pending_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
- return -EINVAL;
+ return 0;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
@@ -1418,8 +1699,6 @@ static int ignore_request(struct wiphy *wiphy,
return REG_INTERSECT;
case NL80211_REGDOM_SET_BY_DRIVER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
- if (is_old_static_regdom(cfg80211_regdomain))
- return 0;
if (regdom_changes(pending_request->alpha2))
return 0;
return -EALREADY;
@@ -1456,8 +1735,7 @@ static int ignore_request(struct wiphy *wiphy,
return -EAGAIN;
}
- if (!is_old_static_regdom(cfg80211_regdomain) &&
- !regdom_changes(pending_request->alpha2))
+ if (!regdom_changes(pending_request->alpha2))
return -EALREADY;
return 0;
@@ -1529,6 +1807,11 @@ new_request:
pending_request = NULL;
+ if (last_request->initiator == NL80211_REGDOM_SET_BY_USER) {
+ user_alpha2[0] = last_request->alpha2[0];
+ user_alpha2[1] = last_request->alpha2[1];
+ }
+
/* When r == REG_INTERSECT we do need to call CRDA */
if (r < 0) {
/*
@@ -1648,12 +1931,16 @@ static void queue_regulatory_request(struct regulatory_request *request)
schedule_work(&reg_work);
}
-/* Core regulatory hint -- happens once during cfg80211_init() */
+/*
+ * Core regulatory hint -- happens during cfg80211_init()
+ * and when we restore regulatory settings.
+ */
static int regulatory_hint_core(const char *alpha2)
{
struct regulatory_request *request;
- BUG_ON(last_request);
+ kfree(last_request);
+ last_request = NULL;
request = kzalloc(sizeof(struct regulatory_request),
GFP_KERNEL);
@@ -1664,14 +1951,12 @@ static int regulatory_hint_core(const char *alpha2)
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
- queue_regulatory_request(request);
-
/*
* This ensures last_request is populated once modules
* come swinging in and calling regulatory hints and
* wiphy_apply_custom_regulatory().
*/
- flush_scheduled_work();
+ reg_process_hint(request);
return 0;
}
@@ -1758,8 +2043,9 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
* therefore cannot iterate over the rdev list here.
*/
void regulatory_hint_11d(struct wiphy *wiphy,
- u8 *country_ie,
- u8 country_ie_len)
+ enum ieee80211_band band,
+ u8 *country_ie,
+ u8 country_ie_len)
{
struct ieee80211_regdomain *rd = NULL;
char alpha2[2];
@@ -1805,9 +2091,11 @@ void regulatory_hint_11d(struct wiphy *wiphy,
wiphy_idx_valid(last_request->wiphy_idx)))
goto out;
- rd = country_ie_2_rd(country_ie, country_ie_len, &checksum);
- if (!rd)
+ rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
+ if (!rd) {
+ REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
goto out;
+ }
/*
* This will not happen right now but we leave it here for the
@@ -1850,6 +2138,123 @@ out:
mutex_unlock(&reg_mutex);
}
+static void restore_alpha2(char *alpha2, bool reset_user)
+{
+ /* indicates there is no alpha2 to consider for restoration */
+ alpha2[0] = '9';
+ alpha2[1] = '7';
+
+ /* The user setting has precedence over the module parameter */
+ if (is_user_regdom_saved()) {
+ /* Unless we're asked to ignore it and reset it */
+ if (reset_user) {
+ REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+ "including user preference\n");
+ user_alpha2[0] = '9';
+ user_alpha2[1] = '7';
+
+ /*
+ * If we're ignoring user settings, we still need to
+ * check the module parameter to ensure we put things
+ * back as they were for a full restore.
+ */
+ if (!is_world_regdom(ieee80211_regdom)) {
+ REG_DBG_PRINT("cfg80211: Keeping preference on "
+ "module parameter ieee80211_regdom: %c%c\n",
+ ieee80211_regdom[0],
+ ieee80211_regdom[1]);
+ alpha2[0] = ieee80211_regdom[0];
+ alpha2[1] = ieee80211_regdom[1];
+ }
+ } else {
+ REG_DBG_PRINT("cfg80211: Restoring regulatory settings "
+ "while preserving user preference for: %c%c\n",
+ user_alpha2[0],
+ user_alpha2[1]);
+ alpha2[0] = user_alpha2[0];
+ alpha2[1] = user_alpha2[1];
+ }
+ } else if (!is_world_regdom(ieee80211_regdom)) {
+ REG_DBG_PRINT("cfg80211: Keeping preference on "
+ "module parameter ieee80211_regdom: %c%c\n",
+ ieee80211_regdom[0],
+ ieee80211_regdom[1]);
+ alpha2[0] = ieee80211_regdom[0];
+ alpha2[1] = ieee80211_regdom[1];
+ } else
+ REG_DBG_PRINT("cfg80211: Restoring regulatory settings\n");
+}
+
+/*
+ * Restoring regulatory settings involves ignoring any
+ * possibly stale country IE information and user regulatory
+ * settings if so desired, this includes any beacon hints
+ * learned as we could have traveled outside to another country
+ * after disconnection. To restore regulatory settings we do
+ * exactly what we did at bootup:
+ *
+ * - send a core regulatory hint
+ * - send a user regulatory hint if applicable
+ *
+ * Device drivers that send a regulatory hint for a specific country
+ * keep their own regulatory domain on wiphy->regd so that it does
+ * not need to be remembered.
+ */
+static void restore_regulatory_settings(bool reset_user)
+{
+ char alpha2[2];
+ struct reg_beacon *reg_beacon, *btmp;
+
+ mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
+
+ reset_regdomains();
+ restore_alpha2(alpha2, reset_user);
+
+ /* Clear beacon hints */
+ spin_lock_bh(&reg_pending_beacons_lock);
+ if (!list_empty(&reg_pending_beacons)) {
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_pending_beacons, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+ }
+ spin_unlock_bh(&reg_pending_beacons_lock);
+
+ if (!list_empty(&reg_beacon_list)) {
+ list_for_each_entry_safe(reg_beacon, btmp,
+ &reg_beacon_list, list) {
+ list_del(&reg_beacon->list);
+ kfree(reg_beacon);
+ }
+ }
+
+ /* First restore to the basic regulatory settings */
+ cfg80211_regdomain = cfg80211_world_regdom;
+
+ mutex_unlock(&reg_mutex);
+ mutex_unlock(&cfg80211_mutex);
+
+ regulatory_hint_core(cfg80211_regdomain->alpha2);
+
+ /*
+ * This restores the ieee80211_regdom module parameter
+ * preference or the last user requested regulatory
+ * settings, user regulatory settings takes precedence.
+ */
+ if (is_an_alpha2(alpha2))
+ regulatory_hint_user(user_alpha2);
+}
+
+void regulatory_hint_disconnect(void)
+{
+ REG_DBG_PRINT("cfg80211: All devices are disconnected, going to "
+ "restore regulatory settings\n");
+ restore_regulatory_settings(false);
+}
+
static bool freq_is_chan_12_13_14(u16 freq)
{
if (freq == ieee80211_channel_to_frequency(12) ||
@@ -1875,13 +2280,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
if (!reg_beacon)
return -ENOMEM;
-#ifdef CONFIG_CFG80211_REG_DEBUG
- printk(KERN_DEBUG "cfg80211: Found new beacon on "
- "frequency: %d MHz (Ch %d) on %s\n",
- beacon_chan->center_freq,
- ieee80211_frequency_to_channel(beacon_chan->center_freq),
- wiphy_name(wiphy));
-#endif
+ REG_DBG_PRINT("cfg80211: Found new beacon on "
+ "frequency: %d MHz (Ch %d) on %s\n",
+ beacon_chan->center_freq,
+ ieee80211_frequency_to_channel(beacon_chan->center_freq),
+ wiphy_name(wiphy));
+
memcpy(&reg_beacon->chan, beacon_chan,
sizeof(struct ieee80211_channel));
@@ -2039,8 +2443,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
* If someone else asked us to change the rd lets only bother
* checking if the alpha2 changes if CRDA was already called
*/
- if (!is_old_static_regdom(cfg80211_regdomain) &&
- !regdom_changes(rd->alpha2))
+ if (!regdom_changes(rd->alpha2))
return -EINVAL;
}
@@ -2239,15 +2642,11 @@ int regulatory_init(void)
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
- cfg80211_regdomain = static_regdom(ieee80211_regdom);
-
- printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
- print_regdomain_info(cfg80211_regdomain);
-#else
cfg80211_regdomain = cfg80211_world_regdom;
-#endif
+ user_alpha2[0] = '9';
+ user_alpha2[1] = '7';
+
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_regdomain->alpha2);
if (err) {
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 3362c7c069b..b26224a9f3b 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -41,15 +41,44 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
* regulatory_hint_11d - hints a country IE as a regulatory domain
* @wiphy: the wireless device giving the hint (used only for reporting
* conflicts)
+ * @band: the band on which the country IE was received on. This determines
+ * the band we'll process the country IE channel triplets for.
* @country_ie: pointer to the country IE
* @country_ie_len: length of the country IE
*
* We will intersect the rd with what CRDA tells us should apply
* for the alpha2 this country IE belongs to, this prevents APs from
* sending us incorrect or outdated information against a country.
+ *
+ * The AP is expected to provide Country IE channel triplets for the
+ * band it is on. It is technically possible for APs to send country
+ * IE channel triplets even for channels outside of the band they are
+ * in, but for that they would have to use the regulatory extension
+ * in combination with a triplet; this behaviour is currently not
+ * observed. For this reason, if a triplet is seen with channel
+ * information for a band the BSS is not present on, it will be ignored.
*/
void regulatory_hint_11d(struct wiphy *wiphy,
+ enum ieee80211_band band,
u8 *country_ie,
u8 country_ie_len);
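A sketch of the call-site shape this declaration expects, mirroring what sme.c does later in this patch; here 'ies'/'ies_len' are assumed to describe the BSS information elements and 'bss' the BSS we associated to:

	const u8 *country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, ies, ies_len);

	if (country_ie && country_ie[1] >= 2)
		regulatory_hint_11d(wiphy, bss->channel->band,
				    (u8 *)country_ie + 2, country_ie[1]);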
+/**
+ * regulatory_hint_disconnect - informs that all devices have been disconnected
+ *
+ * Regulatory rules can be enhanced further upon scanning and upon
+ * connection to an AP. These rules become stale if we disconnect
+ * and go to another country, whether or not we suspend and resume.
+ * If we suspend, go to another country and resume, we'll automatically
+ * get disconnected shortly after resuming and things will be reset as well.
+ * This routine is a helper to restore regulatory settings to how they were
+ * prior to our first connect attempt. This includes ignoring country IE and
+ * beacon regulatory hints. The ieee80211_regdom module parameter will always
+ * be respected but if a user had set the regulatory domain that will take
+ * precedence.
+ *
+ * Must be called from process context.
+ */
+void regulatory_hint_disconnect(void);
+
#endif /* __NET_WIRELESS_REG_H */
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 00000000000..818222c9251
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
+#ifndef __REGDB_H__
+#define __REGDB_H__
+
+extern const struct ieee80211_regdomain *reg_regdb[];
+extern int reg_regdb_size;
+
+#endif /* __REGDB_H__ */
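The header above only declares the table; the matching net/wireless/regdb.c is generated at build time from a regulatory database. A hypothetical excerpt of what such a generated file could look like (entries and values are illustrative only):

/* Hypothetical generated net/wireless/regdb.c excerpt -- real entries
 * are produced from the regulatory database at build time. */
#include <linux/kernel.h>
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include "regdb.h"

static const struct ieee80211_regdomain regdom_00 = {
	.alpha2 = "00",
	.n_reg_rules = 2,
	.reg_rules = {
		/* 2.4 GHz */
		REG_RULE(2402, 2472, 40, 6, 20, 0),
		/* 5 GHz, DFS required */
		REG_RULE(5170, 5250, 40, 6, 20, NL80211_RRF_DFS),
	},
};

const struct ieee80211_regdomain *reg_regdb[] = {
	&regdom_00,
};

int reg_regdb_size = ARRAY_SIZE(reg_regdb);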
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0c2cbbebca9..978cac3414b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,8 +100,10 @@ static void bss_release(struct kref *ref)
if (bss->pub.free_priv)
bss->pub.free_priv(&bss->pub);
- if (bss->ies_allocated)
- kfree(bss->pub.information_elements);
+ if (bss->beacon_ies_allocated)
+ kfree(bss->pub.beacon_ies);
+ if (bss->proberesp_ies_allocated)
+ kfree(bss->pub.proberesp_ies);
BUG_ON(atomic_read(&bss->hold));
@@ -141,9 +143,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
dev->bss_generation++;
}
-static u8 *find_ie(u8 num, u8 *ies, int len)
+const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
{
- while (len > 2 && ies[0] != num) {
+ while (len > 2 && ies[0] != eid) {
len -= ies[1] + 2;
ies += ies[1] + 2;
}
@@ -153,11 +155,12 @@ static u8 *find_ie(u8 num, u8 *ies, int len)
return NULL;
return ies;
}
+EXPORT_SYMBOL(cfg80211_find_ie);
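cfg80211_find_ie() walks the TLV-encoded IE buffer two bytes (ID, length) plus payload at a time and returns a pointer to the start of the matching element, or NULL. A small usage sketch (the printk is illustrative):

	const u8 *ssid_ie;

	ssid_ie = cfg80211_find_ie(WLAN_EID_SSID,
				   bss->information_elements,
				   bss->len_information_elements);
	if (ssid_ie)
		/* ssid_ie[0] is the element ID, ssid_ie[1] the payload
		 * length, the SSID bytes start at ssid_ie + 2 */
		printk(KERN_DEBUG "SSID length: %u\n", ssid_ie[1]);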
static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
{
- const u8 *ie1 = find_ie(num, ies1, len1);
- const u8 *ie2 = find_ie(num, ies2, len2);
+ const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
+ const u8 *ie2 = cfg80211_find_ie(num, ies2, len2);
int r;
if (!ie1 && !ie2)
@@ -183,9 +186,9 @@ static bool is_bss(struct cfg80211_bss *a,
if (!ssid)
return true;
- ssidie = find_ie(WLAN_EID_SSID,
- a->information_elements,
- a->len_information_elements);
+ ssidie = cfg80211_find_ie(WLAN_EID_SSID,
+ a->information_elements,
+ a->len_information_elements);
if (!ssidie)
return false;
if (ssidie[1] != ssid_len)
@@ -202,9 +205,9 @@ static bool is_mesh(struct cfg80211_bss *a,
if (!is_zero_ether_addr(a->bssid))
return false;
- ie = find_ie(WLAN_EID_MESH_ID,
- a->information_elements,
- a->len_information_elements);
+ ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
+ a->information_elements,
+ a->len_information_elements);
if (!ie)
return false;
if (ie[1] != meshidlen)
@@ -212,9 +215,9 @@ static bool is_mesh(struct cfg80211_bss *a,
if (memcmp(ie + 2, meshid, meshidlen))
return false;
- ie = find_ie(WLAN_EID_MESH_CONFIG,
- a->information_elements,
- a->len_information_elements);
+ ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+ a->information_elements,
+ a->len_information_elements);
if (!ie)
return false;
if (ie[1] != sizeof(struct ieee80211_meshconf_ie))
@@ -375,8 +378,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *dev,
- struct cfg80211_internal_bss *res,
- bool overwrite)
+ struct cfg80211_internal_bss *res)
{
struct cfg80211_internal_bss *found = NULL;
const u8 *meshid, *meshcfg;
@@ -394,11 +396,12 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
if (is_zero_ether_addr(res->pub.bssid)) {
/* must be mesh, verify */
- meshid = find_ie(WLAN_EID_MESH_ID, res->pub.information_elements,
- res->pub.len_information_elements);
- meshcfg = find_ie(WLAN_EID_MESH_CONFIG,
- res->pub.information_elements,
- res->pub.len_information_elements);
+ meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
+ res->pub.information_elements,
+ res->pub.len_information_elements);
+ meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+ res->pub.information_elements,
+ res->pub.len_information_elements);
if (!meshid || !meshcfg ||
meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
/* bogus mesh */
@@ -418,28 +421,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found->pub.capability = res->pub.capability;
found->ts = res->ts;
- /* overwrite IEs */
- if (overwrite) {
+ /* Update IEs */
+ if (res->pub.proberesp_ies) {
size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
- size_t ielen = res->pub.len_information_elements;
+ size_t ielen = res->pub.len_proberesp_ies;
+
+ if (found->pub.proberesp_ies &&
+ !found->proberesp_ies_allocated &&
+ ksize(found) >= used + ielen) {
+ memcpy(found->pub.proberesp_ies,
+ res->pub.proberesp_ies, ielen);
+ found->pub.len_proberesp_ies = ielen;
+ } else {
+ u8 *ies = found->pub.proberesp_ies;
+
+ if (found->proberesp_ies_allocated)
+ ies = krealloc(ies, ielen, GFP_ATOMIC);
+ else
+ ies = kmalloc(ielen, GFP_ATOMIC);
+
+ if (ies) {
+ memcpy(ies, res->pub.proberesp_ies,
+ ielen);
+ found->proberesp_ies_allocated = true;
+ found->pub.proberesp_ies = ies;
+ found->pub.len_proberesp_ies = ielen;
+ }
+ }
- if (!found->ies_allocated && ksize(found) >= used + ielen) {
- memcpy(found->pub.information_elements,
- res->pub.information_elements, ielen);
- found->pub.len_information_elements = ielen;
+ /* Override possible earlier Beacon frame IEs */
+ found->pub.information_elements =
+ found->pub.proberesp_ies;
+ found->pub.len_information_elements =
+ found->pub.len_proberesp_ies;
+ }
+ if (res->pub.beacon_ies) {
+ size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
+ size_t ielen = res->pub.len_beacon_ies;
+
+ if (found->pub.beacon_ies &&
+ !found->beacon_ies_allocated &&
+ ksize(found) >= used + ielen) {
+ memcpy(found->pub.beacon_ies,
+ res->pub.beacon_ies, ielen);
+ found->pub.len_beacon_ies = ielen;
} else {
- u8 *ies = found->pub.information_elements;
+ u8 *ies = found->pub.beacon_ies;
- if (found->ies_allocated)
+ if (found->beacon_ies_allocated)
ies = krealloc(ies, ielen, GFP_ATOMIC);
else
ies = kmalloc(ielen, GFP_ATOMIC);
if (ies) {
- memcpy(ies, res->pub.information_elements, ielen);
- found->ies_allocated = true;
- found->pub.information_elements = ies;
- found->pub.len_information_elements = ielen;
+ memcpy(ies, res->pub.beacon_ies,
+ ielen);
+ found->beacon_ies_allocated = true;
+ found->pub.beacon_ies = ies;
+ found->pub.len_beacon_ies = ielen;
}
}
}
@@ -489,14 +528,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
res->pub.tsf = timestamp;
res->pub.beacon_interval = beacon_interval;
res->pub.capability = capability;
- /* point to after the private area */
- res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
- memcpy(res->pub.information_elements, ie, ielen);
- res->pub.len_information_elements = ielen;
+ /*
+ * Since we do not know here whether the IEs are from a Beacon or Probe
+ * Response frame, we need to pick one of the options and only use it
+ * with the driver that does not provide the full Beacon/Probe Response
+ * frame. Use Beacon frame pointer to avoid indicating that this should
+ * override the information_elements pointer should we have received an
+ * earlier indication of Probe Response data.
+ *
+ * The initial buffer for the IEs is allocated with the BSS entry and
+ * is located after the private area.
+ */
+ res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
+ memcpy(res->pub.beacon_ies, ie, ielen);
+ res->pub.len_beacon_ies = ielen;
+ res->pub.information_elements = res->pub.beacon_ies;
+ res->pub.len_information_elements = res->pub.len_beacon_ies;
kref_init(&res->ref);
- res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0);
+ res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
if (!res)
return NULL;
@@ -517,7 +568,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct cfg80211_internal_bss *res;
size_t ielen = len - offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
- bool overwrite;
size_t privsz = wiphy->bss_priv_size;
if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +588,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
- /* point to after the private area */
- res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
- memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen);
- res->pub.len_information_elements = ielen;
+ /*
+ * The initial buffer for the IEs is allocated with the BSS entry and
+ * is located after the private area.
+ */
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
+ memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
+ ielen);
+ res->pub.len_proberesp_ies = ielen;
+ res->pub.information_elements = res->pub.proberesp_ies;
+ res->pub.len_information_elements = res->pub.len_proberesp_ies;
+ } else {
+ res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
+ memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
+ res->pub.len_beacon_ies = ielen;
+ res->pub.information_elements = res->pub.beacon_ies;
+ res->pub.len_information_elements = res->pub.len_beacon_ies;
+ }
kref_init(&res->ref);
- overwrite = ieee80211_is_probe_resp(mgmt->frame_control);
-
- res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
+ res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
if (!res)
return NULL;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dc0fc4989d5..17fde0da1b0 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,6 +34,44 @@ struct cfg80211_conn {
bool auto_auth, prev_bssid_valid;
};
+bool cfg80211_is_all_idle(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ bool is_all_idle = true;
+
+ mutex_lock(&cfg80211_mutex);
+
+ /*
+ * All devices must be idle as otherwise if you are actively
+ * scanning some new beacon hints could be learned and would
+ * count as new regulatory hints.
+ */
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ cfg80211_lock_rdev(rdev);
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ wdev_lock(wdev);
+ if (wdev->sme_state != CFG80211_SME_IDLE)
+ is_all_idle = false;
+ wdev_unlock(wdev);
+ }
+ cfg80211_unlock_rdev(rdev);
+ }
+
+ mutex_unlock(&cfg80211_mutex);
+
+ return is_all_idle;
+}
+
+static void disconnect_work(struct work_struct *work)
+{
+ if (!cfg80211_is_all_idle())
+ return;
+
+ regulatory_hint_disconnect();
+}
+
+static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
static int cfg80211_conn_scan(struct wireless_dev *wdev)
{
@@ -454,6 +492,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* - and country_ie[1] which is the IE length
*/
regulatory_hint_11d(wdev->wiphy,
+ bss->channel->band,
country_ie + 2,
country_ie[1]);
}
@@ -657,6 +696,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
wdev->wext.connect.ssid_len = 0;
#endif
+
+ schedule_work(&cfg80211_disconnect_work);
}
void cfg80211_disconnected(struct net_device *dev, u16 reason,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index efe3c5c92b2..9f2cef3e0ca 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -33,10 +33,30 @@ static ssize_t name ## _show(struct device *dev, \
SHOW_FMT(index, "%d", wiphy_idx);
SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
+SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
+
+static ssize_t addresses_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
+ char *start = buf;
+ int i;
+
+ if (!wiphy->addresses)
+ return sprintf(buf, "%pM\n", wiphy->perm_addr);
+
+ for (i = 0; i < wiphy->n_addresses; i++)
+ buf += sprintf(buf, "%pM\n", &wiphy->addresses[i].addr);
+
+ return buf - start;
+}
static struct device_attribute ieee80211_dev_attrs[] = {
__ATTR_RO(index),
__ATTR_RO(macaddress),
+ __ATTR_RO(address_mask),
+ __ATTR_RO(addresses),
{}
};
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d..be2ab8c59e3 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -227,8 +227,11 @@ unsigned int ieee80211_hdrlen(__le16 fc)
if (ieee80211_is_data(fc)) {
if (ieee80211_has_a4(fc))
hdrlen = 30;
- if (ieee80211_is_data_qos(fc))
+ if (ieee80211_is_data_qos(fc)) {
hdrlen += IEEE80211_QOS_CTL_LEN;
+ if (ieee80211_has_order(fc))
+ hdrlen += IEEE80211_HT_CTL_LEN;
+ }
goto out;
}
@@ -285,7 +288,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
}
-int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +386,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
}
EXPORT_SYMBOL(ieee80211_data_to_8023);
-int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype, u8 *bssid, bool qos)
{
struct ieee80211_hdr hdr;
@@ -497,6 +500,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
}
EXPORT_SYMBOL(ieee80211_data_from_8023);
+
+void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ const u8 *addr, enum nl80211_iftype iftype,
+ const unsigned int extra_headroom)
+{
+ struct sk_buff *frame = NULL;
+ u16 ethertype;
+ u8 *payload;
+ const struct ethhdr *eth;
+ int remaining, err;
+ u8 dst[ETH_ALEN], src[ETH_ALEN];
+
+ err = ieee80211_data_to_8023(skb, addr, iftype);
+ if (err)
+ goto out;
+
+ /* skip the wrapping header */
+ eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
+ if (!eth)
+ goto out;
+
+ while (skb != frame) {
+ u8 padding;
+ __be16 len = eth->h_proto;
+ unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
+
+ remaining = skb->len;
+ memcpy(dst, eth->h_dest, ETH_ALEN);
+ memcpy(src, eth->h_source, ETH_ALEN);
+
+ padding = (4 - subframe_len) & 0x3;
+ /* the last MSDU has no padding */
+ if (subframe_len > remaining)
+ goto purge;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ /* reuse skb for the last subframe */
+ if (remaining <= subframe_len + padding)
+ frame = skb;
+ else {
+ unsigned int hlen = ALIGN(extra_headroom, 4);
+ /*
+ * Allocate and reserve two bytes more for payload
+ * alignment since sizeof(struct ethhdr) is 14.
+ */
+ frame = dev_alloc_skb(hlen + subframe_len + 2);
+ if (!frame)
+ goto purge;
+
+ skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+ memcpy(skb_put(frame, ntohs(len)), skb->data,
+ ntohs(len));
+
+ eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
+ padding);
+ if (!eth) {
+ dev_kfree_skb(frame);
+ goto purge;
+ }
+ }
+
+ skb_reset_network_header(frame);
+ frame->dev = skb->dev;
+ frame->priority = skb->priority;
+
+ payload = frame->data;
+ ethertype = (payload[6] << 8) | payload[7];
+
+ if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+ compare_ether_addr(payload,
+ bridge_tunnel_header) == 0)) {
+ /* remove RFC1042 or Bridge-Tunnel
+ * encapsulation and replace EtherType */
+ skb_pull(frame, 6);
+ memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+ } else {
+ memcpy(skb_push(frame, sizeof(__be16)), &len,
+ sizeof(__be16));
+ memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+ }
+ __skb_queue_tail(list, frame);
+ }
+
+ return;
+
+ purge:
+ __skb_queue_purge(list);
+ out:
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
+
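A sketch of how a driver RX path might use the new A-MSDU helper; 'dev' and 'skb' are assumed to come from the driver. The helper always consumes skb and leaves the list empty if the A-MSDU turns out to be malformed:

	struct sk_buff_head frames;
	struct sk_buff *frame;

	__skb_queue_head_init(&frames);

	ieee80211_amsdu_to_8023s(skb, &frames, dev->dev_addr,
				 NL80211_IFTYPE_STATION, 0);

	while ((frame = __skb_dequeue(&frames)) != NULL) {
		frame->protocol = eth_type_trans(frame, dev);
		netif_rx(frame);
	}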
/* Given a data frame determine the 802.1p/1d tag to use. */
unsigned int cfg80211_classify8021d(struct sk_buff *skb)
{
@@ -720,3 +818,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return err;
}
+
+u16 cfg80211_calculate_bitrate(struct rate_info *rate)
+{
+ int modulation, streams, bitrate;
+
+ if (!(rate->flags & RATE_INFO_FLAGS_MCS))
+ return rate->legacy;
+
+ /* the formula below does only work for MCS values smaller than 32 */
+ if (rate->mcs >= 32)
+ return 0;
+
+ modulation = rate->mcs & 7;
+ streams = (rate->mcs >> 3) + 1;
+
+ bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
+ 13500000 : 6500000;
+
+ if (modulation < 4)
+ bitrate *= (modulation + 1);
+ else if (modulation == 4)
+ bitrate *= (modulation + 2);
+ else
+ bitrate *= (modulation + 3);
+
+ bitrate *= streams;
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ bitrate = (bitrate / 9) * 10;
+
+ /* do NOT round down here */
+ return (bitrate + 50000) / 100000;
+}
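A worked example of the formula above for a common case, MCS 7 on a 40 MHz channel with a short guard interval (the return value is in units of 100 kbit/s):

	struct rate_info ri = {
		.flags = RATE_INFO_FLAGS_MCS |
			 RATE_INFO_FLAGS_40_MHZ_WIDTH |
			 RATE_INFO_FLAGS_SHORT_GI,
		.mcs = 7,
	};

	/*
	 * modulation = 7 & 7 = 7, streams = (7 >> 3) + 1 = 1
	 * base = 13500000 (40 MHz), 13500000 * (7 + 3) = 135000000
	 * short GI: 135000000 / 9 * 10 = 150000000
	 * (150000000 + 50000) / 100000 = 1500  -> 150.0 Mb/s
	 */
	u16 rate = cfg80211_calculate_bitrate(&ri);	/* rate == 1500 */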
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d442..9ab51838849 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1099,8 +1099,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
- bool ps = wdev->wext.ps;
- int timeout = wdev->wext.ps_timeout;
+ bool ps = wdev->ps;
+ int timeout = wdev->ps_timeout;
int err;
if (wdev->iftype != NL80211_IFTYPE_STATION)
@@ -1133,8 +1133,8 @@ int cfg80211_wext_siwpower(struct net_device *dev,
if (err)
return err;
- wdev->wext.ps = ps;
- wdev->wext.ps_timeout = timeout;
+ wdev->ps = ps;
+ wdev->ps_timeout = timeout;
return 0;
@@ -1147,7 +1147,7 @@ int cfg80211_wext_giwpower(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- wrq->disabled = !wdev->wext.ps;
+ wrq->disabled = !wdev->ps;
return 0;
}
@@ -1204,21 +1204,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_bitrate_mask mask;
+ u32 fixed, maxrate;
+ struct ieee80211_supported_band *sband;
+ int band, ridx;
+ bool match = false;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
- mask.fixed = 0;
- mask.maxrate = 0;
+ memset(&mask, 0, sizeof(mask));
+ fixed = 0;
+ maxrate = (u32)-1;
if (rate->value < 0) {
/* nothing */
} else if (rate->fixed) {
- mask.fixed = rate->value / 1000; /* kbps */
+ fixed = rate->value / 100000;
} else {
- mask.maxrate = rate->value / 1000; /* kbps */
+ maxrate = rate->value / 100000;
}
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = wdev->wiphy->bands[band];
+ if (sband == NULL)
+ continue;
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate = &sband->bitrates[ridx];
+ if (fixed == srate->bitrate) {
+ mask.control[band].legacy = 1 << ridx;
+ match = true;
+ break;
+ }
+ if (srate->bitrate <= maxrate) {
+ mask.control[band].legacy |= 1 << ridx;
+ match = true;
+ }
+ }
+ }
+
+ if (!match)
+ return -EINVAL;
+
return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
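For reference on the unit handling above: WEXT passes rate->value in bit/s while ieee80211_rate.bitrate is in units of 100 kbit/s, hence the division by 100000. A worked example for a fixed 54 Mb/s request:

	/*
	 * e.g. "iwconfig wlan0 rate 54M fixed":
	 *	rate->value = 54000000 (bit/s), rate->fixed = 1
	 *	fixed = 54000000 / 100000 = 540
	 * which matches the 54 Mb/s entry in sband->bitrates
	 * (bitrate == 540), so mask.control[band].legacy ends up with
	 * only that rate's bit set.
	 */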
@@ -1257,10 +1283,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
return -EOPNOTSUPP;
- rate->value = 0;
-
- if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
- rate->value = 100000 * sinfo.txrate.legacy;
+ rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
return 0;
}
diff --git a/net/wireless/wext-proc.c b/net/wireless/wext-proc.c
index 273a7f77c83..8bafa31fa9f 100644
--- a/net/wireless/wext-proc.c
+++ b/net/wireless/wext-proc.c
@@ -140,7 +140,7 @@ static const struct file_operations wireless_seq_fops = {
.release = seq_release_net,
};
-int wext_proc_init(struct net *net)
+int __net_init wext_proc_init(struct net *net)
{
/* Create /proc/net/wireless entry */
if (!proc_net_fops_create(net, "wireless", S_IRUGO, &wireless_seq_fops))
@@ -149,7 +149,7 @@ int wext_proc_init(struct net *net)
return 0;
}
-void wext_proc_exit(struct net *net)
+void __net_exit wext_proc_exit(struct net *net)
{
proc_net_remove(net, "wireless");
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e3219e4cd04..9796f3ed1ed 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -55,6 +55,7 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>
+#include <linux/ctype.h>
#include <net/x25.h>
#include <net/compat.h>
@@ -512,15 +513,20 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
struct x25_sock *x25;
- int rc = -ESOCKTNOSUPPORT;
+ int rc = -EAFNOSUPPORT;
if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
+ goto out;
- if (sock->type != SOCK_SEQPACKET || protocol)
+ rc = -ESOCKTNOSUPPORT;
+ if (sock->type != SOCK_SEQPACKET)
goto out;
- rc = -ENOMEM;
+ rc = -EINVAL;
+ if (protocol)
+ goto out;
+
+ rc = -ENOBUFS;
if ((sk = x25_alloc_socket(net)) == NULL)
goto out;
@@ -643,7 +649,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
- int rc = 0;
+ int len, i, rc = 0;
lock_kernel();
if (!sock_flag(sk, SOCK_ZAPPED) ||
@@ -653,6 +659,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
+ len = strlen(addr->sx25_addr.x25_addr);
+ for (i = 0; i < len; i++) {
+ if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0a04e62e0e1..7ff37379232 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -25,49 +25,17 @@
#include <net/x25.h>
#ifdef CONFIG_PROC_FS
-static __inline__ struct x25_route *x25_get_route_idx(loff_t pos)
-{
- struct list_head *route_entry;
- struct x25_route *rt = NULL;
-
- list_for_each(route_entry, &x25_route_list) {
- rt = list_entry(route_entry, struct x25_route, node);
- if (!pos--)
- goto found;
- }
- rt = NULL;
-found:
- return rt;
-}
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_route_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_route_list_lock);
- return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&x25_route_list, *pos);
}
static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct x25_route *rt;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- rt = NULL;
- if (!list_empty(&x25_route_list))
- rt = list_entry(x25_route_list.next,
- struct x25_route, node);
- goto out;
- }
- rt = v;
- if (rt->node.next != &x25_route_list)
- rt = list_entry(rt->node.next, struct x25_route, node);
- else
- rt = NULL;
-out:
- return rt;
+ return seq_list_next(v, &x25_route_list, pos);
}
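The seq_list_start_head()/seq_list_next() helpers replace the hand-rolled index walking here; they return the list head itself as the header token for *pos == 0 and then iterate the entries. A standalone sketch of the pattern, with made-up names ('my_list', 'my_entry', 'my_lock'):

#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

struct my_entry {
	struct list_head node;
	int value;
};

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(my_lock)
{
	spin_lock(&my_lock);
	return seq_list_start_head(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
	__releases(my_lock)
{
	spin_unlock(&my_lock);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	struct my_entry *e;

	if (v == &my_list) {		/* header token for *pos == 0 */
		seq_puts(seq, "value\n");
		return 0;
	}

	e = list_entry(v, struct my_entry, node);
	seq_printf(seq, "%d\n", e->value);
	return 0;
}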
static void x25_seq_route_stop(struct seq_file *seq, void *v)
@@ -78,9 +46,9 @@ static void x25_seq_route_stop(struct seq_file *seq, void *v)
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
- struct x25_route *rt;
+ struct x25_route *rt = list_entry(v, struct x25_route, node);
- if (v == SEQ_START_TOKEN) {
+ if (v == &x25_route_list) {
seq_puts(seq, "Address Digits Device\n");
goto out;
}
@@ -93,40 +61,16 @@ out:
return 0;
}
-static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &x25_list)
- if (!pos--)
- goto found;
- s = NULL;
-found:
- return s;
-}
-
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_list_lock);
- return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN;
+ return seq_hlist_start_head(&x25_list, *pos);
}
static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct sock *s;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- s = sk_head(&x25_list);
- goto out;
- }
- s = sk_next(v);
-out:
- return s;
+ return seq_hlist_next(v, &x25_list, pos);
}
static void x25_seq_socket_stop(struct seq_file *seq, void *v)
@@ -148,7 +92,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
goto out;
}
- s = v;
+ s = sk_entry(v);
x25 = x25_sk(s);
if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
@@ -170,51 +114,16 @@ out:
return 0;
}
-static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
-{
- struct x25_forward *f;
- struct list_head *entry;
-
- list_for_each(entry, &x25_forward_list) {
- f = list_entry(entry, struct x25_forward, node);
- if (!pos--)
- goto found;
- }
-
- f = NULL;
-found:
- return f;
-}
-
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_forward_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_forward_list_lock);
- return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&x25_forward_list, *pos);
}
static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct x25_forward *f;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- f = NULL;
- if (!list_empty(&x25_forward_list))
- f = list_entry(x25_forward_list.next,
- struct x25_forward, node);
- goto out;
- }
- f = v;
- if (f->node.next != &x25_forward_list)
- f = list_entry(f->node.next, struct x25_forward, node);
- else
- f = NULL;
-out:
- return f;
-
+ return seq_list_next(v, &x25_forward_list, pos);
}
static void x25_seq_forward_stop(struct seq_file *seq, void *v)
@@ -225,9 +134,9 @@ static void x25_seq_forward_stop(struct seq_file *seq, void *v)
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
- struct x25_forward *f;
+ struct x25_forward *f = list_entry(v, struct x25_forward, node);
- if (v == SEQ_START_TOKEN) {
+ if (v == &x25_forward_list) {
seq_printf(seq, "lci dev1 dev2\n");
goto out;
}
@@ -236,7 +145,6 @@ static int x25_seq_forward_show(struct seq_file *seq, void *v)
seq_printf(seq, "%d %-10s %-10s\n",
f->lci, f->dev1->name, f->dev2->name);
-
out:
return 0;
}
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 743c0134a6a..8b4d6e3246e 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -125,6 +125,22 @@ static struct xfrm_algo_desc aead_list[] = {
.sadb_alg_maxbits = 256
}
},
+{
+ .name = "rfc4543(gcm(aes))",
+
+ .uinfo = {
+ .aead = {
+ .icv_truncbits = 128,
+ }
+ },
+
+ .desc = {
+ .sadb_alg_id = SADB_X_EALG_NULL_AES_GMAC,
+ .sadb_alg_ivlen = 8,
+ .sadb_alg_minbits = 128,
+ .sadb_alg_maxbits = 256
+ }
+},
};
static struct xfrm_algo_desc aalg_list[] = {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index e0009c17d80..45f1c98d4fc 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -152,7 +152,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
goto drop;
}
- x = xfrm_state_lookup(net, daddr, spi, nexthdr, family);
+ x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
if (x == NULL) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
xfrm_audit_state_notfound(skb, family, spi, seq);
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index 42cd18391f4..0fc5ff66d1f 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -30,12 +30,12 @@
struct ipcomp_tfms {
struct list_head list;
- struct crypto_comp **tfms;
+ struct crypto_comp * __percpu *tfms;
int users;
};
static DEFINE_MUTEX(ipcomp_resource_mutex);
-static void **ipcomp_scratches;
+static void * __percpu *ipcomp_scratches;
static int ipcomp_scratch_users;
static LIST_HEAD(ipcomp_tfms_list);
@@ -200,7 +200,7 @@ EXPORT_SYMBOL_GPL(ipcomp_output);
static void ipcomp_free_scratches(void)
{
int i;
- void **scratches;
+ void * __percpu *scratches;
if (--ipcomp_scratch_users)
return;
@@ -215,10 +215,10 @@ static void ipcomp_free_scratches(void)
free_percpu(scratches);
}
-static void **ipcomp_alloc_scratches(void)
+static void * __percpu *ipcomp_alloc_scratches(void)
{
int i;
- void **scratches;
+ void * __percpu *scratches;
if (ipcomp_scratch_users++)
return ipcomp_scratches;
@@ -239,7 +239,7 @@ static void **ipcomp_alloc_scratches(void)
return scratches;
}
-static void ipcomp_free_tfms(struct crypto_comp **tfms)
+static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
{
struct ipcomp_tfms *pos;
int cpu;
@@ -267,10 +267,10 @@ static void ipcomp_free_tfms(struct crypto_comp **tfms)
free_percpu(tfms);
}
-static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
+static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
{
struct ipcomp_tfms *pos;
- struct crypto_comp **tfms;
+ struct crypto_comp * __percpu *tfms;
int cpu;
/* This can be any valid CPU ID so we don't need locking. */
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0ecb16a9a88..34a5ef8316e 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -556,6 +556,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
struct hlist_head *chain;
struct hlist_node *entry, *newpos;
struct dst_entry *gc_list;
+ u32 mark = policy->mark.v & policy->mark.m;
write_lock_bh(&xfrm_policy_lock);
chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
@@ -564,6 +565,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
hlist_for_each_entry(pol, entry, chain, bydst) {
if (pol->type == policy->type &&
!selector_cmp(&pol->selector, &policy->selector) &&
+ (mark & pol->mark.m) == pol->mark.v &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
if (excl) {
@@ -635,8 +637,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
}
EXPORT_SYMBOL(xfrm_policy_insert);
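The mark check added above implements a value/mask match: a policy only applies when the flow's mark, masked with the policy's mask, equals the policy's value. A small illustration with hypothetical values:

/* Illustration of the mark/mask test used above; the values are
 * hypothetical. A policy with .mark.v = 0x1, .mark.m = 0xff matches
 * any flow whose mark has 0x01 in its low byte. */
static bool policy_mark_match(const struct xfrm_policy *pol, u32 fl_mark)
{
	return (fl_mark & pol->mark.m) == pol->mark.v;
	/*
	 *	(0x001 & 0xff) == 0x1  -> match
	 *	(0x101 & 0xff) == 0x1  -> match
	 *	(0x102 & 0xff) == 0x1  -> no match
	 */
}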
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
- struct xfrm_selector *sel,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
+ int dir, struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err)
{
@@ -650,6 +652,7 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
ret = NULL;
hlist_for_each_entry(pol, entry, chain, bydst) {
if (pol->type == type &&
+ (mark & pol->mark.m) == pol->mark.v &&
!selector_cmp(sel, &pol->selector) &&
xfrm_sec_ctx_match(ctx, pol->security)) {
xfrm_pol_hold(pol);
@@ -676,8 +679,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
- int delete, int *err)
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
+ int dir, u32 id, int delete, int *err)
{
struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
@@ -692,7 +695,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
chain = net->xfrm.policy_byidx + idx_hash(net, id);
ret = NULL;
hlist_for_each_entry(pol, entry, chain, byidx) {
- if (pol->type == type && pol->index == id) {
+ if (pol->type == type && pol->index == id &&
+ (mark & pol->mark.m) == pol->mark.v) {
xfrm_pol_hold(pol);
if (delete) {
*err = security_xfrm_policy_delete(
@@ -771,7 +775,8 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
- int dir, err = 0;
+ int dir, err = 0, cnt = 0;
+ struct xfrm_policy *dp;
write_lock_bh(&xfrm_policy_lock);
@@ -789,8 +794,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- __xfrm_policy_unlink(pol, dir);
+ dp = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
+ if (dp)
+ cnt++;
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
audit_info->sessionid,
@@ -809,8 +816,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
bydst) {
if (pol->type != type)
continue;
- __xfrm_policy_unlink(pol, dir);
+ dp = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
+ if (dp)
+ cnt++;
xfrm_audit_policy_delete(pol, 1,
audit_info->loginuid,
@@ -824,6 +833,8 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
}
}
+ if (!cnt)
+ err = -ESRCH;
atomic_inc(&flow_cache_genid);
out:
write_unlock_bh(&xfrm_policy_lock);
@@ -909,6 +920,7 @@ static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
int match, ret = -ESRCH;
if (pol->family != family ||
+ (fl->mark & pol->mark.m) != pol->mark.v ||
pol->type != type)
return ret;
@@ -1033,6 +1045,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
int err = 0;
if (match) {
+ if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
+ pol = NULL;
+ goto out;
+ }
err = security_xfrm_policy_lookup(pol->security,
fl->secid,
policy_to_flow_dir(dir));
@@ -1045,6 +1061,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struc
} else
pol = NULL;
}
+out:
read_unlock_bh(&xfrm_policy_lock);
return pol;
}
@@ -1137,6 +1154,7 @@ static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
}
newp->lft = old->lft;
newp->curlft = old->curlft;
+ newp->mark = old->mark;
newp->action = old->action;
newp->flags = old->flags;
newp->xfrm_nr = old->xfrm_nr;
@@ -2045,8 +2063,7 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
int res;
if (xfrm_decode_session(skb, &fl, family) < 0) {
- /* XXX: we should have something like FWDHDRERROR here. */
- XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
return 0;
}
@@ -2421,19 +2438,19 @@ static int __net_init xfrm_statistics_init(struct net *net)
{
int rv;
- if (snmp_mib_init((void **)net->mib.xfrm_statistics,
+ if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
sizeof(struct linux_xfrm_mib)) < 0)
return -ENOMEM;
rv = xfrm_proc_init(net);
if (rv < 0)
- snmp_mib_free((void **)net->mib.xfrm_statistics);
+ snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
return rv;
}
static void xfrm_statistics_fini(struct net *net)
{
xfrm_proc_fini(net);
- snmp_mib_free((void **)net->mib.xfrm_statistics);
+ snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
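
[editor's note] The (void **) to (void __percpu **) casts change nothing at run time: __percpu is a sparse annotation that is empty for ordinary compilers, and the casts only make the MIB pointers type-check against the per-cpu SNMP helpers. A sketch of how such an annotation is typically defined, modeled on the kernel's compiler.h (the attribute arguments are quoted from memory and may differ by version):

#include <stdio.h>

#ifdef __CHECKER__                 /* defined by the sparse checker */
# define __percpu __attribute__((noderef, address_space(3)))
#else
# define __percpu                  /* expands to nothing for gcc */
#endif

static unsigned long __percpu *mib;   /* annotated, never dereferenced here */

int main(void)
{
    printf("annotation adds no runtime cost: sizeof(mib) = %zu\n", sizeof(mib));
    return 0;
}
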
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index fef8db553e8..58d9ae00559 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -15,7 +15,7 @@
#include <net/snmp.h>
#include <net/xfrm.h>
-static struct snmp_mib xfrm_mib_list[] = {
+static const struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmInError", LINUX_MIB_XFRMINERROR),
SNMP_MIB_ITEM("XfrmInBufferError", LINUX_MIB_XFRMINBUFFERERROR),
SNMP_MIB_ITEM("XfrmInHdrError", LINUX_MIB_XFRMINHDRERROR),
@@ -41,6 +41,7 @@ static struct snmp_mib xfrm_mib_list[] = {
SNMP_MIB_ITEM("XfrmOutPolBlock", LINUX_MIB_XFRMOUTPOLBLOCK),
SNMP_MIB_ITEM("XfrmOutPolDead", LINUX_MIB_XFRMOUTPOLDEAD),
SNMP_MIB_ITEM("XfrmOutPolError", LINUX_MIB_XFRMOUTPOLERROR),
+ SNMP_MIB_ITEM("XfrmFwdHdrError", LINUX_MIB_XFRMFWDHDRERROR),
SNMP_MIB_SENTINEL
};
@@ -50,7 +51,8 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
int i;
for (i=0; xfrm_mib_list[i].name; i++)
seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
- snmp_fold_field((void **)net->mib.xfrm_statistics,
+ snmp_fold_field((void __percpu **)
+ net->mib.xfrm_statistics,
xfrm_mib_list[i].entry));
return 0;
}
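
[editor's note] snmp_fold_field() conceptually walks every CPU and sums that CPU's slot for one MIB entry, which is why the /proc code needs the __percpu-typed pointer above. The idea, stripped of the kernel's per-cpu machinery (NR_CPUS and the array layout are assumptions of this sketch):

#include <stdio.h>

#define NR_CPUS 4   /* assumption for the sketch */

/* One counter row per CPU; each CPU only ever increments its own row. */
static unsigned long mib[NR_CPUS][8];

static unsigned long fold_field(int entry)
{
    unsigned long sum = 0;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        sum += mib[cpu][entry];
    return sum;
}

int main(void)
{
    mib[0][2] = 3;
    mib[3][2] = 4;
    printf("entry 2 total = %lu\n", fold_field(2));  /* 7 */
    return 0;
}
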
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b36cc344474..17d5b96f2fc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -603,13 +603,14 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
- int i, err = 0;
+ int i, err = 0, cnt = 0;
spin_lock_bh(&xfrm_state_lock);
err = xfrm_state_flush_secctx_check(net, proto, audit_info);
if (err)
goto out;
+ err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct hlist_node *entry;
struct xfrm_state *x;
@@ -626,13 +627,16 @@ restart:
audit_info->sessionid,
audit_info->secid);
xfrm_state_put(x);
+ if (!err)
+ cnt++;
spin_lock_bh(&xfrm_state_lock);
goto restart;
}
}
}
- err = 0;
+ if (cnt)
+ err = 0;
out:
spin_unlock_bh(&xfrm_state_lock);
@@ -665,7 +669,7 @@ xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
return 0;
}
-static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family)
{
unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
struct xfrm_state *x;
@@ -678,6 +682,8 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
xfrm_addr_cmp(&x->id.daddr, daddr, family))
continue;
+ if ((mark & x->mark.m) != x->mark.v)
+ continue;
xfrm_state_hold(x);
return x;
}
@@ -685,7 +691,7 @@ static struct xfrm_state *__xfrm_state_lookup(struct net *net, xfrm_address_t *d
return NULL;
}
-static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
+static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark, xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family)
{
unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
struct xfrm_state *x;
@@ -698,6 +704,8 @@ static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, xfrm_addre
xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
+ if ((mark & x->mark.m) != x->mark.v)
+ continue;
xfrm_state_hold(x);
return x;
}
@@ -709,12 +717,14 @@ static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
struct net *net = xs_net(x);
+ u32 mark = x->mark.v & x->mark.m;
if (use_spi)
- return __xfrm_state_lookup(net, &x->id.daddr, x->id.spi,
- x->id.proto, family);
+ return __xfrm_state_lookup(net, mark, &x->id.daddr,
+ x->id.spi, x->id.proto, family);
else
- return __xfrm_state_lookup_byaddr(net, &x->id.daddr,
+ return __xfrm_state_lookup_byaddr(net, mark,
+ &x->id.daddr,
&x->props.saddr,
x->id.proto, family);
}
@@ -779,6 +789,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
int acquire_in_progress = 0;
int error = 0;
struct xfrm_state *best = NULL;
+ u32 mark = pol->mark.v & pol->mark.m;
to_put = NULL;
@@ -787,6 +798,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == tmpl->reqid &&
+ (mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
tmpl->mode == x->props.mode &&
@@ -802,6 +814,7 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
if (x->props.family == family &&
x->props.reqid == tmpl->reqid &&
+ (mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
tmpl->mode == x->props.mode &&
@@ -815,7 +828,7 @@ found:
x = best;
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
- (x0 = __xfrm_state_lookup(net, daddr, tmpl->id.spi,
+ (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
tmpl->id.proto, family)) != NULL) {
to_put = x0;
error = -EEXIST;
@@ -829,6 +842,7 @@ found:
/* Initialize temporary selector matching only
* to current session. */
xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+ memcpy(&x->mark, &pol->mark, sizeof(x->mark));
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
if (error) {
@@ -871,7 +885,7 @@ out:
}
struct xfrm_state *
-xfrm_stateonly_find(struct net *net,
+xfrm_stateonly_find(struct net *net, u32 mark,
xfrm_address_t *daddr, xfrm_address_t *saddr,
unsigned short family, u8 mode, u8 proto, u32 reqid)
{
@@ -884,6 +898,7 @@ xfrm_stateonly_find(struct net *net,
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
+ (mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
xfrm_state_addr_check(x, daddr, saddr, family) &&
mode == x->props.mode &&
@@ -946,11 +961,13 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
struct xfrm_state *x;
struct hlist_node *entry;
unsigned int h;
+ u32 mark = xnew->mark.v & xnew->mark.m;
h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.family == family &&
x->props.reqid == reqid &&
+ (mark & x->mark.m) == x->mark.v &&
!xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
!xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
x->genid = xfrm_state_genid;
@@ -967,11 +984,12 @@ void xfrm_state_insert(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
+static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m, unsigned short family, u8 mode, u32 reqid, u8 proto, xfrm_address_t *daddr, xfrm_address_t *saddr, int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct hlist_node *entry;
struct xfrm_state *x;
+ u32 mark = m->v & m->m;
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
if (x->props.reqid != reqid ||
@@ -980,6 +998,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->km.state != XFRM_STATE_ACQ ||
x->id.spi != 0 ||
x->id.proto != proto ||
+ (mark & x->mark.m) != x->mark.v ||
xfrm_addr_cmp(&x->id.daddr, daddr, family) ||
xfrm_addr_cmp(&x->props.saddr, saddr, family))
continue;
@@ -1022,6 +1041,8 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
x->props.family = family;
x->props.mode = mode;
x->props.reqid = reqid;
+ x->mark.v = m->v;
+ x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
@@ -1038,7 +1059,7 @@ static struct xfrm_state *__find_acq_core(struct net *net, unsigned short family
return x;
}
-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq);
+static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_add(struct xfrm_state *x)
{
@@ -1046,6 +1067,7 @@ int xfrm_state_add(struct xfrm_state *x)
struct xfrm_state *x1, *to_put;
int family;
int err;
+ u32 mark = x->mark.v & x->mark.m;
int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
family = x->props.family;
@@ -1063,7 +1085,7 @@ int xfrm_state_add(struct xfrm_state *x)
}
if (use_spi && x->km.seq) {
- x1 = __xfrm_find_acq_byseq(net, x->km.seq);
+ x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
if (x1 && ((x1->id.proto != x->id.proto) ||
xfrm_addr_cmp(&x1->id.daddr, &x->id.daddr, family))) {
to_put = x1;
@@ -1072,8 +1094,8 @@ int xfrm_state_add(struct xfrm_state *x)
}
if (use_spi && !x1)
- x1 = __find_acq_core(net, family, x->props.mode, x->props.reqid,
- x->id.proto,
+ x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
+ x->props.reqid, x->id.proto,
&x->id.daddr, &x->props.saddr, 0);
__xfrm_state_bump_genids(x);
@@ -1102,7 +1124,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
int err = -ENOMEM;
struct xfrm_state *x = xfrm_state_alloc(net);
if (!x)
- goto error;
+ goto out;
memcpy(&x->id, &orig->id, sizeof(x->id));
memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1147,6 +1169,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
goto error;
}
+ memcpy(&x->mark, &orig->mark, sizeof(x->mark));
+
err = xfrm_init_state(x);
if (err)
goto error;
@@ -1160,16 +1184,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
return x;
error:
+ xfrm_state_put(x);
+out:
if (errp)
*errp = err;
- if (x) {
- kfree(x->aalg);
- kfree(x->ealg);
- kfree(x->calg);
- kfree(x->encap);
- kfree(x->coaddr);
- }
- kfree(x);
return NULL;
}
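
[editor's note] The reworked error path relies on xfrm_state_put() to release whatever a partially constructed state already owns, while the new out: label covers the case where the allocation itself failed and there is nothing to put. The general two-label shape of that cleanup, as a standalone C sketch with hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct thing { char *buf; };

static void thing_put(struct thing *t)   /* stands in for xfrm_state_put() */
{
    free(t->buf);                         /* free(NULL) is a no-op */
    free(t);
}

static struct thing *thing_clone(int *errp)
{
    int err = -ENOMEM;
    struct thing *t = calloc(1, sizeof(*t));

    if (!t)
        goto out;                /* nothing allocated yet: skip the put */

    t->buf = malloc(64);
    if (!t->buf)
        goto error;              /* partially built: one call frees it all */

    return t;

error:
    thing_put(t);
out:
    if (errp)
        *errp = err;
    return NULL;
}

int main(void)
{
    int err = 0;
    struct thing *t = thing_clone(&err);

    if (t)
        thing_put(t);
    return 0;
}
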
@@ -1344,41 +1362,41 @@ int xfrm_state_check_expire(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_check_expire);
struct xfrm_state *
-xfrm_state_lookup(struct net *net, xfrm_address_t *daddr, __be32 spi, u8 proto,
- unsigned short family)
+xfrm_state_lookup(struct net *net, u32 mark, xfrm_address_t *daddr, __be32 spi,
+ u8 proto, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
- x = __xfrm_state_lookup(net, daddr, spi, proto, family);
+ x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);
struct xfrm_state *
-xfrm_state_lookup_byaddr(struct net *net,
+xfrm_state_lookup_byaddr(struct net *net, u32 mark,
xfrm_address_t *daddr, xfrm_address_t *saddr,
u8 proto, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
- x = __xfrm_state_lookup_byaddr(net, daddr, saddr, proto, family);
+ x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
-xfrm_find_acq(struct net *net, u8 mode, u32 reqid, u8 proto,
+xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
xfrm_address_t *daddr, xfrm_address_t *saddr,
int create, unsigned short family)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
- x = __find_acq_core(net, family, mode, reqid, proto, daddr, saddr, create);
+ x = __find_acq_core(net, mark, family, mode, reqid, proto, daddr, saddr, create);
spin_unlock_bh(&xfrm_state_lock);
return x;
@@ -1425,7 +1443,7 @@ EXPORT_SYMBOL(xfrm_state_sort);
/* Silly enough, but I'm lazy to build resolution list */
-static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
+static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
int i;
@@ -1435,6 +1453,7 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+i, bydst) {
if (x->km.seq == seq &&
+ (mark & x->mark.m) == x->mark.v &&
x->km.state == XFRM_STATE_ACQ) {
xfrm_state_hold(x);
return x;
@@ -1444,12 +1463,12 @@ static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 seq)
return NULL;
}
-struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq)
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
{
struct xfrm_state *x;
spin_lock_bh(&xfrm_state_lock);
- x = __xfrm_find_acq_byseq(net, seq);
+ x = __xfrm_find_acq_byseq(net, mark, seq);
spin_unlock_bh(&xfrm_state_lock);
return x;
}
@@ -1458,12 +1477,12 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
u32 xfrm_get_acqseq(void)
{
u32 res;
- static u32 acqseq;
- static DEFINE_SPINLOCK(acqseq_lock);
+ static atomic_t acqseq;
+
+ do {
+ res = atomic_inc_return(&acqseq);
+ } while (!res);
- spin_lock_bh(&acqseq_lock);
- res = (++acqseq ? : ++acqseq);
- spin_unlock_bh(&acqseq_lock);
return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
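
[editor's note] xfrm_get_acqseq() drops the spinlock in favour of an atomic counter while preserving the guarantee that 0 is never returned: the increment is simply retried on the (rare) wrap to zero. An equivalent standalone version using C11 atomics, for illustration only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint acqseq;   /* zero-initialized, like the kernel static */

static uint32_t get_acqseq(void)
{
    uint32_t res;

    do {
        res = atomic_fetch_add(&acqseq, 1u) + 1u;  /* value after increment */
    } while (!res);                                /* skip 0 on wraparound  */

    return res;
}

int main(void)
{
    uint32_t a = get_acqseq();
    uint32_t b = get_acqseq();

    printf("%u %u\n", a, b);   /* 1 2 */
    return 0;
}

The retry loop costs one extra pass every 2^32 calls, which is far cheaper than taking and releasing a lock on every call.
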
@@ -1476,6 +1495,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
int err = -ENOENT;
__be32 minspi = htonl(low);
__be32 maxspi = htonl(high);
+ u32 mark = x->mark.v & x->mark.m;
spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_DEAD)
@@ -1488,7 +1508,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
err = -ENOENT;
if (minspi == maxspi) {
- x0 = xfrm_state_lookup(net, &x->id.daddr, minspi, x->id.proto, x->props.family);
+ x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
if (x0) {
xfrm_state_put(x0);
goto unlock;
@@ -1498,7 +1518,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
u32 spi = 0;
for (h=0; h<high-low+1; h++) {
spi = low + net_random()%(high-low+1);
- x0 = xfrm_state_lookup(net, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
+ x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
if (x0 == NULL) {
x->id.spi = htonl(spi);
break;
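
[editor's note] The tail of the xfrm_state.c changes threads the mark through SPI allocation: xfrm_alloc_spi() checks a single SPI directly when low == high, and otherwise probes up to high - low + 1 random candidates, keeping the first one no existing (mark-matching) state already uses. A rough standalone sketch of that probing loop; spi_in_use() is a made-up stand-in for the kernel's lookup:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the mark-aware __xfrm_state_lookup(): pretend SPIs
 * below 0x105 are already taken. Purely illustrative. */
static int spi_in_use(uint32_t spi)
{
    return spi < 0x105;
}

/* Return a free SPI in [low, high], or 0 if every probe collided.
 * Assumes low < high and a range that fits in 32 bits. */
static uint32_t alloc_spi(uint32_t low, uint32_t high)
{
    if (low == high)
        return spi_in_use(low) ? 0 : low;

    for (uint32_t h = 0; h < high - low + 1; h++) {
        uint32_t spi = low + (uint32_t)rand() % (high - low + 1);
        if (!spi_in_use(spi))
            return spi;
    }
    return 0;   /* the kernel version reports -ENOENT here */
}

int main(void)
{
    printf("0x%x\n", alloc_spi(0x100, 0x1ff));
    return 0;
}
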
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 2e221f2cad7..2c4d6cdcba4 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -2,7 +2,7 @@
#include <net/net_namespace.h>
#include <net/xfrm.h>
-static void __xfrm_sysctl_init(struct net *net)
+static void __net_init __xfrm_sysctl_init(struct net *net)
{
net->xfrm.sysctl_aevent_etime = XFRM_AE_ETIME;
net->xfrm.sysctl_aevent_rseqth = XFRM_AE_SEQT_SIZE;
@@ -64,7 +64,7 @@ out_kmemdup:
return -ENOMEM;
}
-void xfrm_sysctl_fini(struct net *net)
+void __net_exit xfrm_sysctl_fini(struct net *net)
{
struct ctl_table *table;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5a71297600..6106b72826d 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -446,6 +446,8 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;
}
+ xfrm_mark_get(attrs, &x->mark);
+
err = xfrm_init_state(x);
if (err)
goto error;
@@ -526,11 +528,13 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
int *errp)
{
struct xfrm_state *x = NULL;
+ struct xfrm_mark m;
int err;
+ u32 mark = xfrm_mark_get(attrs, &m);
if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
err = -ESRCH;
- x = xfrm_state_lookup(net, &p->daddr, p->spi, p->proto, p->family);
+ x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
} else {
xfrm_address_t *saddr = NULL;
@@ -541,7 +545,8 @@ static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
}
err = -ESRCH;
- x = xfrm_state_lookup_byaddr(net, &p->daddr, saddr,
+ x = xfrm_state_lookup_byaddr(net, mark,
+ &p->daddr, saddr,
p->proto, p->family);
}
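
[editor's note] Most of the xfrm_user.c changes go through two small helpers introduced elsewhere in this series: xfrm_mark_get(), which copies the optional XFRMA_MARK attribute into a struct xfrm_mark and returns the masked value the lookup functions want, and xfrm_mark_put(), which emits the attribute on the way out. The sketch below is my reading of that interface, not the kernel implementation; the names and the treat-missing-attribute-as-0/0 behaviour are assumptions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mark_sketch {          /* mirrors struct xfrm_mark: value + mask */
    uint32_t v;
    uint32_t m;
};

/* Assumed semantics: copy the attribute if present, else leave 0/0,
 * and hand back the masked value used by the lookup functions. */
static uint32_t mark_get(const struct mark_sketch *attr, struct mark_sketch *m)
{
    if (attr)
        memcpy(m, attr, sizeof(*m));
    else
        memset(m, 0, sizeof(*m));
    return m->v & m->m;
}

int main(void)
{
    struct mark_sketch attr = { .v = 0x10, .m = 0xff }, m;

    printf("0x%x\n", mark_get(&attr, &m));  /* 0x10 */
    printf("0x%x\n", mark_get(NULL, &m));   /* 0x0: unmarked request */
    return 0;
}

On the userspace side this plugs into the existing fwmark machinery (SO_MARK, netfilter marking, policy routing), which is what sets skb->mark in the first place.
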
@@ -683,6 +688,9 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (x->encap)
NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+ if (xfrm_mark_put(skb, &x->mark))
+ goto nla_put_failure;
+
if (x->security && copy_sec_ctx(x->security, skb) < 0)
goto nla_put_failure;
@@ -947,6 +955,8 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
xfrm_address_t *daddr;
int family;
int err;
+ u32 mark;
+ struct xfrm_mark m;
p = nlmsg_data(nlh);
err = verify_userspi_info(p);
@@ -957,8 +967,10 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
daddr = &p->info.id.daddr;
x = NULL;
+
+ mark = xfrm_mark_get(attrs, &m);
if (p->info.seq) {
- x = xfrm_find_acq_byseq(net, p->info.seq);
+ x = xfrm_find_acq_byseq(net, mark, p->info.seq);
if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
xfrm_state_put(x);
x = NULL;
@@ -966,7 +978,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (!x)
- x = xfrm_find_acq(net, p->info.mode, p->info.reqid,
+ x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
p->info.id.proto, daddr,
&p->info.saddr, 1,
family);
@@ -1220,6 +1232,8 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
if (err)
goto error;
+ xfrm_mark_get(attrs, &xp->mark);
+
return xp;
error:
*errp = err;
@@ -1366,10 +1380,13 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
goto nlmsg_failure;
if (copy_to_user_policy_type(xp->type, skb) < 0)
goto nlmsg_failure;
+ if (xfrm_mark_put(skb, &xp->mark))
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
+nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -1441,6 +1458,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
struct km_event c;
int delete;
+ struct xfrm_mark m;
+ u32 mark = xfrm_mark_get(attrs, &m);
p = nlmsg_data(nlh);
delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;
@@ -1454,7 +1473,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
if (p->index)
- xp = xfrm_policy_byid(net, type, p->dir, p->index, delete, &err);
+ xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -1471,8 +1490,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx,
- delete, &err);
+ xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
+ ctx, delete, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
@@ -1524,8 +1543,11 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
audit_info.sessionid = NETLINK_CB(skb).sessionid;
audit_info.secid = NETLINK_CB(skb).sid;
err = xfrm_state_flush(net, p->proto, &audit_info);
- if (err)
+ if (err) {
+ if (err == -ESRCH) /* empty table */
+ return 0;
return err;
+ }
c.data.proto = p->proto;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
@@ -1541,6 +1563,7 @@ static inline size_t xfrm_aevent_msgsize(void)
return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
+ nla_total_size(sizeof(struct xfrm_replay_state))
+ nla_total_size(sizeof(struct xfrm_lifetime_cur))
+ + nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(4) /* XFRM_AE_RTHR */
+ nla_total_size(4); /* XFRM_AE_ETHR */
}
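
[editor's note] Every message that now carries the mark also has to budget for it when the skb is sized, hence the extra nla_total_size(sizeof(struct xfrm_mark)) terms: attribute header plus payload, rounded up to 4-byte alignment, i.e. 4 + 8 = 12 bytes for the two-u32 mark. A tiny arithmetic check using macro definitions written to match the netlink ABI (the SK_ prefix marks them as local to this sketch):

#include <stdint.h>
#include <stdio.h>

#define SK_NLA_ALIGNTO   4
#define SK_NLA_ALIGN(n)  (((n) + SK_NLA_ALIGNTO - 1) & ~(SK_NLA_ALIGNTO - 1))
#define SK_NLA_HDRLEN    SK_NLA_ALIGN(4)  /* struct nlattr: u16 len + u16 type */
#define sk_nla_total_size(payload) SK_NLA_ALIGN(SK_NLA_HDRLEN + (payload))

struct mark { uint32_t v, m; };           /* 8-byte payload */

int main(void)
{
    printf("%zu\n", (size_t)sk_nla_total_size(sizeof(struct mark)));  /* 12 */
    return 0;
}
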
@@ -1573,6 +1596,9 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
x->replay_maxage * 10 / HZ);
+ if (xfrm_mark_put(skb, &x->mark))
+ goto nla_put_failure;
+
return nlmsg_end(skb, nlh);
nla_put_failure:
@@ -1588,6 +1614,8 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *r_skb;
int err;
struct km_event c;
+ u32 mark;
+ struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct xfrm_usersa_id *id = &p->sa_id;
@@ -1595,7 +1623,9 @@ static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
if (r_skb == NULL)
return -ENOMEM;
- x = xfrm_state_lookup(net, &id->daddr, id->spi, id->proto, id->family);
+ mark = xfrm_mark_get(attrs, &m);
+
+ x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
if (x == NULL) {
kfree_skb(r_skb);
return -ESRCH;
@@ -1626,6 +1656,8 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_state *x;
struct km_event c;
int err = - EINVAL;
+ u32 mark = 0;
+ struct xfrm_mark m;
struct xfrm_aevent_id *p = nlmsg_data(nlh);
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
@@ -1637,7 +1669,9 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
return err;
- x = xfrm_state_lookup(net, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
+ mark = xfrm_mark_get(attrs, &m);
+
+ x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
if (x == NULL)
return -ESRCH;
@@ -1676,8 +1710,12 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
audit_info.sessionid = NETLINK_CB(skb).sessionid;
audit_info.secid = NETLINK_CB(skb).sid;
err = xfrm_policy_flush(net, type, &audit_info);
- if (err)
+ if (err) {
+ if (err == -ESRCH) /* empty table */
+ return 0;
return err;
+ }
+
c.data.type = type;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
@@ -1696,13 +1734,15 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_userpolicy_info *p = &up->pol;
u8 type = XFRM_POLICY_TYPE_MAIN;
int err = -ENOENT;
+ struct xfrm_mark m;
+ u32 mark = xfrm_mark_get(attrs, &m);
err = copy_from_user_policy_type(&type, attrs);
if (err)
return err;
if (p->index)
- xp = xfrm_policy_byid(net, type, p->dir, p->index, 0, &err);
+ xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
else {
struct nlattr *rt = attrs[XFRMA_SEC_CTX];
struct xfrm_sec_ctx *ctx;
@@ -1719,7 +1759,8 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
return err;
}
- xp = xfrm_policy_bysel_ctx(net, type, p->dir, &p->sel, ctx, 0, &err);
+ xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
+ &p->sel, ctx, 0, &err);
security_xfrm_policy_free(ctx);
}
if (xp == NULL)
@@ -1759,8 +1800,10 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
int err;
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
+ struct xfrm_mark m;
+ u32 mark = xfrm_mark_get(attrs, &m);
- x = xfrm_state_lookup(net, &p->id.daddr, p->id.spi, p->id.proto, p->family);
+ x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
err = -ENOENT;
if (x == NULL)
@@ -1794,6 +1837,7 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_user_tmpl *ut;
int i;
struct nlattr *rt = attrs[XFRMA_TMPL];
+ struct xfrm_mark mark;
struct xfrm_user_acquire *ua = nlmsg_data(nlh);
struct xfrm_state *x = xfrm_state_alloc(net);
@@ -1802,6 +1846,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!x)
goto nomem;
+ xfrm_mark_get(attrs, &mark);
+
err = verify_newpolicy_info(&ua->policy);
if (err)
goto bad_policy;
@@ -1814,7 +1860,8 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
memcpy(&x->id, &ua->id, sizeof(ua->id));
memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
-
+ xp->mark.m = x->mark.m = mark.m;
+ xp->mark.v = x->mark.v = mark.v;
ut = nla_data(rt);
/* extract the templates and for each call km_key */
for (i = 0; i < xp->xfrm_nr; i++, ut++) {
@@ -2054,6 +2101,10 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
#undef XMSGSIZE
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
+ [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
+ [XFRMA_LASTUSED] = { .type = NLA_U64},
+ [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
@@ -2070,6 +2121,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
[XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
[XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
+ [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
};
static struct xfrm_link {
@@ -2149,7 +2201,8 @@ static void xfrm_netlink_rcv(struct sk_buff *skb)
static inline size_t xfrm_expire_msgsize(void)
{
- return NLMSG_ALIGN(sizeof(struct xfrm_user_expire));
+ return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
+ + nla_total_size(sizeof(struct xfrm_mark));
}
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_event *c)
@@ -2165,7 +2218,13 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, struct km_eve
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
+ if (xfrm_mark_put(skb, &x->mark))
+ goto nla_put_failure;
+
return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ return -EMSGSIZE;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
@@ -2177,8 +2236,10 @@ static int xfrm_exp_state_notify(struct xfrm_state *x, struct km_event *c)
if (skb == NULL)
return -ENOMEM;
- if (build_expire(skb, x, c) < 0)
- BUG();
+ if (build_expire(skb, x, c) < 0) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}
@@ -2266,6 +2327,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, struct km_event *c)
if (c->event == XFRM_MSG_DELSA) {
len += nla_total_size(headlen);
headlen = sizeof(*id);
+ len += nla_total_size(sizeof(struct xfrm_mark));
}
len += NLMSG_ALIGN(headlen);
@@ -2336,6 +2398,7 @@ static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
{
return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ + nla_total_size(sizeof(struct xfrm_mark))
+ nla_total_size(xfrm_user_sec_ctx_size(x->security))
+ userpolicy_type_attrsize();
}
@@ -2368,9 +2431,12 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
goto nlmsg_failure;
if (copy_to_user_policy_type(xp->type, skb) < 0)
goto nlmsg_failure;
+ if (xfrm_mark_put(skb, &xp->mark))
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
+nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -2457,6 +2523,7 @@ static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
+ nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
+ nla_total_size(xfrm_user_sec_ctx_size(xp->security))
+ + nla_total_size(sizeof(struct xfrm_mark))
+ userpolicy_type_attrsize();
}
@@ -2479,10 +2546,13 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
goto nlmsg_failure;
if (copy_to_user_policy_type(xp->type, skb) < 0)
goto nlmsg_failure;
+ if (xfrm_mark_put(skb, &xp->mark))
+ goto nla_put_failure;
upe->hard = !!hard;
return nlmsg_end(skb, nlh);
+nla_put_failure:
nlmsg_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
@@ -2519,6 +2589,7 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
headlen = sizeof(*id);
}
len += userpolicy_type_attrsize();
+ len += nla_total_size(sizeof(struct xfrm_mark));
len += NLMSG_ALIGN(headlen);
skb = nlmsg_new(len, GFP_ATOMIC);
@@ -2554,10 +2625,14 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *
if (copy_to_user_policy_type(xp->type, skb) < 0)
goto nlmsg_failure;
+ if (xfrm_mark_put(skb, &xp->mark))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
+nla_put_failure:
nlmsg_failure:
kfree_skb(skb);
return -1;