Diffstat (limited to 'net')
net/8021q/vlan_dev.c | 5
net/9p/client.c | 2
net/9p/trans_virtio.c | 2
net/appletalk/ddp.c | 8
net/atm/lec.c | 8
net/atm/pppoatm.c | 2
net/ax25/ax25_addr.c | 6
net/ax25/ax25_out.c | 2
net/ax25/ax25_route.c | 2
net/batman-adv/Makefile | 4
net/batman-adv/bat_algo.h | 6
net/batman-adv/bat_debugfs.c | 388
net/batman-adv/bat_iv_ogm.c | 1050
net/batman-adv/bat_sysfs.c | 735
net/batman-adv/bitarray.c | 65
net/batman-adv/bitarray.h | 24
net/batman-adv/bridge_loop_avoidance.c | 799
net/batman-adv/bridge_loop_avoidance.h | 76
net/batman-adv/debugfs.c | 409
net/batman-adv/debugfs.h (renamed from net/batman-adv/bat_debugfs.h) | 15
net/batman-adv/gateway_client.c | 354
net/batman-adv/gateway_client.h | 32
net/batman-adv/gateway_common.c | 61
net/batman-adv/gateway_common.h | 23
net/batman-adv/hard-interface.c | 342
net/batman-adv/hard-interface.h | 51
net/batman-adv/hash.c | 25
net/batman-adv/hash.h | 78
net/batman-adv/icmp_socket.c | 180
net/batman-adv/icmp_socket.h | 14
net/batman-adv/main.c | 276
net/batman-adv/main.h | 257
net/batman-adv/originator.c | 337
net/batman-adv/originator.h | 57
net/batman-adv/packet.h | 181
net/batman-adv/ring_buffer.c | 13
net/batman-adv/ring_buffer.h | 9
net/batman-adv/routing.c | 689
net/batman-adv/routing.h | 64
net/batman-adv/send.c | 237
net/batman-adv/send.h | 23
net/batman-adv/soft-interface.c | 304
net/batman-adv/soft-interface.h | 17
net/batman-adv/sysfs.c | 787
net/batman-adv/sysfs.h (renamed from net/batman-adv/bat_sysfs.h) | 24
net/batman-adv/translation-table.c | 1659
net/batman-adv/translation-table.h | 75
net/batman-adv/types.h | 183
net/batman-adv/unicast.c | 179
net/batman-adv/unicast.h | 34
net/batman-adv/vis.c | 728
net/batman-adv/vis.h | 26
net/bluetooth/Makefile | 3
net/bluetooth/a2mp.c | 568
net/bluetooth/af_bluetooth.c | 14
net/bluetooth/bnep/core.c | 21
net/bluetooth/bnep/netdev.c | 16
net/bluetooth/bnep/sock.c | 18
net/bluetooth/hci_conn.c | 143
net/bluetooth/hci_core.c | 265
net/bluetooth/hci_event.c | 479
net/bluetooth/hci_sock.c | 59
net/bluetooth/hci_sysfs.c | 99
net/bluetooth/hidp/core.c | 26
net/bluetooth/hidp/sock.c | 16
net/bluetooth/l2cap_core.c | 2248
net/bluetooth/l2cap_sock.c | 130
net/bluetooth/lib.c | 7
net/bluetooth/mgmt.c | 131
net/bluetooth/rfcomm/core.c | 32
net/bluetooth/rfcomm/sock.c | 21
net/bluetooth/rfcomm/tty.c | 9
net/bluetooth/sco.c | 43
net/bluetooth/smp.c | 7
net/bridge/br_device.c | 9
net/bridge/br_multicast.c | 11
net/bridge/br_netfilter.c | 77
net/bridge/br_sysfs_if.c | 6
net/bridge/netfilter/ebt_ulog.c | 29
net/caif/caif_dev.c | 8
net/caif/caif_socket.c | 2
net/caif/cfctrl.c | 17
net/can/af_can.c | 126
net/can/af_can.h | 3
net/can/gw.c | 90
net/can/proc.c | 3
net/can/raw.c | 50
net/ceph/ceph_common.c | 25
net/ceph/crush/mapper.c | 13
net/ceph/crypto.c | 1
net/ceph/crypto.h | 3
net/ceph/messenger.c | 925
net/ceph/mon_client.c | 76
net/ceph/msgpool.c | 7
net/ceph/osd_client.c | 77
net/ceph/osdmap.c | 59
net/ceph/pagelist.c | 14
net/compat.c | 4
net/core/datagram.c | 1
net/core/dev.c | 100
net/core/dst.c | 25
net/core/ethtool.c | 45
net/core/fib_rules.c | 4
net/core/filter.c | 8
net/core/flow_dissector.c | 5
net/core/neighbour.c | 31
net/core/net-sysfs.c | 74
net/core/netpoll.c | 10
net/core/netprio_cgroup.c | 55
net/core/rtnetlink.c | 74
net/core/scm.c | 22
net/core/skbuff.c | 195
net/core/sock.c | 74
net/core/sock_diag.c | 42
net/dcb/dcbnl.c | 1168
net/dccp/ackvec.h | 7
net/dccp/ccid.c | 1
net/dccp/ccids/ccid3.c | 8
net/dccp/ccids/lib/loss_interval.c | 1
net/dccp/ccids/lib/packet_history.c | 3
net/dccp/ccids/lib/tfrc_equation.c | 2
net/dccp/dccp.h | 1
net/dccp/feat.c | 10
net/dccp/input.c | 1
net/dccp/ipv4.c | 24
net/dccp/ipv6.c | 61
net/dccp/options.c | 1
net/dccp/output.c | 1
net/decnet/dn_fib.c | 8
net/decnet/dn_neigh.c | 8
net/decnet/dn_nsp_out.c | 2
net/decnet/dn_route.c | 144
net/decnet/dn_table.c | 76
net/decnet/netfilter/dn_rtmsg.c | 30
net/ethernet/Makefile | 2
net/ethernet/eth.c | 5
net/ieee802154/6lowpan.c | 251
net/ieee802154/netlink.c | 4
net/ieee802154/nl-mac.c | 2
net/ieee802154/nl-phy.c | 2
net/ipv4/Kconfig | 11
net/ipv4/Makefile | 5
net/ipv4/af_inet.c | 75
net/ipv4/ah4.c | 17
net/ipv4/arp.c | 6
net/ipv4/devinet.c | 5
net/ipv4/esp4.c | 17
net/ipv4/fib_frontend.c | 131
net/ipv4/fib_rules.c | 39
net/ipv4/fib_semantics.c | 80
net/ipv4/fib_trie.c | 60
net/ipv4/icmp.c | 191
net/ipv4/inet_connection_sock.c | 53
net/ipv4/inet_diag.c | 146
net/ipv4/inet_fragment.c | 2
net/ipv4/inetpeer.c | 99
net/ipv4/ip_fragment.c | 6
net/ipv4/ip_gre.c | 25
net/ipv4/ip_input.c | 30
net/ipv4/ip_options.c | 29
net/ipv4/ip_output.c | 93
net/ipv4/ip_sockglue.c | 12
net/ipv4/ip_vti.c | 956
net/ipv4/ipcomp.c | 17
net/ipv4/ipip.c | 28
net/ipv4/ipmr.c | 41
net/ipv4/netfilter/ipt_MASQUERADE.c | 5
net/ipv4/netfilter/ipt_ULOG.c | 23
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 172
net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 81
net/ipv4/netfilter/nf_defrag_ipv4.c | 4
net/ipv4/netfilter/nf_nat_amanda.c | 4
net/ipv4/netfilter/nf_nat_core.c | 6
net/ipv4/netfilter/nf_nat_h323.c | 8
net/ipv4/netfilter/nf_nat_helper.c | 13
net/ipv4/netfilter/nf_nat_pptp.c | 6
net/ipv4/netfilter/nf_nat_snmp_basic.c | 4
net/ipv4/netfilter/nf_nat_tftp.c | 4
net/ipv4/ping.c | 2
net/ipv4/proc.c | 7
net/ipv4/protocol.c | 8
net/ipv4/raw.c | 5
net/ipv4/route.c | 2229
net/ipv4/syncookies.c | 2
net/ipv4/sysctl_net_ipv4.c | 43
net/ipv4/tcp.c | 77
net/ipv4/tcp_cong.c | 5
net/ipv4/tcp_fastopen.c | 11
net/ipv4/tcp_input.c | 398
net/ipv4/tcp_ipv4.c | 181
net/ipv4/tcp_metrics.c | 745
net/ipv4/tcp_minisocks.c | 61
net/ipv4/tcp_output.c | 353
net/ipv4/tcp_timer.c | 70
net/ipv4/udp.c | 9
net/ipv4/udp_diag.c | 10
net/ipv4/xfrm4_mode_tunnel.c | 68
net/ipv4/xfrm4_policy.c | 34
net/ipv6/addrconf.c | 21
net/ipv6/ah6.c | 11
net/ipv6/esp6.c | 11
net/ipv6/exthdrs.c | 4
net/ipv6/icmp.c | 23
net/ipv6/inet6_connection_sock.c | 103
net/ipv6/ip6_fib.c | 5
net/ipv6/ip6_input.c | 20
net/ipv6/ip6_output.c | 40
net/ipv6/ip6_tunnel.c | 96
net/ipv6/ip6mr.c | 5
net/ipv6/ipcomp6.c | 11
net/ipv6/mcast.c | 3
net/ipv6/ndisc.c | 129
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 131
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 51
net/ipv6/protocol.c | 8
net/ipv6/raw.c | 11
net/ipv6/route.c | 544
net/ipv6/sit.c | 25
net/ipv6/syncookies.c | 5
net/ipv6/tcp_ipv6.c | 236
net/ipv6/udp.c | 13
net/ipv6/xfrm6_policy.c | 26
net/ipx/Makefile | 2
net/ipx/pe2.c (renamed from net/ethernet/pe2.c) | 2
net/irda/af_irda.c | 2
net/irda/irlan/irlan_provider.c | 2
net/irda/irqueue.c | 6
net/l2tp/l2tp_core.c | 11
net/l2tp/l2tp_eth.c | 15
net/l2tp/l2tp_netlink.c | 6
net/l2tp/l2tp_ppp.c | 8
net/llc/af_llc.c | 2
net/llc/llc_station.c | 16
net/mac80211/Kconfig | 56
net/mac80211/Makefile | 7
net/mac80211/agg-rx.c | 38
net/mac80211/agg-tx.c | 118
net/mac80211/cfg.c | 734
net/mac80211/chan.c | 4
net/mac80211/debug.h | 170
net/mac80211/debugfs.c | 2
net/mac80211/debugfs_key.c | 16
net/mac80211/debugfs_netdev.c | 49
net/mac80211/driver-ops.h | 39
net/mac80211/driver-trace.c | 9
net/mac80211/ht.c | 10
net/mac80211/ibss.c | 127
net/mac80211/ieee80211_i.h | 139
net/mac80211/iface.c | 325
net/mac80211/key.c | 24
net/mac80211/led.c | 2
net/mac80211/main.c | 48
net/mac80211/mesh.c | 19
net/mac80211/mesh.h | 4
net/mac80211/mesh_hwmp.c | 173
net/mac80211/mesh_pathtbl.c | 34
net/mac80211/mesh_plink.c | 70
net/mac80211/mesh_sync.c | 47
net/mac80211/mlme.c | 382
net/mac80211/offchannel.c | 291
net/mac80211/pm.c | 11
net/mac80211/rc80211_minstrel_ht.c | 8
net/mac80211/rx.c | 130
net/mac80211/scan.c | 123
net/mac80211/sta_info.c | 45
net/mac80211/status.c | 48
net/mac80211/tkip.c | 46
net/mac80211/trace.c | 75
net/mac80211/trace.h (renamed from net/mac80211/driver-trace.h) | 80
net/mac80211/tx.c | 95
net/mac80211/util.c | 178
net/mac80211/wme.c | 11
net/mac80211/wme.h | 2
net/mac80211/work.c | 370
net/mac802154/Makefile | 2
net/mac802154/ieee802154_dev.c | 4
net/mac802154/mac802154.h | 9
net/mac802154/mac_cmd.c | 33
net/mac802154/mib.c | 108
net/mac802154/rx.c | 1
net/mac802154/tx.c | 2
net/mac802154/wpan.c | 559
net/netfilter/Kconfig | 21
net/netfilter/Makefile | 3
net/netfilter/core.c | 7
net/netfilter/ipvs/ip_vs_core.c | 24
net/netfilter/ipvs/ip_vs_xmit.c | 8
net/netfilter/nf_conntrack_core.c | 22
net/netfilter/nf_conntrack_extend.c | 16
net/netfilter/nf_conntrack_ftp.c | 11
net/netfilter/nf_conntrack_h323_main.c | 16
net/netfilter/nf_conntrack_helper.c | 38
net/netfilter/nf_conntrack_irc.c | 8
net/netfilter/nf_conntrack_netlink.c | 407
net/netfilter/nf_conntrack_pptp.c | 17
net/netfilter/nf_conntrack_proto.c | 300
net/netfilter/nf_conntrack_proto_dccp.c | 143
net/netfilter/nf_conntrack_proto_generic.c | 81
net/netfilter/nf_conntrack_proto_gre.c | 79
net/netfilter/nf_conntrack_proto_sctp.c | 175
net/netfilter/nf_conntrack_proto_tcp.c | 163
net/netfilter/nf_conntrack_proto_udp.c | 111
net/netfilter/nf_conntrack_proto_udplite.c | 127
net/netfilter/nf_conntrack_sane.c | 12
net/netfilter/nf_conntrack_sip.c | 32
net/netfilter/nf_conntrack_tftp.c | 8
net/netfilter/nfnetlink.c | 40
net/netfilter/nfnetlink_cthelper.c | 672
net/netfilter/nfnetlink_cttimeout.c | 13
net/netfilter/nfnetlink_log.c | 29
net/netfilter/nfnetlink_queue_core.c (renamed from net/netfilter/nfnetlink_queue.c) | 95
net/netfilter/nfnetlink_queue_ct.c | 98
net/netfilter/xt_CT.c | 44
net/netfilter/xt_NFQUEUE.c | 28
net/netfilter/xt_TPROXY.c | 4
net/netfilter/xt_connlimit.c | 35
net/netfilter/xt_recent.c | 62
net/netlink/af_netlink.c | 35
net/netlink/genetlink.c | 14
net/nfc/core.c | 157
net/nfc/hci/command.c | 26
net/nfc/hci/core.c | 137
net/nfc/hci/hci.h | 12
net/nfc/hci/hcp.c | 2
net/nfc/hci/shdlc.c | 44
net/nfc/llcp/commands.c | 54
net/nfc/llcp/llcp.c | 627
net/nfc/llcp/llcp.h | 31
net/nfc/llcp/sock.c | 74
net/nfc/nci/core.c | 23
net/nfc/nci/ntf.c | 5
net/nfc/netlink.c | 104
net/nfc/nfc.h | 12
net/openvswitch/actions.c | 2
net/openvswitch/datapath.c | 13
net/openvswitch/datapath.h | 2
net/openvswitch/dp_notify.c | 2
net/openvswitch/flow.c | 5
net/openvswitch/flow.h | 2
net/openvswitch/vport-internal_dev.c | 10
net/openvswitch/vport-internal_dev.h | 2
net/openvswitch/vport-netdev.c | 2
net/openvswitch/vport-netdev.h | 2
net/openvswitch/vport.c | 2
net/openvswitch/vport.h | 2
net/packet/af_packet.c | 29
net/rds/page.c | 9
net/rds/recv.c | 3
net/rfkill/core.c | 2
net/rxrpc/ar-error.c | 4
net/rxrpc/ar-output.c | 2
net/sched/Kconfig | 20
net/sched/Makefile | 2
net/sched/act_api.c | 59
net/sched/cls_api.c | 12
net/sched/cls_route.c | 2
net/sched/em_canid.c | 240
net/sched/em_ipset.c | 135
net/sched/em_meta.c | 2
net/sched/sch_api.c | 24
net/sched/sch_netem.c | 9
net/sched/sch_teql.c | 47
net/sctp/associola.c | 41
net/sctp/input.c | 20
net/sctp/ipv6.c | 3
net/sctp/output.c | 81
net/sctp/outqueue.c | 6
net/sctp/protocol.c | 2
net/sctp/sm_make_chunk.c | 2
net/sctp/sm_sideeffect.c | 33
net/sctp/socket.c | 107
net/sctp/sysctl.c | 9
net/sctp/transport.c | 20
net/sctp/ulpevent.c | 3
net/socket.c | 8
net/sunrpc/Kconfig | 5
net/sunrpc/auth.c | 54
net/sunrpc/auth_gss/auth_gss.c | 1
net/sunrpc/auth_gss/gss_mech_switch.c | 20
net/sunrpc/backchannel_rqst.c | 9
net/sunrpc/cache.c | 5
net/sunrpc/clnt.c | 14
net/sunrpc/rpcb_clnt.c | 4
net/sunrpc/sched.c | 14
net/sunrpc/svcauth_unix.c | 22
net/sunrpc/svcsock.c | 12
net/sunrpc/xdr.c | 139
net/sunrpc/xprt.c | 2
net/sunrpc/xprtrdma/transport.c | 3
net/sunrpc/xprtsock.c | 56
net/tipc/Kconfig | 25
net/tipc/bcast.c | 75
net/tipc/bearer.c | 69
net/tipc/bearer.h | 4
net/tipc/config.c | 41
net/tipc/core.c | 18
net/tipc/core.h | 65
net/tipc/discover.c | 10
net/tipc/handler.c | 4
net/tipc/link.c | 326
net/tipc/link.h | 63
net/tipc/log.c | 302
net/tipc/log.h | 66
net/tipc/msg.c | 242
net/tipc/name_distr.c | 25
net/tipc/name_table.c | 142
net/tipc/net.c | 8
net/tipc/netlink.c | 2
net/tipc/node.c | 22
net/tipc/node_subscr.c | 3
net/tipc/port.c | 77
net/tipc/port.h | 1
net/tipc/ref.c | 10
net/tipc/socket.c | 17
net/tipc/subscr.c | 14
net/unix/af_unix.c | 203
net/unix/diag.c | 115
net/wanrouter/wanmain.c | 51
net/wireless/Kconfig | 35
net/wireless/Makefile | 2
net/wireless/ap.c | 46
net/wireless/chan.c | 107
net/wireless/core.c | 134
net/wireless/core.h | 106
net/wireless/ibss.c | 11
net/wireless/mesh.c | 121
net/wireless/mlme.c | 64
net/wireless/nl80211.c | 1009
net/wireless/nl80211.h | 21
net/wireless/reg.c | 137
net/wireless/reg.h | 8
net/wireless/scan.c | 24
net/wireless/sme.c | 10
net/wireless/util.c | 171
net/wireless/wext-compat.c | 23
net/wireless/wext-sme.c | 10
net/x25/x25_route.c | 2
net/xfrm/xfrm_policy.c | 37
net/xfrm/xfrm_user.c | 401
439 files changed, 25349 insertions, 17586 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index da1bc9c3cf3..73a2a83ee2d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -681,10 +681,7 @@ static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *n
if (!netpoll)
goto out;
- netpoll->dev = real_dev;
- strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
-
- err = __netpoll_setup(netpoll);
+ err = __netpoll_setup(netpoll, real_dev);
if (err) {
kfree(netpoll);
goto out;
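
The vlan hunk above tracks a core netpoll API change: __netpoll_setup() now receives the underlying device and fills in np->dev and np->dev_name itself, so callers drop their open-coded assignments. A minimal sketch of the resulting caller pattern, assuming only the two-argument signature visible in this diff:

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/slab.h>

/* Hypothetical helper mirroring the vlan_dev_netpoll_setup() flow. */
static int example_netpoll_attach(struct netpoll *netpoll,
				  struct net_device *real_dev)
{
	int err;

	/* The device is passed in; __netpoll_setup() records it in
	 * netpoll->dev and copies real_dev->name into netpoll->dev_name.
	 */
	err = __netpoll_setup(netpoll, real_dev);
	if (err)
		kfree(netpoll);	/* as in the hunk above */

	return err;
}
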
diff --git a/net/9p/client.c b/net/9p/client.c
index a170893d70e..8260f132b32 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
kernel_buf = 1;
indata = data;
} else
- indata = (char *)udata;
+ indata = (__force char *)udata;
/*
* response header len is 11
* PDU Header(7) + IO Size (4)
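
The one-word change in the 9p hunk is a sparse fix: udata carries the __user address-space tag, and casting it to a plain char * without __force makes "make C=1" warn about a dropped address space. A short sketch of the annotation, assuming the standard definitions from <linux/compiler.h>:

#include <linux/compiler.h>

/* __user marks pointers into the user address space for sparse; __force
 * declares the conversion deliberate so the checker stays quiet.  The
 * result must still only be dereferenced on a path that has vouched for
 * the buffer, as p9_client_read() does via its kernel_buf flag.
 */
static char *strip_user_tag(char __user *udata)
{
	return (__force char *)udata;
}
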
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2a167658bb9..35b8911b1c8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -212,7 +212,7 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
- * @**pdata: a list of pages to add into sg.
+ * @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
* @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 86852963b7f..33475291c9c 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -129,8 +129,8 @@ found:
/**
* atalk_find_or_insert_socket - Try to find a socket matching ADDR
- * @sk - socket to insert in the list if it is not there already
- * @sat - address to search for
+ * @sk: socket to insert in the list if it is not there already
+ * @sat: address to search for
*
* Try to find a socket matching ADDR in the socket list, if found then return
* it. If not, insert SK into the socket list.
@@ -1066,8 +1066,8 @@ static int atalk_release(struct socket *sock)
/**
* atalk_pick_and_bind_port - Pick a source port when one is not given
- * @sk - socket to insert into the tables
- * @sat - address to search for
+ * @sk: socket to insert into the tables
+ * @sat: address to search for
*
* Pick a source port when one is not given. If we can find a suitable free
* one, we insert the socket into the tables using it.
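
The trans_virtio.c and ddp.c hunks above are both kernel-doc repairs: a parameter line must use the bare parameter name followed by a colon ('@sk: ...'), not a dash ('@sk - ...') or pointer decoration ('@**pdata'), or scripts/kernel-doc mis-parses the description. The corrected shape:

/**
 * atalk_pick_and_bind_port - Pick a source port when one is not given
 * @sk: socket to insert into the tables
 * @sat: address to search for
 *
 * scripts/kernel-doc keys on the "@name:" form; "@name - text" and
 * decorated names like "@**pdata" are treated as free text rather than
 * a parameter description.
 */
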
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a7d172105c9..2e3d942e77f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -231,9 +231,11 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < 2) {
pr_debug("reallocating skb\n");
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
- kfree_skb(skb);
- if (skb2 == NULL)
+ if (unlikely(!skb2)) {
+ kfree_skb(skb);
return NETDEV_TX_OK;
+ }
+ consume_skb(skb);
skb = skb2;
}
skb_push(skb, 2);
@@ -1602,7 +1604,7 @@ static void lec_arp_expire_vcc(unsigned long data)
{
unsigned long flags;
struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
- struct lec_priv *priv = (struct lec_priv *)to_remove->priv;
+ struct lec_priv *priv = to_remove->priv;
del_timer(&to_remove->timer);
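
The lec.c change above (and the matching pppoatm and ax25 hunks below) distinguishes the two skb-free helpers: kfree_skb() fires the kfree_skb tracepoint and is meant for packets that are dropped, while consume_skb() is for skbs that left the system normally, here because they were replaced by a reallocated copy. A minimal sketch of the headroom-expansion pattern after the fix:

#include <linux/skbuff.h>

/* Hypothetical helper: return an skb with at least "needed" bytes of
 * headroom, or NULL if the packet had to be dropped.
 */
static struct sk_buff *ensure_headroom(struct sk_buff *skb,
				       unsigned int needed)
{
	struct sk_buff *skb2;

	if (skb_headroom(skb) >= needed)
		return skb;

	skb2 = skb_realloc_headroom(skb, needed);
	if (unlikely(!skb2)) {
		kfree_skb(skb);		/* genuine drop: visible to drop monitors */
		return NULL;
	}
	consume_skb(skb);		/* original consumed, not dropped */
	return skb2;
}
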
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index ce1e59fdae7..226dca98944 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -283,7 +283,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(n);
goto nospace;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = n;
if (skb == NULL)
return DROP_PACKET;
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 9162409559c..e7c9b0ea17a 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -189,8 +189,10 @@ const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
digi->ndigi = 0;
while (!(buf[-1] & AX25_EBIT)) {
- if (d >= AX25_MAX_DIGIS) return NULL; /* Max of 6 digis */
- if (len < 7) return NULL; /* Short packet */
+ if (d >= AX25_MAX_DIGIS)
+ return NULL;
+ if (len < AX25_ADDR_LEN)
+ return NULL;
memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
digi->ndigi = d + 1;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index be8a25e0db6..be2acab9be9 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -350,7 +350,7 @@ void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skbn;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a65588040b9..d39097737e3 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -474,7 +474,7 @@ struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skbn;
}
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 6d5c1940667..8676d2b1d57 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,11 +19,10 @@
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += bat_debugfs.o
batman-adv-y += bat_iv_ogm.o
-batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
+batman-adv-y += debugfs.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
@@ -35,6 +34,7 @@ batman-adv-y += ring_buffer.o
batman-adv-y += routing.o
batman-adv-y += send.o
batman-adv-y += soft-interface.o
+batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
batman-adv-y += unicast.o
batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 9852a688ba4..a0ba3bff9b3 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,12 +15,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
#define _NET_BATMAN_ADV_BAT_ALGO_H_
-int bat_iv_init(void);
+int batadv_iv_init(void);
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
deleted file mode 100644
index 3b588f86d77..00000000000
--- a/net/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-
-#include <linux/debugfs.h>
-
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "soft-interface.h"
-#include "vis.h"
-#include "icmp_socket.h"
-#include "bridge_loop_avoidance.h"
-
-static struct dentry *bat_debugfs;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-#define LOG_BUFF_MASK (log_buff_len-1)
-#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
-
-static int log_buff_len = LOG_BUF_LEN;
-
-static void emit_log_char(struct debug_log *debug_log, char c)
-{
- LOG_BUFF(debug_log->log_end) = c;
- debug_log->log_end++;
-
- if (debug_log->log_end - debug_log->log_start > log_buff_len)
- debug_log->log_start = debug_log->log_end - log_buff_len;
-}
-
-__printf(2, 3)
-static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
-{
- va_list args;
- static char debug_log_buf[256];
- char *p;
-
- if (!debug_log)
- return 0;
-
- spin_lock_bh(&debug_log->lock);
- va_start(args, fmt);
- vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
- va_end(args);
-
- for (p = debug_log_buf; *p != 0; p++)
- emit_log_char(debug_log, *p);
-
- spin_unlock_bh(&debug_log->lock);
-
- wake_up(&debug_log->queue_wait);
-
- return 0;
-}
-
-int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
-{
- va_list args;
- char tmp_log_buf[256];
-
- va_start(args, fmt);
- vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- fdebug_log(bat_priv->debug_log, "[%10u] %s",
- jiffies_to_msecs(jiffies), tmp_log_buf);
- va_end(args);
-
- return 0;
-}
-
-static int log_open(struct inode *inode, struct file *file)
-{
- nonseekable_open(inode, file);
- file->private_data = inode->i_private;
- inc_module_count();
- return 0;
-}
-
-static int log_release(struct inode *inode, struct file *file)
-{
- dec_module_count();
- return 0;
-}
-
-static ssize_t log_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
- int error, i = 0;
- char c;
-
- if ((file->f_flags & O_NONBLOCK) &&
- !(debug_log->log_end - debug_log->log_start))
- return -EAGAIN;
-
- if (!buf)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(debug_log->queue_wait,
- (debug_log->log_start - debug_log->log_end));
-
- if (error)
- return error;
-
- spin_lock_bh(&debug_log->lock);
-
- while ((!error) && (i < count) &&
- (debug_log->log_start != debug_log->log_end)) {
- c = LOG_BUFF(debug_log->log_start);
-
- debug_log->log_start++;
-
- spin_unlock_bh(&debug_log->lock);
-
- error = __put_user(c, buf);
-
- spin_lock_bh(&debug_log->lock);
-
- buf++;
- i++;
-
- }
-
- spin_unlock_bh(&debug_log->lock);
-
- if (!error)
- return i;
-
- return error;
-}
-
-static unsigned int log_poll(struct file *file, poll_table *wait)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
-
- poll_wait(file, &debug_log->queue_wait, wait);
-
- if (debug_log->log_end - debug_log->log_start)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations log_fops = {
- .open = log_open,
- .release = log_release,
- .read = log_read,
- .poll = log_poll,
- .llseek = no_llseek,
-};
-
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- struct dentry *d;
-
- if (!bat_priv->debug_dir)
- goto err;
-
- bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
- if (!bat_priv->debug_log)
- goto err;
-
- spin_lock_init(&bat_priv->debug_log->lock);
- init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
- d = debugfs_create_file("log", S_IFREG | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &log_fops);
- if (!d)
- goto err;
-
- return 0;
-
-err:
- return 1;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- kfree(bat_priv->debug_log);
- bat_priv->debug_log = NULL;
-}
-#else /* CONFIG_BATMAN_ADV_DEBUG */
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- bat_priv->debug_log = NULL;
- return 0;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- return;
-}
-#endif
-
-static int bat_algorithms_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bat_algo_seq_print_text, NULL);
-}
-
-static int originators_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, orig_seq_print_text, net_dev);
-}
-
-static int gateways_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, gw_client_seq_print_text, net_dev);
-}
-
-static int transtable_global_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_global_seq_print_text, net_dev);
-}
-
-#ifdef CONFIG_BATMAN_ADV_BLA
-static int bla_claim_table_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, bla_claim_table_seq_print_text, net_dev);
-}
-#endif
-
-static int transtable_local_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_local_seq_print_text, net_dev);
-}
-
-static int vis_data_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, vis_seq_print_text, net_dev);
-}
-
-struct bat_debuginfo {
- struct attribute attr;
- const struct file_operations fops;
-};
-
-#define BAT_DEBUGINFO(_name, _mode, _open) \
-struct bat_debuginfo bat_debuginfo_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = _mode, }, \
- .fops = { .owner = THIS_MODULE, \
- .open = _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- } \
-};
-
-static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
-static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
-static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
-#ifdef CONFIG_BATMAN_ADV_BLA
-static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
-#endif
-static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
-static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
-
-static struct bat_debuginfo *mesh_debuginfos[] = {
- &bat_debuginfo_originators,
- &bat_debuginfo_gateways,
- &bat_debuginfo_transtable_global,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &bat_debuginfo_bla_claim_table,
-#endif
- &bat_debuginfo_transtable_local,
- &bat_debuginfo_vis_data,
- NULL,
-};
-
-void debugfs_init(void)
-{
- struct bat_debuginfo *bat_debug;
- struct dentry *file;
-
- bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
- if (bat_debugfs == ERR_PTR(-ENODEV))
- bat_debugfs = NULL;
-
- if (!bat_debugfs)
- goto out;
-
- bat_debug = &bat_debuginfo_routing_algos;
- file = debugfs_create_file(bat_debug->attr.name,
- S_IFREG | bat_debug->attr.mode,
- bat_debugfs, NULL, &bat_debug->fops);
- if (!file)
- pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
-
-out:
- return;
-}
-
-void debugfs_destroy(void)
-{
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_debugfs);
- bat_debugfs = NULL;
- }
-}
-
-int debugfs_add_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_debuginfo **bat_debug;
- struct dentry *file;
-
- if (!bat_debugfs)
- goto out;
-
- bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
- if (!bat_priv->debug_dir)
- goto out;
-
- bat_socket_setup(bat_priv);
- debug_log_setup(bat_priv);
-
- for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
- file = debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- bat_priv->debug_dir,
- dev, &(*bat_debug)->fops);
- if (!file) {
- bat_err(dev, "Can't add debugfs file: %s/%s\n",
- dev->name, ((*bat_debug)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-rem_attr:
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
-out:
-#ifdef CONFIG_DEBUG_FS
- return -ENOMEM;
-#else
- return 0;
-#endif /* CONFIG_DEBUG_FS */
-}
-
-void debugfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
-
- debug_log_cleanup(bat_priv);
-
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
- }
-}
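
The log machinery deleted above (it moves to debugfs.c, per the diffstat) is built on a power-of-two ring buffer: LOG_BUFF_MASK is log_buff_len - 1, so masking an ever-growing index wraps it for free, the writer advances log_end, and the reader chases log_start, which is pulled forward whenever the writer laps it. A condensed sketch of that indexing scheme, with hypothetical names and a size that, like LOG_BUF_LEN above, must stay a power of two:

#define RING_LEN	8192U			/* power of two */
#define RING_MASK	(RING_LEN - 1)

static char ring[RING_LEN];
static unsigned long ring_start, ring_end;	/* free-running counters */

static void ring_put(char c)
{
	ring[ring_end++ & RING_MASK] = c;	/* mask == cheap modulo */

	/* Writer overtook the reader: silently discard the oldest data,
	 * exactly what emit_log_char() above does with log_start.
	 */
	if (ring_end - ring_start > RING_LEN)
		ring_start = ring_end - RING_LEN;
}
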
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index dc53798ebb4..e877af8bdd1 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -30,15 +28,16 @@
#include "send.h"
#include "bat_algo.h"
-static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- struct orig_node *orig_node,
- struct orig_node *orig_neigh,
- uint32_t seqno)
+static struct batadv_neigh_node *
+batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh, __be32 seqno)
{
- struct neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+ neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
+ ntohl(seqno));
if (!neigh_node)
goto out;
@@ -55,30 +54,30 @@ out:
return neigh_node;
}
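
/* Note on the hunk above: the seqno parameter is now typed __be32, so
 * callers hand over the raw on-wire (big-endian) value and the single
 * ntohl() at this call site does the conversion -- sparse can then flag
 * any missed byte-order swap at build time.
 */
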
-static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
+static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
uint32_t random_seqno;
- int res = -1;
+ int res = -ENOMEM;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->seqno, random_seqno);
- hard_iface->packet_len = BATMAN_OGM_HLEN;
+ hard_iface->packet_len = BATADV_OGM_HLEN;
hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
if (!hard_iface->packet_buff)
goto out;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->header.packet_type = BAT_IV_OGM;
- batman_ogm_packet->header.version = COMPAT_VERSION;
- batman_ogm_packet->header.ttl = 2;
- batman_ogm_packet->flags = NO_FLAGS;
- batman_ogm_packet->tq = TQ_MAX_VALUE;
- batman_ogm_packet->tt_num_changes = 0;
- batman_ogm_packet->ttvn = 0;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->header.ttl = 2;
+ batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+ batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+ batadv_ogm_packet->tt_num_changes = 0;
+ batadv_ogm_packet->ttvn = 0;
res = 0;
@@ -86,133 +85,152 @@ out:
return res;
}
-static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = NULL;
}
-static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- memcpy(batman_ogm_packet->orig,
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ memcpy(batadv_ogm_packet->orig,
hard_iface->net_dev->dev_addr, ETH_ALEN);
- memcpy(batman_ogm_packet->prev_sender,
+ memcpy(batadv_ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr, ETH_ALEN);
}
-static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+static void
+batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->header.ttl = TTL;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
+ batadv_ogm_packet->header.ttl = BATADV_TTL;
}
/* when do we schedule our own ogm to be sent */
-static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
+static unsigned long
+batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
{
- return jiffies + msecs_to_jiffies(
- atomic_read(&bat_priv->orig_interval) -
- JITTER + (random32() % 2*JITTER));
+ unsigned int msecs;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+ msecs += (random32() % 2 * BATADV_JITTER);
+
+ return jiffies + msecs_to_jiffies(msecs);
}
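
/* Note on the jitter expression above: C precedence makes
 * "random32() % 2 * BATADV_JITTER" parse as
 * "(random32() % 2) * BATADV_JITTER", so the added jitter is either 0
 * or exactly BATADV_JITTER rather than uniform over
 * [0, 2 * BATADV_JITTER); the rewrite preserves the old expression's
 * behaviour, including that quirk.
 */
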
/* when do we schedule a ogm packet to be sent */
-static unsigned long bat_iv_ogm_fwd_send_time(void)
+static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
- return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+ return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
+static uint8_t batadv_hop_penalty(uint8_t tq,
+ const struct batadv_priv *bat_priv)
{
int hop_penalty = atomic_read(&bat_priv->hop_penalty);
- return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
+ int new_tq;
+
+ new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
+ new_tq /= BATADV_TQ_MAX_VALUE;
+
+ return new_tq;
}
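
/* Worked example of the hop penalty above, assuming the kernel's
 * BATADV_TQ_MAX_VALUE of 255 and, say, a configured hop_penalty of 10:
 *
 *	new_tq = 200 * (255 - 10) / 255 = 192
 *
 * i.e. each forwarding hop scales the advertised transmit quality by
 * (255 - hop_penalty) / 255, with the integer division rounding down.
 */
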
/* is there another aggregated packet here? */
-static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- int tt_num_changes)
+static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
{
- int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
+ int next_buff_pos = 0;
+
+ next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+ next_buff_pos += batadv_tt_len(tt_num_changes);
return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= MAX_AGGREGATION_BYTES);
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
}
/* send a batman ogm to a given interface */
-static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
+static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+ struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
return;
packet_num = 0;
buff_pos = 0;
- batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
/* adjust all flags and log packets */
- while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batman_ogm_packet->tt_num_changes)) {
+ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batadv_ogm_packet->tt_num_changes)) {
/* we might have aggregated direct link packets with an
- * ordinary base packet */
+ * ordinary base packet
+ */
if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
(forw_packet->if_incoming == hard_iface))
- batman_ogm_packet->flags |= DIRECTLINK;
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
- batman_ogm_packet->flags &= ~DIRECTLINK;
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
"Sending own" :
"Forwarding"));
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
- fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_ogm_packet->orig,
- ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
- (batman_ogm_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_ogm_packet->ttvn, hard_iface->net_dev->name,
- hard_iface->net_dev->dev_addr);
-
- buff_pos += BATMAN_OGM_HLEN +
- tt_len(batman_ogm_packet->tt_num_changes);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
+ batadv_ogm_packet->orig,
+ ntohl(batadv_ogm_packet->seqno),
+ batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
+ "on" : "off"),
+ batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ buff_pos += BATADV_OGM_HLEN;
+ buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
packet_num++;
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(forw_packet->skb->data + buff_pos);
}
/* create clone because function is called more than once */
skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, hard_iface, broadcast_addr);
+ if (skb) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+ skb->len + ETH_HLEN);
+ batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+ }
}
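
/* Note on the counters added above: BATADV_CNT_MGMT_TX counts OGMs
 * handed to the hard interface and BATADV_CNT_MGMT_TX_BYTES their size;
 * ETH_HLEN is included because, as far as this hunk shows,
 * batadv_send_skb_packet() prepends the Ethernet header only after
 * this point.
 */
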
/* send a batman ogm packet */
-static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
+static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char directlink;
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(forw_packet->skb->data);
- directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+ directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
pr_err("Error - can't forward packet: incoming iface not specified\n");
@@ -222,31 +240,33 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
soft_iface = forw_packet->if_incoming->soft_iface;
bat_priv = netdev_priv(soft_iface);
- if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+ if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- /* multihomed peer assumed */
- /* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
+ /* multihomed peer assumed
+ * non-primary OGMs are only broadcasted on their interface
+ */
+ if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- batman_ogm_packet->orig,
- ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->header.ttl,
- forw_packet->if_incoming->net_dev->name,
- forw_packet->if_incoming->net_dev->dev_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
+ (forw_packet->own ? "Sending own" : "Forwarding"),
+ batadv_ogm_packet->orig,
+ ntohl(batadv_ogm_packet->seqno),
+ batadv_ogm_packet->header.ttl,
+ forw_packet->if_incoming->net_dev->name,
+ forw_packet->if_incoming->net_dev->dev_addr);
/* skb is only used once and than forw_packet is free'd */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
+ batadv_send_skb_packet(forw_packet->skb,
+ forw_packet->if_incoming,
+ batadv_broadcast_addr);
forw_packet->skb = NULL;
goto out;
@@ -254,70 +274,70 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
/* broadcast on every interface */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
- bat_iv_ogm_send_to_if(forw_packet, hard_iface);
+ batadv_iv_ogm_send_to_if(forw_packet, hard_iface);
}
rcu_read_unlock();
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/* return true if new_packet can be aggregated with forw_packet */
-static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
- *new_batman_ogm_packet,
- struct bat_priv *bat_priv,
- int packet_len, unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
+static bool
+batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
+ struct batadv_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct batadv_hard_iface *if_incoming,
+ const struct batadv_forw_packet *forw_packet)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
int aggregated_bytes = forw_packet->packet_len + packet_len;
- struct hard_iface *primary_if = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
bool res = false;
+ unsigned long aggregation_end_time;
- batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+ aggregation_end_time = send_time;
+ aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
- /**
- * we can aggregate the current packet to this aggregated packet
+ /* we can aggregate the current packet to this aggregated packet
* if:
*
* - the send time is within our MAX_AGGREGATION_MS time
* - the resulting packet wont be bigger than
* MAX_AGGREGATION_BYTES
*/
-
if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
- forw_packet->send_time) &&
- (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
+ time_after_eq(aggregation_end_time, forw_packet->send_time) &&
+ (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
- /**
- * check aggregation compatibility
+ /* check aggregation compatibility
* -> direct link packets are broadcasted on
* their interface only
* -> aggregate packet if the current packet is
* a "global" packet as well as the base
* packet
*/
-
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* packets without direct link flag and high TTL
- * are flooded through the net */
+ * are flooded through the net
+ */
if ((!directlink) &&
- (!(batman_ogm_packet->flags & DIRECTLINK)) &&
- (batman_ogm_packet->header.ttl != 1) &&
+ (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
+ (batadv_ogm_packet->header.ttl != 1) &&
/* own packets originating non-primary
- * interfaces leave only that interface */
+ * interfaces leave only that interface
+ */
((!forw_packet->own) ||
(forw_packet->if_incoming == primary_if))) {
res = true;
@@ -325,15 +345,17 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
}
/* if the incoming packet is sent via this one
- * interface only - we still can aggregate */
+ * interface only - we still can aggregate
+ */
if ((directlink) &&
- (new_batman_ogm_packet->header.ttl == 1) &&
+ (new_bat_ogm_packet->header.ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
* own secondary interface packets
- * (= secondary interface packets in general) */
- (batman_ogm_packet->flags & DIRECTLINK ||
+ * (= secondary interface packets in general)
+ */
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
(forw_packet->own &&
forw_packet->if_incoming != primary_if))) {
res = true;
@@ -343,29 +365,30 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return res;
}
/* create a new aggregated packet and add this packet to it */
-static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
+static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct batadv_hard_iface *if_incoming,
+ int own_packet)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct forw_packet *forw_packet_aggr;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_forw_packet *forw_packet_aggr;
unsigned char *skb_buff;
+ unsigned int skb_size;
if (!atomic_inc_not_zero(&if_incoming->refcount))
return;
/* own packet should always be scheduled */
if (!own_packet) {
- if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
+ if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "batman packet queue full\n");
goto out;
}
}
@@ -378,12 +401,12 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
}
if ((atomic_read(&bat_priv->aggregated_ogms)) &&
- (packet_len < MAX_AGGREGATION_BYTES))
- forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- ETH_HLEN);
+ (packet_len < BATADV_MAX_AGGREGATION_BYTES))
+ skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
+ skb_size = packet_len + ETH_HLEN;
+ forw_packet_aggr->skb = dev_alloc_skb(skb_size);
if (!forw_packet_aggr->skb) {
if (!own_packet)
atomic_inc(&bat_priv->batman_queue_left);
@@ -401,7 +424,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
forw_packet_aggr->own = own_packet;
forw_packet_aggr->if_incoming = if_incoming;
forw_packet_aggr->num_packets = 0;
- forw_packet_aggr->direct_link_flags = NO_FLAGS;
+ forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
forw_packet_aggr->send_time = send_time;
/* save packet direct link flag status */
@@ -415,20 +438,20 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_ogm_packet);
- queue_delayed_work(bat_event_workqueue,
+ batadv_send_outstanding_bat_ogm_packet);
+ queue_delayed_work(batadv_event_workqueue,
&forw_packet_aggr->delayed_work,
send_time - jiffies);
return;
out:
- hardif_free_ref(if_incoming);
+ batadv_hardif_free_ref(if_incoming);
}
/* aggregate a new packet into the existing ogm packet */
-static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff,
- int packet_len, bool direct_link)
+static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
{
unsigned char *skb_buff;
@@ -443,22 +466,25 @@ static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
(1 << forw_packet_aggr->num_packets);
}
-static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
- unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming,
- int own_packet, unsigned long send_time)
+static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len,
+ struct batadv_hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
{
- /**
- * _aggr -> pointer to the packet we want to aggregate with
+ /* _aggr -> pointer to the packet we want to aggregate with
* _pos -> pointer to the position in the queue
*/
- struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
+ struct batadv_forw_packet *forw_packet_aggr = NULL;
+ struct batadv_forw_packet *forw_packet_pos = NULL;
struct hlist_node *tmp_node;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
bool direct_link;
+ unsigned long max_aggregation_jiffies;
- batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
- direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
+ direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
+ max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
/* find position for the packet in the forward queue */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
@@ -466,11 +492,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
- if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
- bat_priv, packet_len,
- send_time, direct_link,
- if_incoming,
- forw_packet_pos)) {
+ if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
forw_packet_aggr = forw_packet_pos;
break;
}
@@ -478,42 +504,41 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
}
/* nothing to aggregate with - either aggregation disabled or no
- * suitable aggregation packet found */
+ * suitable aggregation packet found
+ */
if (!forw_packet_aggr) {
/* the following section can run without the lock */
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- /**
- * if we could not aggregate this packet with one of the others
+ /* if we could not aggregate this packet with one of the others
* we hold it back for a while, so that it might be aggregated
* later on
*/
- if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregated_ogms)))
- send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
+ if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
+ send_time += max_aggregation_jiffies;
- bat_iv_ogm_aggregate_new(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
+ batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
} else {
- bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
- packet_len, direct_link);
+ batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
+ packet_len, direct_link);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}
-static void bat_iv_ogm_forward(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- bool is_single_hop_neigh,
- bool is_from_best_next_hop,
- struct hard_iface *if_incoming)
+static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ bool is_single_hop_neigh,
+ bool is_from_best_next_hop,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
uint8_t tt_num_changes;
- if (batman_ogm_packet->header.ttl <= 1) {
- bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ if (batadv_ogm_packet->header.ttl <= 1) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -525,110 +550,113 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
* simply drop the ogm.
*/
if (is_single_hop_neigh)
- batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
+ batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
else
return;
}
- tt_num_changes = batman_ogm_packet->tt_num_changes;
+ tt_num_changes = batadv_ogm_packet->tt_num_changes;
- batman_ogm_packet->header.ttl--;
- memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
+ batadv_ogm_packet->header.ttl--;
+ memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* apply hop penalty */
- batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
+ batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
+ bat_priv);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq: %i, ttl: %i\n",
- batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
-
- batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
- batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq: %i, ttl: %i\n",
+ batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
/* switch of primaries first hop flag when forwarding */
- batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
+ batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
if (is_single_hop_neigh)
- batman_ogm_packet->flags |= DIRECTLINK;
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
- batman_ogm_packet->flags &= ~DIRECTLINK;
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
- bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
- BATMAN_OGM_HLEN + tt_len(tt_num_changes),
- if_incoming, 0, bat_iv_ogm_fwd_send_time());
+ batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
+ BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+ if_incoming, 0, batadv_iv_ogm_fwd_send_time());
}
-static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
- int tt_num_changes)
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *primary_if;
- int vis_server;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *primary_if;
+ int vis_server, tt_num_changes = 0;
vis_server = atomic_read(&bat_priv->vis_mode);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (hard_iface == primary_if)
+ tt_num_changes = batadv_tt_append_diff(bat_priv,
+ &hard_iface->packet_buff,
+ &hard_iface->packet_len,
+ BATADV_OGM_HLEN);
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */
- batman_ogm_packet->seqno =
+ batadv_ogm_packet->seqno =
htonl((uint32_t)atomic_read(&hard_iface->seqno));
+ atomic_inc(&hard_iface->seqno);
- batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batman_ogm_packet->tt_crc = htons((uint16_t)
- atomic_read(&bat_priv->tt_crc));
+ batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
if (tt_num_changes >= 0)
- batman_ogm_packet->tt_num_changes = tt_num_changes;
+ batadv_ogm_packet->tt_num_changes = tt_num_changes;
- if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_ogm_packet->flags |= VIS_SERVER;
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
+ batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
else
- batman_ogm_packet->flags &= ~VIS_SERVER;
+ batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
if ((hard_iface == primary_if) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
- batman_ogm_packet->gw_flags =
+ (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
+ batadv_ogm_packet->gw_flags =
(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
else
- batman_ogm_packet->gw_flags = NO_FLAGS;
-
- atomic_inc(&hard_iface->seqno);
+ batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
- slide_own_bcast_window(hard_iface);
- bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
- hard_iface->packet_len, hard_iface, 1,
- bat_iv_ogm_emit_send_time(bat_priv));
+ batadv_slide_own_bcast_window(hard_iface);
+ batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ batadv_iv_ogm_emit_send_time(bat_priv));
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
- *batman_ogm_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff,
- int is_duplicate)
+static void
+batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ const unsigned char *tt_buff,
+ int is_duplicate)
{
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- struct neigh_node *router = NULL;
- struct orig_node *orig_node_tmp;
+ struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_orig_node *orig_node_tmp;
struct hlist_node *node;
uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+ uint8_t *neigh_addr;
- bat_dbg(DBG_BATMAN, bat_priv,
- "update_originator(): Searching and updating originator entry of received packet\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "update_originator(): Searching and updating originator entry of received packet\n");
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming) &&
- atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+ neigh_addr = tmp_neigh_node->addr;
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ tmp_neigh_node->if_incoming == if_incoming &&
+ atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
neigh_node = tmp_neigh_node;
continue;
}
@@ -637,53 +665,55 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock);
- ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
+ batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
+ &tmp_neigh_node->tq_index, 0);
tmp_neigh_node->tq_avg =
- ring_buffer_avg(tmp_neigh_node->tq_recv);
+ batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
}
if (!neigh_node) {
- struct orig_node *orig_tmp;
+ struct batadv_orig_node *orig_tmp;
- orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
+ orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;
- neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
- orig_node, orig_tmp,
- batman_ogm_packet->seqno);
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ ethhdr->h_source,
+ orig_node, orig_tmp,
+ batadv_ogm_packet->seqno);
- orig_node_free_ref(orig_tmp);
+ batadv_orig_node_free_ref(orig_tmp);
if (!neigh_node)
goto unlock;
} else
- bat_dbg(DBG_BATMAN, bat_priv,
- "Updating existing last-hop neighbor of originator\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Updating existing last-hop neighbor of originator\n");
rcu_read_unlock();
- orig_node->flags = batman_ogm_packet->flags;
+ orig_node->flags = batadv_ogm_packet->flags;
neigh_node->last_seen = jiffies;
spin_lock_bh(&neigh_node->lq_update_lock);
- ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
- batman_ogm_packet->tq);
- neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
+ batadv_ring_buffer_set(neigh_node->tq_recv,
+ &neigh_node->tq_index,
+ batadv_ogm_packet->tq);
+ neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
spin_unlock_bh(&neigh_node->lq_update_lock);
if (!is_duplicate) {
- orig_node->last_ttl = batman_ogm_packet->header.ttl;
- neigh_node->last_ttl = batman_ogm_packet->header.ttl;
+ orig_node->last_ttl = batadv_ogm_packet->header.ttl;
+ neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
}
- bonding_candidate_add(orig_node, neigh_node);
+ batadv_bonding_candidate_add(orig_node, neigh_node);
/* if this neighbor already is our next hop there is nothing
- * to change */
- router = orig_node_get_router(orig_node);
+ * to change
+ */
+ router = batadv_orig_node_get_router(orig_node);
if (router == neigh_node)
goto update_tt;
@@ -692,7 +722,8 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
goto update_tt;
/* if the TQ is the same and the link not more symmetric we
- * won't consider it either */
+ * won't consider it either
+ */
if (router && (neigh_node->tq_avg == router->tq_avg)) {
orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
@@ -710,30 +741,31 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
goto update_tt;
}
- update_route(bat_priv, orig_node, neigh_node);
+ batadv_update_route(bat_priv, orig_node, neigh_node);
update_tt:
/* I have to check for transtable changes only if the OGM has been
- * sent through a primary interface */
- if (((batman_ogm_packet->orig != ethhdr->h_source) &&
- (batman_ogm_packet->header.ttl > 2)) ||
- (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
- tt_update_orig(bat_priv, orig_node, tt_buff,
- batman_ogm_packet->tt_num_changes,
- batman_ogm_packet->ttvn,
- batman_ogm_packet->tt_crc);
+ * sent through a primary interface
+ */
+ if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
+ (batadv_ogm_packet->header.ttl > 2)) ||
+ (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
+ batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
+ batadv_ogm_packet->tt_num_changes,
+ batadv_ogm_packet->ttvn,
+ ntohs(batadv_ogm_packet->tt_crc));
- if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
- gw_node_update(bat_priv, orig_node,
- batman_ogm_packet->gw_flags);
+ if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
+ batadv_gw_node_update(bat_priv, orig_node,
+ batadv_ogm_packet->gw_flags);
- orig_node->gw_flags = batman_ogm_packet->gw_flags;
+ orig_node->gw_flags = batadv_ogm_packet->gw_flags;
/* restart gateway selection if fast or late switching was enabled */
if ((orig_node->gw_flags) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
+ (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
(atomic_read(&bat_priv->gw_sel_class) > 2))
- gw_check_election(bat_priv, orig_node);
+ batadv_gw_check_election(bat_priv, orig_node);
goto out;
@@ -741,29 +773,32 @@ unlock:
rcu_read_unlock();
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
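The tq_recv ring buffer touched above holds the most recent TQ samples per neighbor: batadv_ring_buffer_set() overwrites the oldest slot, and batadv_ring_buffer_avg() averages the filled slots. A sketch of that pair, assuming the ring_buffer.c semantics of this series (the window-size constant and the skipping of empty slots are assumptions):

	/* illustrative restatement of the ring buffer helpers */
	static void example_ring_set(uint8_t lq_recv[], uint8_t *lq_index,
				     uint8_t value)
	{
		lq_recv[*lq_index] = value;
		*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
	}

	static uint8_t example_ring_avg(const uint8_t lq_recv[])
	{
		uint16_t sum = 0, count = 0;
		unsigned int i;

		for (i = 0; i < BATADV_TQ_GLOBAL_WINDOW_SIZE; i++) {
			if (lq_recv[i] == 0)
				continue;	/* empty slots don't dilute the average */
			sum += lq_recv[i];
			count++;
		}

		return count ? sum / count : 0;
	}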
-static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_ogm_packet *batman_ogm_packet,
- struct hard_iface *if_incoming)
+static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
struct hlist_node *node;
uint8_t total_count;
- uint8_t orig_eq_count, neigh_rq_count, tq_own;
- int tq_asym_penalty, ret = 0;
+ uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+ unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
+ int tq_asym_penalty, inv_asym_penalty, ret = 0;
+ unsigned int combined_tq;
/* find corresponding one hop neighbor */
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_neigh_node->neigh_list, list) {
- if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
+ if (!batadv_compare_eth(tmp_neigh_node->addr,
+ orig_neigh_node->orig))
continue;
if (tmp_neigh_node->if_incoming != if_incoming)
@@ -778,11 +813,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
rcu_read_unlock();
if (!neigh_node)
- neigh_node = bat_iv_ogm_neigh_new(if_incoming,
- orig_neigh_node->orig,
- orig_neigh_node,
- orig_neigh_node,
- batman_ogm_packet->seqno);
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ orig_neigh_node->orig,
+ orig_neigh_node,
+ orig_neigh_node,
+ batadv_ogm_packet->seqno);
if (!neigh_node)
goto out;
@@ -803,47 +838,52 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
total_count = (orig_eq_count > neigh_rq_count ?
neigh_rq_count : orig_eq_count);
- /* if we have too few packets (too less data) we set tq_own to zero */
- /* if we receive too few packets it is not considered bidirectional */
- if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
- (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+	/* if we have too few packets (too little data) we set tq_own to zero
+ * if we receive too few packets it is not considered bidirectional
+ */
+ if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
+ neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
tq_own = 0;
else
/* neigh_node->real_packet_count is never zero as we
* only purge old information when getting new
- * information */
- tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
+ * information
+ */
+ tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE; this does
* affect the nearly-symmetric links only a little, but
* punishes asymmetric links more. This will give a value
* between 0 and TQ_MAX_VALUE
*/
- tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE);
-
- batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
- * tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
- orig_node->orig, orig_neigh_node->orig, total_count,
- neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
+ neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
+ neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
+ neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE;
+ inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
+ inv_asym_penalty /= neigh_rq_max_cube;
+ tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
+
+ combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty;
+ combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE;
+ batadv_ogm_packet->tq = combined_tq;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
+ orig_node->orig, orig_neigh_node->orig, total_count,
+ neigh_rq_count, tq_own,
+ tq_asym_penalty, batadv_ogm_packet->tq);
/* if link has the minimum required transmission quality
- * consider it bidirectional */
- if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
+ * consider it bidirectional
+ */
+ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
ret = 1;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
return ret;
}
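The split-up penalty arithmetic above is easier to follow with concrete numbers. A worked example assuming the defaults of this era, BATADV_TQ_LOCAL_WINDOW_SIZE = 64 and BATADV_TQ_MAX_VALUE = 255 (both values are assumptions here, hard-coded for readability):

	/* illustrative only: a neighbor heard in 48 of 64 window slots */
	static unsigned int example_combined_tq(unsigned int tq,
						unsigned int tq_own,
						unsigned int neigh_rq_count)
	{
		unsigned int inv = 64 - neigh_rq_count;			/* 48 -> 16 */
		unsigned int inv_cube = inv * inv * inv;		/* 4096     */
		unsigned int max_cube = 64 * 64 * 64;			/* 262144   */
		unsigned int inv_penalty = 255 * inv_cube / max_cube;	/* -> 3     */
		unsigned int asym_penalty = 255 - inv_penalty;		/* -> 252   */

		/* with tq = tq_own = 255 the result is 252: mildly
		 * asymmetric links lose almost nothing
		 */
		return tq * tq_own * asym_penalty / (255 * 255);
	}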
@@ -855,90 +895,94 @@ out:
* -1 the packet is old and has been received while the seqno window
* was protected. Caller should drop it.
*/
-static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
- *batman_ogm_packet,
- const struct hard_iface *if_incoming)
+static int
+batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ const struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *tmp_neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *tmp_neigh_node;
struct hlist_node *node;
int is_duplicate = 0;
int32_t seq_diff;
int need_update = 0;
int set_mark, ret = -1;
+ uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
+ uint8_t *neigh_addr;
- orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return 0;
spin_lock_bh(&orig_node->ogm_cnt_lock);
- seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
+ seq_diff = seqno - orig_node->last_real_seqno;
	/* signal to the caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
- window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
+ batadv_window_protected(bat_priv, seq_diff,
+ &orig_node->batman_seqno_reset))
goto out;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
- is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_ogm_packet->seqno);
+ is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ seqno);
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming))
+ neigh_addr = tmp_neigh_node->addr;
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ tmp_neigh_node->if_incoming == if_incoming)
set_mark = 1;
else
set_mark = 0;
/* if the window moved, set the update flag. */
- need_update |= bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
- seq_diff, set_mark);
+ need_update |= batadv_bit_get_packet(bat_priv,
+ tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
bitmap_weight(tmp_neigh_node->real_bits,
- TQ_LOCAL_WINDOW_SIZE);
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
}
rcu_read_unlock();
if (need_update) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %u, new %u\n",
- orig_node->last_real_seqno, batman_ogm_packet->seqno);
- orig_node->last_real_seqno = batman_ogm_packet->seqno;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "updating last_seqno: old %u, new %u\n",
+ orig_node->last_real_seqno, seqno);
+ orig_node->last_real_seqno = seqno;
}
ret = is_duplicate;
out:
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
+static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ const unsigned char *tt_buff,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct hard_iface *hard_iface;
- struct orig_node *orig_neigh_node, *orig_node;
- struct neigh_node *router = NULL, *router_router = NULL;
- struct neigh_node *orig_neigh_router = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_orig_node *orig_neigh_node, *orig_node;
+ struct batadv_neigh_node *router = NULL, *router_router = NULL;
+ struct batadv_neigh_node *orig_neigh_router = NULL;
int has_directlink_flag;
int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional;
+ int is_broadcast = 0, is_bidirect;
bool is_single_hop_neigh = false;
bool is_from_best_next_hop = false;
- int is_duplicate;
+	int is_duplicate, sameseq, similar_ttl;
uint32_t if_incoming_seqno;
+ uint8_t *prev_sender;
/* Silently drop when the batman packet is actually not a
* correct packet.
@@ -948,49 +992,53 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
* it as an additional length.
*
* TODO: A more sane solution would be to have a bit in the
- * batman_ogm_packet to detect whether the packet is the last
+ * batadv_ogm_packet to detect whether the packet is the last
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
+ if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
if_incoming_seqno = atomic_read(&if_incoming->seqno);
- has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+ if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
+ has_directlink_flag = 1;
+ else
+ has_directlink_flag = 0;
- if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
+ if (batadv_compare_eth(ethhdr->h_source, batadv_ogm_packet->orig))
is_single_hop_neigh = true;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->net_dev->name,
- if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
- batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
- batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
- batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
- batman_ogm_packet->header.ttl,
- batman_ogm_packet->header.version, has_directlink_flag);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
+ batadv_ogm_packet->prev_sender,
+ ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
+ ntohs(batadv_ogm_packet->tt_crc),
+ batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+ batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->header.version, has_directlink_flag);
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != if_incoming->soft_iface)
continue;
- if (compare_eth(ethhdr->h_source,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(ethhdr->h_source,
+ hard_iface->net_dev->dev_addr))
is_my_addr = 1;
- if (compare_eth(batman_ogm_packet->orig,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(batadv_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr))
is_my_orig = 1;
- if (compare_eth(batman_ogm_packet->prev_sender,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(batadv_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr))
is_my_oldorig = 1;
if (is_broadcast_ether_addr(ethhdr->h_source))
@@ -998,268 +1046,278 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
}
rcu_read_unlock();
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
+ if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batadv_ogm_packet->header.version);
return;
}
if (is_my_addr) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: received my own broadcast (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
if (is_my_orig) {
unsigned long *word;
int offset;
+ int32_t bit_pos;
+ int16_t if_num;
+ uint8_t *weight;
- orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
+ orig_neigh_node = batadv_get_orig_node(bat_priv,
+ ethhdr->h_source);
if (!orig_neigh_node)
return;
/* neighbor has to indicate direct link and it has to
- * come via the corresponding interface */
- /* save packet seqno for bidirectional check */
+ * come via the corresponding interface
+ * save packet seqno for bidirectional check
+ */
if (has_directlink_flag &&
- compare_eth(if_incoming->net_dev->dev_addr,
- batman_ogm_packet->orig)) {
- offset = if_incoming->if_num * NUM_WORDS;
+ batadv_compare_eth(if_incoming->net_dev->dev_addr,
+ batadv_ogm_packet->orig)) {
+ if_num = if_incoming->if_num;
+ offset = if_num * BATADV_NUM_WORDS;
spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
word = &(orig_neigh_node->bcast_own[offset]);
- bat_set_bit(word,
- if_incoming_seqno -
- batman_ogm_packet->seqno - 2);
- orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
+ bit_pos = if_incoming_seqno - 2;
+ bit_pos -= ntohl(batadv_ogm_packet->seqno);
+ batadv_set_bit(word, bit_pos);
+ weight = &orig_neigh_node->bcast_own_sum[if_num];
+ *weight = bitmap_weight(word,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
}
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet from myself (via neighbor)\n");
- orig_node_free_ref(orig_neigh_node);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from myself (via neighbor)\n");
+ batadv_orig_node_free_ref(orig_neigh_node);
return;
}
if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
- if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
- ethhdr->h_source);
+ if (batadv_ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
- orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return;
- is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
- if_incoming);
+ is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+ if_incoming);
if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: packet within seqno protection time (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
- if (batman_ogm_packet->tq == 0) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet with tq equal 0\n");
+ if (batadv_ogm_packet->tq == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with tq equal 0\n");
goto out;
}
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (router)
- router_router = orig_node_get_router(router->orig_node);
+ router_router = batadv_orig_node_get_router(router->orig_node);
if ((router && router->tq_avg != 0) &&
- (compare_eth(router->addr, ethhdr->h_source)))
+ (batadv_compare_eth(router->addr, ethhdr->h_source)))
is_from_best_next_hop = true;
+ prev_sender = batadv_ogm_packet->prev_sender;
/* avoid temporary routing loops */
if (router && router_router &&
- (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
- !(compare_eth(batman_ogm_packet->orig,
- batman_ogm_packet->prev_sender)) &&
- (compare_eth(router->addr, router_router->addr))) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
- ethhdr->h_source);
+ (batadv_compare_eth(router->addr, prev_sender)) &&
+ !(batadv_compare_eth(batadv_ogm_packet->orig, prev_sender)) &&
+ (batadv_compare_eth(router->addr, router_router->addr))) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
/* if sender is a direct neighbor the sender mac equals
- * originator mac */
+ * originator mac
+ */
orig_neigh_node = (is_single_hop_neigh ?
orig_node :
- get_orig_node(bat_priv, ethhdr->h_source));
+ batadv_get_orig_node(bat_priv, ethhdr->h_source));
if (!orig_neigh_node)
goto out;
- orig_neigh_router = orig_node_get_router(orig_neigh_node);
+ orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node);
/* drop packet if sender is not a direct neighbor and if we
- * don't route towards it */
+ * don't route towards it
+ */
if (!is_single_hop_neigh && (!orig_neigh_router)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: OGM via unknown neighbor!\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
goto out_neigh;
}
- is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
- batman_ogm_packet, if_incoming);
+ is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
+ batadv_ogm_packet, if_incoming);
- bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
+ batadv_bonding_save_primary(orig_node, orig_neigh_node,
+ batadv_ogm_packet);
/* update ranking if it is not a duplicate or has the same
- * seqno and similar ttl as the non-duplicate */
- if (is_bidirectional &&
- (!is_duplicate ||
- ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
- bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
- batman_ogm_packet, if_incoming,
- tt_buff, is_duplicate);
+ * seqno and similar ttl as the non-duplicate
+ */
+ sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
+	similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+	if (is_bidirect && (!is_duplicate || (sameseq && similar_ttl)))
+ batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batadv_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
/* mark direct link on incoming interface */
- bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- is_single_hop_neigh, is_from_best_next_hop,
- if_incoming);
+ batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
+ is_single_hop_neigh,
+ is_from_best_next_hop, if_incoming);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
goto out_neigh;
}
/* multihop originator */
- if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: not received via bidirectional link\n");
+ if (!is_bidirect) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: not received via bidirectional link\n");
goto out_neigh;
}
if (is_duplicate) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: duplicate packet received\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
goto out_neigh;
}
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast originator packet\n");
- bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- is_single_hop_neigh, is_from_best_next_hop,
- if_incoming);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast originator packet\n");
+ batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
- orig_node_free_ref(orig_neigh_node);
+ batadv_orig_node_free_ref(orig_neigh_node);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (router_router)
- neigh_node_free_ref(router_router);
+ batadv_neigh_node_free_ref(router_router);
if (orig_neigh_router)
- neigh_node_free_ref(orig_neigh_router);
+ batadv_neigh_node_free_ref(orig_neigh_router);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-static int bat_iv_ogm_receive(struct sk_buff *skb,
- struct hard_iface *if_incoming)
+static int batadv_iv_ogm_receive(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm_packet *batadv_ogm_packet;
struct ethhdr *ethhdr;
int buff_pos = 0, packet_len;
unsigned char *tt_buff, *packet_buff;
bool ret;
- ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
+ ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!ret)
return NET_RX_DROP;
/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
	* that does not have B.A.T.M.A.N. IV enabled?
*/
- if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
+ if (bat_priv->bat_algo_ops->bat_ogm_emit != batadv_iv_ogm_emit)
return NET_RX_DROP;
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
packet_len = skb_headlen(skb);
ethhdr = (struct ethhdr *)skb_mac_header(skb);
packet_buff = skb->data;
- batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
/* unpack the aggregated packets and process them one by one */
do {
- /* network to host order for our 32bit seqno and the
- orig_interval */
- batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
- batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
-
- tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
+ tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
- bat_iv_ogm_process(ethhdr, batman_ogm_packet,
- tt_buff, if_incoming);
+ batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
+ if_incoming);
- buff_pos += BATMAN_OGM_HLEN +
- tt_len(batman_ogm_packet->tt_num_changes);
+ buff_pos += BATADV_OGM_HLEN;
+ buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(packet_buff + buff_pos);
- } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
- batman_ogm_packet->tt_num_changes));
+ } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
+ batadv_ogm_packet->tt_num_changes));
kfree_skb(skb);
return NET_RX_SUCCESS;
}
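Each OGM inside an aggregate is the fixed header followed by its TT change records, so the unpack loop above advances by a per-packet stride. A sketch of that stride, under the assumption that batadv_tt_len() is simply tt_num_changes times the size of one struct batadv_tt_change (as in translation-table.c of this series):

	/* illustrative only: byte offset from one aggregated OGM to the next */
	static size_t example_ogm_stride(uint8_t tt_num_changes)
	{
		return BATADV_OGM_HLEN +
		       tt_num_changes * sizeof(struct batadv_tt_change);
	}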
-static struct bat_algo_ops batman_iv __read_mostly = {
- .name = "BATMAN IV",
- .bat_iface_enable = bat_iv_ogm_iface_enable,
- .bat_iface_disable = bat_iv_ogm_iface_disable,
- .bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
- .bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
- .bat_ogm_schedule = bat_iv_ogm_schedule,
- .bat_ogm_emit = bat_iv_ogm_emit,
+static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
+ .name = "BATMAN_IV",
+ .bat_iface_enable = batadv_iv_ogm_iface_enable,
+ .bat_iface_disable = batadv_iv_ogm_iface_disable,
+ .bat_iface_update_mac = batadv_iv_ogm_iface_update_mac,
+ .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
+ .bat_ogm_schedule = batadv_iv_ogm_schedule,
+ .bat_ogm_emit = batadv_iv_ogm_emit,
};
-int __init bat_iv_init(void)
+int __init batadv_iv_init(void)
{
int ret;
/* batman originator packet */
- ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
+ ret = batadv_recv_handler_register(BATADV_IV_OGM,
+ batadv_iv_ogm_receive);
if (ret < 0)
goto out;
- ret = bat_algo_register(&batman_iv);
+ ret = batadv_algo_register(&batadv_batman_iv);
if (ret < 0)
goto handler_unregister;
goto out;
handler_unregister:
- recv_handler_unregister(BAT_IV_OGM);
+ batadv_recv_handler_unregister(BATADV_IV_OGM);
out:
return ret;
}
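The init path above is the template every routing algorithm follows: register a receive handler for the algorithm's packet type, then register its batadv_algo_ops, and unwind the handler if the second step fails. A hypothetical skeleton for a second algorithm (BATADV_MY_PKT, my_pkt_receive and my_algo are invented names; my_algo is assumed to be a struct batadv_algo_ops filled in like batadv_batman_iv above):

	int __init my_algo_init(void)
	{
		int ret;

		ret = batadv_recv_handler_register(BATADV_MY_PKT, my_pkt_receive);
		if (ret < 0)
			return ret;

		ret = batadv_algo_register(&my_algo);
		if (ret < 0)
			batadv_recv_handler_unregister(BATADV_MY_PKT);

		return ret;
	}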
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
deleted file mode 100644
index 5bc7b66d32d..00000000000
--- a/net/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,735 +0,0 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "vis.h"
-
-static struct net_device *kobj_to_netdev(struct kobject *obj)
-{
- struct device *dev = container_of(obj->parent, struct device, kobj);
- return to_net_dev(dev);
-}
-
-static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
-{
- struct net_device *net_dev = kobj_to_netdev(obj);
- return netdev_priv(net_dev);
-}
-
-#define UEV_TYPE_VAR "BATTYPE="
-#define UEV_ACTION_VAR "BATACTION="
-#define UEV_DATA_VAR "BATDATA="
-
-static char *uev_action_str[] = {
- "add",
- "del",
- "change"
-};
-
-static char *uev_type_str[] = {
- "gw"
-};
-
-/* Use this, if you have customized show and store functions */
-#define BAT_ATTR(_name, _mode, _show, _store) \
-struct bat_attribute bat_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct bat_priv *bat_priv = netdev_priv(net_dev); \
- return __store_bool_attr(buff, count, _post_func, attr, \
- &bat_priv->_name, net_dev); \
-}
-
-#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
- return sprintf(buff, "%s\n", \
- atomic_read(&bat_priv->_name) == 0 ? \
- "disabled" : "enabled"); \
-} \
-
-/* Use this, if you are going to turn a [name] in the soft-interface
- * (bat_priv) on or off */
-#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
- static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
- static BAT_ATTR_SIF_SHOW_BOOL(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct bat_priv *bat_priv = netdev_priv(net_dev); \
- return __store_uint_attr(buff, count, _min, _max, _post_func, \
- attr, &bat_priv->_name, net_dev); \
-}
-
-#define BAT_ATTR_SIF_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
- return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
-} \
-
-/* Use this, if you are going to set [name] in the soft-interface
- * (bat_priv) to an unsigned integer value */
-#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_SIF_SHOW_UINT(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
- ssize_t length; \
- \
- if (!hard_iface) \
- return 0; \
- \
- length = __store_uint_attr(buff, count, _min, _max, _post_func, \
- attr, &hard_iface->_name, net_dev); \
- \
- hardif_free_ref(hard_iface); \
- return length; \
-}
-
-#define BAT_ATTR_HIF_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
- ssize_t length; \
- \
- if (!hard_iface) \
- return 0; \
- \
- length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
- \
- hardif_free_ref(hard_iface); \
- return length; \
-}
-
-/* Use this, if you are going to set [name] in hard_iface to an
- * unsigned integer value*/
-#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_HIF_SHOW_UINT(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-static int store_bool_attr(char *buff, size_t count,
- struct net_device *net_dev,
- const char *attr_name, atomic_t *attr)
-{
- int enabled = -1;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if ((strncmp(buff, "1", 2) == 0) ||
- (strncmp(buff, "enable", 7) == 0) ||
- (strncmp(buff, "enabled", 8) == 0))
- enabled = 1;
-
- if ((strncmp(buff, "0", 2) == 0) ||
- (strncmp(buff, "disable", 8) == 0) ||
- (strncmp(buff, "disabled", 9) == 0))
- enabled = 0;
-
- if (enabled < 0) {
- bat_info(net_dev,
- "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == enabled)
- return count;
-
- bat_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
- atomic_read(attr) == 1 ? "enabled" : "disabled",
- enabled == 1 ? "enabled" : "disabled");
-
- atomic_set(attr, (unsigned int)enabled);
- return count;
-}
-
-static inline ssize_t __store_bool_attr(char *buff, size_t count,
- void (*post_func)(struct net_device *),
- struct attribute *attr,
- atomic_t *attr_store, struct net_device *net_dev)
-{
- int ret;
-
- ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
- if (post_func && ret)
- post_func(net_dev);
-
- return ret;
-}
-
-static int store_uint_attr(const char *buff, size_t count,
- struct net_device *net_dev, const char *attr_name,
- unsigned int min, unsigned int max, atomic_t *attr)
-{
- unsigned long uint_val;
- int ret;
-
- ret = kstrtoul(buff, 10, &uint_val);
- if (ret) {
- bat_info(net_dev,
- "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (uint_val < min) {
- bat_info(net_dev, "%s: Value is too small: %lu min: %u\n",
- attr_name, uint_val, min);
- return -EINVAL;
- }
-
- if (uint_val > max) {
- bat_info(net_dev, "%s: Value is too big: %lu max: %u\n",
- attr_name, uint_val, max);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == uint_val)
- return count;
-
- bat_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
-
- atomic_set(attr, uint_val);
- return count;
-}
-
-static inline ssize_t __store_uint_attr(const char *buff, size_t count,
- int min, int max,
- void (*post_func)(struct net_device *),
- const struct attribute *attr,
- atomic_t *attr_store, struct net_device *net_dev)
-{
- int ret;
-
- ret = store_uint_attr(buff, count, net_dev, attr->name,
- min, max, attr_store);
- if (post_func && ret)
- post_func(net_dev);
-
- return ret;
-}
-
-static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int vis_mode = atomic_read(&bat_priv->vis_mode);
-
- return sprintf(buff, "%s\n",
- vis_mode == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-}
-
-static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- unsigned long val;
- int ret, vis_mode_tmp = -1;
-
- ret = kstrtoul(buff, 10, &val);
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
- (strncmp(buff, "client", 6) == 0) ||
- (strncmp(buff, "off", 3) == 0))
- vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
- (strncmp(buff, "server", 6) == 0))
- vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
-
- if (vis_mode_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_info(net_dev,
- "Invalid parameter for 'vis mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
- return count;
-
- bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
- atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-
- atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
- return count;
-}
-
-static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
-}
-
-static void post_gw_deselect(struct net_device *net_dev)
-{
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- gw_deselect(bat_priv);
-}
-
-static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int bytes_written;
-
- switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_CLIENT:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_CLIENT_NAME);
- break;
- case GW_MODE_SERVER:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_SERVER_NAME);
- break;
- default:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_OFF_NAME);
- break;
- }
-
- return bytes_written;
-}
-
-static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- char *curr_gw_mode_str;
- int gw_mode_tmp = -1;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strncmp(buff, GW_MODE_OFF_NAME, strlen(GW_MODE_OFF_NAME)) == 0)
- gw_mode_tmp = GW_MODE_OFF;
-
- if (strncmp(buff, GW_MODE_CLIENT_NAME,
- strlen(GW_MODE_CLIENT_NAME)) == 0)
- gw_mode_tmp = GW_MODE_CLIENT;
-
- if (strncmp(buff, GW_MODE_SERVER_NAME,
- strlen(GW_MODE_SERVER_NAME)) == 0)
- gw_mode_tmp = GW_MODE_SERVER;
-
- if (gw_mode_tmp < 0) {
- bat_info(net_dev,
- "Invalid parameter for 'gw mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
- return count;
-
- switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_CLIENT:
- curr_gw_mode_str = GW_MODE_CLIENT_NAME;
- break;
- case GW_MODE_SERVER:
- curr_gw_mode_str = GW_MODE_SERVER_NAME;
- break;
- default:
- curr_gw_mode_str = GW_MODE_OFF_NAME;
- break;
- }
-
- bat_info(net_dev, "Changing gw mode from: %s to: %s\n",
- curr_gw_mode_str, buff);
-
- gw_deselect(bat_priv);
- atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
- return count;
-}
-
-static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int down, up;
- int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
-
- gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
- return sprintf(buff, "%i%s/%i%s\n",
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
-}
-
-static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- return gw_bandwidth_set(net_dev, buff, count);
-}
-
-BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
-#ifdef CONFIG_BATMAN_ADV_BLA
-BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
-#endif
-BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
-BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
-static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
-static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
-BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
-BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
-BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
- post_gw_deselect);
-static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
- store_gw_bwidth);
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
-#endif
-
-static struct bat_attribute *mesh_attrs[] = {
- &bat_attr_aggregated_ogms,
- &bat_attr_bonding,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &bat_attr_bridge_loop_avoidance,
-#endif
- &bat_attr_fragmentation,
- &bat_attr_ap_isolation,
- &bat_attr_vis_mode,
- &bat_attr_routing_algo,
- &bat_attr_gw_mode,
- &bat_attr_orig_interval,
- &bat_attr_hop_penalty,
- &bat_attr_gw_sel_class,
- &bat_attr_gw_bandwidth,
-#ifdef CONFIG_BATMAN_ADV_DEBUG
- &bat_attr_log_level,
-#endif
- NULL,
-};
-
-int sysfs_add_meshif(struct net_device *dev)
-{
- struct kobject *batif_kobject = &dev->dev.kobj;
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
- int err;
-
- bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
- batif_kobject);
- if (!bat_priv->mesh_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_MESH_SUBDIR);
- goto out;
- }
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(bat_priv->mesh_obj,
- &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-out:
- return -ENOMEM;
-}
-
-void sysfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-}
-
-static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- ssize_t length;
-
- if (!hard_iface)
- return 0;
-
- length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
- "none" : hard_iface->soft_iface->name);
-
- hardif_free_ref(hard_iface);
-
- return length;
-}
-
-static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- int status_tmp = -1;
- int ret = count;
-
- if (!hard_iface)
- return count;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
- buff);
- hardif_free_ref(hard_iface);
- return -EINVAL;
- }
-
- if (strncmp(buff, "none", 4) == 0)
- status_tmp = IF_NOT_IN_USE;
- else
- status_tmp = IF_I_WANT_YOU;
-
- if (hard_iface->if_status == status_tmp)
- goto out;
-
- if ((hard_iface->soft_iface) &&
- (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
- goto out;
-
- if (!rtnl_trylock()) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- if (status_tmp == IF_NOT_IN_USE) {
- hardif_disable_interface(hard_iface);
- goto unlock;
- }
-
- /* if the interface already is in use */
- if (hard_iface->if_status != IF_NOT_IN_USE)
- hardif_disable_interface(hard_iface);
-
- ret = hardif_enable_interface(hard_iface, buff);
-
-unlock:
- rtnl_unlock();
-out:
- hardif_free_ref(hard_iface);
- return ret;
-}
-
-static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- ssize_t length;
-
- if (!hard_iface)
- return 0;
-
- switch (hard_iface->if_status) {
- case IF_TO_BE_REMOVED:
- length = sprintf(buff, "disabling\n");
- break;
- case IF_INACTIVE:
- length = sprintf(buff, "inactive\n");
- break;
- case IF_ACTIVE:
- length = sprintf(buff, "active\n");
- break;
- case IF_TO_BE_ACTIVATED:
- length = sprintf(buff, "enabling\n");
- break;
- case IF_NOT_IN_USE:
- default:
- length = sprintf(buff, "not in use\n");
- break;
- }
-
- hardif_free_ref(hard_iface);
-
- return length;
-}
-
-static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
- show_mesh_iface, store_mesh_iface);
-static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
-
-static struct bat_attribute *batman_attrs[] = {
- &bat_attr_mesh_iface,
- &bat_attr_iface_status,
- NULL,
-};
-
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
-{
- struct kobject *hardif_kobject = &dev->dev.kobj;
- struct bat_attribute **bat_attr;
- int err;
-
- *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
- hardif_kobject);
-
- if (!*hardif_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_BAT_SUBDIR);
- goto out;
- }
-
- for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_BAT_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
-out:
- return -ENOMEM;
-}
-
-void sysfs_del_hardif(struct kobject **hardif_obj)
-{
- kobject_put(*hardif_obj);
- *hardif_obj = NULL;
-}
-
-int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
- enum uev_action action, const char *data)
-{
- int ret = -1;
- struct hard_iface *primary_if = NULL;
- struct kobject *bat_kobj;
- char *uevent_env[4] = { NULL, NULL, NULL, NULL };
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- bat_kobj = &primary_if->soft_iface->dev.kobj;
-
- uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
- strlen(uev_type_str[type]) + 1,
- GFP_ATOMIC);
- if (!uevent_env[0])
- goto out;
-
- sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
-
- uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
- strlen(uev_action_str[action]) + 1,
- GFP_ATOMIC);
- if (!uevent_env[1])
- goto out;
-
- sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
-
- /* If the event is DEL, ignore the data field */
- if (action != UEV_DEL) {
- uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
- strlen(data) + 1, GFP_ATOMIC);
- if (!uevent_env[2])
- goto out;
-
- sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
- }
-
- ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
- kfree(uevent_env[2]);
-
- if (primary_if)
- hardif_free_ref(primary_if);
-
- if (ret)
- bat_dbg(DBG_BATMAN, bat_priv,
- "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
- uev_type_str[type], uev_action_str[action],
- (action == UEV_DEL ? "NULL" : data), ret);
- return ret;
-}
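Because the attribute macros in the deleted file are dense, here is roughly what one invocation, BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL), expanded to (a manual expansion of the macros above, whitespace simplified):

	static ssize_t store_bonding(struct kobject *kobj, struct attribute *attr,
				     char *buff, size_t count)
	{
		struct net_device *net_dev = kobj_to_netdev(kobj);
		struct bat_priv *bat_priv = netdev_priv(net_dev);

		return __store_bool_attr(buff, count, NULL, attr,
					 &bat_priv->bonding, net_dev);
	}

	static ssize_t show_bonding(struct kobject *kobj,
				    struct attribute *attr, char *buff)
	{
		struct bat_priv *bat_priv = kobj_to_batpriv(kobj);

		return sprintf(buff, "%s\n",
			       atomic_read(&bat_priv->bonding) == 0 ?
			       "disabled" : "enabled");
	}

	static struct bat_attribute bat_attr_bonding = {
		.attr = { .name = "bonding", .mode = S_IRUGO | S_IWUSR },
		.show = show_bonding,
		.store = store_bonding,
	};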
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 07ae6e1b8ac..aea174cdbfb 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -25,12 +23,12 @@
#include <linux/bitops.h>
/* shift the packet array by n places. */
-static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
{
- if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+ if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
- bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
+ bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
}
@@ -40,58 +38,57 @@ static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
* 1 if the window was moved (either new or very old)
* 0 if the window was not moved/shifted.
*/
-int bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ int32_t seq_num_diff, int set_mark)
{
- struct bat_priv *bat_priv = priv;
+ struct batadv_priv *bat_priv = priv;
/* sequence number is slightly older. We already got a sequence number
- * higher than this one, so we just mark it. */
-
- if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
+ * higher than this one, so we just mark it.
+ */
+ if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
if (set_mark)
- bat_set_bit(seq_bits, -seq_num_diff);
+ batadv_set_bit(seq_bits, -seq_num_diff);
return 0;
}
/* sequence number is slightly newer, so we shift the window and
- * set the mark if required */
-
- if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
- bat_bitmap_shift_left(seq_bits, seq_num_diff);
+ * set the mark if required
+ */
+ if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
+ batadv_bitmap_shift_left(seq_bits, seq_num_diff);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
/* sequence number is much newer, probably missed a lot of packets */
-
- if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) &&
- (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "We missed a lot of packets (%i) !\n",
- seq_num_diff - 1);
- bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+ if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
+ seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "We missed a lot of packets (%i) !\n",
+ seq_num_diff - 1);
+ bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
/* received a much older packet. The other host either restarted
* or the old packet got delayed somewhere in the network. The
* packet should be dropped without calling this function if the
- * seqno window is protected. */
-
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
- (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
+ * seqno window is protected.
+ */
+ if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
+ seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
-		bat_dbg(DBG_BATMAN, bat_priv,
-			"Other host probably restarted!\n");
-		bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+			   "Other host probably restarted!\n");
+		bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
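Aside from the batadv_ renames, the logic above is the classic sliding seqno window: a bitmap remembers which of the last BATADV_TQ_LOCAL_WINDOW_SIZE sequence numbers were seen, slightly newer packets shift the window left, and out-of-range differences reset it. A minimal standalone sketch of the same bookkeeping, using a single 64-bit word instead of the kernel bitmap helpers (window size and names here are illustrative):

#include <stdint.h>

#define WINDOW_SIZE 64	/* stands in for BATADV_TQ_LOCAL_WINDOW_SIZE */

static uint64_t seq_bits;	/* bit i == seqno (newest - i) was seen */

/* returns 1 if the window moved, 0 if the packet was only marked */
static int window_get_packet(int32_t seq_num_diff, int set_mark)
{
	if (seq_num_diff <= 0 && seq_num_diff > -WINDOW_SIZE) {
		/* slightly older: mark its slot, window stays put */
		if (set_mark)
			seq_bits |= 1ULL << -seq_num_diff;
		return 0;
	}

	if (seq_num_diff > 0 && seq_num_diff < WINDOW_SIZE) {
		/* slightly newer: shift old marks up, mark slot 0 */
		seq_bits <<= seq_num_diff;
		if (set_mark)
			seq_bits |= 1ULL;
		return 1;
	}

	/* much newer or much older: restart with a fresh window */
	seq_bits = set_mark ? 1ULL : 0;
	return 1;
}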
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 1835c15cda4..a081ce1c051 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,39 +15,40 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
/* returns true if the corresponding bit in the given seq_bits is set
- * and curr_seqno is within range of last_seqno */
-static inline int bat_test_bit(const unsigned long *seq_bits,
- uint32_t last_seqno, uint32_t curr_seqno)
+ * and curr_seqno is within range of last_seqno
+ */
+static inline int batadv_test_bit(const unsigned long *seq_bits,
+ uint32_t last_seqno, uint32_t curr_seqno)
{
int32_t diff;
diff = last_seqno - curr_seqno;
- if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+ if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return 0;
else
return test_bit(diff, seq_bits);
}
/* turn corresponding bit on, so we can remember that we got the packet */
-static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
{
/* if too old, just drop it */
- if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+ if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
set_bit(n, seq_bits); /* turn the position on */
}
/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old */
-int bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark);
+ * new, 0 if old
+ */
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ int32_t seq_num_diff, int set_mark);
#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
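How a caller is expected to use this header, as a simplified sketch (the real caller is the OGM duplicate check in bat_iv_ogm.c; the local names here are illustrative):

/* decide whether an incoming seqno is new, updating the window */
static int seqno_is_new(struct batadv_priv *bat_priv,
			unsigned long *seq_bits, uint32_t last_seqno,
			uint32_t curr_seqno)
{
	/* wrap-safe signed difference: > 0 means curr is newer */
	int32_t seq_num_diff = curr_seqno - last_seqno;

	if (batadv_test_bit(seq_bits, last_seqno, curr_seqno))
		return 0;	/* exact duplicate, already marked */

	return batadv_bit_get_packet(bat_priv, seq_bits, seq_num_diff, 1);
}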
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index c5863f49913..6705d35b17c 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -33,14 +31,14 @@
#include <net/arp.h>
#include <linux/if_vlan.h>
-static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
-static void bla_periodic_work(struct work_struct *work);
-static void bla_send_announce(struct bat_priv *bat_priv,
- struct backbone_gw *backbone_gw);
+static void batadv_bla_periodic_work(struct work_struct *work);
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */
-static inline uint32_t choose_claim(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -60,7 +58,8 @@ static inline uint32_t choose_claim(const void *data, uint32_t size)
}
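Both index helpers are truncated by the hunk boundaries here; each folds the {MAC address, VLAN ID} key byte by byte and reduces the result modulo the table size. A sketch of that one-at-a-time fold (the constants follow the Jenkins-style pattern batman-adv uses in its choose functions, but treat the elided body above as authoritative):

static uint32_t choose_index(const unsigned char *key, size_t len,
			     uint32_t size)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}

	/* final avalanche */
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash % size;	/* bucket index */
}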
/* return the index of the backbone gateway */
-static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_backbone_gw(const void *data,
+ uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -81,74 +80,75 @@ static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
/* compares address and vid of two backbone gws */
-static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+static int batadv_compare_backbone_gw(const struct hlist_node *node,
+ const void *data2)
{
- const void *data1 = container_of(node, struct backbone_gw,
+ const void *data1 = container_of(node, struct batadv_backbone_gw,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}
/* compares address and vid of two claims */
-static int compare_claim(const struct hlist_node *node, const void *data2)
+static int batadv_compare_claim(const struct hlist_node *node,
+ const void *data2)
{
- const void *data1 = container_of(node, struct claim,
+ const void *data1 = container_of(node, struct batadv_claim,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}
/* free a backbone gw */
-static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
{
if (atomic_dec_and_test(&backbone_gw->refcount))
kfree_rcu(backbone_gw, rcu);
}
/* finally deinitialize the claim */
-static void claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
- struct claim *claim;
+ struct batadv_claim *claim;
- claim = container_of(rcu, struct claim, rcu);
+ claim = container_of(rcu, struct batadv_claim, rcu);
- backbone_gw_free_ref(claim->backbone_gw);
+ batadv_backbone_gw_free_ref(claim->backbone_gw);
kfree(claim);
}
/* free a claim, call claim_free_rcu if it's the last reference */
-static void claim_free_ref(struct claim *claim)
+static void batadv_claim_free_ref(struct batadv_claim *claim)
{
if (atomic_dec_and_test(&claim->refcount))
- call_rcu(&claim->rcu, claim_free_rcu);
+ call_rcu(&claim->rcu, batadv_claim_free_rcu);
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @data: search data (may be local/static data)
*
* looks for a claim in the hash, and returns it if found
* or NULL otherwise.
*/
-static struct claim *claim_hash_find(struct bat_priv *bat_priv,
- struct claim *data)
+static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
+ struct batadv_claim *data)
{
- struct hashtable_t *hash = bat_priv->claim_hash;
+ struct batadv_hashtable *hash = bat_priv->claim_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct claim *claim;
- struct claim *claim_tmp = NULL;
+ struct batadv_claim *claim;
+ struct batadv_claim *claim_tmp = NULL;
int index;
if (!hash)
return NULL;
- index = choose_claim(data, hash->size);
+ index = batadv_choose_claim(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
- if (!compare_claim(&claim->hash_entry, data))
+ if (!batadv_compare_claim(&claim->hash_entry, data))
continue;
if (!atomic_inc_not_zero(&claim->refcount))
@@ -163,21 +163,22 @@ static struct claim *claim_hash_find(struct bat_priv *bat_priv,
}
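The lookup above walks its bucket under rcu_read_lock() and only returns entries whose refcount could be raised with atomic_inc_not_zero(), so objects already on their way to being freed are skipped. The contract for callers is therefore find/put, sketched below (illustrative caller):

	struct batadv_claim *claim;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);
	if (claim) {
		/* we hold our own reference; safe to use outside RCU */
		/* ... inspect or update the claim ... */
		batadv_claim_free_ref(claim);	/* drop it when done */
	}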
/**
+ * batadv_backbone_hash_find - looks for a backbone gateway in the hash
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
* @vid: the VLAN ID
*
- * looks for a claim in the hash, and returns it if found
- * or NULL otherwise.
+ * Returns the backbone gateway if found or NULL otherwise.
*/
-static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
- uint8_t *addr, short vid)
+static struct batadv_backbone_gw *
+batadv_backbone_hash_find(struct batadv_priv *bat_priv,
+ uint8_t *addr, short vid)
{
- struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct backbone_gw search_entry, *backbone_gw;
- struct backbone_gw *backbone_gw_tmp = NULL;
+ struct batadv_backbone_gw search_entry, *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw_tmp = NULL;
int index;
if (!hash)
@@ -186,13 +187,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
memcpy(search_entry.orig, addr, ETH_ALEN);
search_entry.vid = vid;
- index = choose_backbone_gw(&search_entry, hash->size);
+ index = batadv_choose_backbone_gw(&search_entry, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (!compare_backbone_gw(&backbone_gw->hash_entry,
- &search_entry))
+ if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
+ &search_entry))
continue;
if (!atomic_inc_not_zero(&backbone_gw->refcount))
@@ -207,12 +208,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
}
/* delete all claims for a backbone */
-static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+static void
+batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct claim *claim;
+ struct batadv_claim *claim;
int i;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -231,36 +233,35 @@ static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
if (claim->backbone_gw != backbone_gw)
continue;
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
hlist_del_rcu(node);
}
spin_unlock_bh(list_lock);
}
/* all claims gone, initialize CRC */
- backbone_gw->crc = BLA_CRC_INIT;
+ backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
/**
+ * batadv_bla_send_claim - sends a claim frame according to the provided info
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address to be announced within the claim
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
- *
- * sends a claim frame according to the provided info.
*/
-static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
- short vid, int claimtype)
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+ short vid, int claimtype)
{
struct sk_buff *skb;
struct ethhdr *ethhdr;
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
struct net_device *soft_iface;
uint8_t *hw_src;
- struct bla_claim_dst local_claim_dest;
- uint32_t zeroip = 0;
+ struct batadv_bla_claim_dst local_claim_dest;
+ __be32 zeroip = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return;
@@ -294,40 +295,41 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
/* now we pretend that the client would have sent this ... */
switch (claimtype) {
- case CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_ADD:
/* normal claim frame
* set Ethernet SRC to the client's mac
*/
memcpy(ethhdr->h_source, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
break;
- case CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_DEL:
/* unclaim frame
* set HW SRC to the client's mac
*/
memcpy(hw_src, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
+ vid);
break;
- case CLAIM_TYPE_ANNOUNCE:
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
/* announcement frame
* set HW SRC to the special mac containing the crc
*/
memcpy(hw_src, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
- ethhdr->h_source, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+ ethhdr->h_source, vid);
break;
- case CLAIM_TYPE_REQUEST:
+ case BATADV_CLAIM_TYPE_REQUEST:
/* request frame
* set HW SRC to the special mac containing the crc
*/
memcpy(hw_src, mac, ETH_ALEN);
memcpy(ethhdr->h_dest, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
- ethhdr->h_source, ethhdr->h_dest, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+		   "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n",
+ ethhdr->h_source, ethhdr->h_dest, vid);
break;
}
@@ -344,10 +346,11 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
netif_rx(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/**
+ * batadv_bla_get_backbone_gw
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
* @vid: the VLAN ID
@@ -355,21 +358,22 @@ out:
* searches for the backbone gw or creates a new one if it could not
* be found.
*/
-static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
- uint8_t *orig, short vid)
+static struct batadv_backbone_gw *
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+ short vid)
{
- struct backbone_gw *entry;
- struct orig_node *orig_node;
+ struct batadv_backbone_gw *entry;
+ struct batadv_orig_node *orig_node;
int hash_added;
- entry = backbone_hash_find(bat_priv, orig, vid);
+ entry = batadv_backbone_hash_find(bat_priv, orig, vid);
if (entry)
return entry;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
- orig, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+ orig, vid);
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
@@ -377,7 +381,7 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
entry->vid = vid;
entry->lasttime = jiffies;
- entry->crc = BLA_CRC_INIT;
+ entry->crc = BATADV_BLA_CRC_INIT;
entry->bat_priv = bat_priv;
atomic_set(&entry->request_sent, 0);
memcpy(entry->orig, orig, ETH_ALEN);
@@ -385,8 +389,10 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
/* one for the hash, one for returning */
atomic_set(&entry->refcount, 2);
- hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
- choose_backbone_gw, entry, &entry->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->backbone_hash,
+ batadv_compare_backbone_gw,
+ batadv_choose_backbone_gw, entry,
+ &entry->hash_entry);
if (unlikely(hash_added != 0)) {
/* hash failed, free the structure */
@@ -395,11 +401,11 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
}
/* this is a gateway now, remove any tt entries */
- orig_node = orig_hash_find(bat_priv, orig);
+ orig_node = batadv_orig_hash_find(bat_priv, orig);
if (orig_node) {
- tt_global_del_orig(bat_priv, orig_node,
- "became a backbone gateway");
- orig_node_free_ref(orig_node);
+ batadv_tt_global_del_orig(bat_priv, orig_node,
+ "became a backbone gateway");
+ batadv_orig_node_free_ref(orig_node);
}
return entry;
}
@@ -407,43 +413,46 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
/* update or add the own backbone gw to make sure we announce
* where we receive other backbone gws
*/
-static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- short vid)
+static void
+batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
- backbone_gw = bla_get_backbone_gw(bat_priv,
- primary_if->net_dev->dev_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid);
if (unlikely(!backbone_gw))
return;
backbone_gw->lasttime = jiffies;
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @vid: the vid where the request came on
*
* Repeat all of our own claims, and finally send an ANNOUNCE frame
* to allow the requester another check if the CRC is correct now.
*/
-static void bla_answer_request(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, short vid)
+static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ short vid)
{
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
- struct claim *claim;
- struct backbone_gw *backbone_gw;
+ struct batadv_hashtable *hash;
+ struct batadv_claim *claim;
+ struct batadv_backbone_gw *backbone_gw;
int i;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_answer_request(): received a claim request, send all of our own claims again\n");
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_answer_request(): received a claim request, send all of our own claims again\n");
- backbone_gw = backbone_hash_find(bat_priv,
- primary_if->net_dev->dev_addr, vid);
+ backbone_gw = batadv_backbone_hash_find(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid);
if (!backbone_gw)
return;
@@ -457,36 +466,34 @@ static void bla_answer_request(struct bat_priv *bat_priv,
if (claim->backbone_gw != backbone_gw)
continue;
- bla_send_claim(bat_priv, claim->addr, claim->vid,
- CLAIM_TYPE_ADD);
+ batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
+ BATADV_CLAIM_TYPE_ADD);
}
rcu_read_unlock();
}
/* finally, send an announcement frame */
- bla_send_announce(bat_priv, backbone_gw);
- backbone_gw_free_ref(backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
-/**
- * @backbone_gw: the backbone gateway from whom we are out of sync
+/* @backbone_gw: the backbone gateway from whom we are out of sync
*
* When the crc is wrong, ask the backbone gateway for a full table update.
* After the request, it will repeat all of his own claims and finally
* send an announcement claim with which we can check again.
*/
-static void bla_send_request(struct backbone_gw *backbone_gw)
+static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
{
/* first, remove all old entries */
- bla_del_backbone_claims(backbone_gw);
+ batadv_bla_del_backbone_claims(backbone_gw);
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "Sending REQUEST to %pM\n",
- backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "Sending REQUEST to %pM\n", backbone_gw->orig);
/* send request */
- bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
- backbone_gw->vid, CLAIM_TYPE_REQUEST);
+ batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+ backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
/* no local broadcasts should be sent or received, for now. */
if (!atomic_read(&backbone_gw->request_sent)) {
@@ -495,45 +502,45 @@ static void bla_send_request(struct backbone_gw *backbone_gw)
}
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: our backbone gateway which should be announced
*
* This function sends an announcement. It is called from multiple
* places.
*/
-static void bla_send_announce(struct bat_priv *bat_priv,
- struct backbone_gw *backbone_gw)
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_backbone_gw *backbone_gw)
{
uint8_t mac[ETH_ALEN];
- uint16_t crc;
+ __be16 crc;
- memcpy(mac, announce_mac, 4);
+ memcpy(mac, batadv_announce_mac, 4);
crc = htons(backbone_gw->crc);
- memcpy(&mac[4], (uint8_t *)&crc, 2);
+ memcpy(&mac[4], &crc, 2);
- bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+ batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
+ BATADV_CLAIM_TYPE_ANNOUNCE);
}
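batadv_bla_send_announce() above hides the announcement payload in a pseudo client MAC address: bytes 0-3 carry the fixed prefix 43:05:43:05 and bytes 4-5 the backbone's claim CRC in network byte order, which batadv_handle_announce() later unpacks. The layout, as a standalone sketch (helper names are illustrative):

#include <arpa/inet.h>	/* htons/ntohs for this userspace sketch */
#include <stdint.h>
#include <string.h>

static const uint8_t announce_prefix[4] = { 0x43, 0x05, 0x43, 0x05 };

static void pack_announce_mac(uint8_t mac[6], uint16_t crc)
{
	uint16_t crc_be = htons(crc);

	memcpy(mac, announce_prefix, 4);
	memcpy(&mac[4], &crc_be, 2);	/* CRC lives in bytes 4-5 */
}

static uint16_t unpack_announce_crc(const uint8_t mac[6])
{
	uint16_t crc_be;

	memcpy(&crc_be, &mac[4], 2);
	return ntohs(crc_be);
}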
/**
+ * batadv_bla_add_claim - Adds a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address of the claim
* @vid: the VLAN ID of the frame
* @backbone_gw: the backbone gateway which claims it
- *
- * Adds a claim in the claim hash.
*/
-static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
- const short vid, struct backbone_gw *backbone_gw)
+static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ const uint8_t *mac, const short vid,
+ struct batadv_backbone_gw *backbone_gw)
{
- struct claim *claim;
- struct claim search_claim;
+ struct batadv_claim *claim;
+ struct batadv_claim search_claim;
int hash_added;
memcpy(search_claim.addr, mac, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* create a new claim entry if it does not exist yet. */
if (!claim) {
@@ -547,11 +554,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
claim->backbone_gw = backbone_gw;
atomic_set(&claim->refcount, 2);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
- mac, vid);
- hash_added = hash_add(bat_priv->claim_hash, compare_claim,
- choose_claim, claim, &claim->hash_entry);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+ mac, vid);
+ hash_added = batadv_hash_add(bat_priv->claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim,
+ &claim->hash_entry);
if (unlikely(hash_added != 0)) {
/* only local changes happened. */
@@ -564,13 +573,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
/* no need to register a new backbone */
goto claim_free_ref;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_add_claim(): changing ownership for %pM, vid %d\n",
- mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_add_claim(): changing ownership for %pM, vid %d\n",
+ mac, vid);
claim->backbone_gw->crc ^=
crc16(0, claim->addr, ETH_ALEN);
- backbone_gw_free_ref(claim->backbone_gw);
+ batadv_backbone_gw_free_ref(claim->backbone_gw);
}
/* set (new) backbone gw */
@@ -581,45 +590,48 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
backbone_gw->lasttime = jiffies;
claim_free_ref:
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
}
/* Delete a claim from the claim hash which has the
* given mac address and vid.
*/
-static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
- const short vid)
+static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+ const uint8_t *mac, const short vid)
{
- struct claim search_claim, *claim;
+ struct batadv_claim search_claim, *claim;
memcpy(search_claim.addr, mac, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim)
return;
- bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
+ mac, vid);
- hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
- claim_free_ref(claim); /* reference from the hash is gone */
+ batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+ batadv_choose_claim, claim);
+ batadv_claim_free_ref(claim); /* reference from the hash is gone */
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
/* don't need the reference from hash_find() anymore */
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
}
/* check for ANNOUNCE frame, return 1 if handled */
-static int handle_announce(struct bat_priv *bat_priv,
- uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv,
+ uint8_t *an_addr, uint8_t *backbone_addr,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
uint16_t crc;
- if (memcmp(an_addr, announce_mac, 4) != 0)
+ if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
return 0;
- backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
if (unlikely(!backbone_gw))
return 1;
@@ -627,19 +639,19 @@ static int handle_announce(struct bat_priv *bat_priv,
/* handle as ANNOUNCE frame */
backbone_gw->lasttime = jiffies;
- crc = ntohs(*((uint16_t *)(&an_addr[4])));
+ crc = ntohs(*((__be16 *)(&an_addr[4])));
- bat_dbg(DBG_BLA, bat_priv,
- "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
- vid, backbone_gw->orig, crc);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+ vid, backbone_gw->orig, crc);
if (backbone_gw->crc != crc) {
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
- backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
- crc);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+ backbone_gw->orig, backbone_gw->vid,
+ backbone_gw->crc, crc);
- bla_send_request(backbone_gw);
+ batadv_bla_send_request(backbone_gw);
} else {
/* if we have sent a request and the crc was OK,
* we can allow traffic again.
@@ -650,88 +662,92 @@ static int handle_announce(struct bat_priv *bat_priv,
}
}
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* check for REQUEST frame, return 1 if handled */
-static int handle_request(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *backbone_addr,
- struct ethhdr *ethhdr, short vid)
+static int batadv_handle_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ struct ethhdr *ethhdr, short vid)
{
/* check for REQUEST frame */
- if (!compare_eth(backbone_addr, ethhdr->h_dest))
+ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
return 0;
/* sanity check, this should not happen on a normal switch,
* we ignore it in this case.
*/
- if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
return 1;
- bat_dbg(DBG_BLA, bat_priv,
- "handle_request(): REQUEST vid %d (sent by %pM)...\n",
- vid, ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_request(): REQUEST vid %d (sent by %pM)...\n",
+ vid, ethhdr->h_source);
- bla_answer_request(bat_priv, primary_if, vid);
+ batadv_bla_answer_request(bat_priv, primary_if, vid);
return 1;
}
/* check for UNCLAIM frame, return 1 if handled */
-static int handle_unclaim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *backbone_addr,
- uint8_t *claim_addr, short vid)
+static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
/* unclaim in any case if it is our own */
- if (primary_if && compare_eth(backbone_addr,
- primary_if->net_dev->dev_addr))
- bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+ if (primary_if && batadv_compare_eth(backbone_addr,
+ primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_DEL);
- backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
if (!backbone_gw)
return 1;
/* this must be an UNCLAIM frame */
- bat_dbg(DBG_BLA, bat_priv,
- "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
- claim_addr, vid, backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+ claim_addr, vid, backbone_gw->orig);
- bla_del_claim(bat_priv, claim_addr, vid);
- backbone_gw_free_ref(backbone_gw);
+ batadv_bla_del_claim(bat_priv, claim_addr, vid);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* check for CLAIM frame, return 1 if handled */
-static int handle_claim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, uint8_t *backbone_addr,
- uint8_t *claim_addr, short vid)
+static int batadv_handle_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr, uint8_t *claim_addr,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
/* register the gateway if not yet available, and add the claim. */
- backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
if (unlikely(!backbone_gw))
return 1;
/* this must be a CLAIM frame */
- bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
- if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
- bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+ batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_ADD);
/* TODO: we could call something like tt_local_del() here. */
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/**
+ * batadv_check_claim_group
* @bat_priv: the bat priv with all the soft interface information
* @hw_src: the Hardware source in the ARP Header
* @hw_dst: the Hardware destination in the ARP Header
@@ -746,16 +762,16 @@ static int handle_claim(struct bat_priv *bat_priv,
* 1 - if it is a claim packet from another group
* 0 - if it is not a claim packet
*/
-static int check_claim_group(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *hw_src, uint8_t *hw_dst,
- struct ethhdr *ethhdr)
+static int batadv_check_claim_group(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *hw_src, uint8_t *hw_dst,
+ struct ethhdr *ethhdr)
{
uint8_t *backbone_addr;
- struct orig_node *orig_node;
- struct bla_claim_dst *bla_dst, *bla_dst_own;
+ struct batadv_orig_node *orig_node;
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
- bla_dst = (struct bla_claim_dst *)hw_dst;
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->claim_dest;
/* check if it is a claim packet in general */
@@ -767,12 +783,12 @@ static int check_claim_group(struct bat_priv *bat_priv,
* otherwise assume it is in the hw_src
*/
switch (bla_dst->type) {
- case CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_ADD:
backbone_addr = hw_src;
break;
- case CLAIM_TYPE_REQUEST:
- case CLAIM_TYPE_ANNOUNCE:
- case CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_REQUEST:
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ case BATADV_CLAIM_TYPE_DEL:
backbone_addr = ethhdr->h_source;
break;
default:
@@ -780,7 +796,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
}
/* don't accept claim frames from ourselves */
- if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
return 0;
/* if its already the same group, it is fine. */
@@ -788,7 +804,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
return 2;
/* let's see if this originator is in our mesh */
- orig_node = orig_hash_find(bat_priv, backbone_addr);
+ orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
/* don't accept claims from gateways which are not in
* the same mesh or group.
@@ -798,20 +814,19 @@ static int check_claim_group(struct bat_priv *bat_priv,
/* if our mesh friend's mac is bigger, use it for ourselves. */
if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
- bat_dbg(DBG_BLA, bat_priv,
- "taking other backbones claim group: %04x\n",
- ntohs(bla_dst->group));
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "taking other backbones claim group: %04x\n",
+ ntohs(bla_dst->group));
bla_dst_own->group = bla_dst->group;
}
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return 2;
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
*
* Check if this is a claim frame, and process it accordingly.
@@ -819,15 +834,15 @@ static int check_claim_group(struct bat_priv *bat_priv,
* returns 1 if it was a claim frame, otherwise return 0 to
* tell the callee that it can use the frame on its own.
*/
-static int bla_process_claim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct sk_buff *skb)
+static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct sk_buff *skb)
{
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
struct arphdr *arphdr;
uint8_t *hw_src, *hw_dst;
- struct bla_claim_dst *bla_dst;
+ struct batadv_bla_claim_dst *bla_dst;
uint16_t proto;
int headlen;
short vid = -1;
@@ -860,7 +875,6 @@ static int bla_process_claim(struct bat_priv *bat_priv,
/* Check whether the ARP frame carries valid
* IP information
*/
-
if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
return 0;
if (arphdr->ar_pro != htons(ETH_P_IP))
@@ -872,59 +886,62 @@ static int bla_process_claim(struct bat_priv *bat_priv,
hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
- bla_dst = (struct bla_claim_dst *)hw_dst;
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
/* check if it is a claim frame. */
- ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
+ ethhdr);
if (ret == 1)
- bat_dbg(DBG_BLA, bat_priv,
- "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, vid, hw_src, hw_dst);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
if (ret < 2)
return ret;
/* become a backbone gw ourselves on this vlan if not happened yet */
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
/* check for the different types of claim frames ... */
switch (bla_dst->type) {
- case CLAIM_TYPE_ADD:
- if (handle_claim(bat_priv, primary_if, hw_src,
- ethhdr->h_source, vid))
+ case BATADV_CLAIM_TYPE_ADD:
+ if (batadv_handle_claim(bat_priv, primary_if, hw_src,
+ ethhdr->h_source, vid))
return 1;
break;
- case CLAIM_TYPE_DEL:
- if (handle_unclaim(bat_priv, primary_if,
- ethhdr->h_source, hw_src, vid))
+ case BATADV_CLAIM_TYPE_DEL:
+ if (batadv_handle_unclaim(bat_priv, primary_if,
+ ethhdr->h_source, hw_src, vid))
return 1;
break;
- case CLAIM_TYPE_ANNOUNCE:
- if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
+ vid))
return 1;
break;
- case CLAIM_TYPE_REQUEST:
- if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+ case BATADV_CLAIM_TYPE_REQUEST:
+ if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
+ vid))
return 1;
break;
}
- bat_dbg(DBG_BLA, bat_priv,
- "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, vid, hw_src, hw_dst);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
return 1;
}
/* Check when we last heard from other nodes, and remove them in case of
* a time out, or clean all backbone gws if now is set.
*/
-static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
int i;
@@ -941,29 +958,30 @@ static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
head, hash_entry) {
if (now)
goto purge_now;
- if (!has_timed_out(backbone_gw->lasttime,
- BLA_BACKBONE_TIMEOUT))
+ if (!batadv_has_timed_out(backbone_gw->lasttime,
+ BATADV_BLA_BACKBONE_TIMEOUT))
continue;
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
- backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+ backbone_gw->orig);
purge_now:
/* don't wait for the pending request anymore */
if (atomic_read(&backbone_gw->request_sent))
atomic_dec(&bat_priv->bla_num_requests);
- bla_del_backbone_claims(backbone_gw);
+ batadv_bla_del_backbone_claims(backbone_gw);
hlist_del_rcu(node);
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
spin_unlock_bh(list_lock);
}
}
/**
+ * batadv_bla_purge_claims
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface, may be NULL if now is set
* @now: whether the whole hash shall be wiped now
@@ -971,13 +989,14 @@ purge_now:
* Check when we heard last time from our own claims, and remove them in case of
* a time out, or clean all claims if now is set
*/
-static void bla_purge_claims(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, int now)
+static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ int now)
{
- struct claim *claim;
+ struct batadv_claim *claim;
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
int i;
hash = bat_priv->claim_hash;
@@ -991,42 +1010,42 @@ static void bla_purge_claims(struct bat_priv *bat_priv,
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
if (now)
goto purge_now;
- if (!compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
continue;
- if (!has_timed_out(claim->lasttime,
- BLA_CLAIM_TIMEOUT))
+ if (!batadv_has_timed_out(claim->lasttime,
+ BATADV_BLA_CLAIM_TIMEOUT))
continue;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_purge_claims(): %pM, vid %d, time out\n",
- claim->addr, claim->vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_purge_claims(): %pM, vid %d, time out\n",
+ claim->addr, claim->vid);
purge_now:
- handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
- claim->addr, claim->vid);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ claim->backbone_gw->orig,
+ claim->addr, claim->vid);
}
rcu_read_unlock();
}
}
/**
+ * batadv_bla_update_orig_address
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the new selected primary_if
* @oldif: the old primary interface, may be NULL
*
* Update the backbone gateways when the own orig address changes.
- *
*/
-void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif)
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
int i;
/* reset bridge loop avoidance group id */
@@ -1034,8 +1053,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
if (!oldif) {
- bla_purge_claims(bat_priv, NULL, 1);
- bla_purge_backbone_gw(bat_priv, 1);
+ batadv_bla_purge_claims(bat_priv, NULL, 1);
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
return;
}
@@ -1049,8 +1068,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
/* own orig still holds the old value. */
- if (!compare_eth(backbone_gw->orig,
- oldif->net_dev->dev_addr))
+ if (!batadv_compare_eth(backbone_gw->orig,
+ oldif->net_dev->dev_addr))
continue;
memcpy(backbone_gw->orig,
@@ -1058,7 +1077,7 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
/* send an announce frame so others will ask for our
* claims and update their tables.
*/
- bla_send_announce(bat_priv, backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
}
rcu_read_unlock();
}
@@ -1067,36 +1086,36 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
/* (re)start the timer */
-static void bla_start_timer(struct bat_priv *bat_priv)
+static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
- queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
- msecs_to_jiffies(BLA_PERIOD_LENGTH));
+ INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+ msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
/* periodic work to do:
* * purge structures when they are too old
* * send announcements
*/
-static void bla_periodic_work(struct work_struct *work)
+static void batadv_bla_periodic_work(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, bla_work);
+ struct batadv_priv *bat_priv;
struct hlist_node *node;
struct hlist_head *head;
- struct backbone_gw *backbone_gw;
- struct hashtable_t *hash;
- struct hard_iface *primary_if;
+ struct batadv_backbone_gw *backbone_gw;
+ struct batadv_hashtable *hash;
+ struct batadv_hard_iface *primary_if;
int i;
- primary_if = primary_if_get_selected(bat_priv);
+ bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- bla_purge_claims(bat_priv, primary_if, 0);
- bla_purge_backbone_gw(bat_priv, 0);
+ batadv_bla_purge_claims(bat_priv, primary_if, 0);
+ batadv_bla_purge_backbone_gw(bat_priv, 0);
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto out;
@@ -1110,67 +1129,81 @@ static void bla_periodic_work(struct work_struct *work)
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (!compare_eth(backbone_gw->orig,
- primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
continue;
backbone_gw->lasttime = jiffies;
- bla_send_announce(bat_priv, backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
- bla_start_timer(bat_priv);
+ batadv_bla_start_timer(bat_priv);
}
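The reshuffled periodic handler above also shows the usual self-rearming delayed-work shape: recover the private struct via two container_of() steps, do the periodic chores, then queue the work again. Stripped to that skeleton (chores elided):

static void periodic_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct batadv_priv *bat_priv;

	dwork = container_of(work, struct delayed_work, work);
	bat_priv = container_of(dwork, struct batadv_priv, bla_work);

	/* ... purge stale claims/gateways, send announcements ... */

	/* rearm ourselves one period from now */
	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}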
+/* The claim and backbone hashes receive the same lock class key because
+ * they are both initialized by hash_new with the same key. Reinitialize
+ * them with two different keys to allow nested locking without generating
+ * lockdep warnings.
+ */
+static struct lock_class_key batadv_claim_hash_lock_class_key;
+static struct lock_class_key batadv_backbone_hash_lock_class_key;
+
/* initialize all bla structures */
-int bla_init(struct bat_priv *bat_priv)
+int batadv_bla_init(struct batadv_priv *bat_priv)
{
int i;
uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
- bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
bat_priv->claim_dest.type = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if) {
bat_priv->claim_dest.group =
htons(crc16(0, primary_if->net_dev->dev_addr,
ETH_ALEN));
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
} else {
bat_priv->claim_dest.group = 0; /* will be set later */
}
/* initialize the duplicate list */
- for (i = 0; i < DUPLIST_SIZE; i++)
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
bat_priv->bcast_duplist[i].entrytime =
- jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+ jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
bat_priv->bcast_duplist_curr = 0;
if (bat_priv->claim_hash)
- return 1;
+ return 0;
- bat_priv->claim_hash = hash_new(128);
- bat_priv->backbone_hash = hash_new(32);
+ bat_priv->claim_hash = batadv_hash_new(128);
+ bat_priv->backbone_hash = batadv_hash_new(32);
if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
- return -1;
+ return -ENOMEM;
- bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+ batadv_hash_set_lock_class(bat_priv->claim_hash,
+ &batadv_claim_hash_lock_class_key);
+ batadv_hash_set_lock_class(bat_priv->backbone_hash,
+ &batadv_backbone_hash_lock_class_key);
- bla_start_timer(bat_priv);
- return 1;
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+ batadv_bla_start_timer(bat_priv);
+ return 0;
}
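The two lock class keys registered above exist purely for lockdep: both tables come from batadv_hash_new() and would otherwise share one lock class, so taking a claim bucket lock while holding a backbone bucket lock would be flagged as recursive locking. A sketch of what a helper like batadv_hash_set_lock_class() presumably does, assuming per-bucket spinlocks as declared in hash.h:

static void hash_set_lock_class(struct batadv_hashtable *hash,
				struct lock_class_key *key)
{
	uint32_t i;

	/* move every bucket lock into the caller's lockdep class */
	for (i = 0; i < hash->size; i++)
		lockdep_set_class(&hash->list_locks[i], key);
}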
/**
+ * batadv_bla_check_bcast_duplist
* @bat_priv: the bat priv with all the soft interface information
* @bcast_packet: originator mac address
* @hdr_size: maximum length of the frame
@@ -1183,17 +1216,15 @@ int bla_init(struct bat_priv *bat_priv)
* with a good chance that it is the same packet. If it was also
* sent by another host, drop it. We allow equal packets from
* the same host, however, as this might be intended.
- *
- **/
-
-int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet,
- int hdr_size)
+ */
+int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size)
{
int i, length, curr;
uint8_t *content;
uint16_t crc;
- struct bcast_duplist_entry *entry;
+ struct batadv_bcast_duplist_entry *entry;
length = hdr_size - sizeof(*bcast_packet);
content = (uint8_t *)bcast_packet;
@@ -1202,20 +1233,21 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
/* calculate the crc ... */
crc = crc16(0, content, length);
- for (i = 0 ; i < DUPLIST_SIZE; i++) {
- curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
+ curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
entry = &bat_priv->bcast_duplist[curr];
/* we can stop searching if the entry is too old;
* later entries will be even older
*/
- if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+ if (batadv_has_timed_out(entry->entrytime,
+ BATADV_DUPLIST_TIMEOUT))
break;
if (entry->crc != crc)
continue;
- if (compare_eth(entry->orig, bcast_packet->orig))
+ if (batadv_compare_eth(entry->orig, bcast_packet->orig))
continue;
/* this entry seems to match: same crc, not too old,
@@ -1224,7 +1256,8 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
return 1;
}
/* not found, add a new entry (overwrite the oldest entry) */
- curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+ curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+ curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
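The duplicate list above is a small ring buffer ordered newest-first: lookups start at bcast_duplist_curr and walk forward modulo the size, stopping early once entries are too old, and insertion steps the head index backwards so it overwrites the oldest slot. The index arithmetic in isolation (sizes and names are illustrative):

#define DUP_SIZE 16	/* stands in for BATADV_DUPLIST_SIZE */

struct dup_entry {
	uint16_t crc;
	unsigned long entrytime;
	uint8_t orig[6];
};

static struct dup_entry duplist[DUP_SIZE];
static int duplist_curr;	/* index of the newest entry */

/* claim the slot for a new entry, overwriting the oldest one */
static struct dup_entry *duplist_insert_slot(void)
{
	duplist_curr = (duplist_curr + DUP_SIZE - 1) % DUP_SIZE;
	return &duplist[duplist_curr];
}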
@@ -1237,22 +1270,19 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @orig: originator mac address
*
* check if the originator is a gateway for any VLAN ID.
*
* returns 1 if it is found, 0 otherwise
- *
*/
-
-int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
{
- struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
int i;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@ -1266,7 +1296,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (compare_eth(backbone_gw->orig, orig)) {
+ if (batadv_compare_eth(backbone_gw->orig, orig)) {
rcu_read_unlock();
return 1;
}
@@ -1279,6 +1309,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
/**
+ * batadv_bla_is_backbone_gw
* @skb: the frame to be checked
* @orig_node: the orig_node of the frame
* @hdr_size: maximum length of the frame
@@ -1286,14 +1317,13 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
* bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
* if the orig_node is also a gateway on the soft interface, otherwise it
* returns 0.
- *
*/
-int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node, int hdr_size)
+int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node, int hdr_size)
{
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
short vid = -1;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@ -1315,39 +1345,39 @@ int bla_is_backbone_gw(struct sk_buff *skb,
}
/* see if this originator is a backbone gw for this VLAN */
-
- backbone_gw = backbone_hash_find(orig_node->bat_priv,
- orig_node->orig, vid);
+ backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
+ orig_node->orig, vid);
if (!backbone_gw)
return 0;
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* free all bla structures (for softinterface free or module unload) */
-void bla_free(struct bat_priv *bat_priv)
+void batadv_bla_free(struct batadv_priv *bat_priv)
{
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
cancel_delayed_work_sync(&bat_priv->bla_work);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (bat_priv->claim_hash) {
- bla_purge_claims(bat_priv, primary_if, 1);
- hash_destroy(bat_priv->claim_hash);
+ batadv_bla_purge_claims(bat_priv, primary_if, 1);
+ batadv_hash_destroy(bat_priv->claim_hash);
bat_priv->claim_hash = NULL;
}
if (bat_priv->backbone_hash) {
- bla_purge_backbone_gw(bat_priv, 1);
- hash_destroy(bat_priv->backbone_hash);
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
+ batadv_hash_destroy(bat_priv->backbone_hash);
bat_priv->backbone_hash = NULL;
}
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/**
+ * batadv_bla_rx
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
@@ -1360,19 +1390,18 @@ void bla_free(struct bat_priv *bat_priv)
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
- *
*/
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
- bool is_bcast)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast)
{
struct ethhdr *ethhdr;
- struct claim search_claim, *claim = NULL;
- struct hard_iface *primary_if;
+ struct batadv_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
int ret;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto handled;
@@ -1387,21 +1416,21 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim) {
/* possible optimization: race for a claim */
/* No claim exists yet, claim it for us!
*/
- handle_claim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
/* if it is our own claim ... */
- if (compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ if (batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
@@ -1421,13 +1450,13 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
* send a claim and update the claim table
* immediately.
*/
- handle_claim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
allow:
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = 0;
goto out;
@@ -1437,13 +1466,14 @@ handled:
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (claim)
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
return ret;
}
/**
+ * batadv_bla_tx
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
@@ -1455,16 +1485,15 @@ out:
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
- *
*/
-int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
{
struct ethhdr *ethhdr;
- struct claim search_claim, *claim = NULL;
- struct hard_iface *primary_if;
+ struct batadv_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1474,7 +1503,7 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
/* in VLAN case, the mac header might not be set. */
skb_reset_mac_header(skb);
- if (bla_process_claim(bat_priv, primary_if, skb))
+ if (batadv_bla_process_claim(bat_priv, primary_if, skb))
goto handled;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1487,21 +1516,21 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* if no claim exists, allow it. */
if (!claim)
goto allow;
/* check if we are responsible. */
- if (compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ if (batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
- handle_unclaim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
@@ -1518,33 +1547,34 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
goto allow;
}
allow:
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = 0;
goto out;
handled:
ret = 1;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (claim)
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
return ret;
}
-int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->claim_hash;
- struct claim *claim;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->claim_hash;
+ struct batadv_claim *claim;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
bool is_own;
int ret = 0;
+ uint8_t *primary_addr;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -1552,16 +1582,17 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
+ primary_addr = primary_if->net_dev->dev_addr;
seq_printf(seq,
"Claims announced for the mesh %s (orig %pM, group id %04x)\n",
- net_dev->name, primary_if->net_dev->dev_addr,
+ net_dev->name, primary_addr,
ntohs(bat_priv->claim_dest.group));
seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
"Client", "VID", "Originator", "CRC");
@@ -1570,8 +1601,8 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
- is_own = compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr);
+ is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ primary_addr);
seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
claim->addr, claim->vid,
claim->backbone_gw->orig,
@@ -1582,6 +1613,6 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
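The tx path above reduces to one ownership test: look up the claim for the frame's source MAC, and if that claim is held by our own backbone gateway the client has roamed behind us and must be unclaimed. A minimal user-space sketch of just that test; struct claim and mac_eq() are hypothetical stand-ins for batadv_claim_hash_find() and batadv_compare_eth(), not the kernel API.

#include <stdbool.h>
#include <string.h>

struct claim {
	unsigned char backbone_orig[6];	/* backbone gw owning the claim */
};

static bool mac_eq(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

/* true when we must unclaim: we claimed this client ourselves earlier,
 * yet its frames now enter through the tx path, i.e. it has roamed
 */
static bool client_roamed_to_us(const struct claim *claim,
				const unsigned char *own_addr)
{
	return claim && mac_eq(claim->backbone_orig, own_addr);
}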
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index dc5227b398d..563cfbf94a7 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,81 +15,84 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_
#ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid,
- bool is_bcast);
-int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
-int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node, int hdr_size);
-int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
-int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
-int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet, int hdr_size);
-void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif);
-int bla_init(struct bat_priv *bat_priv);
-void bla_free(struct bat_priv *bat_priv);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast);
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node, int hdr_size);
+int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size);
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif);
+int batadv_bla_init(struct batadv_priv *bat_priv);
+void batadv_bla_free(struct batadv_priv *bat_priv);
-#define BLA_CRC_INIT 0
+#define BATADV_BLA_CRC_INIT 0
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
-static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
- short vid, bool is_bcast)
+static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, short vid,
+ bool is_bcast)
{
return 0;
}
-static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
- short vid)
+static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, short vid)
{
return 0;
}
-static inline int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node,
- int hdr_size)
+static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int hdr_size)
{
return 0;
}
-static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
- void *offset)
+static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
+ void *offset)
{
return 0;
}
-static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
- uint8_t *orig)
+static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+ uint8_t *orig)
{
return 0;
}
-static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet,
- int hdr_size)
+static inline int
+batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size)
{
return 0;
}
-static inline void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif)
+static inline void
+batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
{
}
-static inline int bla_init(struct bat_priv *bat_priv)
+static inline int batadv_bla_init(struct batadv_priv *bat_priv)
{
return 1;
}
-static inline void bla_free(struct bat_priv *bat_priv)
+static inline void batadv_bla_free(struct batadv_priv *bat_priv)
{
}
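When CONFIG_BATMAN_ADV_BLA is off, every entry point above collapses into a static inline no-op, so call sites never need #ifdefs of their own. The same pattern reduced to one function; CONFIG_FEATURE_X and feature_rx() are illustrative names, not batman-adv symbols.

struct sk_buff;

#ifdef CONFIG_FEATURE_X
int feature_rx(struct sk_buff *skb);	/* real implementation in a .c file */
#else
static inline int feature_rx(struct sk_buff *skb)
{
	return 0;	/* "not handled": the caller processes the skb as usual */
}
#endif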
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
new file mode 100644
index 00000000000..34fbb1667bc
--- /dev/null
+++ b/net/batman-adv/debugfs.c
@@ -0,0 +1,409 @@
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+
+#include <linux/debugfs.h>
+
+#include "debugfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "soft-interface.h"
+#include "vis.h"
+#include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
+
+static struct dentry *batadv_debugfs;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
+
+static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
+
+static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
+ size_t idx)
+{
+ return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
+}
+
+static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
+{
+ char *char_addr;
+
+ char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
+ *char_addr = c;
+ debug_log->log_end++;
+
+ if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
+ debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
+}
+
+__printf(2, 3)
+static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
+ const char *fmt, ...)
+{
+ va_list args;
+ static char debug_log_buf[256];
+ char *p;
+
+ if (!debug_log)
+ return 0;
+
+ spin_lock_bh(&debug_log->lock);
+ va_start(args, fmt);
+ vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
+ va_end(args);
+
+ for (p = debug_log_buf; *p != 0; p++)
+ batadv_emit_log_char(debug_log, *p);
+
+ spin_unlock_bh(&debug_log->lock);
+
+ wake_up(&debug_log->queue_wait);
+
+ return 0;
+}
+
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+ va_list args;
+ char tmp_log_buf[256];
+
+ va_start(args, fmt);
+ vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
+ batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
+ jiffies_to_msecs(jiffies), tmp_log_buf);
+ va_end(args);
+
+ return 0;
+}
+
+static int batadv_log_open(struct inode *inode, struct file *file)
+{
+ nonseekable_open(inode, file);
+ file->private_data = inode->i_private;
+ batadv_inc_module_count();
+ return 0;
+}
+
+static int batadv_log_release(struct inode *inode, struct file *file)
+{
+ batadv_dec_module_count();
+ return 0;
+}
+
+static int batadv_log_empty(struct batadv_debug_log *debug_log)
+{
+ return !(debug_log->log_start - debug_log->log_end);
+}
+
+static ssize_t batadv_log_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct batadv_priv *bat_priv = file->private_data;
+ struct batadv_debug_log *debug_log = bat_priv->debug_log;
+ int error, i = 0;
+ char *char_addr;
+ char c;
+
+ if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
+ return -EAGAIN;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ error = wait_event_interruptible(debug_log->queue_wait,
+ (!batadv_log_empty(debug_log)));
+
+ if (error)
+ return error;
+
+ spin_lock_bh(&debug_log->lock);
+
+ while ((!error) && (i < count) &&
+ (debug_log->log_start != debug_log->log_end)) {
+ char_addr = batadv_log_char_addr(debug_log,
+ debug_log->log_start);
+ c = *char_addr;
+
+ debug_log->log_start++;
+
+ spin_unlock_bh(&debug_log->lock);
+
+ error = __put_user(c, buf);
+
+ spin_lock_bh(&debug_log->lock);
+
+ buf++;
+ i++;
+
+ }
+
+ spin_unlock_bh(&debug_log->lock);
+
+ if (!error)
+ return i;
+
+ return error;
+}
+
+static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
+{
+ struct batadv_priv *bat_priv = file->private_data;
+ struct batadv_debug_log *debug_log = bat_priv->debug_log;
+
+ poll_wait(file, &debug_log->queue_wait, wait);
+
+ if (!batadv_log_empty(debug_log))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations batadv_log_fops = {
+ .open = batadv_log_open,
+ .release = batadv_log_release,
+ .read = batadv_log_read,
+ .poll = batadv_log_poll,
+ .llseek = no_llseek,
+};
+
+static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+ struct dentry *d;
+
+ if (!bat_priv->debug_dir)
+ goto err;
+
+ bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
+ if (!bat_priv->debug_log)
+ goto err;
+
+ spin_lock_init(&bat_priv->debug_log->lock);
+ init_waitqueue_head(&bat_priv->debug_log->queue_wait);
+
+ d = debugfs_create_file("log", S_IFREG | S_IRUSR,
+ bat_priv->debug_dir, bat_priv,
+ &batadv_log_fops);
+ if (!d)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+ kfree(bat_priv->debug_log);
+ bat_priv->debug_log = NULL;
+}
+#else /* CONFIG_BATMAN_ADV_DEBUG */
+static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+ bat_priv->debug_log = NULL;
+ return 0;
+}
+
+static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+ return;
+}
+#endif
+
+static int batadv_algorithms_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, batadv_algo_seq_print_text, NULL);
+}
+
+static int batadv_originators_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_orig_seq_print_text, net_dev);
+}
+
+static int batadv_gateways_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_gw_client_seq_print_text, net_dev);
+}
+
+static int batadv_transtable_global_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_tt_global_seq_print_text, net_dev);
+}
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_bla_claim_table_seq_print_text,
+ net_dev);
+}
+#endif
+
+static int batadv_transtable_local_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_tt_local_seq_print_text, net_dev);
+}
+
+static int batadv_vis_data_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_vis_seq_print_text, net_dev);
+}
+
+struct batadv_debuginfo {
+ struct attribute attr;
+ const struct file_operations fops;
+};
+
+#define BATADV_DEBUGINFO(_name, _mode, _open) \
+struct batadv_debuginfo batadv_debuginfo_##_name = { \
+ .attr = { .name = __stringify(_name), \
+ .mode = _mode, }, \
+ .fops = { .owner = THIS_MODULE, \
+ .open = _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ } \
+};
+
+static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
+static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
+static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
+static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
+ batadv_transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+#endif
+static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
+ batadv_transtable_local_open);
+static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
+
+static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
+ &batadv_debuginfo_originators,
+ &batadv_debuginfo_gateways,
+ &batadv_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &batadv_debuginfo_bla_claim_table,
+#endif
+ &batadv_debuginfo_transtable_local,
+ &batadv_debuginfo_vis_data,
+ NULL,
+};
+
+void batadv_debugfs_init(void)
+{
+ struct batadv_debuginfo *bat_debug;
+ struct dentry *file;
+
+ batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
+ if (batadv_debugfs == ERR_PTR(-ENODEV))
+ batadv_debugfs = NULL;
+
+ if (!batadv_debugfs)
+ goto out;
+
+ bat_debug = &batadv_debuginfo_routing_algos;
+ file = debugfs_create_file(bat_debug->attr.name,
+ S_IFREG | bat_debug->attr.mode,
+ batadv_debugfs, NULL, &bat_debug->fops);
+ if (!file)
+ pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+
+out:
+ return;
+}
+
+void batadv_debugfs_destroy(void)
+{
+ if (batadv_debugfs) {
+ debugfs_remove_recursive(batadv_debugfs);
+ batadv_debugfs = NULL;
+ }
+}
+
+int batadv_debugfs_add_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_debuginfo **bat_debug;
+ struct dentry *file;
+
+ if (!batadv_debugfs)
+ goto out;
+
+ bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
+ if (!bat_priv->debug_dir)
+ goto out;
+
+ if (batadv_socket_setup(bat_priv) < 0)
+ goto rem_attr;
+
+ if (batadv_debug_log_setup(bat_priv) < 0)
+ goto rem_attr;
+
+ for (bat_debug = batadv_mesh_debuginfos; *bat_debug; ++bat_debug) {
+ file = debugfs_create_file(((*bat_debug)->attr).name,
+ S_IFREG | ((*bat_debug)->attr).mode,
+ bat_priv->debug_dir,
+ dev, &(*bat_debug)->fops);
+ if (!file) {
+ batadv_err(dev, "Can't add debugfs file: %s/%s\n",
+ dev->name, ((*bat_debug)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+rem_attr:
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+out:
+#ifdef CONFIG_DEBUG_FS
+ return -ENOMEM;
+#else
+ return 0;
+#endif /* CONFIG_DEBUG_FS */
+}
+
+void batadv_debugfs_del_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+
+ batadv_debug_log_cleanup(bat_priv);
+
+ if (batadv_debugfs) {
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+ }
+}
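The debug log above stores characters in a power-of-two ring: because BATADV_LOG_BUF_LEN is a power of two, BATADV_LOG_BUFF_MASK wraps any index with a single AND, and log_start is dragged forward once the writer laps the reader. A stand-alone sketch of that mechanism under the same power-of-two assumption; the ring_log names are illustrative.

#include <stddef.h>

#define BUF_LEN		128		/* must be a power of two */
#define BUF_MASK	(BUF_LEN - 1)

struct ring_log {
	char buf[BUF_LEN];
	size_t start;			/* oldest unread character */
	size_t end;			/* next write position */
};

static void ring_emit(struct ring_log *log, char c)
{
	log->buf[log->end++ & BUF_MASK] = c;

	/* writer overtook the reader: discard the oldest characters */
	if (log->end - log->start > BUF_LEN)
		log->start = log->end - BUF_LEN;
}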
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/debugfs.h
index d605c674642..3319e1f21f5 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,18 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
#define _NET_BATMAN_ADV_DEBUGFS_H_
-#define DEBUGFS_BAT_SUBDIR "batman_adv"
+#define BATADV_DEBUGFS_SUBDIR "batman_adv"
-void debugfs_init(void);
-void debugfs_destroy(void);
-int debugfs_add_meshif(struct net_device *dev);
-void debugfs_del_meshif(struct net_device *dev);
+void batadv_debugfs_init(void);
+void batadv_debugfs_destroy(void);
+int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_del_meshif(struct net_device *dev);
#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
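Each table reachable through these entry points is wired up the same way in debugfs.c: a seq_file show() callback wrapped by single_open(), with the private object passed through inode->i_private, and the resulting fops handed to debugfs_create_file(). A minimal kernel-style sketch of that recipe; the my_* names and the "example" file are placeholders, not batman-adv symbols.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int my_show(struct seq_file *seq, void *offset)
{
	seq_puts(seq, "hello from debugfs\n");
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	/* inode->i_private carries the private object, as above */
	return single_open(file, my_show, inode->i_private);
}

static const struct file_operations my_fops = {
	.owner   = THIS_MODULE,
	.open    = my_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* at init: debugfs_create_file("example", S_IRUGO, parent, priv, &my_fops); */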
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 47f7186dcef..b421cc49d2c 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,11 +15,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
@@ -33,19 +31,21 @@
#include <linux/if_vlan.h>
/* This is the offset of the options field in a dhcp packet starting at
- * the beginning of the dhcp header */
-#define DHCP_OPTIONS_OFFSET 240
-#define DHCP_REQUEST 3
+ * the beginning of the dhcp header
+ */
+#define BATADV_DHCP_OPTIONS_OFFSET 240
+#define BATADV_DHCP_REQUEST 3
-static void gw_node_free_ref(struct gw_node *gw_node)
+static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
{
if (atomic_dec_and_test(&gw_node->refcount))
kfree_rcu(gw_node, rcu);
}
-static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
+static struct batadv_gw_node *
+batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node;
+ struct batadv_gw_node *gw_node;
rcu_read_lock();
gw_node = rcu_dereference(bat_priv->curr_gw);
@@ -60,12 +60,13 @@ out:
return gw_node;
}
-struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node;
- struct orig_node *orig_node = NULL;
+ struct batadv_gw_node *gw_node;
+ struct batadv_orig_node *orig_node = NULL;
- gw_node = gw_get_selected_gw_node(bat_priv);
+ gw_node = batadv_gw_get_selected_gw_node(bat_priv);
if (!gw_node)
goto out;
@@ -81,13 +82,14 @@ unlock:
rcu_read_unlock();
out:
if (gw_node)
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
return orig_node;
}
-static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
+static void batadv_gw_select(struct batadv_priv *bat_priv,
+ struct batadv_gw_node *new_gw_node)
{
- struct gw_node *curr_gw_node;
+ struct batadv_gw_node *curr_gw_node;
spin_lock_bh(&bat_priv->gw_list_lock);
@@ -98,31 +100,34 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
if (curr_gw_node)
- gw_node_free_ref(curr_gw_node);
+ batadv_gw_node_free_ref(curr_gw_node);
spin_unlock_bh(&bat_priv->gw_list_lock);
}
-void gw_deselect(struct bat_priv *bat_priv)
+void batadv_gw_deselect(struct batadv_priv *bat_priv)
{
atomic_set(&bat_priv->gw_reselect, 1);
}
-static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
+static struct batadv_gw_node *
+batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
- struct neigh_node *router;
+ struct batadv_neigh_node *router;
struct hlist_node *node;
- struct gw_node *gw_node, *curr_gw = NULL;
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
uint8_t max_tq = 0;
int down, up;
+ struct batadv_orig_node *orig_node;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
if (gw_node->deleted)
continue;
- router = orig_node_get_router(gw_node->orig_node);
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
@@ -131,35 +136,34 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
switch (atomic_read(&bat_priv->gw_sel_class)) {
case 1: /* fast connection */
- gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
- &down, &up);
+ batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
+ &down, &up);
tmp_gw_factor = (router->tq_avg * router->tq_avg *
down * 100 * 100) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE * 64);
+ (BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
(router->tq_avg > max_tq))) {
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
atomic_inc(&curr_gw->refcount);
}
break;
- default: /**
- * 2: stable connection (use best statistic)
+ default: /* 2: stable connection (use best statistic)
* 3: fast-switch (use best statistic but change as
* soon as a better gateway appears)
* XX: late-switch (use best statistic but change as
* soon as a better gateway appears which has
* $routing_class more tq points)
- **/
+ */
if (router->tq_avg > max_tq) {
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
atomic_inc(&curr_gw->refcount);
}
@@ -172,37 +176,36 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
next:
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
rcu_read_unlock();
return curr_gw;
}
-void gw_election(struct bat_priv *bat_priv)
+void batadv_gw_election(struct batadv_priv *bat_priv)
{
- struct gw_node *curr_gw = NULL, *next_gw = NULL;
- struct neigh_node *router = NULL;
+ struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
+ struct batadv_neigh_node *router = NULL;
char gw_addr[18] = { '\0' };
- /**
- * The batman daemon checks here if we already passed a full originator
+ /* The batman daemon checks here if we already passed a full originator
* cycle in order to make sure we don't choose the first gateway we
* hear about. This check is based on the daemon's uptime which we
* don't have.
- **/
- if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
+ */
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
- if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
goto out;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- next_gw = gw_get_best_gw_node(bat_priv);
+ next_gw = batadv_gw_get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
goto out;
@@ -210,53 +213,57 @@ void gw_election(struct bat_priv *bat_priv)
if (next_gw) {
sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
- router = orig_node_get_router(next_gw->orig_node);
+ router = batadv_orig_node_get_router(next_gw->orig_node);
if (!router) {
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
goto out;
}
}
if ((curr_gw) && (!next_gw)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Removing selected gateway - no gateway in range\n");
- throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Removing selected gateway - no gateway in range\n");
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
+ NULL);
} else if ((!curr_gw) && (next_gw)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
- router->tq_avg);
- throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->orig_node->gw_flags, router->tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
+ gw_addr);
} else {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
- router->tq_avg);
- throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->orig_node->gw_flags, router->tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
+ gw_addr);
}
- gw_select(bat_priv, next_gw);
+ batadv_gw_select(bat_priv, next_gw);
out:
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
if (next_gw)
- gw_node_free_ref(next_gw);
+ batadv_gw_node_free_ref(next_gw);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
-void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct orig_node *curr_gw_orig;
- struct neigh_node *router_gw = NULL, *router_orig = NULL;
+ struct batadv_orig_node *curr_gw_orig;
+ struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
uint8_t gw_tq_avg, orig_tq_avg;
- curr_gw_orig = gw_get_selected_orig(bat_priv);
+ curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig)
goto deselect;
- router_gw = orig_node_get_router(curr_gw_orig);
+ router_gw = batadv_orig_node_get_router(curr_gw_orig);
if (!router_gw)
goto deselect;
@@ -264,7 +271,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
if (curr_gw_orig == orig_node)
goto out;
- router_orig = orig_node_get_router(orig_node);
+ router_orig = batadv_orig_node_get_router(orig_node);
if (!router_orig)
goto out;
@@ -275,35 +282,35 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
if (orig_tq_avg < gw_tq_avg)
goto out;
- /**
- * if the routing class is greater than 3 the value tells us how much
+ /* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
- **/
+ */
if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
goto out;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
- gw_tq_avg, orig_tq_avg);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
+ gw_tq_avg, orig_tq_avg);
deselect:
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
out:
if (curr_gw_orig)
- orig_node_free_ref(curr_gw_orig);
+ batadv_orig_node_free_ref(curr_gw_orig);
if (router_gw)
- neigh_node_free_ref(router_gw);
+ batadv_neigh_node_free_ref(router_gw);
if (router_orig)
- neigh_node_free_ref(router_orig);
+ batadv_neigh_node_free_ref(router_orig);
return;
}
-static void gw_node_add(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags)
+static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags)
{
- struct gw_node *gw_node;
+ struct batadv_gw_node *gw_node;
int down, up;
gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
@@ -318,47 +325,47 @@ static void gw_node_add(struct bat_priv *bat_priv,
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
spin_unlock_bh(&bat_priv->gw_list_lock);
- gw_bandwidth_to_kbit(new_gwflags, &down, &up);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
- orig_node->orig, new_gwflags,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
+ orig_node->orig, new_gwflags,
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
}
-void gw_node_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags)
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags)
{
struct hlist_node *node;
- struct gw_node *gw_node, *curr_gw;
+ struct batadv_gw_node *gw_node, *curr_gw;
- /**
- * Note: We don't need a NULL check here, since curr_gw never gets
+ /* Note: We don't need a NULL check here, since curr_gw never gets
* dereferenced. If curr_gw is NULL we also should not exit as we may
* have this gateway in our list (duplication check!) even though we
* have no currently selected gateway.
*/
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
if (gw_node->orig_node != orig_node)
continue;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Gateway class of originator %pM changed from %i to %i\n",
- orig_node->orig, gw_node->orig_node->gw_flags,
- new_gwflags);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway class of originator %pM changed from %i to %i\n",
+ orig_node->orig, gw_node->orig_node->gw_flags,
+ new_gwflags);
gw_node->deleted = 0;
- if (new_gwflags == NO_FLAGS) {
+ if (new_gwflags == BATADV_NO_FLAGS) {
gw_node->deleted = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Gateway %pM removed from gateway list\n",
- orig_node->orig);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway %pM removed from gateway list\n",
+ orig_node->orig);
if (gw_node == curr_gw)
goto deselect;
@@ -367,34 +374,35 @@ void gw_node_update(struct bat_priv *bat_priv,
goto unlock;
}
- if (new_gwflags == NO_FLAGS)
+ if (new_gwflags == BATADV_NO_FLAGS)
goto unlock;
- gw_node_add(bat_priv, orig_node, new_gwflags);
+ batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
goto unlock;
deselect:
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
unlock:
rcu_read_unlock();
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
}
-void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- gw_node_update(bat_priv, orig_node, 0);
+ batadv_gw_node_update(bat_priv, orig_node, 0);
}
-void gw_node_purge(struct bat_priv *bat_priv)
+void batadv_gw_node_purge(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node, *curr_gw;
+ struct batadv_gw_node *gw_node, *curr_gw;
struct hlist_node *node, *node_tmp;
- unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
int do_deselect = 0;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
spin_lock_bh(&bat_priv->gw_list_lock);
@@ -402,43 +410,42 @@ void gw_node_purge(struct bat_priv *bat_priv)
&bat_priv->gw_list, list) {
if (((!gw_node->deleted) ||
(time_before(jiffies, gw_node->deleted + timeout))) &&
- atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
+ atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
continue;
if (curr_gw == gw_node)
do_deselect = 1;
hlist_del_rcu(&gw_node->list);
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
}
spin_unlock_bh(&bat_priv->gw_list_lock);
/* gw_deselect() needs to acquire the gw_list_lock */
if (do_deselect)
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
}
-/**
- * fails if orig_node has no router
- */
-static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
- const struct gw_node *gw_node)
+/* fails if orig_node has no router */
+static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
+ struct seq_file *seq,
+ const struct batadv_gw_node *gw_node)
{
- struct gw_node *curr_gw;
- struct neigh_node *router;
+ struct batadv_gw_node *curr_gw;
+ struct batadv_neigh_node *router;
int down, up, ret = -1;
- gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+ batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
- router = orig_node_get_router(gw_node->orig_node);
+ router = batadv_orig_node_get_router(gw_node->orig_node);
if (!router)
goto out;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
(curr_gw == gw_node ? "=>" : " "),
@@ -451,23 +458,23 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
(up > 2048 ? up / 1024 : up),
(up > 2048 ? "MBit" : "KBit"));
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
out:
return ret;
}
-int gw_client_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hard_iface *primary_if;
- struct gw_node *gw_node;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hard_iface *primary_if;
+ struct batadv_gw_node *gw_node;
struct hlist_node *node;
int gw_count = 0, ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -475,7 +482,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -484,8 +491,8 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
" %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
- SOURCE_VERSION, primary_if->net_dev->name,
+ "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
+ BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
rcu_read_lock();
@@ -494,7 +501,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
continue;
/* fails if orig_node has no router */
- if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
+ if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
gw_count++;
@@ -506,11 +513,11 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
+static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
unsigned char *p;
@@ -521,27 +528,29 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
pkt_len = skb_headlen(skb);
- if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
+ if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
goto out;
- p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
- pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
+ p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
+ pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;
/* Access the dhcp option lists. Each entry is made up by:
* - octet 1: option type
* - octet 2: option data len (only if type != 255 and 0)
- * - octet 3: option data */
+ * - octet 3: option data
+ */
while (*p != 255 && !ret) {
/* p now points to the first octet: option type */
if (*p == 53) {
/* type 53 is the message type option.
- * Jump the len octet and go to the data octet */
+ * Jump the len octet and go to the data octet
+ */
if (pkt_len < 2)
goto out;
p += 2;
/* check if the message type is what we need */
- if (*p == DHCP_REQUEST)
+ if (*p == BATADV_DHCP_REQUEST)
ret = true;
break;
} else if (*p == 0) {
@@ -568,7 +577,7 @@ out:
return ret;
}
-bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
+bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
struct iphdr *iphdr;
@@ -634,40 +643,41 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return true;
}
-bool gw_out_of_range(struct bat_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr)
{
- struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
- struct orig_node *orig_dst_node = NULL;
- struct gw_node *curr_gw = NULL;
+ struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+ struct batadv_orig_node *orig_dst_node = NULL;
+ struct batadv_gw_node *curr_gw = NULL;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
- ret = gw_is_dhcp_target(skb, &header_len);
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (!ret)
goto out;
- orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
+ orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
if (!orig_dst_node)
goto out;
if (!orig_dst_node->gw_flags)
goto out;
- ret = is_type_dhcprequest(skb, header_len);
+ ret = batadv_is_type_dhcprequest(skb, header_len);
if (!ret)
goto out;
switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_SERVER:
+ case BATADV_GW_MODE_SERVER:
/* If we are a GW then we are our best GW. We can artificially
- * set the tq towards ourself as the maximum value */
- curr_tq_avg = TQ_MAX_VALUE;
+ * set the tq towards ourself as the maximum value
+ */
+ curr_tq_avg = BATADV_TQ_MAX_VALUE;
break;
- case GW_MODE_CLIENT:
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ case BATADV_GW_MODE_CLIENT:
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!curr_gw)
goto out;
@@ -677,33 +687,35 @@ bool gw_out_of_range(struct bat_priv *bat_priv,
/* If the dhcp packet has been sent to a different gw,
* we have to evaluate whether the old gw is still
- * reliable enough */
- neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+ * reliable enough
+ */
+ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
+ NULL);
if (!neigh_curr)
goto out;
curr_tq_avg = neigh_curr->tq_avg;
break;
- case GW_MODE_OFF:
+ case BATADV_GW_MODE_OFF:
default:
goto out;
}
- neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+ neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
if (!neigh_old)
goto out;
- if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+ if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
out_of_range = true;
out:
if (orig_dst_node)
- orig_node_free_ref(orig_dst_node);
+ batadv_orig_node_free_ref(orig_dst_node);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
if (neigh_old)
- neigh_node_free_ref(neigh_old);
+ batadv_neigh_node_free_ref(neigh_old);
if (neigh_curr)
- neigh_node_free_ref(neigh_curr);
+ batadv_neigh_node_free_ref(neigh_curr);
return out_of_range;
}
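For gw_sel_class 1 ("fast connection") the election above scores each candidate as tq_avg squared times the advertised downlink, normalized by the TQ window, so lossy paths are punished quadratically before bandwidth is weighed in. The same arithmetic as a stand-alone program; the factor is widened to 64 bit here purely so large downlink values cannot overflow the intermediate product, and WINDOW_SIZE mirrors BATADV_TQ_LOCAL_WINDOW_SIZE (64).

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 64

static uint64_t gw_factor(uint8_t tq_avg, int down_kbit)
{
	uint64_t tq = tq_avg;

	return tq * tq * down_kbit * 100 * 100 /
	       (WINDOW_SIZE * WINDOW_SIZE * 64);
}

int main(void)
{
	/* a flawless 2 Mbit/s gateway vs. a lossy 8 Mbit/s one */
	printf("%llu vs %llu\n",
	       (unsigned long long)gw_factor(255, 2048),
	       (unsigned long long)gw_factor(128, 8192));
	return 0;
}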
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index bf56a5aea10..f0d129e323c 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,23 +15,26 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
-void gw_deselect(struct bat_priv *bat_priv);
-void gw_election(struct bat_priv *bat_priv);
-struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
-void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
-void gw_node_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags);
-void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
-void gw_node_purge(struct bat_priv *bat_priv);
-int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool gw_out_of_range(struct bat_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+void batadv_gw_deselect(struct batadv_priv *bat_priv);
+void batadv_gw_election(struct batadv_priv *bat_priv);
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv);
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags);
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
+bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
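batadv_is_type_dhcprequest() above walks the DHCP options area, which begins BATADV_DHCP_OPTIONS_OFFSET (240) bytes into the DHCP header and consists of type/length/value triples: type 53 is the message-type option, value 3 a REQUEST, type 0 a single pad octet, and type 255 the end marker. A stand-alone parser sketch under exactly those assumptions; it takes the options area directly rather than an skb.

#include <stdbool.h>
#include <stddef.h>

#define DHCP_OPT_MSG_TYPE	53
#define DHCP_MSG_REQUEST	3
#define DHCP_OPT_PAD		0
#define DHCP_OPT_END		255

static bool is_dhcp_request(const unsigned char *opts, size_t len)
{
	size_t i = 0;

	while (i < len && opts[i] != DHCP_OPT_END) {
		if (opts[i] == DHCP_OPT_MSG_TYPE)
			return i + 2 < len &&
			       opts[i + 2] == DHCP_MSG_REQUEST;
		if (opts[i] == DHCP_OPT_PAD) {	/* pad: one octet, no len */
			i++;
			continue;
		}
		if (i + 1 >= len)
			break;			/* truncated option */
		i += 2 + opts[i + 1];		/* skip type, len and data */
	}
	return false;
}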
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index ca57ac7d73b..9001208d175 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -24,7 +22,7 @@
#include "gateway_client.h"
/* calculates the gateway class from kbit */
-static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
+static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
{
int mdown = 0, tdown, tup, difference;
uint8_t sbit, part;
@@ -59,7 +57,7 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
}
/* returns the up and downspeeds in kbit, calculated from the class */
-void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
+void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
{
int sbit = (gw_srv_class & 0x80) >> 7;
int dpart = (gw_srv_class & 0x78) >> 3;
@@ -75,8 +73,8 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
*up = ((upart + 1) * (*down)) / 8;
}
-static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
- int *up, int *down)
+static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
+ int *up, int *down)
{
int ret, multi = 1;
char *slash_ptr, *tmp_ptr;
@@ -99,9 +97,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
ret = kstrtol(buff, 10, &ldown);
if (ret) {
- bat_err(net_dev,
- "Download speed of gateway mode invalid: %s\n",
- buff);
+ batadv_err(net_dev,
+ "Download speed of gateway mode invalid: %s\n",
+ buff);
return false;
}
@@ -124,9 +122,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
ret = kstrtol(slash_ptr + 1, 10, &lup);
if (ret) {
- bat_err(net_dev,
- "Upload speed of gateway mode invalid: %s\n",
- slash_ptr + 1);
+ batadv_err(net_dev,
+ "Upload speed of gateway mode invalid: %s\n",
+ slash_ptr + 1);
return false;
}
@@ -136,14 +134,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
return true;
}
-ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
+ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
+ size_t count)
{
- struct bat_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
long gw_bandwidth_tmp = 0;
int up = 0, down = 0;
bool ret;
- ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
+ ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
if (!ret)
goto end;
@@ -153,23 +152,25 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
if (!up)
up = down / 5;
- kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+ batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
- /**
- * the gw bandwidth we guessed above might not match the given
+ /* the gw bandwidth we guessed above might not match the given
* speeds, hence we need to calculate it back to show the number
* that is going to be propagated
- **/
- gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
-
- gw_deselect(bat_priv);
- bat_info(net_dev,
- "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
- atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ */
+ batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
+
+ if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+ return count;
+
+ batadv_gw_deselect(bat_priv);
+ batadv_info(net_dev,
+ "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
+ atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index b8fb11c4f92..13697f6e711 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,23 +15,23 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
-enum gw_modes {
- GW_MODE_OFF,
- GW_MODE_CLIENT,
- GW_MODE_SERVER,
+enum batadv_gw_modes {
+ BATADV_GW_MODE_OFF,
+ BATADV_GW_MODE_CLIENT,
+ BATADV_GW_MODE_SERVER,
};
-#define GW_MODE_OFF_NAME "off"
-#define GW_MODE_CLIENT_NAME "client"
-#define GW_MODE_SERVER_NAME "server"
+#define BATADV_GW_MODE_OFF_NAME "off"
+#define BATADV_GW_MODE_CLIENT_NAME "client"
+#define BATADV_GW_MODE_SERVER_NAME "server"
-void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
-ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count);
+void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
+ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
+ size_t count);
#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
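The gateway class byte decoded by batadv_gw_bandwidth_to_kbit() packs one scale bit, a 4-bit downlink part and a 3-bit uplink part: down = 32 * (sbit + 1) * 2^dpart kbit, and up is (upart + 1)/8 of down, with class 0 conventionally meaning no gateway. A quick stand-alone decoder of that encoding (the 0xB3 test value is arbitrary).

#include <stdint.h>
#include <stdio.h>

static void class_to_kbit(uint8_t cls, int *down, int *up)
{
	int sbit = (cls & 0x80) >> 7;
	int dpart = (cls & 0x78) >> 3;
	int upart = cls & 0x07;

	if (!cls) {		/* class 0: no gateway announced */
		*down = 0;
		*up = 0;
		return;
	}

	*down = 32 * (sbit + 1) * (1 << dpart);
	*up = ((upart + 1) * *down) / 8;
}

int main(void)
{
	int down, up;

	class_to_kbit(0xB3, &down, &up);
	printf("class 0xB3 -> %d kbit down / %d kbit up\n", down, up);
	return 0;
}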
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dc334fa8984..282bf6e9353 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -25,28 +23,29 @@
#include "send.h"
#include "translation-table.h"
#include "routing.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "originator.h"
#include "hash.h"
#include "bridge_loop_avoidance.h"
#include <linux/if_arp.h>
-void hardif_free_rcu(struct rcu_head *rcu)
+void batadv_hardif_free_rcu(struct rcu_head *rcu)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
- hard_iface = container_of(rcu, struct hard_iface, rcu);
+ hard_iface = container_of(rcu, struct batadv_hard_iface, rcu);
dev_put(hard_iface->net_dev);
kfree(hard_iface);
}
-struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
+struct batadv_hard_iface *
+batadv_hardif_get_by_netdev(const struct net_device *net_dev)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->net_dev == net_dev &&
atomic_inc_not_zero(&hard_iface->refcount))
goto out;
@@ -59,7 +58,7 @@ out:
return hard_iface;
}
-static int is_valid_iface(const struct net_device *net_dev)
+static int batadv_is_valid_iface(const struct net_device *net_dev)
{
if (net_dev->flags & IFF_LOOPBACK)
return 0;
@@ -71,26 +70,23 @@ static int is_valid_iface(const struct net_device *net_dev)
return 0;
/* no batman over batman */
- if (softif_is_valid(net_dev))
+ if (batadv_softif_is_valid(net_dev))
return 0;
- /* Device is being bridged */
- /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
- return 0; */
-
return 1;
}
-static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
+static struct batadv_hard_iface *
+batadv_hardif_get_active(const struct net_device *soft_iface)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
- if (hard_iface->if_status == IF_ACTIVE &&
+ if (hard_iface->if_status == BATADV_IF_ACTIVE &&
atomic_inc_not_zero(&hard_iface->refcount))
goto out;
}
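batadv_hardif_get_by_netdev() and batadv_hardif_get_active() both use the same lookup idiom: walk an RCU-protected list and only return an object whose refcount could still be raised, so nothing half-freed ever escapes the RCU section. A kernel-style sketch of the idiom with illustrative names; my_obj and my_list are not batman-adv symbols.

#include <linux/atomic.h>
#include <linux/rculist.h>

struct my_obj {
	struct list_head list;
	atomic_t refcount;
};

extern struct list_head my_list;

static struct my_obj *my_obj_get_first(void)
{
	struct my_obj *obj;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &my_list, list) {
		/* skip objects already on their way to being freed */
		if (atomic_inc_not_zero(&obj->refcount))
			goto out;
	}
	obj = NULL;
out:
	rcu_read_unlock();
	return obj;	/* caller drops the reference when done */
}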
@@ -102,32 +98,32 @@ out:
return hard_iface;
}
-static void primary_if_update_addr(struct bat_priv *bat_priv,
- struct hard_iface *oldif)
+static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *oldif)
{
- struct vis_packet *vis_packet;
- struct hard_iface *primary_if;
+ struct batadv_vis_packet *vis_packet;
+ struct batadv_hard_iface *primary_if;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- vis_packet = (struct vis_packet *)
+ vis_packet = (struct batadv_vis_packet *)
bat_priv->my_vis_info->skb_packet->data;
memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(vis_packet->sender_orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
- bla_update_orig_address(bat_priv, primary_if, oldif);
+ batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void primary_if_select(struct bat_priv *bat_priv,
- struct hard_iface *new_hard_iface)
+static void batadv_primary_if_select(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *new_hard_iface)
{
- struct hard_iface *curr_hard_iface;
+ struct batadv_hard_iface *curr_hard_iface;
ASSERT_RTNL();
@@ -141,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
goto out;
bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
- primary_if_update_addr(bat_priv, curr_hard_iface);
+ batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
out:
if (curr_hard_iface)
- hardif_free_ref(curr_hard_iface);
+ batadv_hardif_free_ref(curr_hard_iface);
}
-static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
+static bool
+batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
{
if (hard_iface->net_dev->flags & IFF_UP)
return true;
@@ -156,21 +153,21 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
return false;
}
-static void check_known_mac_addr(const struct net_device *net_dev)
+static void batadv_check_known_mac_addr(const struct net_device *net_dev)
{
- const struct hard_iface *hard_iface;
+ const struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
continue;
if (hard_iface->net_dev == net_dev)
continue;
- if (!compare_eth(hard_iface->net_dev->dev_addr,
- net_dev->dev_addr))
+ if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
+ net_dev->dev_addr))
continue;
pr_warn("The newly added mac address (%pM) already exists on: %s\n",
@@ -180,27 +177,29 @@ static void check_known_mac_addr(const struct net_device *net_dev)
rcu_read_unlock();
}
-int hardif_min_mtu(struct net_device *soft_iface)
+int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
- const struct bat_priv *bat_priv = netdev_priv(soft_iface);
- const struct hard_iface *hard_iface;
+ const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ const struct batadv_hard_iface *hard_iface;
/* allow big frames if all devices are capable to do so
- * (have MTU > 1500 + BAT_HEADER_LEN) */
+ * (have MTU > 1500 + BATADV_HEADER_LEN)
+ */
int min_mtu = ETH_DATA_LEN;
if (atomic_read(&bat_priv->fragmentation))
goto out;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
continue;
if (hard_iface->soft_iface != soft_iface)
continue;
- min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
+ min_mtu = min_t(int,
+ hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
min_mtu);
}
rcu_read_unlock();
@@ -209,68 +208,70 @@ out:
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
-void update_min_mtu(struct net_device *soft_iface)
+void batadv_update_min_mtu(struct net_device *soft_iface)
{
int min_mtu;
- min_mtu = hardif_min_mtu(soft_iface);
+ min_mtu = batadv_hardif_min_mtu(soft_iface);
if (soft_iface->mtu != min_mtu)
soft_iface->mtu = min_mtu;
}
-static void hardif_activate_interface(struct hard_iface *hard_iface)
+static void
+batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
- if (hard_iface->if_status != IF_INACTIVE)
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
- hard_iface->if_status = IF_TO_BE_ACTIVATED;
+ hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
- /**
- * the first active interface becomes our primary interface or
+ /* the first active interface becomes our primary interface or
* the next active interface after the old primary interface was removed
*/
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
- primary_if_select(bat_priv, hard_iface);
+ batadv_primary_if_select(bat_priv, hard_iface);
- bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
+ hard_iface->net_dev->name);
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void hardif_deactivate_interface(struct hard_iface *hard_iface)
+static void
+batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
{
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
return;
- hard_iface->if_status = IF_INACTIVE;
+ hard_iface->if_status = BATADV_IF_INACTIVE;
- bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+ hard_iface->net_dev->name);
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
}
-int hardif_enable_interface(struct hard_iface *hard_iface,
- const char *iface_name)
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ const char *iface_name)
{
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
struct net_device *soft_iface;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
int ret;
- if (hard_iface->if_status != IF_NOT_IN_USE)
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
goto out;
if (!atomic_inc_not_zero(&hard_iface->refcount))
@@ -284,7 +285,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
soft_iface = dev_get_by_name(&init_net, iface_name);
if (!soft_iface) {
- soft_iface = softif_create(iface_name);
+ soft_iface = batadv_softif_create(iface_name);
if (!soft_iface) {
ret = -ENOMEM;
@@ -295,7 +296,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
dev_hold(soft_iface);
}
- if (!softif_is_valid(soft_iface)) {
+ if (!batadv_softif_is_valid(soft_iface)) {
pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
soft_iface->name);
ret = -EINVAL;
@@ -306,48 +307,46 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
bat_priv = netdev_priv(hard_iface->soft_iface);
ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
- if (ret < 0) {
- ret = -ENOMEM;
+ if (ret < 0)
goto err_dev;
- }
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
- hard_iface->if_status = IF_INACTIVE;
- orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+ hard_iface->if_status = BATADV_IF_INACTIVE;
+ batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
- hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
- hard_iface->batman_adv_ptype.func = batman_skb_recv;
+ hard_iface->batman_adv_ptype.type = ethertype;
+ hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
atomic_set(&hard_iface->frag_seqno, 1);
- bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
- hard_iface->net_dev->name);
-
- if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
- hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
- hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (hardif_is_iface_up(hard_iface))
- hardif_activate_interface(hard_iface);
+ batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+ if (atomic_read(&bat_priv->fragmentation) &&
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+ hard_iface->net_dev->name, hard_iface->net_dev->mtu,
+ ETH_DATA_LEN + BATADV_HEADER_LEN);
+
+ if (!atomic_read(&bat_priv->fragmentation) &&
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+ hard_iface->net_dev->name, hard_iface->net_dev->mtu,
+ ETH_DATA_LEN + BATADV_HEADER_LEN);
+
+ if (batadv_hardif_is_iface_up(hard_iface))
+ batadv_hardif_activate_interface(hard_iface);
else
- bat_err(hard_iface->soft_iface,
- "Not using interface %s (retrying later): interface not active\n",
- hard_iface->net_dev->name);
+ batadv_err(hard_iface->soft_iface,
+ "Not using interface %s (retrying later): interface not active\n",
+ hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
- schedule_bat_ogm(hard_iface);
+ batadv_schedule_bat_ogm(hard_iface);
out:
return 0;
@@ -355,67 +354,68 @@ out:
err_dev:
dev_put(soft_iface);
err:
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
return ret;
}
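
Underneath the renames, the core of hardif_enable_interface() is the packet_type registration that feeds batman-adv frames into batadv_batman_skb_recv(). A reduced sketch of that hookup with a hypothetical stub handler; 0x4305 is the batman-adv ethertype that BATADV_ETH_P_BATMAN stands for:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_recv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *ptype, struct net_device *orig_dev)
{
	kfree_skb(skb);		/* this stub consumes everything */
	return NET_RX_SUCCESS;
}

static struct packet_type demo_ptype = {
	.type = __constant_htons(0x4305),	/* ETH_P_BATMAN */
	.func = demo_recv,
};

/* bind the handler to one device; dev_remove_pack() undoes it */
static void demo_hook(struct net_device *dev)
{
	demo_ptype.dev = dev;
	dev_add_pack(&demo_ptype);
}
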
-void hardif_disable_interface(struct hard_iface *hard_iface)
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
- if (hard_iface->if_status == IF_ACTIVE)
- hardif_deactivate_interface(hard_iface);
+ if (hard_iface->if_status == BATADV_IF_ACTIVE)
+ batadv_hardif_deactivate_interface(hard_iface);
- if (hard_iface->if_status != IF_INACTIVE)
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
- bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
+ hard_iface->net_dev->name);
dev_remove_pack(&hard_iface->batman_adv_ptype);
bat_priv->num_ifaces--;
- orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
+ batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
- struct hard_iface *new_if;
+ struct batadv_hard_iface *new_if;
- new_if = hardif_get_active(hard_iface->soft_iface);
- primary_if_select(bat_priv, new_if);
+ new_if = batadv_hardif_get_active(hard_iface->soft_iface);
+ batadv_primary_if_select(bat_priv, new_if);
if (new_if)
- hardif_free_ref(new_if);
+ batadv_hardif_free_ref(new_if);
}
bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
- hard_iface->if_status = IF_NOT_IN_USE;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
/* delete all references to this hard_iface */
- purge_orig_ref(bat_priv);
- purge_outstanding_packets(bat_priv, hard_iface);
+ batadv_purge_orig_ref(bat_priv);
+ batadv_purge_outstanding_packets(bat_priv, hard_iface);
dev_put(hard_iface->soft_iface);
/* nobody uses this interface anymore */
if (!bat_priv->num_ifaces)
- softif_destroy(hard_iface->soft_iface);
+ batadv_softif_destroy(hard_iface->soft_iface);
hard_iface->soft_iface = NULL;
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
+static struct batadv_hard_iface *
+batadv_hardif_add_interface(struct net_device *net_dev)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
int ret;
ASSERT_RTNL();
- ret = is_valid_iface(net_dev);
+ ret = batadv_is_valid_iface(net_dev);
if (ret != 1)
goto out;
@@ -425,23 +425,22 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
if (!hard_iface)
goto release_dev;
- ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
+ ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
if (ret)
goto free_if;
hard_iface->if_num = -1;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
- hard_iface->if_status = IF_NOT_IN_USE;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
INIT_LIST_HEAD(&hard_iface->list);
/* extra reference for return */
atomic_set(&hard_iface->refcount, 2);
- check_known_mac_addr(hard_iface->net_dev);
- list_add_tail_rcu(&hard_iface->list, &hardif_list);
+ batadv_check_known_mac_addr(hard_iface->net_dev);
+ list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
- /**
- * This can't be called via a bat_priv callback because
+ /* This can't be called via a bat_priv callback because
* we have no bat_priv yet.
*/
atomic_set(&hard_iface->seqno, 1);
@@ -457,102 +456,104 @@ out:
return NULL;
}
-static void hardif_remove_interface(struct hard_iface *hard_iface)
+static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
{
ASSERT_RTNL();
/* first deactivate interface */
- if (hard_iface->if_status != IF_NOT_IN_USE)
- hardif_disable_interface(hard_iface);
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ batadv_hardif_disable_interface(hard_iface);
- if (hard_iface->if_status != IF_NOT_IN_USE)
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
return;
- hard_iface->if_status = IF_TO_BE_REMOVED;
- sysfs_del_hardif(&hard_iface->hardif_obj);
- hardif_free_ref(hard_iface);
+ hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
+ batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
+ batadv_hardif_free_ref(hard_iface);
}
-void hardif_remove_interfaces(void)
+void batadv_hardif_remove_interfaces(void)
{
- struct hard_iface *hard_iface, *hard_iface_tmp;
+ struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
rtnl_lock();
list_for_each_entry_safe(hard_iface, hard_iface_tmp,
- &hardif_list, list) {
+ &batadv_hardif_list, list) {
list_del_rcu(&hard_iface->list);
- hardif_remove_interface(hard_iface);
+ batadv_hardif_remove_interface(hard_iface);
}
rtnl_unlock();
}
-static int hard_if_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+static int batadv_hard_if_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- struct hard_iface *primary_if = NULL;
- struct bat_priv *bat_priv;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface && event == NETDEV_REGISTER)
- hard_iface = hardif_add_interface(net_dev);
+ hard_iface = batadv_hardif_add_interface(net_dev);
if (!hard_iface)
goto out;
switch (event) {
case NETDEV_UP:
- hardif_activate_interface(hard_iface);
+ batadv_hardif_activate_interface(hard_iface);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
- hardif_deactivate_interface(hard_iface);
+ batadv_hardif_deactivate_interface(hard_iface);
break;
case NETDEV_UNREGISTER:
list_del_rcu(&hard_iface->list);
- hardif_remove_interface(hard_iface);
+ batadv_hardif_remove_interface(hard_iface);
break;
case NETDEV_CHANGEMTU:
if (hard_iface->soft_iface)
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
break;
case NETDEV_CHANGEADDR:
- if (hard_iface->if_status == IF_NOT_IN_USE)
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
goto hardif_put;
- check_known_mac_addr(hard_iface->net_dev);
+ batadv_check_known_mac_addr(hard_iface->net_dev);
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto hardif_put;
if (hard_iface == primary_if)
- primary_if_update_addr(bat_priv, NULL);
+ batadv_primary_if_update_addr(bat_priv, NULL);
break;
default:
break;
}
hardif_put:
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NOTIFY_DONE;
}
/* This function returns true if the interface represented by ifindex is an
- * 802.11 wireless device */
-bool is_wifi_iface(int ifindex)
+ * 802.11 wireless device
+ */
+bool batadv_is_wifi_iface(int ifindex)
{
struct net_device *net_device = NULL;
bool ret = false;
- if (ifindex == NULL_IFINDEX)
+ if (ifindex == BATADV_NULL_IFINDEX)
goto out;
net_device = dev_get_by_index(&init_net, ifindex);
@@ -561,7 +562,8 @@ bool is_wifi_iface(int ifindex)
#ifdef CONFIG_WIRELESS_EXT
/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
- * check for wireless_handlers != NULL */
+ * check for wireless_handlers != NULL
+ */
if (net_device->wireless_handlers)
ret = true;
else
@@ -575,6 +577,6 @@ out:
return ret;
}
-struct notifier_block hard_if_notifier = {
- .notifier_call = hard_if_event,
+struct notifier_block batadv_hard_if_notifier = {
+ .notifier_call = batadv_hard_if_event,
};
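
The notifier block above is the standard way to track net_device lifecycle events; in this kernel generation the callback's ptr argument is the net_device itself, exactly as batadv_hard_if_event() assumes. A stripped-down sketch with hypothetical demo_* names:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-3.11 convention, as here */

	switch (event) {
	case NETDEV_UP:
		pr_info("%s is up\n", dev->name);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed MTU to %d\n", dev->name, dev->mtu);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

/* register_netdevice_notifier(&demo_notifier) in module init,
 * unregister_netdevice_notifier(&demo_notifier) on exit
 */
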
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index e68c5655e61..3732366e744 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,44 +15,44 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
-enum hard_if_state {
- IF_NOT_IN_USE,
- IF_TO_BE_REMOVED,
- IF_INACTIVE,
- IF_ACTIVE,
- IF_TO_BE_ACTIVATED,
- IF_I_WANT_YOU
+enum batadv_hard_if_state {
+ BATADV_IF_NOT_IN_USE,
+ BATADV_IF_TO_BE_REMOVED,
+ BATADV_IF_INACTIVE,
+ BATADV_IF_ACTIVE,
+ BATADV_IF_TO_BE_ACTIVATED,
+ BATADV_IF_I_WANT_YOU,
};
-extern struct notifier_block hard_if_notifier;
+extern struct notifier_block batadv_hard_if_notifier;
-struct hard_iface*
-hardif_get_by_netdev(const struct net_device *net_dev);
-int hardif_enable_interface(struct hard_iface *hard_iface,
- const char *iface_name);
-void hardif_disable_interface(struct hard_iface *hard_iface);
-void hardif_remove_interfaces(void);
-int hardif_min_mtu(struct net_device *soft_iface);
-void update_min_mtu(struct net_device *soft_iface);
-void hardif_free_rcu(struct rcu_head *rcu);
-bool is_wifi_iface(int ifindex);
+struct batadv_hard_iface*
+batadv_hardif_get_by_netdev(const struct net_device *net_dev);
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ const char *iface_name);
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
+void batadv_hardif_remove_interfaces(void);
+int batadv_hardif_min_mtu(struct net_device *soft_iface);
+void batadv_update_min_mtu(struct net_device *soft_iface);
+void batadv_hardif_free_rcu(struct rcu_head *rcu);
+bool batadv_is_wifi_iface(int ifindex);
-static inline void hardif_free_ref(struct hard_iface *hard_iface)
+static inline void
+batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
{
if (atomic_dec_and_test(&hard_iface->refcount))
- call_rcu(&hard_iface->rcu, hardif_free_rcu);
+ call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
}
-static inline struct hard_iface *primary_if_get_selected(
- struct bat_priv *bat_priv)
+static inline struct batadv_hard_iface *
+batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
hard_iface = rcu_dereference(bat_priv->primary_if);
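
batadv_hardif_free_ref() and batadv_primary_if_get_selected() together form the hand-rolled refcount-plus-RCU idiom used throughout batman-adv: lookups take a reference with atomic_inc_not_zero() under rcu_read_lock(), and the final put defers the free with call_rcu() so readers never see the object vanish under them. Condensed to a hypothetical demo_obj:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_obj {
	atomic_t refcount;
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_obj, rcu));
}

static void demo_put(struct demo_obj *obj)
{
	/* the last put defers kfree() until all RCU readers are done */
	if (atomic_dec_and_test(&obj->refcount))
		call_rcu(&obj->rcu, demo_free_rcu);
}

static bool demo_get(struct demo_obj *obj)
{
	/* fails if the object is already on its way to being freed */
	return atomic_inc_not_zero(&obj->refcount);
}
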
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 117687bedf2..15a849c2d41 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,25 +15,24 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
#include "hash.h"
/* clears the hash */
-static void hash_init(struct hashtable_t *hash)
+static void batadv_hash_init(struct batadv_hashtable *hash)
{
uint32_t i;
- for (i = 0 ; i < hash->size; i++) {
+ for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
}
/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash)
+void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
kfree(hash->table);
@@ -42,9 +40,9 @@ void hash_destroy(struct hashtable_t *hash)
}
/* allocates and clears the hash */
-struct hashtable_t *hash_new(uint32_t size)
+struct batadv_hashtable *batadv_hash_new(uint32_t size)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
if (!hash)
@@ -60,7 +58,7 @@ struct hashtable_t *hash_new(uint32_t size)
goto free_table;
hash->size = size;
- hash_init(hash);
+ batadv_hash_init(hash);
return hash;
free_table:
@@ -69,3 +67,12 @@ free_hash:
kfree(hash);
return NULL;
}
+
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key)
+{
+ uint32_t i;
+
+ for (i = 0; i < hash->size; i++)
+ lockdep_set_class(&hash->list_locks[i], key);
+}
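
batadv_hash_set_lock_class() is new in this patch; it lets each hashtable user assign its per-bucket spinlocks their own lockdep class, so lockdep does not conflate the bucket locks of unrelated hashes. A plausible caller, assuming a file-scope key as is conventional:

#include "hash.h"	/* batadv_hashtable, introduced above */

static struct lock_class_key demo_hash_lock_class;

static struct batadv_hashtable *demo_hash_setup(void)
{
	struct batadv_hashtable *hash;

	hash = batadv_hash_new(128);	/* 128 buckets */
	if (!hash)
		return NULL;

	/* give these bucket locks a class distinct from those of
	 * other hashtables to avoid false deadlock reports
	 */
	batadv_hash_set_lock_class(hash, &demo_hash_lock_class);

	return hash;
}
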
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d4bd7862719..977de9c75fc 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_HASH_H_
@@ -24,35 +22,42 @@
#include <linux/list.h>
-/* callback to a compare function. should
- * compare 2 element datas for their keys,
- * return 0 if same and not 0 if not
- * same */
-typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
+/* callback to a compare function. should compare the keys of 2 elements,
+ * returning 0 if they are the same and non-zero otherwise
+ */
+typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
+ const void *);
/* the hashfunction, should return an index
* based on the key in the data of the first
- * argument and the size the second */
-typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
-typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
+ * argument and the size as the second
+ */
+typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
+typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
-struct hashtable_t {
+struct batadv_hashtable {
struct hlist_head *table; /* the hashtable itself with the buckets */
spinlock_t *list_locks; /* spinlock for each hash list entry */
uint32_t size; /* size of hashtable */
};
/* allocates and clears the hash */
-struct hashtable_t *hash_new(uint32_t size);
+struct batadv_hashtable *batadv_hash_new(uint32_t size);
+
+/* set class key for all locks */
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key);
/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash);
+void batadv_hash_destroy(struct batadv_hashtable *hash);
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
* called to remove the elements inside of the hash. if you don't remove the
- * elements, memory might be leaked. */
-static inline void hash_delete(struct hashtable_t *hash,
- hashdata_free_cb free_cb, void *arg)
+ * elements, memory might be leaked.
+ */
+static inline void batadv_hash_delete(struct batadv_hashtable *hash,
+ batadv_hashdata_free_cb free_cb,
+ void *arg)
{
struct hlist_head *head;
struct hlist_node *node, *node_tmp;
@@ -73,11 +78,11 @@ static inline void hash_delete(struct hashtable_t *hash,
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
}
/**
- * hash_add - adds data to the hashtable
+ * batadv_hash_add - adds data to the hashtable
* @hash: storage hash table
* @compare: callback to determine if 2 hash elements are identical
* @choose: callback calculating the hash index
@@ -87,11 +92,11 @@ static inline void hash_delete(struct hashtable_t *hash,
* Returns 0 on success, 1 if the element already is in the hash
* and -1 on error.
*/
-
-static inline int hash_add(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose,
- const void *data, struct hlist_node *data_node)
+static inline int batadv_hash_add(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ const void *data,
+ struct hlist_node *data_node)
{
uint32_t index;
int ret = -1;
@@ -106,26 +111,23 @@ static inline int hash_add(struct hashtable_t *hash,
head = &hash->table[index];
list_lock = &hash->list_locks[index];
- rcu_read_lock();
- __hlist_for_each_rcu(node, head) {
+ spin_lock_bh(list_lock);
+
+ hlist_for_each(node, head) {
if (!compare(node, data))
continue;
ret = 1;
- goto err_unlock;
+ goto unlock;
}
- rcu_read_unlock();
/* no duplicate found in list, add new element */
- spin_lock_bh(list_lock);
hlist_add_head_rcu(data_node, head);
- spin_unlock_bh(list_lock);
ret = 0;
- goto out;
-err_unlock:
- rcu_read_unlock();
+unlock:
+ spin_unlock_bh(list_lock);
out:
return ret;
}
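
The locking change in batadv_hash_add() is more than a rename: the old code scanned for duplicates under rcu_read_lock() and only then took the bucket spinlock, leaving a window in which two concurrent writers could both miss an existing entry and insert the same key twice. Holding the spinlock across both the scan and the insert closes that race while readers stay lock-free. The resulting shape, reduced to a skeleton with a hypothetical match callback:

#include <linux/rculist.h>
#include <linux/spinlock.h>

static int demo_bucket_add(struct hlist_head *head, spinlock_t *lock,
			   struct hlist_node *new,
			   int (*demo_match)(const struct hlist_node *))
{
	struct hlist_node *node;
	int ret = 1;			/* assume duplicate */

	spin_lock_bh(lock);		/* one lock covers scan + insert */
	hlist_for_each(node, head) {
		if (demo_match(node))
			goto unlock;	/* already present */
	}
	hlist_add_head_rcu(new, head);	/* readers still go lock-free */
	ret = 0;
unlock:
	spin_unlock_bh(lock);
	return ret;
}
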
@@ -133,10 +135,12 @@ out:
/* removes data from hash, if found. returns pointer to data on success, so you
 * can remove the used structure yourself, or NULL on error. data could be the
* structure you use with just the key filled, we just need the key for
- * comparing. */
-static inline void *hash_remove(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data)
+ * comparing.
+ */
+static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ void *data)
{
uint32_t index;
struct hlist_node *node;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 2e98a57f340..bde3cf74750 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -28,21 +26,21 @@
#include "originator.h"
#include "hard-interface.h"
-static struct socket_client *socket_client_hash[256];
+static struct batadv_socket_client *batadv_socket_client_hash[256];
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
+static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
+ struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
-void bat_socket_init(void)
+void batadv_socket_init(void)
{
- memset(socket_client_hash, 0, sizeof(socket_client_hash));
+ memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
}
-static int bat_socket_open(struct inode *inode, struct file *file)
+static int batadv_socket_open(struct inode *inode, struct file *file)
{
unsigned int i;
- struct socket_client *socket_client;
+ struct batadv_socket_client *socket_client;
nonseekable_open(inode, file);
@@ -51,14 +49,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
if (!socket_client)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
- if (!socket_client_hash[i]) {
- socket_client_hash[i] = socket_client;
+ for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
+ if (!batadv_socket_client_hash[i]) {
+ batadv_socket_client_hash[i] = socket_client;
break;
}
}
- if (i == ARRAY_SIZE(socket_client_hash)) {
+ if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
pr_err("Error - can't add another packet client: maximum number of clients reached\n");
kfree(socket_client);
return -EXFULL;
@@ -73,14 +71,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
file->private_data = socket_client;
- inc_module_count();
+ batadv_inc_module_count();
return 0;
}
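
batadv_socket_open() hands each opener the first free slot of a 256-entry table, and the slot index later travels inside the ICMP packet as the uid. The slot-allocator idiom in isolation, assuming a bare pointer table (the real code additionally serializes access through per-client locks):

#include <linux/errno.h>
#include <linux/kernel.h>

static void *demo_slots[256];

/* returns the claimed slot index, or -EXFULL when all slots are taken */
static int demo_slot_alloc(void *client)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_slots); i++) {
		if (!demo_slots[i]) {
			demo_slots[i] = client;
			return i;
		}
	}
	return -EXFULL;
}
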
-static int bat_socket_release(struct inode *inode, struct file *file)
+static int batadv_socket_release(struct inode *inode, struct file *file)
{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_socket_packet *socket_packet;
struct list_head *list_pos, *list_pos_tmp;
spin_lock_bh(&socket_client->lock);
@@ -88,33 +86,33 @@ static int bat_socket_release(struct inode *inode, struct file *file)
/* for all packets in the queue ... */
list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
socket_packet = list_entry(list_pos,
- struct socket_packet, list);
+ struct batadv_socket_packet, list);
list_del(list_pos);
kfree(socket_packet);
}
- socket_client_hash[socket_client->index] = NULL;
+ batadv_socket_client_hash[socket_client->index] = NULL;
spin_unlock_bh(&socket_client->lock);
kfree(socket_client);
- dec_module_count();
+ batadv_dec_module_count();
return 0;
}
-static ssize_t bat_socket_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t batadv_socket_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_socket_packet *socket_packet;
size_t packet_len;
int error;
if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
return -EAGAIN;
- if ((!buf) || (count < sizeof(struct icmp_packet)))
+ if ((!buf) || (count < sizeof(struct batadv_icmp_packet)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, buf, count))
@@ -129,7 +127,7 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
spin_lock_bh(&socket_client->lock);
socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
+ struct batadv_socket_packet, list);
list_del(&socket_packet->list);
socket_client->queue_len--;
@@ -146,34 +144,34 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
return packet_len;
}
-static ssize_t bat_socket_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off)
+static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
+ size_t len, loff_t *off)
{
- struct socket_client *socket_client = file->private_data;
- struct bat_priv *bat_priv = socket_client->bat_priv;
- struct hard_iface *primary_if = NULL;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_priv *bat_priv = socket_client->bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
struct sk_buff *skb;
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_icmp_packet_rr *icmp_packet;
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- size_t packet_len = sizeof(struct icmp_packet);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ size_t packet_len = sizeof(struct batadv_icmp_packet);
- if (len < sizeof(struct icmp_packet)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: invalid packet size\n");
+ if (len < sizeof(struct batadv_icmp_packet)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: invalid packet size\n");
return -EINVAL;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
len = -EFAULT;
goto out;
}
- if (len >= sizeof(struct icmp_packet_rr))
- packet_len = sizeof(struct icmp_packet_rr);
+ if (len >= sizeof(struct batadv_icmp_packet_rr))
+ packet_len = sizeof(struct batadv_icmp_packet_rr);
skb = dev_alloc_skb(packet_len + ETH_HLEN);
if (!skb) {
@@ -182,81 +180,82 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
}
skb_reserve(skb, ETH_HLEN);
- icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
if (copy_from_user(icmp_packet, buff, packet_len)) {
len = -EFAULT;
goto free_skb;
}
- if (icmp_packet->header.packet_type != BAT_ICMP) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
+ if (icmp_packet->header.packet_type != BATADV_ICMP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
goto free_skb;
}
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
len = -EINVAL;
goto free_skb;
}
icmp_packet->uid = socket_client->index;
- if (icmp_packet->header.version != COMPAT_VERSION) {
- icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->header.version = COMPAT_VERSION;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+ if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
+ icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
+ icmp_packet->header.version = BATADV_COMPAT_VERSION;
+ batadv_socket_add_packet(socket_client, icmp_packet,
+ packet_len);
goto free_skb;
}
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dst_unreach;
- orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
if (!orig_node)
goto dst_unreach;
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto dst_unreach;
if (!neigh_node->if_incoming)
goto dst_unreach;
- if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+ if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
goto dst_unreach;
memcpy(icmp_packet->orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
- if (packet_len == sizeof(struct icmp_packet_rr))
+ if (packet_len == sizeof(struct batadv_icmp_packet_rr))
memcpy(icmp_packet->rr,
neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
goto out;
dst_unreach:
- icmp_packet->msg_type = DESTINATION_UNREACHABLE;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+ icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
+ batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
free_skb:
kfree_skb(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return len;
}
-static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
+static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
{
- struct socket_client *socket_client = file->private_data;
+ struct batadv_socket_client *socket_client = file->private_data;
poll_wait(file, &socket_client->queue_wait, wait);
@@ -266,39 +265,39 @@ static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
return 0;
}
-static const struct file_operations fops = {
+static const struct file_operations batadv_fops = {
.owner = THIS_MODULE,
- .open = bat_socket_open,
- .release = bat_socket_release,
- .read = bat_socket_read,
- .write = bat_socket_write,
- .poll = bat_socket_poll,
+ .open = batadv_socket_open,
+ .release = batadv_socket_release,
+ .read = batadv_socket_read,
+ .write = batadv_socket_write,
+ .poll = batadv_socket_poll,
.llseek = no_llseek,
};
-int bat_socket_setup(struct bat_priv *bat_priv)
+int batadv_socket_setup(struct batadv_priv *bat_priv)
{
struct dentry *d;
if (!bat_priv->debug_dir)
goto err;
- d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &fops);
- if (d)
+ d = debugfs_create_file(BATADV_ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
+ bat_priv->debug_dir, bat_priv, &batadv_fops);
+ if (!d)
goto err;
return 0;
err:
- return 1;
+ return -ENOMEM;
}
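
Besides the rename, this hunk fixes an inverted error check: the old code jumped to err when debugfs_create_file() succeeded (if (d)) and reported failure as a bare 1 instead of an errno. The corrected pattern as a standalone sketch with hypothetical names:

#include <linux/debugfs.h>
#include <linux/fs.h>

static const struct file_operations demo_fops;	/* stub; a real one sets .open etc. */

static int demo_debugfs_setup(struct dentry *parent, void *priv)
{
	struct dentry *d;

	d = debugfs_create_file("socket", S_IFREG | S_IWUSR | S_IRUSR,
				parent, priv, &demo_fops);
	if (!d)				/* NULL means creation failed */
		return -ENOMEM;

	return 0;
}
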
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
+static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
+ struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
{
- struct socket_packet *socket_packet;
+ struct batadv_socket_packet *socket_packet;
socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
@@ -312,8 +311,9 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
spin_lock_bh(&socket_client->lock);
/* while waiting for the lock the socket_client could have been
- * deleted */
- if (!socket_client_hash[icmp_packet->uid]) {
+ * deleted
+ */
+ if (!batadv_socket_client_hash[icmp_packet->uid]) {
spin_unlock_bh(&socket_client->lock);
kfree(socket_packet);
return;
@@ -324,7 +324,8 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
if (socket_client->queue_len > 100) {
socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
+ struct batadv_socket_packet,
+ list);
list_del(&socket_packet->list);
kfree(socket_packet);
@@ -336,11 +337,12 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
wake_up(&socket_client->queue_wait);
}
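
batadv_socket_add_packet() bounds each client's queue at 100 entries, discarding the oldest entry once the limit is crossed and only then waking the reader. The drop-oldest policy on its own, assuming a made-up demo_pkt type:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_pkt {
	struct list_head list;
};

/* append new_pkt; if the queue is over its cap, drop the oldest entry */
static void demo_queue_add(struct list_head *queue, size_t *len,
			   struct demo_pkt *new_pkt, size_t cap)
{
	struct demo_pkt *oldest;

	list_add_tail(&new_pkt->list, queue);
	(*len)++;

	if (*len > cap) {
		oldest = list_first_entry(queue, struct demo_pkt, list);
		list_del(&oldest->list);
		kfree(oldest);
		(*len)--;
	}
}
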
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
+void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
{
- struct socket_client *hash = socket_client_hash[icmp_packet->uid];
+ struct batadv_socket_client *hash;
+ hash = batadv_socket_client_hash[icmp_packet->uid];
if (hash)
- bat_socket_add_packet(hash, icmp_packet, icmp_len);
+ batadv_socket_add_packet(hash, icmp_packet, icmp_len);
}
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 380ed4c2443..29443a1dbb5 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,17 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#define ICMP_SOCKET "socket"
+#define BATADV_ICMP_SOCKET "socket"
-void bat_socket_init(void);
-int bat_socket_setup(struct bat_priv *bat_priv);
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
+void batadv_socket_init(void);
+int batadv_socket_setup(struct batadv_priv *bat_priv);
+void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 083a2993efe..13c88b25ab3 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,12 +15,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
-#include "bat_sysfs.h"
-#include "bat_debugfs.h"
+#include "sysfs.h"
+#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
@@ -37,61 +35,65 @@
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
- * list traversals just rcu-locked */
-struct list_head hardif_list;
-static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
-char bat_routing_algo[20] = "BATMAN IV";
-static struct hlist_head bat_algo_list;
+ * list traversals just rcu-locked
+ */
+struct list_head batadv_hardif_list;
+static int (*batadv_rx_handler[256])(struct sk_buff *,
+ struct batadv_hard_iface *);
+char batadv_routing_algo[20] = "BATMAN_IV";
+static struct hlist_head batadv_algo_list;
-unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-struct workqueue_struct *bat_event_workqueue;
+struct workqueue_struct *batadv_event_workqueue;
-static void recv_handler_init(void);
+static void batadv_recv_handler_init(void);
-static int __init batman_init(void)
+static int __init batadv_init(void)
{
- INIT_LIST_HEAD(&hardif_list);
- INIT_HLIST_HEAD(&bat_algo_list);
+ INIT_LIST_HEAD(&batadv_hardif_list);
+ INIT_HLIST_HEAD(&batadv_algo_list);
- recv_handler_init();
+ batadv_recv_handler_init();
- bat_iv_init();
+ batadv_iv_init();
/* the name should not be longer than 10 chars - see
- * http://lwn.net/Articles/23634/ */
- bat_event_workqueue = create_singlethread_workqueue("bat_events");
+ * http://lwn.net/Articles/23634/
+ */
+ batadv_event_workqueue = create_singlethread_workqueue("bat_events");
- if (!bat_event_workqueue)
+ if (!batadv_event_workqueue)
return -ENOMEM;
- bat_socket_init();
- debugfs_init();
+ batadv_socket_init();
+ batadv_debugfs_init();
- register_netdevice_notifier(&hard_if_notifier);
+ register_netdevice_notifier(&batadv_hard_if_notifier);
pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
- SOURCE_VERSION, COMPAT_VERSION);
+ BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
return 0;
}
-static void __exit batman_exit(void)
+static void __exit batadv_exit(void)
{
- debugfs_destroy();
- unregister_netdevice_notifier(&hard_if_notifier);
- hardif_remove_interfaces();
+ batadv_debugfs_destroy();
+ unregister_netdevice_notifier(&batadv_hard_if_notifier);
+ batadv_hardif_remove_interfaces();
- flush_workqueue(bat_event_workqueue);
- destroy_workqueue(bat_event_workqueue);
- bat_event_workqueue = NULL;
+ flush_workqueue(batadv_event_workqueue);
+ destroy_workqueue(batadv_event_workqueue);
+ batadv_event_workqueue = NULL;
rcu_barrier();
}
-int mesh_init(struct net_device *soft_iface)
+int batadv_mesh_init(struct net_device *soft_iface)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int ret;
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
@@ -110,72 +112,77 @@ int mesh_init(struct net_device *soft_iface)
INIT_LIST_HEAD(&bat_priv->tt_req_list);
INIT_LIST_HEAD(&bat_priv->tt_roam_list);
- if (originator_init(bat_priv) < 1)
+ ret = batadv_originator_init(bat_priv);
+ if (ret < 0)
goto err;
- if (tt_init(bat_priv) < 1)
+ ret = batadv_tt_init(bat_priv);
+ if (ret < 0)
goto err;
- tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
+ batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
+ BATADV_NULL_IFINDEX);
- if (vis_init(bat_priv) < 1)
+ ret = batadv_vis_init(bat_priv);
+ if (ret < 0)
goto err;
- if (bla_init(bat_priv) < 1)
+ ret = batadv_bla_init(bat_priv);
+ if (ret < 0)
goto err;
atomic_set(&bat_priv->gw_reselect, 0);
- atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
- goto end;
-
-err:
- mesh_free(soft_iface);
- return -1;
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
-end:
return 0;
+
+err:
+ batadv_mesh_free(soft_iface);
+ return ret;
}
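
The batadv_mesh_init() rework replaces the old "< 1" truthiness checks and hard-coded -1 with real errno propagation and a single unwinding label. Its control flow, boiled down to two hypothetical init steps:

#include <linux/errno.h>

static int demo_step_a(void) { return 0; }		/* stand-in init steps */
static int demo_step_b(void) { return -ENOMEM; }	/* pretend failure */
static void demo_teardown(void) { }

static int demo_init(void)
{
	int ret;

	ret = demo_step_a();
	if (ret < 0)
		goto err;

	ret = demo_step_b();
	if (ret < 0)
		goto err;

	return 0;

err:
	demo_teardown();	/* safe to call on a partial init */
	return ret;		/* propagate the real errno, not -1 */
}
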
-void mesh_free(struct net_device *soft_iface)
+void batadv_mesh_free(struct net_device *soft_iface)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
- atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
- purge_outstanding_packets(bat_priv, NULL);
+ batadv_purge_outstanding_packets(bat_priv, NULL);
- vis_quit(bat_priv);
+ batadv_vis_quit(bat_priv);
- gw_node_purge(bat_priv);
- originator_free(bat_priv);
+ batadv_gw_node_purge(bat_priv);
+ batadv_originator_free(bat_priv);
- tt_free(bat_priv);
+ batadv_tt_free(bat_priv);
- bla_free(bat_priv);
+ batadv_bla_free(bat_priv);
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ free_percpu(bat_priv->bat_counters);
+
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
-void inc_module_count(void)
+void batadv_inc_module_count(void)
{
try_module_get(THIS_MODULE);
}
-void dec_module_count(void)
+void batadv_dec_module_count(void)
{
module_put(THIS_MODULE);
}
-int is_my_mac(const uint8_t *addr)
+int batadv_is_my_mac(const uint8_t *addr)
{
- const struct hard_iface *hard_iface;
+ const struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
+ if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
@@ -184,8 +191,8 @@ int is_my_mac(const uint8_t *addr)
return 0;
}
-static int recv_unhandled_packet(struct sk_buff *skb,
- struct hard_iface *recv_if)
+static int batadv_recv_unhandled_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
return NET_RX_DROP;
}
@@ -193,16 +200,18 @@ static int recv_unhandled_packet(struct sk_buff *skb,
/* incoming packets with the batman ethertype received on any active hard
* interface
*/
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
{
- struct bat_priv *bat_priv;
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *hard_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *hard_iface;
uint8_t idx;
int ret;
- hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
+ hard_iface = container_of(ptype, struct batadv_hard_iface,
+ batman_adv_ptype);
skb = skb_share_check(skb, GFP_ATOMIC);
/* skb was released by skb_share_check() */
@@ -222,27 +231,27 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
bat_priv = netdev_priv(hard_iface->soft_iface);
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto err_free;
/* discard frames on not active interfaces */
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto err_free;
- batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
+ if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batadv_ogm_packet->header.version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
- idx = batman_ogm_packet->header.packet_type;
- ret = (*recv_packet_handler[idx])(skb, hard_iface);
+ idx = batadv_ogm_packet->header.packet_type;
+ ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
kfree_skb(skb);
@@ -259,51 +268,52 @@ err_out:
return NET_RX_DROP;
}
-static void recv_handler_init(void)
+static void batadv_recv_handler_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
- recv_packet_handler[i] = recv_unhandled_packet;
+ for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
+ batadv_rx_handler[i] = batadv_recv_unhandled_packet;
/* batman icmp packet */
- recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
+ batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
/* unicast packet */
- recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
+ batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
/* fragmented unicast packet */
- recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
+ batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
/* broadcast packet */
- recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
+ batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
/* vis packet */
- recv_packet_handler[BAT_VIS] = recv_vis_packet;
+ batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
/* Translation table query (request or response) */
- recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
+ batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
/* Roaming advertisement */
- recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
+ batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}
-int recv_handler_register(uint8_t packet_type,
- int (*recv_handler)(struct sk_buff *,
- struct hard_iface *))
+int
+batadv_recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *))
{
- if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
+ if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
return -EBUSY;
- recv_packet_handler[packet_type] = recv_handler;
+ batadv_rx_handler[packet_type] = recv_handler;
return 0;
}
-void recv_handler_unregister(uint8_t packet_type)
+void batadv_recv_handler_unregister(uint8_t packet_type)
{
- recv_packet_handler[packet_type] = recv_unhandled_packet;
+ batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
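
Packet dispatch runs through a 256-slot function-pointer table indexed by the one-byte packet type; every slot is pre-filled with an "unhandled" stub so the hot path never tests for NULL, and registration refuses to overwrite a claimed slot with -EBUSY. The idiom with hypothetical handler types:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

typedef int (*demo_handler_t)(void *pkt);

static int demo_unhandled(void *pkt)
{
	return -ENOTSUPP;
}

static demo_handler_t demo_table[256];

static void demo_table_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_table); i++)
		demo_table[i] = demo_unhandled;
}

/* a slot can be claimed exactly once; -EBUSY mirrors the code above */
static int demo_register(uint8_t type, demo_handler_t handler)
{
	if (demo_table[type] != demo_unhandled)
		return -EBUSY;

	demo_table[type] = handler;
	return 0;
}
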
-static struct bat_algo_ops *bat_algo_get(char *name)
+static struct batadv_algo_ops *batadv_algo_get(char *name)
{
- struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
+ struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
struct hlist_node *node;
- hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
if (strcmp(bat_algo_ops_tmp->name, name) != 0)
continue;
@@ -314,15 +324,16 @@ static struct bat_algo_ops *bat_algo_get(char *name)
return bat_algo_ops;
}
-int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
- struct bat_algo_ops *bat_algo_ops_tmp;
- int ret = -1;
+ struct batadv_algo_ops *bat_algo_ops_tmp;
+ int ret;
- bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
+ bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
pr_info("Trying to register already registered routing algorithm: %s\n",
bat_algo_ops->name);
+ ret = -EEXIST;
goto out;
}
@@ -335,23 +346,24 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
!bat_algo_ops->bat_ogm_emit) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
+ ret = -EINVAL;
goto out;
}
INIT_HLIST_NODE(&bat_algo_ops->list);
- hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
+ hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
ret = 0;
out:
return ret;
}
-int bat_algo_select(struct bat_priv *bat_priv, char *name)
+int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
- struct bat_algo_ops *bat_algo_ops;
- int ret = -1;
+ struct batadv_algo_ops *bat_algo_ops;
+ int ret = -EINVAL;
- bat_algo_ops = bat_algo_get(name);
+ bat_algo_ops = batadv_algo_get(name);
if (!bat_algo_ops)
goto out;
@@ -362,50 +374,56 @@ out:
return ret;
}
-int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_algo_ops *bat_algo_ops;
struct hlist_node *node;
seq_printf(seq, "Available routing algorithms:\n");
- hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
seq_printf(seq, "%s\n", bat_algo_ops->name);
}
return 0;
}
-static int param_set_ra(const char *val, const struct kernel_param *kp)
+static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_algo_ops *bat_algo_ops;
+ char *algo_name = (char *)val;
+ size_t name_len = strlen(algo_name);
+
+ if (algo_name[name_len - 1] == '\n')
+ algo_name[name_len - 1] = '\0';
- bat_algo_ops = bat_algo_get((char *)val);
+ bat_algo_ops = batadv_algo_get(algo_name);
if (!bat_algo_ops) {
- pr_err("Routing algorithm '%s' is not supported\n", val);
+ pr_err("Routing algorithm '%s' is not supported\n", algo_name);
return -EINVAL;
}
- return param_set_copystring(val, kp);
+ return param_set_copystring(algo_name, kp);
}
-static const struct kernel_param_ops param_ops_ra = {
- .set = param_set_ra,
+static const struct kernel_param_ops batadv_param_ops_ra = {
+ .set = batadv_param_set_ra,
.get = param_get_string,
};
-static struct kparam_string __param_string_ra = {
- .maxlen = sizeof(bat_routing_algo),
- .string = bat_routing_algo,
+static struct kparam_string batadv_param_string_ra = {
+ .maxlen = sizeof(batadv_routing_algo),
+ .string = batadv_routing_algo,
};
-module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
-module_init(batman_init);
-module_exit(batman_exit);
+module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
+ 0644);
+module_init(batadv_init);
+module_exit(batadv_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
-MODULE_VERSION(SOURCE_VERSION);
+MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
+MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
+MODULE_VERSION(BATADV_SOURCE_VERSION);
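
batadv_param_set_ra() also gains a trailing-newline strip, so "echo BATMAN_IV > /sys/module/batman_adv/parameters/routing_algo" matches even though echo appends '\n'. The custom kernel_param_ops pattern in miniature, with a stand-in validation step:

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/string.h>

static char demo_algo[20] = "BATMAN_IV";

static int demo_param_set(const char *val, const struct kernel_param *kp)
{
	char *name = (char *)val;
	size_t len = strlen(name);

	/* values written via sysfs usually carry a trailing newline */
	if (len && name[len - 1] == '\n')
		name[len - 1] = '\0';

	if (strcmp(name, "BATMAN_IV") != 0)	/* stand-in validation */
		return -EINVAL;

	return param_set_copystring(name, kp);
}

static const struct kernel_param_ops demo_param_ops = {
	.set = demo_param_set,
	.get = param_get_string,
};

static struct kparam_string demo_param_string = {
	.maxlen = sizeof(demo_algo),
	.string = demo_algo,
};

module_param_cb(routing_algo, &demo_param_ops, &demo_param_string, 0644);
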
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index f4a3ec00347..5d8fa075794 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,100 +15,106 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
- "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
-#define DRIVER_DESC "B.A.T.M.A.N. advanced"
-#define DRIVER_DEVICE "batman-adv"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
+ "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
+#define BATADV_DRIVER_DEVICE "batman-adv"
-#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2012.2.0"
+#ifndef BATADV_SOURCE_VERSION
+#define BATADV_SOURCE_VERSION "2012.3.0"
#endif
/* B.A.T.M.A.N. parameters */
-#define TQ_MAX_VALUE 255
-#define JITTER 20
+#define BATADV_TQ_MAX_VALUE 255
+#define BATADV_JITTER 20
- /* Time To Live of broadcast messages */
-#define TTL 50
+/* Time To Live of broadcast messages */
+#define BATADV_TTL 50
/* purge originators after time in seconds if no valid packet comes in
- * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
-#define PURGE_TIMEOUT 200000 /* 200 seconds */
-#define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+ * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
+ */
+#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
/* sliding packet range of received originator messages in sequence numbers
- * (should be a multiple of our word size) */
-#define TQ_LOCAL_WINDOW_SIZE 64
-#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep
- * pending tt_req */
+ * (should be a multiple of our word size)
+ */
+#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
+/* milliseconds we have to keep pending tt_req */
+#define BATADV_TT_REQUEST_TIMEOUT 3000
-#define TQ_GLOBAL_WINDOW_SIZE 5
-#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
-#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
-#define TQ_TOTAL_BIDRECT_LIMIT 1
+#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
+#define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
+#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
+#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
-#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+/* number of OGMs sent with the last tt diff */
+#define BATADV_TT_OGM_APPEND_MAX 3
-#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most
- * ROAMING_MAX_COUNT times in miliseconds*/
-#define ROAMING_MAX_COUNT 5
+/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
+ * milliseconds
+ */
+#define BATADV_ROAMING_MAX_TIME 20000
+#define BATADV_ROAMING_MAX_COUNT 5
-#define NO_FLAGS 0
+#define BATADV_NO_FLAGS 0
-#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+#define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
-#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
+#define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
+#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
-#define VIS_INTERVAL 5000 /* 5 seconds */
+#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
/* how much worse secondary interfaces may be to be considered as bonding
- * candidates */
-#define BONDING_TQ_THRESHOLD 50
+ * candidates
+ */
+#define BATADV_BONDING_TQ_THRESHOLD 50
/* should not be bigger than 512 bytes or change the size of
- * forw_packet->direct_link_flags */
-#define MAX_AGGREGATION_BYTES 512
-#define MAX_AGGREGATION_MS 100
+ * forw_packet->direct_link_flags
+ */
+#define BATADV_MAX_AGGREGATION_BYTES 512
+#define BATADV_MAX_AGGREGATION_MS 100
-#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
-#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
-#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
+#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
+#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
-#define DUPLIST_SIZE 16
-#define DUPLIST_TIMEOUT 500 /* 500 ms */
+#define BATADV_DUPLIST_SIZE 16
+#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
/* don't reset again within 30 seconds */
-#define RESET_PROTECTION_MS 30000
-#define EXPECTED_SEQNO_RANGE 65536
+#define BATADV_RESET_PROTECTION_MS 30000
+#define BATADV_EXPECTED_SEQNO_RANGE 65536
-enum mesh_state {
- MESH_INACTIVE,
- MESH_ACTIVE,
- MESH_DEACTIVATING
+enum batadv_mesh_state {
+ BATADV_MESH_INACTIVE,
+ BATADV_MESH_ACTIVE,
+ BATADV_MESH_DEACTIVATING,
};
-#define BCAST_QUEUE_LEN 256
-#define BATMAN_QUEUE_LEN 256
+#define BATADV_BCAST_QUEUE_LEN 256
+#define BATADV_BATMAN_QUEUE_LEN 256
-enum uev_action {
- UEV_ADD = 0,
- UEV_DEL,
- UEV_CHANGE
+enum batadv_uev_action {
+ BATADV_UEV_ADD = 0,
+ BATADV_UEV_DEL,
+ BATADV_UEV_CHANGE,
};
-enum uev_type {
- UEV_GW = 0
+enum batadv_uev_type {
+ BATADV_UEV_GW = 0,
};
-#define GW_THRESHOLD 50
+#define BATADV_GW_THRESHOLD 50
/* Debug Messages */
#ifdef pr_fmt
@@ -119,12 +124,12 @@ enum uev_type {
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* all messages related to routing / flooding / broadcasting / etc */
-enum dbg_level {
- DBG_BATMAN = 1 << 0,
- DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
- DBG_TT = 1 << 2, /* translation table operations */
- DBG_BLA = 1 << 3, /* bridge loop avoidance */
- DBG_ALL = 15
+enum batadv_dbg_level {
+ BATADV_DBG_BATMAN = 1 << 0,
+ BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
+ BATADV_DBG_TT = 1 << 2, /* translation table operations */
+ BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
+ BATADV_DBG_ALL = 15,
};
/* Kernel headers */
@@ -138,73 +143,75 @@ enum dbg_level {
#include <linux/kthread.h> /* kernel threads */
#include <linux/pkt_sched.h> /* schedule types */
#include <linux/workqueue.h> /* workqueue */
+#include <linux/percpu.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/jiffies.h>
#include <linux/seq_file.h>
#include "types.h"
-extern char bat_routing_algo[];
-extern struct list_head hardif_list;
-
-extern unsigned char broadcast_addr[];
-extern struct workqueue_struct *bat_event_workqueue;
-
-int mesh_init(struct net_device *soft_iface);
-void mesh_free(struct net_device *soft_iface);
-void inc_module_count(void);
-void dec_module_count(void);
-int is_my_mac(const uint8_t *addr);
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev);
-int recv_handler_register(uint8_t packet_type,
- int (*recv_handler)(struct sk_buff *,
- struct hard_iface *));
-void recv_handler_unregister(uint8_t packet_type);
-int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
-int bat_algo_select(struct bat_priv *bat_priv, char *name);
-int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
+extern char batadv_routing_algo[];
+extern struct list_head batadv_hardif_list;
+
+extern unsigned char batadv_broadcast_addr[];
+extern struct workqueue_struct *batadv_event_workqueue;
+
+int batadv_mesh_init(struct net_device *soft_iface);
+void batadv_mesh_free(struct net_device *soft_iface);
+void batadv_inc_module_count(void);
+void batadv_dec_module_count(void);
+int batadv_is_my_mac(const uint8_t *addr);
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+int
+batadv_recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *));
+void batadv_recv_handler_unregister(uint8_t packet_type);
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
+int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
+int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+__printf(2, 3);
-#define bat_dbg(type, bat_priv, fmt, arg...) \
+#define batadv_dbg(type, bat_priv, fmt, arg...) \
do { \
if (atomic_read(&bat_priv->log_level) & type) \
- debug_log(bat_priv, fmt, ## arg); \
+ batadv_debug_log(bat_priv, fmt, ## arg);\
} \
while (0)
#else /* !CONFIG_BATMAN_ADV_DEBUG */
__printf(3, 4)
-static inline void bat_dbg(int type __always_unused,
- struct bat_priv *bat_priv __always_unused,
- const char *fmt __always_unused, ...)
+static inline void batadv_dbg(int type __always_unused,
+ struct batadv_priv *bat_priv __always_unused,
+ const char *fmt __always_unused, ...)
{
}
#endif
-#define bat_info(net_dev, fmt, arg...) \
+#define batadv_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_info("%s: " fmt, _netdev->name, ## arg); \
} while (0)
-#define bat_err(net_dev, fmt, arg...) \
+#define batadv_err(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_err("%s: " fmt, _netdev->name, ## arg); \
} while (0)
-/**
- * returns 1 if they are the same ethernet addr
+/* returns 1 if they are the same ethernet addr
*
* note: can't use compare_ether_addr() as it requires aligned memory
*/
-
-static inline int compare_eth(const void *data1, const void *data2)
+static inline int batadv_compare_eth(const void *data1, const void *data2)
{
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
@@ -216,15 +223,16 @@ static inline int compare_eth(const void *data1, const void *data2)
*
* Returns true if current time is after timestamp + timeout
*/
-static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
+static inline bool batadv_has_timed_out(unsigned long timestamp,
+ unsigned int timeout)
{
return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
}
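
batadv_has_timed_out() inherits wrap-around safety from time_is_before_jiffies().
A minimal userspace sketch of the same check, using a 32-bit millisecond tick
counter instead of jiffies (all names below are illustrative, not kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace analogue of the timeout test: viewing the unsigned difference
 * as signed keeps the comparison correct even across counter wrap. */
static bool sketch_has_timed_out(uint32_t now, uint32_t stamp, uint32_t timeout)
{
        return (int32_t)(now - (stamp + timeout)) > 0;
}

int main(void)
{
        assert(!sketch_has_timed_out(1000, 900, 200)); /* only 100 ms elapsed */
        assert(sketch_has_timed_out(1200, 900, 200));  /* 300 ms elapsed */
        /* timestamp taken just before the counter wrapped: still in time */
        assert(!sketch_has_timed_out(50, UINT32_MAX - 50, 200));
        return 0;
}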
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
/* Returns the smallest signed integer in two's complement for the size of x */
-#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
* they handle overflows/underflows and can correctly check for a
@@ -234,12 +242,39 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
- * - after adding more than 127 to the starting value - it is a successor */
-#define seq_before(x, y) ({typeof(x) _d1 = (x); \
- typeof(y) _d2 = (y); \
- typeof(x) _dummy = (_d1 - _d2); \
- (void) (&_d1 == &_d2); \
- _dummy > smallest_signed_int(_dummy); })
-#define seq_after(x, y) seq_before(y, x)
+ * - after adding more than 127 to the starting value - it is a successor
+ */
+#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
+ typeof(y) _d2 = (y); \
+ typeof(x) _dummy = (_d1 - _d2); \
+ (void) (&_d1 == &_d2); \
+ _dummy > batadv_smallest_signed_int(_dummy); })
+#define batadv_seq_after(x, y) batadv_seq_before(y, x)
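
The macro relies on unsigned subtraction wrapping and the result being compared
against the midpoint of the value range. A standalone sketch for fixed 16-bit
sequence numbers (simplified: it drops the kernel macro's typeof-based type
check) reproduces the behaviour listed in the comment above:

#include <assert.h>
#include <stdint.h>

/* x is "before" y when the wrapped difference x - y falls in the upper
 * half of the 16-bit range; 0x8000 is the smallest signed 16-bit value
 * reinterpreted as unsigned. */
static int sketch_seq_before(uint16_t x, uint16_t y)
{
        uint16_t d = x - y;

        return d > 0x8000;
}

int main(void)
{
        assert(sketch_seq_before(10, 11));          /* direct predecessor */
        assert(sketch_seq_before(65500, 100));      /* predecessor across wrap */
        assert(!sketch_seq_before(10, 10));         /* equal: neither */
        assert(!sketch_seq_before(10, 10 + 32768)); /* midpoint: neither */
        assert(!sketch_seq_before(10 + 32768, 10)); /* ... in both directions */
        return 0;
}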
+
+/* Stop preemption on local cpu while incrementing the counter */
+static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
+ size_t count)
+{
+ int cpu = get_cpu();
+ per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
+ put_cpu();
+}
+
+#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
+
+/* Sum and return the cpu-local counters for index 'idx' */
+static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
+ size_t idx)
+{
+ uint64_t *counters, sum = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
+ sum += counters[idx];
+ }
+
+ return sum;
+}
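
batadv_add_counter() pins the task to one CPU with get_cpu() (which disables
preemption), so the plain, non-atomic read-modify-write on that CPU's slot
cannot race with the same task being migrated mid-update; batadv_sum_counter()
later folds all slots together, trading slightly stale totals for lock-free
increments. A hedged userspace analogue with one slot per thread standing in
for per-CPU data (every name here is illustrative; build with cc -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 4
#define NCOUNTERS 2

/* One row per "cpu": each thread only ever writes its own row, so the
 * increments need no locking, mirroring the per-CPU counter pattern. */
static uint64_t counters[NTHREADS][NCOUNTERS];

static void add_counter(int cpu, size_t idx, uint64_t count)
{
        counters[cpu][idx] += count;
}

/* Readers sum all rows; a concurrent writer may make the total lag by a
 * few increments, which is acceptable for statistics. */
static uint64_t sum_counter(size_t idx)
{
        uint64_t sum = 0;

        for (int cpu = 0; cpu < NTHREADS; cpu++)
                sum += counters[cpu][idx];
        return sum;
}

static void *worker(void *arg)
{
        int cpu = (int)(intptr_t)arg;

        for (int i = 0; i < 1000000; i++)
                add_counter(cpu, 0, 1);
        return NULL;
}

int main(void)
{
        pthread_t t[NTHREADS];

        for (int i = 0; i < NTHREADS; i++)
                pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(t[i], NULL);
        printf("counter 0 = %llu\n", (unsigned long long)sum_counter(0));
        return 0;
}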
#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 41147942ba5..ac9bdf8f80a 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -30,50 +28,52 @@
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
-static void purge_orig(struct work_struct *work);
+static void batadv_purge_orig(struct work_struct *work);
-static void start_purge_timer(struct bat_priv *bat_priv)
+static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
- queue_delayed_work(bat_event_workqueue,
+ INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
+ queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work, msecs_to_jiffies(1000));
}
/* returns 1 if they are the same originator */
-static int compare_orig(const struct hlist_node *node, const void *data2)
+static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct orig_node, hash_entry);
+ const void *data1 = container_of(node, struct batadv_orig_node,
+ hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
-int originator_init(struct bat_priv *bat_priv)
+int batadv_originator_init(struct batadv_priv *bat_priv)
{
if (bat_priv->orig_hash)
- return 1;
+ return 0;
- bat_priv->orig_hash = hash_new(1024);
+ bat_priv->orig_hash = batadv_hash_new(1024);
if (!bat_priv->orig_hash)
goto err;
- start_purge_timer(bat_priv);
- return 1;
+ batadv_start_purge_timer(bat_priv);
+ return 0;
err:
- return 0;
+ return -ENOMEM;
}
-void neigh_node_free_ref(struct neigh_node *neigh_node)
+void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
if (atomic_dec_and_test(&neigh_node->refcount))
kfree_rcu(neigh_node, rcu);
}
/* increases the refcounter of a found router */
-struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
+struct batadv_neigh_node *
+batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
- struct neigh_node *router;
+ struct batadv_neigh_node *router;
rcu_read_lock();
router = rcu_dereference(orig_node->router);
@@ -85,12 +85,12 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
return router;
}
-struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- uint32_t seqno)
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr, uint32_t seqno)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct neigh_node *neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_neigh_node *neigh_node;
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
if (!neigh_node)
@@ -104,21 +104,21 @@ struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
/* extra reference for return */
atomic_set(&neigh_node->refcount, 2);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM, initial seqno %d\n",
- neigh_addr, seqno);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM, initial seqno %d\n",
+ neigh_addr, seqno);
out:
return neigh_node;
}
-static void orig_node_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
struct hlist_node *node, *node_tmp;
- struct neigh_node *neigh_node, *tmp_neigh_node;
- struct orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
+ struct batadv_orig_node *orig_node;
- orig_node = container_of(rcu, struct orig_node, rcu);
+ orig_node = container_of(rcu, struct batadv_orig_node, rcu);
spin_lock_bh(&orig_node->neigh_list_lock);
@@ -126,21 +126,21 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
list_for_each_entry_safe(neigh_node, tmp_neigh_node,
&orig_node->bond_list, bonding_list) {
list_del_rcu(&neigh_node->bonding_list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
spin_unlock_bh(&orig_node->neigh_list_lock);
- frag_list_free(&orig_node->frag_list);
- tt_global_del_orig(orig_node->bat_priv, orig_node,
- "originator timed out");
+ batadv_frag_list_free(&orig_node->frag_list);
+ batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+ "originator timed out");
kfree(orig_node->tt_buff);
kfree(orig_node->bcast_own);
@@ -148,19 +148,19 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
kfree(orig_node);
}
-void orig_node_free_ref(struct orig_node *orig_node)
+void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
- call_rcu(&orig_node->rcu, orig_node_free_rcu);
+ call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
-void originator_free(struct bat_priv *bat_priv)
+void batadv_originator_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
if (!hash)
@@ -179,28 +179,31 @@ void originator_free(struct bat_priv *bat_priv)
head, hash_entry) {
hlist_del_rcu(node);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
}
/* this function finds or creates an originator entry for the given
- * address if it does not exits */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
+ * address if it does not exist
+ */
+struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+ const uint8_t *addr)
{
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
int size;
int hash_added;
+ unsigned long reset_time;
- orig_node = orig_hash_find(bat_priv, addr);
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
return orig_node;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new originator: %pM\n", addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new originator: %pM\n", addr);
orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
if (!orig_node)
@@ -226,14 +229,13 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
atomic_set(&orig_node->tt_size, 0);
- orig_node->bcast_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
- orig_node->batman_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
+ reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
+ orig_node->bcast_seqno_reset = reset_time;
+ orig_node->batman_seqno_reset = reset_time;
atomic_set(&orig_node->bond_candidates, 0);
- size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
+ size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bcast_own)
@@ -248,8 +250,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
- hash_added = hash_add(bat_priv->orig_hash, compare_orig,
- choose_orig, orig_node, &orig_node->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
if (hash_added != 0)
goto free_bcast_own_sum;
@@ -263,14 +266,16 @@ free_orig_node:
return NULL;
}
-static bool purge_orig_neighbors(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node **best_neigh_node)
+static bool
+batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node **best_neigh_node)
{
struct hlist_node *node, *node_tmp;
- struct neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node;
bool neigh_purged = false;
unsigned long last_seen;
+ struct batadv_hard_iface *if_incoming;
*best_neigh_node = NULL;
@@ -280,34 +285,32 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
- if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
- (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
- (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
- (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
-
- last_seen = neigh_node->last_seen;
-
- if ((neigh_node->if_incoming->if_status ==
- IF_INACTIVE) ||
- (neigh_node->if_incoming->if_status ==
- IF_NOT_IN_USE) ||
- (neigh_node->if_incoming->if_status ==
- IF_TO_BE_REMOVED))
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
- orig_node->orig, neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
+ last_seen = neigh_node->last_seen;
+ if_incoming = neigh_node->if_incoming;
+
+ if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
+ (if_incoming->if_status == BATADV_IF_INACTIVE) ||
+ (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
+ (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
+
+ if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
+ (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
+ (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
+ orig_node->orig, neigh_node->addr,
+ if_incoming->net_dev->name);
else
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
- orig_node->orig, neigh_node->addr,
- jiffies_to_msecs(last_seen));
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
+ orig_node->orig, neigh_node->addr,
+ jiffies_to_msecs(last_seen));
neigh_purged = true;
hlist_del_rcu(&neigh_node->list);
- bonding_candidate_del(orig_node, neigh_node);
- neigh_node_free_ref(neigh_node);
+ batadv_bonding_candidate_del(orig_node, neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
} else {
if ((!*best_neigh_node) ||
(neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
@@ -319,33 +322,35 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
return neigh_purged;
}
-static bool purge_orig_node(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct neigh_node *best_neigh_node;
-
- if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Originator timeout: originator %pM, last_seen %u\n",
- orig_node->orig,
- jiffies_to_msecs(orig_node->last_seen));
+ struct batadv_neigh_node *best_neigh_node;
+
+ if (batadv_has_timed_out(orig_node->last_seen,
+ 2 * BATADV_PURGE_TIMEOUT)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Originator timeout: originator %pM, last_seen %u\n",
+ orig_node->orig,
+ jiffies_to_msecs(orig_node->last_seen));
return true;
} else {
- if (purge_orig_neighbors(bat_priv, orig_node,
- &best_neigh_node))
- update_route(bat_priv, orig_node, best_neigh_node);
+ if (batadv_purge_orig_neighbors(bat_priv, orig_node,
+ &best_neigh_node))
+ batadv_update_route(bat_priv, orig_node,
+ best_neigh_node);
}
return false;
}
-static void _purge_orig(struct bat_priv *bat_priv)
+static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
if (!hash)
@@ -359,58 +364,60 @@ static void _purge_orig(struct bat_priv *bat_priv)
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node, node_tmp,
head, hash_entry) {
- if (purge_orig_node(bat_priv, orig_node)) {
+ if (batadv_purge_orig_node(bat_priv, orig_node)) {
if (orig_node->gw_flags)
- gw_node_delete(bat_priv, orig_node);
+ batadv_gw_node_delete(bat_priv,
+ orig_node);
hlist_del_rcu(node);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
continue;
}
- if (has_timed_out(orig_node->last_frag_packet,
- FRAG_TIMEOUT))
- frag_list_free(&orig_node->frag_list);
+ if (batadv_has_timed_out(orig_node->last_frag_packet,
+ BATADV_FRAG_TIMEOUT))
+ batadv_frag_list_free(&orig_node->frag_list);
}
spin_unlock_bh(list_lock);
}
- gw_node_purge(bat_priv);
- gw_election(bat_priv);
+ batadv_gw_node_purge(bat_priv);
+ batadv_gw_election(bat_priv);
}
-static void purge_orig(struct work_struct *work)
+static void batadv_purge_orig(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, orig_work);
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
- _purge_orig(bat_priv);
- start_purge_timer(bat_priv);
+ delayed_work = container_of(work, struct delayed_work, work);
+ bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
+ _batadv_purge_orig(bat_priv);
+ batadv_start_purge_timer(bat_priv);
}
-void purge_orig_ref(struct bat_priv *bat_priv)
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
- _purge_orig(bat_priv);
+ _batadv_purge_orig(bat_priv);
}
-int orig_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct hard_iface *primary_if;
- struct orig_node *orig_node;
- struct neigh_node *neigh_node, *neigh_node_tmp;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
+ unsigned long last_seen_jiffies;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
@@ -419,7 +426,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -427,28 +434,28 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
}
seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- SOURCE_VERSION, primary_if->net_dev->name,
+ BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
- "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
- "outgoingIF", "Potential nexthops");
+ "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+ "Nexthop", "outgoingIF", "Potential nexthops");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
continue;
if (neigh_node->tq_avg == 0)
goto next;
- last_seen_secs = jiffies_to_msecs(jiffies -
- orig_node->last_seen) / 1000;
- last_seen_msecs = jiffies_to_msecs(jiffies -
- orig_node->last_seen) % 1000;
+ last_seen_jiffies = jiffies - orig_node->last_seen;
+ last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+ last_seen_secs = last_seen_msecs / 1000;
+ last_seen_msecs = last_seen_msecs % 1000;
seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
orig_node->orig, last_seen_secs,
@@ -467,7 +474,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
batman_count++;
next:
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
rcu_read_unlock();
}
@@ -477,27 +484,29 @@ next:
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
+static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
+ int max_if_num)
{
void *data_ptr;
+ size_t data_size, old_size;
- data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
- GFP_ATOMIC);
+ data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ data_ptr = kmalloc(data_size, GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
- memcpy(data_ptr, orig_node->bcast_own,
- (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
+ memcpy(data_ptr, orig_node->bcast_own, old_size);
kfree(orig_node->bcast_own);
orig_node->bcast_own = data_ptr;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
(max_if_num - 1) * sizeof(uint8_t));
@@ -507,28 +516,30 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
return 0;
}
-int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
+int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
+ * if_num
+ */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = orig_node_add_if(orig_node, max_if_num);
+ ret = batadv_orig_node_add_if(orig_node, max_if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- if (ret == -1)
+ if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();
@@ -541,8 +552,8 @@ err:
return -ENOMEM;
}
-static int orig_node_del_if(struct orig_node *orig_node,
- int max_if_num, int del_if_num)
+static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
+ int max_if_num, int del_if_num)
{
void *data_ptr = NULL;
int chunk_size;
@@ -551,10 +562,10 @@ static int orig_node_del_if(struct orig_node *orig_node,
if (max_if_num == 0)
goto free_bcast_own;
- chunk_size = sizeof(unsigned long) * NUM_WORDS;
+ chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
/* copy first part */
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -573,7 +584,7 @@ free_bcast_own:
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
@@ -589,30 +600,32 @@ free_own_sum:
return 0;
}
-int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
+int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct hard_iface *hard_iface_tmp;
- struct orig_node *orig_node;
+ struct batadv_hard_iface *hard_iface_tmp;
+ struct batadv_orig_node *orig_node;
uint32_t i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
+ * if_num
+ */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = orig_node_del_if(orig_node, max_if_num,
- hard_iface->if_num);
+ ret = batadv_orig_node_del_if(orig_node, max_if_num,
+ hard_iface->if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- if (ret == -1)
+ if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();
@@ -620,8 +633,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
- if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
+ list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
+ if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
continue;
if (hard_iface == hard_iface_tmp)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index f74d0d69335..9778e656dec 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
@@ -24,24 +22,29 @@
#include "hash.h"
-int originator_init(struct bat_priv *bat_priv);
-void originator_free(struct bat_priv *bat_priv);
-void purge_orig_ref(struct bat_priv *bat_priv);
-void orig_node_free_ref(struct orig_node *orig_node);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
-struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- uint32_t seqno);
-void neigh_node_free_ref(struct neigh_node *neigh_node);
-struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
-int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
-int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
-
-
-/* hashfunction to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline uint32_t choose_orig(const void *data, uint32_t size)
+int batadv_originator_init(struct batadv_priv *bat_priv);
+void batadv_originator_free(struct batadv_priv *bat_priv);
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
+void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+ const uint8_t *addr);
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr, uint32_t seqno);
+void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
+struct batadv_neigh_node *
+batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
+int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num);
+int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num);
+
+
+/* hashfunction to choose an entry in a hash table of given size
+ * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+ */
+static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -60,24 +63,24 @@ static inline uint32_t choose_orig(const void *data, uint32_t size)
return hash % size;
}
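
The loop elided by the hunk above applies the cited one-at-a-time mixing to the
six address bytes. A standalone sketch of the scheme, assuming the elided body
follows the standard Jenkins formulation:

#include <stdint.h>
#include <stdio.h>

/* One-at-a-time hash over a 6-byte MAC address, reduced to a bucket
 * index; mixing rounds as in the Wikipedia hash table article. */
static uint32_t sketch_choose_orig(const void *data, uint32_t size)
{
        const unsigned char *key = data;
        uint32_t hash = 0;

        for (int i = 0; i < 6; i++) { /* ETH_ALEN bytes */
                hash += key[i];
                hash += hash << 10;
                hash ^= hash >> 6;
        }
        hash += hash << 3;
        hash ^= hash >> 11;
        hash += hash << 15;

        return hash % size;
}

int main(void)
{
        const unsigned char mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("bucket: %u\n", sketch_choose_orig(mac, 1024));
        return 0;
}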
-static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static inline struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct orig_node *orig_node, *orig_node_tmp = NULL;
+ struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
int index;
if (!hash)
return NULL;
- index = choose_orig(data, hash->size);
+ index = batadv_choose_orig(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- if (!compare_eth(orig_node, data))
+ if (!batadv_compare_eth(orig_node, data))
continue;
if (!atomic_inc_not_zero(&orig_node->refcount))
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 0ee1af77079..8d3e55a96ad 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,171 +15,172 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_PACKET_H_
#define _NET_BATMAN_ADV_PACKET_H_
-#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
-
-enum bat_packettype {
- BAT_IV_OGM = 0x01,
- BAT_ICMP = 0x02,
- BAT_UNICAST = 0x03,
- BAT_BCAST = 0x04,
- BAT_VIS = 0x05,
- BAT_UNICAST_FRAG = 0x06,
- BAT_TT_QUERY = 0x07,
- BAT_ROAM_ADV = 0x08
+#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
+
+enum batadv_packettype {
+ BATADV_IV_OGM = 0x01,
+ BATADV_ICMP = 0x02,
+ BATADV_UNICAST = 0x03,
+ BATADV_BCAST = 0x04,
+ BATADV_VIS = 0x05,
+ BATADV_UNICAST_FRAG = 0x06,
+ BATADV_TT_QUERY = 0x07,
+ BATADV_ROAM_ADV = 0x08,
};
/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 14
-enum batman_iv_flags {
- NOT_BEST_NEXT_HOP = 1 << 3,
- PRIMARIES_FIRST_HOP = 1 << 4,
- VIS_SERVER = 1 << 5,
- DIRECTLINK = 1 << 6
+enum batadv_iv_flags {
+ BATADV_NOT_BEST_NEXT_HOP = 1 << 3,
+ BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
+ BATADV_VIS_SERVER = 1 << 5,
+ BATADV_DIRECTLINK = 1 << 6,
};
/* ICMP message types */
-enum icmp_packettype {
- ECHO_REPLY = 0,
- DESTINATION_UNREACHABLE = 3,
- ECHO_REQUEST = 8,
- TTL_EXCEEDED = 11,
- PARAMETER_PROBLEM = 12
+enum batadv_icmp_packettype {
+ BATADV_ECHO_REPLY = 0,
+ BATADV_DESTINATION_UNREACHABLE = 3,
+ BATADV_ECHO_REQUEST = 8,
+ BATADV_TTL_EXCEEDED = 11,
+ BATADV_PARAMETER_PROBLEM = 12,
};
/* vis defines */
-enum vis_packettype {
- VIS_TYPE_SERVER_SYNC = 0,
- VIS_TYPE_CLIENT_UPDATE = 1
+enum batadv_vis_packettype {
+ BATADV_VIS_TYPE_SERVER_SYNC = 0,
+ BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
};
/* fragmentation defines */
-enum unicast_frag_flags {
- UNI_FRAG_HEAD = 1 << 0,
- UNI_FRAG_LARGETAIL = 1 << 1
+enum batadv_unicast_frag_flags {
+ BATADV_UNI_FRAG_HEAD = 1 << 0,
+ BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
};
/* TT_QUERY subtypes */
-#define TT_QUERY_TYPE_MASK 0x3
+#define BATADV_TT_QUERY_TYPE_MASK 0x3
-enum tt_query_packettype {
- TT_REQUEST = 0,
- TT_RESPONSE = 1
+enum batadv_tt_query_packettype {
+ BATADV_TT_REQUEST = 0,
+ BATADV_TT_RESPONSE = 1,
};
/* TT_QUERY flags */
-enum tt_query_flags {
- TT_FULL_TABLE = 1 << 2
+enum batadv_tt_query_flags {
+ BATADV_TT_FULL_TABLE = 1 << 2,
};
-/* TT_CLIENT flags.
+/* BATADV_TT_CLIENT flags.
* Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only */
-enum tt_client_flags {
- TT_CLIENT_DEL = 1 << 0,
- TT_CLIENT_ROAM = 1 << 1,
- TT_CLIENT_WIFI = 1 << 2,
- TT_CLIENT_NOPURGE = 1 << 8,
- TT_CLIENT_NEW = 1 << 9,
- TT_CLIENT_PENDING = 1 << 10
+ * 1 << 15 are used for local computation only
+ */
+enum batadv_tt_client_flags {
+ BATADV_TT_CLIENT_DEL = 1 << 0,
+ BATADV_TT_CLIENT_ROAM = 1 << 1,
+ BATADV_TT_CLIENT_WIFI = 1 << 2,
+ BATADV_TT_CLIENT_NOPURGE = 1 << 8,
+ BATADV_TT_CLIENT_NEW = 1 << 9,
+ BATADV_TT_CLIENT_PENDING = 1 << 10,
};
/* claim frame types for the bridge loop avoidance */
-enum bla_claimframe {
- CLAIM_TYPE_ADD = 0x00,
- CLAIM_TYPE_DEL = 0x01,
- CLAIM_TYPE_ANNOUNCE = 0x02,
- CLAIM_TYPE_REQUEST = 0x03
+enum batadv_bla_claimframe {
+ BATADV_CLAIM_TYPE_ADD = 0x00,
+ BATADV_CLAIM_TYPE_DEL = 0x01,
+ BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
+ BATADV_CLAIM_TYPE_REQUEST = 0x03,
};
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
-struct bla_claim_dst {
+struct batadv_bla_claim_dst {
uint8_t magic[3]; /* FF:43:05 */
uint8_t type; /* bla_claimframe */
- uint16_t group; /* group id */
+ __be16 group; /* group id */
} __packed;
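
Filling this pseudo hardware address amounts to writing the FF:43:05 magic, the
claim type and the big-endian group id; the __be16 annotation above is what
makes htons() the right conversion on little-endian hosts. A hedged
construction sketch (struct and helper names are illustrative, not the
kernel's):

#include <arpa/inet.h> /* htons */
#include <stdint.h>
#include <string.h>

struct sketch_bla_claim_dst {
        uint8_t magic[3]; /* FF:43:05 */
        uint8_t type;     /* claim frame type */
        uint16_t group;   /* group id, big endian on the wire */
} __attribute__((packed));

/* Build the 6-byte pseudo hardware address carrying a claim of the
 * given type for the given group. */
static void sketch_fill_claim_dst(struct sketch_bla_claim_dst *dst,
                                  uint8_t type, uint16_t group)
{
        static const uint8_t magic[3] = { 0xff, 0x43, 0x05 };

        memcpy(dst->magic, magic, sizeof(magic));
        dst->type = type;          /* e.g. 0x00 for an ADD claim */
        dst->group = htons(group); /* matches the __be16 wire field */
}

int main(void)
{
        struct sketch_bla_claim_dst dst;

        sketch_fill_claim_dst(&dst, 0x00, 0x1234);
        return 0;
}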
-struct batman_header {
+struct batadv_header {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
} __packed;
-struct batman_ogm_packet {
- struct batman_header header;
+struct batadv_ogm_packet {
+ struct batadv_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
- uint32_t seqno;
+ __be32 seqno;
uint8_t orig[ETH_ALEN];
uint8_t prev_sender[ETH_ALEN];
uint8_t gw_flags; /* flags related to gateway class */
uint8_t tq;
uint8_t tt_num_changes;
uint8_t ttvn; /* translation table version number */
- uint16_t tt_crc;
+ __be16 tt_crc;
} __packed;
-#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
+#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
-struct icmp_packet {
- struct batman_header header;
+struct batadv_icmp_packet {
+ struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
uint8_t uid;
uint8_t reserved;
} __packed;
-#define BAT_RR_LEN 16
+#define BATADV_RR_LEN 16
/* icmp_packet_rr must start with all fields from icmp_packet
- * as this is assumed by code that handles ICMP packets */
-struct icmp_packet_rr {
- struct batman_header header;
+ * as this is assumed by code that handles ICMP packets
+ */
+struct batadv_icmp_packet_rr {
+ struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
uint8_t uid;
uint8_t rr_cur;
- uint8_t rr[BAT_RR_LEN][ETH_ALEN];
+ uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
} __packed;
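
The shared-prefix requirement stated in the comment above can be made
machine-checked. One hedged way, assuming C11 and both packet structs in
scope, is a set of static asserts on field offsets (kernel code would more
likely reach for BUILD_BUG_ON; this is a sketch, not the actual source):

#include <assert.h> /* static_assert, C11 */
#include <stddef.h> /* offsetof */

/* Sketch: one assert per field that must line up between the two
 * layouts; a mismatch becomes a compile error instead of a parser bug. */
static_assert(offsetof(struct batadv_icmp_packet_rr, msg_type) ==
              offsetof(struct batadv_icmp_packet, msg_type),
              "icmp_packet_rr must start with the icmp_packet fields");
static_assert(offsetof(struct batadv_icmp_packet_rr, seqno) ==
              offsetof(struct batadv_icmp_packet, seqno),
              "icmp_packet_rr must start with the icmp_packet fields");
static_assert(offsetof(struct batadv_icmp_packet_rr, uid) ==
              offsetof(struct batadv_icmp_packet, uid),
              "icmp_packet_rr must start with the icmp_packet fields");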
-struct unicast_packet {
- struct batman_header header;
+struct batadv_unicast_packet {
+ struct batadv_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
} __packed;
-struct unicast_frag_packet {
- struct batman_header header;
+struct batadv_unicast_frag_packet {
+ struct batadv_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
uint8_t flags;
uint8_t align;
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
} __packed;
-struct bcast_packet {
- struct batman_header header;
+struct batadv_bcast_packet {
+ struct batadv_header header;
uint8_t reserved;
- uint32_t seqno;
+ __be32 seqno;
uint8_t orig[ETH_ALEN];
} __packed;
-struct vis_packet {
- struct batman_header header;
+struct batadv_vis_packet {
+ struct batadv_header header;
uint8_t vis_type; /* which type of vis-participant sent this? */
- uint32_t seqno; /* sequence number */
+ __be32 seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
uint8_t reserved;
uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
@@ -188,11 +188,12 @@ struct vis_packet {
uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
} __packed;
-struct tt_query_packet {
- struct batman_header header;
+struct batadv_tt_query_packet {
+ struct batadv_header header;
/* the flag field is a combination of:
* - TT_REQUEST or TT_RESPONSE
- * - TT_FULL_TABLE */
+ * - TT_FULL_TABLE
+ */
uint8_t flags;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
@@ -200,24 +201,26 @@ struct tt_query_packet {
* if TT_REQUEST: ttvn that triggered the
* request
* if TT_RESPONSE: new ttvn for the src
- * orig_node */
+ * orig_node
+ */
uint8_t ttvn;
/* tt_data field is:
* if TT_REQUEST: crc associated with the
* ttvn
- * if TT_RESPONSE: table_size */
- uint16_t tt_data;
+ * if TT_RESPONSE: table_size
+ */
+ __be16 tt_data;
} __packed;
-struct roam_adv_packet {
- struct batman_header header;
+struct batadv_roam_adv_packet {
+ struct batadv_header header;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
uint8_t client[ETH_ALEN];
} __packed;
-struct tt_change {
+struct batadv_tt_change {
uint8_t flags;
uint8_t addr[ETH_ALEN];
} __packed;
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index fd63951d118..c8f61e395b7 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,26 +15,26 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
#include "ring_buffer.h"
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
+void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
+ uint8_t value)
{
lq_recv[*lq_index] = value;
- *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
+ *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}
-uint8_t ring_buffer_avg(const uint8_t lq_recv[])
+uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
{
const uint8_t *ptr;
uint16_t count = 0, i = 0, sum = 0;
ptr = lq_recv;
- while (i < TQ_GLOBAL_WINDOW_SIZE) {
+ while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
if (*ptr != 0) {
count++;
sum += *ptr;
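
The tail of the averaging loop is cut off by the hunk above; the point of the
count/sum pair is that slots still holding 0 were never filled and must not
drag the average down. A complete standalone sketch of the same logic, with a
small stand-in window size:

#include <stdint.h>

#define SKETCH_WINDOW_SIZE 5 /* stands in for BATADV_TQ_GLOBAL_WINDOW_SIZE */

/* Average over the non-zero slots only; an all-empty window yields 0. */
static uint8_t sketch_ring_buffer_avg(const uint8_t lq_recv[])
{
        uint16_t count = 0, sum = 0;

        for (int i = 0; i < SKETCH_WINDOW_SIZE; i++) {
                if (lq_recv[i] != 0) {
                        count++;
                        sum += lq_recv[i];
                }
        }

        return count ? (uint8_t)(sum / count) : 0;
}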
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 8b58bd82767..fda8c17df27 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,13 +15,13 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
#define _NET_BATMAN_ADV_RING_BUFFER_H_
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(const uint8_t lq_recv[]);
+void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
+ uint8_t value);
+uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]);
#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 015471d801b..bc2b88bbea1 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,19 +29,20 @@
#include "unicast.h"
#include "bridge_loop_avoidance.h"
-static int route_unicast_packet(struct sk_buff *skb,
- struct hard_iface *recv_if);
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
-void slide_own_bcast_window(struct hard_iface *hard_iface)
+void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
unsigned long *word;
uint32_t i;
size_t word_index;
+ uint8_t *w;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -51,49 +50,49 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- word_index = hard_iface->if_num * NUM_WORDS;
+ word_index = hard_iface->if_num * BATADV_NUM_WORDS;
word = &(orig_node->bcast_own[word_index]);
- bit_get_packet(bat_priv, word, 1, 0);
- orig_node->bcast_own_sum[hard_iface->if_num] =
- bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
+ batadv_bit_get_packet(bat_priv, word, 1, 0);
+ w = &orig_node->bcast_own_sum[hard_iface->if_num];
+ *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
}
rcu_read_unlock();
}
}
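
Per hard interface, each originator keeps a BATADV_NUM_WORDS-long bitmap of
which of our own recent broadcasts it echoed back; sliding the window shifts
that bitmap by one and recounts the set bits into bcast_own_sum. A
deliberately simplified one-word sketch of the slide-and-recount step (the
real batadv_bit_get_packet() also handles multi-word bitmaps and sequence
number jumps; __builtin_popcount assumes GCC/Clang):

#include <stdint.h>

/* Slide the echo window one sequence number forward, dropping the
 * oldest slot, then recompute the echo count for the last 32 OGMs. */
static uint8_t sketch_slide_own_window(uint32_t *bcast_own)
{
        *bcast_own <<= 1;
        return (uint8_t)__builtin_popcount(*bcast_own);
}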
-static void _update_route(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+static void _batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
- struct neigh_node *curr_router;
+ struct batadv_neigh_node *curr_router;
- curr_router = orig_node_get_router(orig_node);
+ curr_router = batadv_orig_node_get_router(orig_node);
/* route deleted */
if ((curr_router) && (!neigh_node)) {
- bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
- orig_node->orig);
- tt_global_del_orig(bat_priv, orig_node,
- "Deleted route towards originator");
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Deleting route towards: %pM\n", orig_node->orig);
+ batadv_tt_global_del_orig(bat_priv, orig_node,
+ "Deleted route towards originator");
/* route added */
} else if ((!curr_router) && (neigh_node)) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Adding route towards: %pM (via %pM)\n",
- orig_node->orig, neigh_node->addr);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Adding route towards: %pM (via %pM)\n",
+ orig_node->orig, neigh_node->addr);
/* route changed */
} else if (neigh_node && curr_router) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Changing route towards: %pM (now via %pM - was via %pM)\n",
- orig_node->orig, neigh_node->addr,
- curr_router->addr);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Changing route towards: %pM (now via %pM - was via %pM)\n",
+ orig_node->orig, neigh_node->addr,
+ curr_router->addr);
}
if (curr_router)
- neigh_node_free_ref(curr_router);
+ batadv_neigh_node_free_ref(curr_router);
/* increase refcount of new best neighbor */
if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
@@ -105,30 +104,31 @@ static void _update_route(struct bat_priv *bat_priv,
/* decrease refcount of previous best neighbor */
if (curr_router)
- neigh_node_free_ref(curr_router);
+ batadv_neigh_node_free_ref(curr_router);
}
-void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
- struct neigh_node *router = NULL;
+ struct batadv_neigh_node *router = NULL;
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (router != neigh_node)
- _update_route(bat_priv, orig_node, neigh_node);
+ _batadv_update_route(bat_priv, orig_node, neigh_node);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* caller must hold the neigh_list_lock */
-void bonding_candidate_del(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
/* this neighbor is not part of our candidate list */
if (list_empty(&neigh_node->bonding_list))
@@ -136,37 +136,36 @@ void bonding_candidate_del(struct orig_node *orig_node,
list_del_rcu(&neigh_node->bonding_list);
INIT_LIST_HEAD(&neigh_node->bonding_list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
atomic_dec(&orig_node->bond_candidates);
out:
return;
}
-void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
struct hlist_node *node;
- struct neigh_node *tmp_neigh_node, *router = NULL;
+ struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
uint8_t interference_candidate = 0;
spin_lock_bh(&orig_node->neigh_list_lock);
/* only consider if it has the same primary address ... */
- if (!compare_eth(orig_node->orig,
- neigh_node->orig_node->primary_addr))
+ if (!batadv_compare_eth(orig_node->orig,
+ neigh_node->orig_node->primary_addr))
goto candidate_del;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto candidate_del;
/* ... and is good enough to be considered */
- if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
+ if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
goto candidate_del;
- /**
- * check if we have another candidate with the same mac address or
+ /* check if we have another candidate with the same mac address or
* interface. If we do, we won't select this candidate because of
* possible interference.
*/
@@ -177,12 +176,14 @@ void bonding_candidate_add(struct orig_node *orig_node,
continue;
/* we only care if the other candidate is even
- * considered as candidate. */
+ * considered as candidate.
+ */
if (list_empty(&tmp_neigh_node->bonding_list))
continue;
if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
- (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
+ (batadv_compare_eth(neigh_node->addr,
+ tmp_neigh_node->addr))) {
interference_candidate = 1;
break;
}
@@ -204,21 +205,22 @@ void bonding_candidate_add(struct orig_node *orig_node,
goto out;
candidate_del:
- bonding_candidate_del(orig_node, neigh_node);
+ batadv_bonding_candidate_del(orig_node, neigh_node);
out:
spin_unlock_bh(&orig_node->neigh_list_lock);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* copy primary address for bonding */
-void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_ogm_packet *batman_ogm_packet)
+void
+batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ const struct batadv_ogm_packet *batman_ogm_packet)
{
- if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
return;
memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
@@ -229,25 +231,26 @@ void bonding_save_primary(const struct orig_node *orig_node,
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
- unsigned long *last_reset)
+int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset)
{
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
- (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
+ if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
+ seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
+ if (!batadv_has_timed_out(*last_reset,
+ BATADV_RESET_PROTECTION_MS))
return 1;
*last_reset = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "old packet received, start protection\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "old packet received, start protection\n");
}
return 0;
}
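
Concretely: with a local window of 64 and an expected range of 65536, a
sequence number difference of, say, -1000 or +70000 only makes sense if the
neighbor restarted, so the window is reset at most once per 30-second
protection period and such packets are otherwise ignored. A hedged standalone
sketch of the same decision (millisecond clock instead of jiffies; names
illustrative):

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_WINDOW_SIZE 64    /* BATADV_TQ_LOCAL_WINDOW_SIZE */
#define SKETCH_SEQNO_RANGE 65536 /* BATADV_EXPECTED_SEQNO_RANGE */
#define SKETCH_PROTECT_MS 30000  /* BATADV_RESET_PROTECTION_MS */

/* Returns true when the packet must be ignored: its seqno is so far out
 * of window that it can only be a restart, and a restart was already
 * accepted within the protection period. */
static bool sketch_window_protected(int32_t seq_num_diff, uint32_t now_ms,
                                    uint32_t *last_reset_ms)
{
        if (seq_num_diff <= -SKETCH_WINDOW_SIZE ||
            seq_num_diff >= SKETCH_SEQNO_RANGE) {
                if (now_ms - *last_reset_ms < SKETCH_PROTECT_MS)
                        return true;     /* still protected: drop it */
                *last_reset_ms = now_ms; /* accept as a restart */
        }
        return false;
}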
-bool check_management_packet(struct sk_buff *skb,
- struct hard_iface *hard_iface,
- int header_len)
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len)
{
struct ethhdr *ethhdr;
@@ -276,34 +279,34 @@ bool check_management_packet(struct sk_buff *skb,
return true;
}
-static int recv_my_icmp_packet(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- struct hard_iface *primary_if = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_icmp_packet_rr *icmp_packet;
int ret = NET_RX_DROP;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add data to device queue */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_socket_receive_packet(icmp_packet, icmp_len);
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ batadv_socket_receive_packet(icmp_packet, icmp_len);
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* answer echo request (ping) */
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -311,54 +314,54 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = ECHO_REPLY;
- icmp_packet->header.ttl = TTL;
+ icmp_packet->msg_type = BATADV_ECHO_REPLY;
+ icmp_packet->header.ttl = BATADV_TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb)
+static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
{
- struct hard_iface *primary_if = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- struct icmp_packet *icmp_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_icmp_packet *icmp_packet;
int ret = NET_RX_DROP;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
icmp_packet->orig, icmp_packet->dst);
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -366,42 +369,41 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = TTL_EXCEEDED;
- icmp_packet->header.ttl = TTL;
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->header.ttl = BATADV_TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- int hdr_size = sizeof(struct icmp_packet);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ int hdr_size = sizeof(struct batadv_icmp_packet);
int ret = NET_RX_DROP;
- /**
- * we truncate all incoming icmp packets if they don't match our size
- */
- if (skb->len >= sizeof(struct icmp_packet_rr))
- hdr_size = sizeof(struct icmp_packet_rr);
+ /* we truncate all incoming icmp packets if they don't match our size */
+ if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
+ hdr_size = sizeof(struct batadv_icmp_packet_rr);
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -418,33 +420,33 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add record route information if not full */
- if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
- (icmp_packet->rr_cur < BAT_RR_LEN)) {
+ if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
+ (icmp_packet->rr_cur < BATADV_RR_LEN)) {
memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
ethhdr->h_dest, ETH_ALEN);
icmp_packet->rr_cur++;
}
/* packet for me */
- if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(bat_priv, skb, hdr_size);
+ if (batadv_is_my_mac(icmp_packet->dst))
+ return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->header.ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb);
+ return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -452,20 +454,20 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* decrement ttl */
icmp_packet->header.ttl--;
/* route it */
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
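The record-route branch in batadv_recv_icmp_packet() is worth isolating: every forwarding hop appends its own address until the array is full. A sketch under the packet layout used above (helper name hypothetical):

static void icmp_record_route(struct batadv_icmp_packet_rr *icmp_packet,
                              const uint8_t *recv_addr)
{
        /* stop silently once all BATADV_RR_LEN slots are used */
        if (icmp_packet->rr_cur >= BATADV_RR_LEN)
                return;

        memcpy(&icmp_packet->rr[icmp_packet->rr_cur], recv_addr, ETH_ALEN);
        icmp_packet->rr_cur++;
}

In the hunk, recv_addr is ethhdr->h_dest, i.e. the MAC address the frame was received on.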
@@ -473,12 +475,14 @@ out:
* robin fashion over the remaining interfaces.
*
* This method rotates the bonding list and increases the
- * returned router's refcount. */
-static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
- const struct hard_iface *recv_if)
+ * returned router's refcount.
+ */
+static struct batadv_neigh_node *
+batadv_find_bond_router(struct batadv_orig_node *primary_orig,
+ const struct batadv_hard_iface *recv_if)
{
- struct neigh_node *tmp_neigh_node;
- struct neigh_node *router = NULL, *first_candidate = NULL;
+ struct batadv_neigh_node *tmp_neigh_node;
+ struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -506,10 +510,12 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
goto out;
/* selected should point to the next element
- * after the current router */
+ * after the current router
+ */
spin_lock_bh(&primary_orig->neigh_list_lock);
/* this is a list_move(), which unfortunately
- * does not exist as rcu version */
+ * does not exist as rcu version
+ */
list_del_rcu(&primary_orig->bond_list);
list_add_rcu(&primary_orig->bond_list,
&router->bonding_list);
@@ -524,12 +530,14 @@ out:
* remaining candidates which are not using
* this interface.
*
- * Increases the returned router's refcount */
-static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
- const struct hard_iface *recv_if)
+ * Increases the returned router's refcount
+ */
+static struct batadv_neigh_node *
+batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+ const struct batadv_hard_iface *recv_if)
{
- struct neigh_node *tmp_neigh_node;
- struct neigh_node *router = NULL, *first_candidate = NULL;
+ struct batadv_neigh_node *tmp_neigh_node;
+ struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -545,19 +553,21 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
continue;
/* if we don't have a router yet
- * or this one is better, choose it. */
+ * or this one is better, choose it.
+ */
if ((!router) ||
(tmp_neigh_node->tq_avg > router->tq_avg)) {
/* decrement refcount of
- * previously selected router */
+ * previously selected router
+ */
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
router = tmp_neigh_node;
atomic_inc_not_zero(&router->refcount);
}
- neigh_node_free_ref(tmp_neigh_node);
+ batadv_neigh_node_free_ref(tmp_neigh_node);
}
/* use the first candidate if nothing was found. */
@@ -569,19 +579,22 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
return router;
}
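batadv_find_ifalter_router() boils down to a best-TQ scan that skips the receiving interface. Stripped of RCU and reference counting, the selection rule looks roughly like this (hypothetical helper, plain list walk):

static struct batadv_neigh_node *
pick_best_alternate(struct list_head *bond_list,
                    const struct batadv_hard_iface *recv_if)
{
        struct batadv_neigh_node *tmp, *best = NULL;

        list_for_each_entry(tmp, bond_list, bonding_list) {
                /* never send back out of the interface we came in on */
                if (tmp->if_incoming == recv_if)
                        continue;
                if (!best || tmp->tq_avg > best->tq_avg)
                        best = tmp;
        }
        return best;
}

The real function additionally falls back to the first candidate when every neighbor sits on recv_if.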
-int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct tt_query_packet *tt_query;
- uint16_t tt_len;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_tt_query_packet *tt_query;
+ uint16_t tt_size;
struct ethhdr *ethhdr;
+ char tt_flag;
+ size_t packet_size;
/* drop the packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
+ if (unlikely(!pskb_may_pull(skb,
+ sizeof(struct batadv_tt_query_packet))))
goto out;
/* I may need to modify it */
- if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
+ if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -594,47 +607,59 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
- tt_query = (struct tt_query_packet *)skb->data;
+ tt_query = (struct batadv_tt_query_packet *)skb->data;
- tt_query->tt_data = ntohs(tt_query->tt_data);
+ switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
+ case BATADV_TT_REQUEST:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
- switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
- case TT_REQUEST:
/* If we cannot provide an answer the tt_request is
- * forwarded */
- if (!send_tt_response(bat_priv, tt_query)) {
- bat_dbg(DBG_TT, bat_priv,
- "Routing TT_REQUEST to %pM [%c]\n",
- tt_query->dst,
- (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
- tt_query->tt_data = htons(tt_query->tt_data);
- return route_unicast_packet(skb, recv_if);
+ * forwarded
+ */
+ if (!batadv_send_tt_response(bat_priv, tt_query)) {
+ if (tt_query->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_REQUEST to %pM [%c]\n",
+ tt_query->dst,
+ tt_flag);
+ return batadv_route_unicast_packet(skb, recv_if);
}
break;
- case TT_RESPONSE:
- if (is_my_mac(tt_query->dst)) {
+ case BATADV_TT_RESPONSE:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+ if (batadv_is_my_mac(tt_query->dst)) {
/* packet needs to be linearized to access the TT
- * changes */
+ * changes
+ */
if (skb_linearize(skb) < 0)
goto out;
/* skb_linearize() possibly changed skb->data */
- tt_query = (struct tt_query_packet *)skb->data;
+ tt_query = (struct batadv_tt_query_packet *)skb->data;
- tt_len = tt_query->tt_data * sizeof(struct tt_change);
+ tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
/* Ensure we have all the claimed data */
- if (unlikely(skb_headlen(skb) <
- sizeof(struct tt_query_packet) + tt_len))
+ packet_size = sizeof(struct batadv_tt_query_packet);
+ packet_size += tt_size;
+ if (unlikely(skb_headlen(skb) < packet_size))
goto out;
- handle_tt_response(bat_priv, tt_query);
+ batadv_handle_tt_response(bat_priv, tt_query);
} else {
- bat_dbg(DBG_TT, bat_priv,
- "Routing TT_RESPONSE to %pM [%c]\n",
- tt_query->dst,
- (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
- tt_query->tt_data = htons(tt_query->tt_data);
- return route_unicast_packet(skb, recv_if);
+ if (tt_query->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_RESPONSE to %pM [%c]\n",
+ tt_query->dst,
+ tt_flag);
+ return batadv_route_unicast_packet(skb, recv_if);
}
break;
}
@@ -644,15 +669,16 @@ out:
return NET_RX_DROP;
}
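The dispatch above keys on the low bits of tt_query->flags; the 'F'/'.' character in the debug lines only mirrors the full-table bit. As a tiny grounded helper (name hypothetical):

static char tt_flag_char(uint8_t flags)
{
        /* 'F' marks a full-table request/response, '.' a diff */
        return (flags & BATADV_TT_FULL_TABLE) ? 'F' : '.';
}

With it, the two four-line if/else blocks in the hunk collapse to tt_flag = tt_flag_char(tt_query->flags).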
-int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct roam_adv_packet *roam_adv_packet;
- struct orig_node *orig_node;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_roam_adv_packet *roam_adv_packet;
+ struct batadv_orig_node *orig_node;
struct ethhdr *ethhdr;
/* drop the packet if it does not have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
+ if (unlikely(!pskb_may_pull(skb,
+ sizeof(struct batadv_roam_adv_packet))))
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -665,35 +691,39 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
- roam_adv_packet = (struct roam_adv_packet *)skb->data;
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+
+ roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
- if (!is_my_mac(roam_adv_packet->dst))
- return route_unicast_packet(skb, recv_if);
+ if (!batadv_is_my_mac(roam_adv_packet->dst))
+ return batadv_route_unicast_packet(skb, recv_if);
/* check if it is a backbone gateway. we don't accept
* roaming advertisements from it, as it has the same
* entries as we have.
*/
- if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
goto out;
- orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
+ orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
if (!orig_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Received ROAMING_ADV from %pM (client %pM)\n",
- roam_adv_packet->src, roam_adv_packet->client);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received ROAMING_ADV from %pM (client %pM)\n",
+ roam_adv_packet->src, roam_adv_packet->client);
- tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
- atomic_read(&orig_node->last_ttvn) + 1, true, false);
+ batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
+ BATADV_TT_CLIENT_ROAM,
+ atomic_read(&orig_node->last_ttvn) + 1);
/* Roaming phase starts: I have new information but the ttvn has not
* been incremented yet. This flag will make me check all the incoming
- * packets for the correct destination. */
+ * packets for the correct destination.
+ */
bat_priv->tt_poss_change = true;
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
out:
/* returning NET_RX_DROP will make the caller function kfree the skb */
return NET_RX_DROP;
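What batadv_recv_roam_adv() leaves behind is a provisional translation-table state. A condensed sketch of the effect, using the same calls as the hunk (wrapper name hypothetical, locking elided):

static void note_roamed_client(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
                               const uint8_t *client)
{
        /* one ahead of the originator's current table version */
        uint8_t next_ttvn = atomic_read(&orig_node->last_ttvn) + 1;

        batadv_tt_global_add(bat_priv, orig_node, client,
                             BATADV_TT_CLIENT_ROAM, next_ttvn);

        /* re-check destinations until the new ttvn shows up in OGMs */
        bat_priv->tt_poss_change = true;
}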
@@ -701,26 +731,30 @@ out:
/* find a suitable router for this originator, and use
* bonding if possible. increases the found neighbor's
- * refcount.*/
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct hard_iface *recv_if)
+ * refcount.
+ */
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *recv_if)
{
- struct orig_node *primary_orig_node;
- struct orig_node *router_orig;
- struct neigh_node *router;
+ struct batadv_orig_node *primary_orig_node;
+ struct batadv_orig_node *router_orig;
+ struct batadv_neigh_node *router;
static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
int bonding_enabled;
+ uint8_t *primary_addr;
if (!orig_node)
return NULL;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto err;
/* without bonding, the first node should
- * always choose the default router. */
+ * always choose the default router.
+ */
bonding_enabled = atomic_read(&bat_priv->bonding);
rcu_read_lock();
@@ -732,43 +766,47 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
if ((!recv_if) && (!bonding_enabled))
goto return_router;
+ primary_addr = router_orig->primary_addr;
+
/* if we have something in the primary_addr, we can search
- * for a potential bonding candidate. */
- if (compare_eth(router_orig->primary_addr, zero_mac))
+ * for a potential bonding candidate.
+ */
+ if (batadv_compare_eth(primary_addr, zero_mac))
goto return_router;
/* find the orig_node which has the primary interface. might
- * even be the same as our router_orig in many cases */
-
- if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
+ * even be the same as our router_orig in many cases
+ */
+ if (batadv_compare_eth(primary_addr, router_orig->orig)) {
primary_orig_node = router_orig;
} else {
- primary_orig_node = orig_hash_find(bat_priv,
- router_orig->primary_addr);
+ primary_orig_node = batadv_orig_hash_find(bat_priv,
+ primary_addr);
if (!primary_orig_node)
goto return_router;
- orig_node_free_ref(primary_orig_node);
+ batadv_orig_node_free_ref(primary_orig_node);
}
/* with less than 2 candidates, we can't do any
- * bonding and prefer the original router. */
+ * bonding and prefer the original router.
+ */
if (atomic_read(&primary_orig_node->bond_candidates) < 2)
goto return_router;
/* all nodes between should choose a candidate which
* is not on the interface where the packet came
- * in. */
-
- neigh_node_free_ref(router);
+ * in.
+ */
+ batadv_neigh_node_free_ref(router);
if (bonding_enabled)
- router = find_bond_router(primary_orig_node, recv_if);
+ router = batadv_find_bond_router(primary_orig_node, recv_if);
else
- router = find_ifalter_router(primary_orig_node, recv_if);
+ router = batadv_find_ifalter_router(primary_orig_node, recv_if);
return_router:
- if (router && router->if_incoming->if_status != IF_ACTIVE)
+ if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
goto err_unlock;
rcu_read_unlock();
@@ -777,11 +815,11 @@ err_unlock:
rcu_read_unlock();
err:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
return NULL;
}
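batadv_find_router() is easier to follow as a decision ladder. A sketch of just the branch logic, assuming the default (best-TQ) router was already looked up; the primary-originator lookup and all refcounting are elided:

static struct batadv_neigh_node *
choose_route_strategy(struct batadv_orig_node *primary_orig,
                      const struct batadv_hard_iface *recv_if,
                      struct batadv_neigh_node *default_router,
                      int bonding_enabled)
{
        /* first hop (no recv_if) without bonding keeps the default */
        if (!recv_if && !bonding_enabled)
                return default_router;

        /* nothing to rotate or alternate below two candidates */
        if (atomic_read(&primary_orig->bond_candidates) < 2)
                return default_router;

        if (bonding_enabled)
                return batadv_find_bond_router(primary_orig, recv_if);

        return batadv_find_ifalter_router(primary_orig, recv_if);
}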
-static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
struct ethhdr *ethhdr;
@@ -800,23 +838,24 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
return -1;
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
return -1;
return 0;
}
-static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
int ret = NET_RX_DROP;
struct sk_buff *new_skb;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
if (unicast_packet->header.ttl < 2) {
@@ -826,13 +865,13 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
/* get routing information */
- orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+ orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
if (!orig_node)
goto out;
/* find_router() increases neigh_nodes refcount if found. */
- neigh_node = find_router(bat_priv, orig_node, recv_if);
+ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node)
goto out;
@@ -841,20 +880,22 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- if (unicast_packet->header.packet_type == BAT_UNICAST &&
+ if (unicast_packet->header.packet_type == BATADV_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
- if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
- frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
+ if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
+ batadv_frag_can_reassemble(skb,
+ neigh_node->if_incoming->net_dev->mtu)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
goto out;
@@ -866,141 +907,153 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
skb = new_skb;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
}
/* decrement ttl */
unicast_packet->header.ttl--;
+ /* Update stats counter */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+ skb->len + ETH_HLEN);
+
/* route it */
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = NET_RX_SUCCESS;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
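The MTU handling in batadv_route_unicast_packet() hinges on one predicate: fragment on the way out if the frame no longer fits the next hop. A grounded one-liner (hypothetical name):

static bool needs_fragmentation(const struct batadv_priv *bat_priv,
                                const struct sk_buff *skb,
                                const struct net_device *out_dev)
{
        return atomic_read(&bat_priv->fragmentation) &&
               skb->len > out_dev->mtu;
}

The mirror case, reassembly via batadv_frag_can_reassemble(), fires when a fragment arrives and the outgoing MTU could carry the reassembled frame.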
-static int check_unicast_ttvn(struct bat_priv *bat_priv,
- struct sk_buff *skb) {
+static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ struct sk_buff *skb) {
uint8_t curr_ttvn;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
struct ethhdr *ethhdr;
- struct hard_iface *primary_if;
- struct unicast_packet *unicast_packet;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_unicast_packet *unicast_packet;
bool tt_poss_change;
+ int is_old_ttvn;
/* I may need to modify it */
- if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
+ if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0)
return 0;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- if (is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(unicast_packet->dest)) {
tt_poss_change = bat_priv->tt_poss_change;
curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
} else {
- orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+ orig_node = batadv_orig_hash_find(bat_priv,
+ unicast_packet->dest);
if (!orig_node)
return 0;
curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
tt_poss_change = orig_node->tt_poss_change;
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
/* Check whether I have to reroute the packet */
- if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
+ is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
+ if (is_old_ttvn || tt_poss_change) {
/* check if there is enough data before accessing it */
- if (pskb_may_pull(skb, sizeof(struct unicast_packet) +
+ if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
ETH_HLEN) < 0)
return 0;
- ethhdr = (struct ethhdr *)(skb->data +
- sizeof(struct unicast_packet));
+ ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
/* we don't have an updated route for this client, so we should
* not try to reroute the packet!
*/
- if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ if (batadv_tt_global_client_is_roaming(bat_priv,
+ ethhdr->h_dest))
return 1;
- orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
+ orig_node = batadv_transtable_search(bat_priv, NULL,
+ ethhdr->h_dest);
if (!orig_node) {
- if (!is_my_client(bat_priv, ethhdr->h_dest))
+ if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
return 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return 0;
memcpy(unicast_packet->dest,
primary_if->net_dev->dev_addr, ETH_ALEN);
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
} else {
memcpy(unicast_packet->dest, orig_node->orig,
ETH_ALEN);
curr_ttvn = (uint8_t)
atomic_read(&orig_node->last_ttvn);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
- bat_dbg(DBG_ROUTES, bat_priv,
- "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
- unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
- unicast_packet->dest);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
+ unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
+ unicast_packet->dest);
unicast_packet->ttvn = curr_ttvn;
}
return 1;
}
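batadv_seq_before() in the ttvn check is a wraparound-safe comparison on a uint8_t version counter. One common way to express the same idea, shown here only to make the semantics concrete (not the macro's literal expansion):

static inline bool ttvn_is_older(uint8_t packet_ttvn, uint8_t curr_ttvn)
{
        /* distance in the 8-bit ring; anything less than half the
         * sequence space behind the current value counts as older
         */
        uint8_t diff = curr_ttvn - packet_ttvn;

        return diff > 0 && diff < 128;
}

So a packet stamped ttvn 250 is still considered older than a current ttvn of 3, which is exactly what rerouting after a table update needs.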
-int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_packet *unicast_packet;
int hdr_size = sizeof(*unicast_packet);
- if (check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
- if (!check_unicast_ttvn(bat_priv, skb))
+ if (!batadv_check_unicast_ttvn(bat_priv, skb))
return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
- interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+ if (batadv_is_my_mac(unicast_packet->dest)) {
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
+ hdr_size);
return NET_RX_SUCCESS;
}
- return route_unicast_packet(skb, recv_if);
+ return batadv_route_unicast_packet(skb, recv_if);
}
-int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct unicast_frag_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_frag_packet *unicast_packet;
int hdr_size = sizeof(*unicast_packet);
struct sk_buff *new_skb = NULL;
int ret;
- if (check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
- if (!check_unicast_ttvn(bat_priv, skb))
+ if (!batadv_check_unicast_ttvn(bat_priv, skb))
return NET_RX_DROP;
- unicast_packet = (struct unicast_frag_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
/* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(unicast_packet->dest)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
return NET_RX_DROP;
@@ -1009,20 +1062,21 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (!new_skb)
return NET_RX_SUCCESS;
- interface_rx(recv_if->soft_iface, new_skb, recv_if,
- sizeof(struct unicast_packet));
+ batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
+ sizeof(struct batadv_unicast_packet));
return NET_RX_SUCCESS;
}
- return route_unicast_packet(skb, recv_if);
+ return batadv_route_unicast_packet(skb, recv_if);
}
-int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node = NULL;
- struct bcast_packet *bcast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(*bcast_packet);
int ret = NET_RX_DROP;
@@ -1043,19 +1097,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* ignore broadcasts sent by myself */
- if (is_my_mac(ethhdr->h_source))
+ if (batadv_is_my_mac(ethhdr->h_source))
goto out;
- bcast_packet = (struct bcast_packet *)skb->data;
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
- if (is_my_mac(bcast_packet->orig))
+ if (batadv_is_my_mac(bcast_packet->orig))
goto out;
if (bcast_packet->header.ttl < 2)
goto out;
- orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
if (!orig_node)
goto out;
@@ -1063,39 +1117,40 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
spin_lock_bh(&orig_node->bcast_seqno_lock);
/* check whether the packet is a duplicate */
- if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
- ntohl(bcast_packet->seqno)))
+ if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+ ntohl(bcast_packet->seqno)))
goto spin_unlock;
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->bcast_seqno_reset))
+ if (batadv_window_protected(bat_priv, seq_diff,
+ &orig_node->bcast_seqno_reset))
goto spin_unlock;
/* mark broadcast in flood history, update window position
- * if required. */
- if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
+ * if required.
+ */
+ if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
spin_unlock_bh(&orig_node->bcast_seqno_lock);
/* check whether this has been sent by another originator before */
- if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
goto out;
/* rebroadcast packet */
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* don't hand the broadcast up if it is from an originator
* from the same backbone.
*/
- if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+ if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
goto out;
/* broadcast for me */
- interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
goto out;
@@ -1103,15 +1158,16 @@ spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
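The duplicate check in batadv_recv_bcast_packet() uses a per-originator bitmap trailing last_bcast_seqno. Its shape, reduced to a standalone predicate (hypothetical helper; the real window width is the TQ local window size):

static bool bcast_seen_before(const unsigned long *seq_bits,
                              uint32_t last_seqno, uint32_t seqno,
                              int32_t window_size)
{
        int32_t diff = last_seqno - seqno;

        /* outside the tracked window: cannot be a recorded duplicate */
        if (diff < 0 || diff >= window_size)
                return false;

        return test_bit(diff, seq_bits);
}

batadv_window_protected() then guards the opposite failure mode: a peer that rebooted and restarted its sequence numbers far outside the window.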
-int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_vis_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct vis_packet *vis_packet;
+ struct batadv_vis_packet *vis_packet;
struct ethhdr *ethhdr;
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int hdr_size = sizeof(*vis_packet);
/* keep skb linear */
@@ -1121,29 +1177,29 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
- vis_packet = (struct vis_packet *)skb->data;
+ vis_packet = (struct batadv_vis_packet *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
return NET_RX_DROP;
/* ignore own packets */
- if (is_my_mac(vis_packet->vis_orig))
+ if (batadv_is_my_mac(vis_packet->vis_orig))
return NET_RX_DROP;
- if (is_my_mac(vis_packet->sender_orig))
+ if (batadv_is_my_mac(vis_packet->sender_orig))
return NET_RX_DROP;
switch (vis_packet->vis_type) {
- case VIS_TYPE_SERVER_SYNC:
- receive_server_sync_packet(bat_priv, vis_packet,
- skb_headlen(skb));
+ case BATADV_VIS_TYPE_SERVER_SYNC:
+ batadv_receive_server_sync_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
- case VIS_TYPE_CLIENT_UPDATE:
- receive_client_update_packet(bat_priv, vis_packet,
- skb_headlen(skb));
+ case BATADV_VIS_TYPE_CLIENT_UPDATE:
+ batadv_receive_client_update_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
default: /* ignore unknown packet */
@@ -1151,6 +1207,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
/* We take a copy of the data in the packet, so we should
- always free the skbuf. */
+ * always free the skb.
+ */
return NET_RX_DROP;
}
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index d6bbbebb656..9262279ea66 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,36 +15,45 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_
-void slide_own_bcast_window(struct hard_iface *hard_iface);
-bool check_management_packet(struct sk_buff *skb,
- struct hard_iface *hard_iface,
- int header_len);
-void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct hard_iface *recv_if);
-void bonding_candidate_del(struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_ogm_packet *batman_ogm_packet);
-int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
- unsigned long *last_reset);
+void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface);
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len);
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_vis_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_tt_query(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_roam_adv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *recv_if);
+void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ const struct batadv_ogm_packet
+ *batman_ogm_packet);
+int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f22c6..3b4b2daa3b3 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -29,16 +27,18 @@
#include "gateway_common.h"
#include "originator.h"
-static void send_outstanding_bcast_packet(struct work_struct *work);
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr)
+ * specified batman interface
+ */
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t *dst_addr)
{
struct ethhdr *ethhdr;
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto send_skb_err;
if (unlikely(!hard_iface->net_dev))
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
}
/* push to the ethernet header. */
- if (my_skb_head_push(skb, ETH_HLEN) < 0)
+ if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
goto send_skb_err;
skb_reset_mac_header(skb);
@@ -59,129 +59,57 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
- ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+ ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
skb->priority = TC_PRIO_CONTROL;
- skb->protocol = __constant_htons(ETH_P_BATMAN);
+ skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- * (which is > 0). This will not be treated as an error. */
-
+ * (which is > 0). This will not be treated as an error.
+ */
return dev_queue_xmit(skb);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
}
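The body of batadv_send_skb_packet() is mostly Ethernet framing. Separated out under the same names (helper hypothetical; network-header offset, priority and the status checks elided):

static void prep_batadv_frame(struct sk_buff *skb,
                              const uint8_t *src, const uint8_t *dst)
{
        struct ethhdr *ethhdr;

        /* make room for and address the outer Ethernet header */
        skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);

        ethhdr = (struct ethhdr *)skb_mac_header(skb);
        memcpy(ethhdr->h_source, src, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst, ETH_ALEN);
        ethhdr->h_proto = htons(BATADV_ETH_P_BATMAN);

        skb->protocol = htons(BATADV_ETH_P_BATMAN);
}

Note the return-value convention spelled out in the comment above: only a negative dev_queue_xmit() result is a real error; NET_XMIT_DROP (> 0) from shaping or congestion is deliberately not escalated.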
-static void realloc_packet_buffer(struct hard_iface *hard_iface,
- int new_len)
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
- unsigned char *new_buff;
-
- new_buff = kmalloc(new_len, GFP_ATOMIC);
-
- /* keep old buffer if kmalloc should fail */
- if (new_buff) {
- memcpy(new_buff, hard_iface->packet_buff,
- BATMAN_OGM_HLEN);
-
- kfree(hard_iface->packet_buff);
- hard_iface->packet_buff = new_buff;
- hard_iface->packet_len = new_len;
- }
-}
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-/* when calling this function (hard_iface == primary_if) has to be true */
-static int prepare_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
-{
- int new_len;
-
- new_len = BATMAN_OGM_HLEN +
- tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
-
- /* if we have too many changes for one packet don't send any
- * and wait for the tt table request which will be fragmented */
- if (new_len > hard_iface->soft_iface->mtu)
- new_len = BATMAN_OGM_HLEN;
-
- realloc_packet_buffer(hard_iface, new_len);
-
- atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
-
- /* reset the sending counter */
- atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
-
- return tt_changes_fill_buffer(bat_priv,
- hard_iface->packet_buff + BATMAN_OGM_HLEN,
- hard_iface->packet_len - BATMAN_OGM_HLEN);
-}
-
-static int reset_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
-{
- realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
- return 0;
-}
-
-void schedule_bat_ogm(struct hard_iface *hard_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hard_iface *primary_if;
- int tt_num_changes = -1;
-
- if ((hard_iface->if_status == IF_NOT_IN_USE) ||
- (hard_iface->if_status == IF_TO_BE_REMOVED))
+ if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+ (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
return;
- /**
- * the interface gets activated here to avoid race conditions between
+ /* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
* hardif_activate_interface() where the originator mac is set and
* outdated packets (especially uninitialized mac addresses) in the
* packet queue
*/
- if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
- hard_iface->if_status = IF_ACTIVE;
-
- primary_if = primary_if_get_selected(bat_priv);
-
- if (hard_iface == primary_if) {
- /* if at least one change happened */
- if (atomic_read(&bat_priv->tt_local_changes) > 0) {
- tt_commit_changes(bat_priv);
- tt_num_changes = prepare_packet_buffer(bat_priv,
- hard_iface);
- }
-
- /* if the changes have been sent often enough */
- if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
- tt_num_changes = reset_packet_buffer(bat_priv,
- hard_iface);
- }
-
- if (primary_if)
- hardif_free_ref(primary_if);
+ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+ hard_iface->if_status = BATADV_IF_ACTIVE;
- bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
+ bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
-static void forw_packet_free(struct forw_packet *forw_packet)
+static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
if (forw_packet->skb)
kfree_skb(forw_packet->skb);
if (forw_packet->if_incoming)
- hardif_free_ref(forw_packet->if_incoming);
+ batadv_hardif_free_ref(forw_packet->if_incoming);
kfree(forw_packet);
}
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
- struct forw_packet *forw_packet,
- unsigned long send_time)
+static void
+_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet,
+ unsigned long send_time)
{
INIT_HLIST_NODE(&forw_packet->list);
@@ -192,8 +120,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
- send_outstanding_bcast_packet);
- queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
+ batadv_send_outstanding_bcast_packet);
+ queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
send_time);
}
@@ -204,21 +132,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
* errors.
*
* The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay)
+ * skb is freed.
+ */
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay)
{
- struct hard_iface *primary_if = NULL;
- struct forw_packet *forw_packet;
- struct bcast_packet *bcast_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_forw_packet *forw_packet;
+ struct batadv_bcast_packet *bcast_packet;
struct sk_buff *newskb;
- if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
+ if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "bcast packet queue full\n");
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out_and_inc;
@@ -232,7 +163,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
goto packet_free;
/* as we have a copy now, it is safe to decrease the TTL */
- bcast_packet = (struct bcast_packet *)newskb->data;
+ bcast_packet = (struct batadv_bcast_packet *)newskb->data;
bcast_packet->header.ttl--;
skb_reset_mac_header(newskb);
@@ -243,7 +174,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
- _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
+ _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
return NETDEV_TX_OK;
packet_free:
@@ -252,38 +183,43 @@ out_and_inc:
atomic_inc(&bat_priv->bcast_queue_left);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NETDEV_TX_BUSY;
}
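The contract in the comment above ("the skb is not consumed") shows up at every call site. A usage sketch matching how the soft interface submits broadcasts (function name hypothetical):

static void submit_bcast(struct batadv_priv *bat_priv, struct sk_buff *skb)
{
        /* the queue stores its own copy with a decremented TTL ... */
        batadv_add_bcast_packet_to_list(bat_priv, skb, 1);

        /* ... so the caller always releases the original */
        kfree_skb(skb);
}

Skipping the kfree_skb() here would leak one skb per broadcast, since the queueing path duplicates the buffer before it is listed.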
-static void send_outstanding_bcast_packet(struct work_struct *work)
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
+ struct batadv_forw_packet *forw_packet;
struct sk_buff *skb1;
- struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
+ soft_iface = forw_packet->if_incoming->soft_iface;
+ bat_priv = netdev_priv(soft_iface);
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
/* rebroadcast packet */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- send_skb_packet(skb1, hard_iface, broadcast_addr);
+ batadv_send_skb_packet(skb1, hard_iface,
+ batadv_broadcast_addr);
}
rcu_read_unlock();
@@ -291,72 +227,72 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
- _add_bcast_packet_to_list(bat_priv, forw_packet,
- msecs_to_jiffies(5));
+ _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
+ msecs_to_jiffies(5));
return;
}
out:
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
atomic_inc(&bat_priv->bcast_queue_left);
}
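The worker above re-arms itself for the retransmission rounds. The policy in one predicate (hypothetical name), matching the constants in the hunk:

static bool bcast_needs_another_round(const struct batadv_forw_packet *fp)
{
        /* every broadcast is sent three times in total */
        return fp->num_packets < 3;
}

Between rounds the packet is re-queued with msecs_to_jiffies(5), so the copies leave roughly 5 ms apart; only after the last round is the bcast_queue_left slot returned.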
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
- struct bat_priv *bat_priv;
+ struct batadv_forw_packet *forw_packet;
+ struct batadv_priv *bat_priv;
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
- /**
- * we have to have at least one packet in the queue
+ /* we have to have at least one packet in the queue
* to determine the queue's wake-up time unless we are
* shutting down
*/
if (forw_packet->own)
- schedule_bat_ogm(forw_packet->if_incoming);
+ batadv_schedule_bat_ogm(forw_packet->if_incoming);
out:
/* don't count own packet */
if (!forw_packet->own)
atomic_inc(&bat_priv->batman_queue_left);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface)
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface)
{
- struct forw_packet *forw_packet;
+ struct batadv_forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
bool pending;
if (hard_iface)
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets(): %s\n",
- hard_iface->net_dev->name);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets(): %s\n",
+ hard_iface->net_dev->name);
else
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets()\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets()\n");
/* free bcast list */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bcast_list, list) {
- /**
- * if purge_outstanding_packets() was called with an argument
+ /* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -365,8 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
- /**
- * send_outstanding_bcast_packet() will lock the list to
+ /* batadv_send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +309,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
}
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +319,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bat_list, list) {
- /**
- * if purge_outstanding_packets() was called with an argument
+ /* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -394,8 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- /**
- * send_outstanding_bat_packet() will lock the list to
+ /* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +336,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
}
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 824ef06f9b0..643329b787e 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,19 +15,21 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr);
-void schedule_bat_ogm(struct hard_iface *hard_iface);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_ogm_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface);
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t *dst_addr);
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay);
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a0ec0e4ada4..109ea2aae96 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -24,12 +22,12 @@
#include "hard-interface.h"
#include "routing.h"
#include "send.h"
-#include "bat_debugfs.h"
+#include "debugfs.h"
#include "translation-table.h"
#include "hash.h"
#include "gateway_common.h"
#include "gateway_client.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "originator.h"
#include <linux/slab.h>
#include <linux/ethtool.h>
@@ -39,27 +37,33 @@
#include "bridge_loop_avoidance.h"
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info);
-static u32 bat_get_msglevel(struct net_device *dev);
-static void bat_set_msglevel(struct net_device *dev, u32 value);
-static u32 bat_get_link(struct net_device *dev);
-
-static const struct ethtool_ops bat_ethtool_ops = {
- .get_settings = bat_get_settings,
- .get_drvinfo = bat_get_drvinfo,
- .get_msglevel = bat_get_msglevel,
- .set_msglevel = bat_set_msglevel,
- .get_link = bat_get_link,
+static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static void batadv_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info);
+static u32 batadv_get_msglevel(struct net_device *dev);
+static void batadv_set_msglevel(struct net_device *dev, u32 value);
+static u32 batadv_get_link(struct net_device *dev);
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
+static void batadv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+static int batadv_get_sset_count(struct net_device *dev, int stringset);
+
+static const struct ethtool_ops batadv_ethtool_ops = {
+ .get_settings = batadv_get_settings,
+ .get_drvinfo = batadv_get_drvinfo,
+ .get_msglevel = batadv_get_msglevel,
+ .set_msglevel = batadv_set_msglevel,
+ .get_link = batadv_get_link,
+ .get_strings = batadv_get_strings,
+ .get_ethtool_stats = batadv_get_ethtool_stats,
+ .get_sset_count = batadv_get_sset_count,
};
-int my_skb_head_push(struct sk_buff *skb, unsigned int len)
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
- /**
- * TODO: We must check if we can release all references to non-payload
+ /* TODO: We must check if we can release all references to non-payload
* data using skb_header_release in our skbs to allow skb_cow_header to
* work optimally. This means that those skbs are not allowed to read
* or write any data which is before the current position of skb->data
@@ -74,37 +78,37 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
return 0;
}
-static int interface_open(struct net_device *dev)
+static int batadv_interface_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
-static int interface_release(struct net_device *dev)
+static int batadv_interface_release(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
-static struct net_device_stats *interface_stats(struct net_device *dev)
+static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
- struct bat_priv *bat_priv = netdev_priv(dev);
+ struct batadv_priv *bat_priv = netdev_priv(dev);
return &bat_priv->stats;
}
-static int interface_set_mac_addr(struct net_device *dev, void *p)
+static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
- struct bat_priv *bat_priv = netdev_priv(dev);
+ struct batadv_priv *bat_priv = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* only modify transtable if it has been initialized before */
- if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
- tt_local_remove(bat_priv, dev->dev_addr,
- "mac address changed", false);
- tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
+ batadv_tt_local_remove(bat_priv, dev->dev_addr,
+ "mac address changed", false);
+ batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -112,10 +116,10 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static int interface_change_mtu(struct net_device *dev, int new_mtu)
+static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
/* check ranges */
- if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
+ if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
return -EINVAL;
dev->mtu = new_mtu;
@@ -123,13 +127,15 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
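The bounds in batadv_interface_change_mtu() pair the historical minimum with the thinnest attached hard interface. As a predicate (hypothetical name):

static bool mtu_in_range(struct net_device *dev, int new_mtu)
{
        /* 68 is the classic IPv4 lower bound; the ceiling tracks the
         * smallest slave MTU, with the batman-adv overhead accounted
         * for inside batadv_hardif_min_mtu()
         */
        return new_mtu >= 68 && new_mtu <= batadv_hardif_min_mtu(dev);
}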
-static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int batadv_interface_tx(struct sk_buff *skb,
+ struct net_device *soft_iface)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct hard_iface *primary_if = NULL;
- struct bcast_packet *bcast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
0x00};
unsigned int header_len = 0;
@@ -137,7 +143,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
short vid __maybe_unused = -1;
bool do_bcast = false;
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
soft_iface->trans_start = jiffies;
@@ -147,45 +153,47 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+ if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
/* fall through */
- case ETH_P_BATMAN:
+ case BATADV_ETH_P_BATMAN:
goto dropped;
}
- if (bla_tx(bat_priv, skb, vid))
+ if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
/* Register the client MAC in the transtable */
- tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+ batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
/* don't accept stp packets. STP does not help in meshes.
* better use the bridge loop avoidance ...
*/
- if (compare_eth(ethhdr->h_dest, stp_addr))
+ if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
goto dropped;
if (is_multicast_ether_addr(ethhdr->h_dest)) {
do_bcast = true;
switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_SERVER:
+ case BATADV_GW_MODE_SERVER:
/* gateway servers should not send dhcp
- * requests into the mesh */
- ret = gw_is_dhcp_target(skb, &header_len);
+ * requests into the mesh
+ */
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (ret)
goto dropped;
break;
- case GW_MODE_CLIENT:
+ case BATADV_GW_MODE_CLIENT:
/* gateway clients should send dhcp requests
- * via unicast to their gateway */
- ret = gw_is_dhcp_target(skb, &header_len);
+ * via unicast to their gateway
+ */
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (ret)
do_bcast = false;
break;
- case GW_MODE_OFF:
+ case BATADV_GW_MODE_OFF:
default:
break;
}
@@ -193,22 +201,24 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* ethernet packet should be broadcasted */
if (do_bcast) {
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
- if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+ if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
goto dropped;
- bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->header.version = COMPAT_VERSION;
- bcast_packet->header.ttl = TTL;
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ bcast_packet->header.version = BATADV_COMPAT_VERSION;
+ bcast_packet->header.ttl = BATADV_TTL;
/* batman packet type: broadcast */
- bcast_packet->header.packet_type = BAT_BCAST;
+ bcast_packet->header.packet_type = BATADV_BCAST;
+ bcast_packet->reserved = 0;
/* hw address of the first interface is the orig mac because only
- * this mac is known throughout the mesh */
+ * this mac is known throughout the mesh
+ */
memcpy(bcast_packet->orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -216,21 +226,22 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
bcast_packet->seqno =
htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* a copy is stored in the bcast list, therefore removing
- * the original skb. */
+ * the original skb.
+ */
kfree_skb(skb);
/* unicast packet */
} else {
- if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
- ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
+ ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
if (ret)
goto dropped;
}
- ret = unicast_send_skb(skb, bat_priv);
+ ret = batadv_unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
}
@@ -245,22 +256,23 @@ dropped_freed:
bat_priv->stats.tx_dropped++;
end:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NETDEV_TX_OK;
}
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, struct hard_iface *recv_if,
- int hdr_size)
+void batadv_interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+ int hdr_size)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- struct batman_header *batadv_header = (struct batman_header *)skb->data;
+ struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
short vid __maybe_unused = -1;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
bool is_bcast;
- is_bcast = (batadv_header->packet_type == BAT_BCAST);
+ is_bcast = (batadv_header->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -276,11 +288,11 @@ void interface_rx(struct net_device *soft_iface,
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+ if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
/* fall through */
- case ETH_P_BATMAN:
+ case BATADV_ETH_P_BATMAN:
goto dropped;
}
@@ -291,22 +303,23 @@ void interface_rx(struct net_device *soft_iface,
/* should not be necessary anymore as we use skb_pull_rcsum()
* TODO: please verify this and remove this TODO
- * -- Dec 21st 2009, Simon Wunderlich */
+ * -- Dec 21st 2009, Simon Wunderlich
+ */
-/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
+ /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
bat_priv->stats.rx_packets++;
bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
soft_iface->last_rx = jiffies;
- if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+ if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
/* Let the bridge loop avoidance check the packet. If it will
* not handle it, we can safely push it up.
*/
- if (bla_rx(bat_priv, skb, vid, is_bcast))
+ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
netif_rx(skb);
@@ -318,49 +331,50 @@ out:
return;
}
-static const struct net_device_ops bat_netdev_ops = {
- .ndo_open = interface_open,
- .ndo_stop = interface_release,
- .ndo_get_stats = interface_stats,
- .ndo_set_mac_address = interface_set_mac_addr,
- .ndo_change_mtu = interface_change_mtu,
- .ndo_start_xmit = interface_tx,
+static const struct net_device_ops batadv_netdev_ops = {
+ .ndo_open = batadv_interface_open,
+ .ndo_stop = batadv_interface_release,
+ .ndo_get_stats = batadv_interface_stats,
+ .ndo_set_mac_address = batadv_interface_set_mac_addr,
+ .ndo_change_mtu = batadv_interface_change_mtu,
+ .ndo_start_xmit = batadv_interface_tx,
.ndo_validate_addr = eth_validate_addr
};
-static void interface_setup(struct net_device *dev)
+static void batadv_interface_setup(struct net_device *dev)
{
- struct bat_priv *priv = netdev_priv(dev);
+ struct batadv_priv *priv = netdev_priv(dev);
ether_setup(dev);
- dev->netdev_ops = &bat_netdev_ops;
+ dev->netdev_ops = &batadv_netdev_ops;
dev->destructor = free_netdev;
dev->tx_queue_len = 0;
- /**
- * can't call min_mtu, because the needed variables
+ /* can't call min_mtu, because the needed variables
* have not been initialized yet
*/
dev->mtu = ETH_DATA_LEN;
/* reserve more space in the skbuff for our header */
- dev->hard_header_len = BAT_HEADER_LEN;
+ dev->hard_header_len = BATADV_HEADER_LEN;
/* generate random address */
eth_hw_addr_random(dev);
- SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
memset(priv, 0, sizeof(*priv));
}
-struct net_device *softif_create(const char *name)
+struct net_device *batadv_softif_create(const char *name)
{
struct net_device *soft_iface;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
int ret;
+ size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
- soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
+ soft_iface = alloc_netdev(sizeof(*bat_priv), name,
+ batadv_interface_setup);
if (!soft_iface)
goto out;
@@ -378,18 +392,18 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->bonding, 0);
atomic_set(&bat_priv->bridge_loop_avoidance, 0);
atomic_set(&bat_priv->ap_isolation, 0);
- atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
- atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
+ atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
+ atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
atomic_set(&bat_priv->gw_bandwidth, 41);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->hop_penalty, 30);
atomic_set(&bat_priv->log_level, 0);
atomic_set(&bat_priv->fragmentation, 1);
- atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
- atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+ atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
+ atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->ttvn, 0);
atomic_set(&bat_priv->tt_local_changes, 0);
@@ -403,28 +417,34 @@ struct net_device *softif_create(const char *name)
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
- ret = bat_algo_select(bat_priv, bat_routing_algo);
- if (ret < 0)
+ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+ if (!bat_priv->bat_counters)
goto unreg_soft_iface;
- ret = sysfs_add_meshif(soft_iface);
+ ret = batadv_algo_select(bat_priv, batadv_routing_algo);
if (ret < 0)
- goto unreg_soft_iface;
+ goto free_bat_counters;
- ret = debugfs_add_meshif(soft_iface);
+ ret = batadv_sysfs_add_meshif(soft_iface);
+ if (ret < 0)
+ goto free_bat_counters;
+
+ ret = batadv_debugfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_sysfs;
- ret = mesh_init(soft_iface);
+ ret = batadv_mesh_init(soft_iface);
if (ret < 0)
goto unreg_debugfs;
return soft_iface;
unreg_debugfs:
- debugfs_del_meshif(soft_iface);
+ batadv_debugfs_del_meshif(soft_iface);
unreg_sysfs:
- sysfs_del_meshif(soft_iface);
+ batadv_sysfs_del_meshif(soft_iface);
+free_bat_counters:
+ free_percpu(bat_priv->bat_counters);
unreg_soft_iface:
unregister_netdevice(soft_iface);
return NULL;
@@ -435,24 +455,24 @@ out:
return NULL;
}
-void softif_destroy(struct net_device *soft_iface)
+void batadv_softif_destroy(struct net_device *soft_iface)
{
- debugfs_del_meshif(soft_iface);
- sysfs_del_meshif(soft_iface);
- mesh_free(soft_iface);
+ batadv_debugfs_del_meshif(soft_iface);
+ batadv_sysfs_del_meshif(soft_iface);
+ batadv_mesh_free(soft_iface);
unregister_netdevice(soft_iface);
}
-int softif_is_valid(const struct net_device *net_dev)
+int batadv_softif_is_valid(const struct net_device *net_dev)
{
- if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+ if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
return 1;
return 0;
}
/* ethtool */
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = 0;
cmd->advertising = 0;
@@ -468,25 +488,73 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+static void batadv_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
strcpy(info->driver, "B.A.T.M.A.N. advanced");
- strcpy(info->version, SOURCE_VERSION);
+ strcpy(info->version, BATADV_SOURCE_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, "batman");
}
-static u32 bat_get_msglevel(struct net_device *dev)
+static u32 batadv_get_msglevel(struct net_device *dev)
{
return -EOPNOTSUPP;
}
-static void bat_set_msglevel(struct net_device *dev, u32 value)
+static void batadv_set_msglevel(struct net_device *dev, u32 value)
{
}
-static u32 bat_get_link(struct net_device *dev)
+static u32 batadv_get_link(struct net_device *dev)
{
return 1;
}
+
+/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
+ * Declare each description string in struct.name[] to get a fixed-size
+ * buffer and compile-time checking for strings longer than ETH_GSTRING_LEN.
+ */
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} batadv_counters_strings[] = {
+ { "forward" },
+ { "forward_bytes" },
+ { "mgmt_tx" },
+ { "mgmt_tx_bytes" },
+ { "mgmt_rx" },
+ { "mgmt_rx_bytes" },
+ { "tt_request_tx" },
+ { "tt_request_rx" },
+ { "tt_response_tx" },
+ { "tt_response_rx" },
+ { "tt_roam_adv_tx" },
+ { "tt_roam_adv_rx" },
+};
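+
+/* the ordering above is assumed to match the counter enum that defines
+ * BATADV_CNT_NUM, since batadv_get_ethtool_stats() fills data[] by index
+ */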
+
+static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
+ uint8_t *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, batadv_counters_strings,
+ sizeof(batadv_counters_strings));
+}
+
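+/* report one uint64_t per counter; batadv_sum_counter() (defined
+ * elsewhere) presumably folds the per-CPU counters allocated with
+ * __alloc_percpu() in batadv_softif_create()
+ */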
+static void batadv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < BATADV_CNT_NUM; i++)
+ data[i] = batadv_sum_counter(bat_priv, i);
+}
+
+static int batadv_get_sset_count(struct net_device *dev, int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return BATADV_CNT_NUM;
+
+ return -EOPNOTSUPP;
+}
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02030067388..852c683b06a 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,18 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, struct hard_iface *recv_if,
- int hdr_size);
-struct net_device *softif_create(const char *name);
-void softif_destroy(struct net_device *soft_iface);
-int softif_is_valid(const struct net_device *net_dev);
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
+void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if, int hdr_size);
+struct net_device *batadv_softif_create(const char *name);
+void batadv_softif_destroy(struct net_device *soft_iface);
+int batadv_softif_is_valid(const struct net_device *net_dev);
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
new file mode 100644
index 00000000000..66518c75c21
--- /dev/null
+++ b/net/batman-adv/sysfs.c
@@ -0,0 +1,787 @@
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "sysfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "vis.h"
+
+static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
+{
+ struct device *dev = container_of(obj->parent, struct device, kobj);
+ return to_net_dev(dev);
+}
+
+static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(obj);
+ return netdev_priv(net_dev);
+}
+
+#define BATADV_UEV_TYPE_VAR "BATTYPE="
+#define BATADV_UEV_ACTION_VAR "BATACTION="
+#define BATADV_UEV_DATA_VAR "BATDATA="
+
+static char *batadv_uev_action_str[] = {
+ "add",
+ "del",
+ "change"
+};
+
+static char *batadv_uev_type_str[] = {
+ "gw"
+};
+
+/* Use this if you have customized show and store functions */
+#define BATADV_ATTR(_name, _mode, _show, _store) \
+struct batadv_attribute batadv_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ return __batadv_store_bool_attr(buff, count, _post_func, attr, \
+ &bat_priv->_name, net_dev); \
+}
+
+#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
+ return sprintf(buff, "%s\n", \
+ atomic_read(&bat_priv->_name) == 0 ? \
+ "disabled" : "enabled"); \
+} \
+
+/* Use this if you are going to turn a [name] in the soft-interface
+ * (bat_priv) on or off
+ */
+#define BATADV_ATTR_SIF_BOOL(_name, _mode, _post_func) \
+ static BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ static BATADV_ATTR_SIF_SHOW_BOOL(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
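+/* for example, BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL)
+ * expands to batadv_show_bonding(), batadv_store_bonding() and a
+ * batadv_attr_bonding descriptor, ready for an attribute array
+ */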
+
+#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ return __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &bat_priv->_name, net_dev); \
+}
+
+#define BATADV_ATTR_SIF_SHOW_UINT(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
+ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
+} \
+
+/* Use this if you are going to set [name] in the soft-interface
+ * (bat_priv) to an unsigned integer value
+ */
+#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
+ static BATADV_ATTR_SIF_SHOW_UINT(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
+
+#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &hard_iface->_name, net_dev); \
+ \
+ batadv_hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+#define BATADV_ATTR_HIF_SHOW_UINT(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
+ \
+ batadv_hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+/* Use this if you are going to set [name] in hard_iface to an
+ * unsigned integer value
+ */
+#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
+ static BATADV_ATTR_HIF_SHOW_UINT(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
+
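+/* accepts "1"/"enable"/"enabled" and "0"/"disable"/"disabled" (with an
+ * optional trailing newline); returns count if the value was stored or
+ * already set, -EINVAL otherwise
+ */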
+static int batadv_store_bool_attr(char *buff, size_t count,
+ struct net_device *net_dev,
+ const char *attr_name, atomic_t *attr)
+{
+ int enabled = -1;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if ((strncmp(buff, "1", 2) == 0) ||
+ (strncmp(buff, "enable", 7) == 0) ||
+ (strncmp(buff, "enabled", 8) == 0))
+ enabled = 1;
+
+ if ((strncmp(buff, "0", 2) == 0) ||
+ (strncmp(buff, "disable", 8) == 0) ||
+ (strncmp(buff, "disabled", 9) == 0))
+ enabled = 0;
+
+ if (enabled < 0) {
+ batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
+ attr_name, buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(attr) == enabled)
+ return count;
+
+ batadv_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
+ atomic_read(attr) == 1 ? "enabled" : "disabled",
+ enabled == 1 ? "enabled" : "disabled");
+
+ atomic_set(attr, (unsigned int)enabled);
+ return count;
+}
+
+static inline ssize_t
+__batadv_store_bool_attr(char *buff, size_t count,
+ void (*post_func)(struct net_device *),
+ struct attribute *attr,
+ atomic_t *attr_store, struct net_device *net_dev)
+{
+ int ret;
+
+ ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
+ attr_store);
+ if (post_func && ret)
+ post_func(net_dev);
+
+ return ret;
+}
+
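+/* parse buff as a base-10 value and reject anything outside [min, max];
+ * returns count if the value was stored or already set, -EINVAL otherwise
+ */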
+static int batadv_store_uint_attr(const char *buff, size_t count,
+ struct net_device *net_dev,
+ const char *attr_name,
+ unsigned int min, unsigned int max,
+ atomic_t *attr)
+{
+ unsigned long uint_val;
+ int ret;
+
+ ret = kstrtoul(buff, 10, &uint_val);
+ if (ret) {
+ batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
+ attr_name, buff);
+ return -EINVAL;
+ }
+
+ if (uint_val < min) {
+ batadv_info(net_dev, "%s: Value is too small: %lu min: %u\n",
+ attr_name, uint_val, min);
+ return -EINVAL;
+ }
+
+ if (uint_val > max) {
+ batadv_info(net_dev, "%s: Value is too big: %lu max: %u\n",
+ attr_name, uint_val, max);
+ return -EINVAL;
+ }
+
+ if (atomic_read(attr) == uint_val)
+ return count;
+
+ batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
+ attr_name, atomic_read(attr), uint_val);
+
+ atomic_set(attr, uint_val);
+ return count;
+}
+
+static inline ssize_t
+__batadv_store_uint_attr(const char *buff, size_t count,
+ int min, int max,
+ void (*post_func)(struct net_device *),
+ const struct attribute *attr,
+ atomic_t *attr_store, struct net_device *net_dev)
+{
+ int ret;
+
+ ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
+ attr_store);
+ if (post_func && ret)
+ post_func(net_dev);
+
+ return ret;
+}
+
+static ssize_t batadv_show_vis_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int vis_mode = atomic_read(&bat_priv->vis_mode);
+ const char *mode;
+
+ if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ mode = "client";
+ else
+ mode = "server";
+
+ return sprintf(buff, "%s\n", mode);
+}
+
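+/* accepts the numeric BATADV_VIS_TYPE_* value (one digit plus newline,
+ * hence the count == 2 check) as well as "client", "server" and "off"
+ */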
+static ssize_t batadv_store_vis_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ unsigned long val;
+ int ret, vis_mode_tmp = -1;
+ const char *old_mode, *new_mode;
+
+ ret = kstrtoul(buff, 10, &val);
+
+ if (((count == 2) && (!ret) &&
+ (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
+ (strncmp(buff, "client", 6) == 0) ||
+ (strncmp(buff, "off", 3) == 0))
+ vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
+
+ if (((count == 2) && (!ret) &&
+ (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
+ (strncmp(buff, "server", 6) == 0))
+ vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
+
+ if (vis_mode_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ batadv_info(net_dev,
+ "Invalid parameter for 'vis mode' setting received: %s\n",
+ buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
+ return count;
+
+ if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ old_mode = "client";
+ else
+ old_mode = "server";
+
+ if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ new_mode = "client";
+ else
+ new_mode = "server";
+
+ batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
+ new_mode);
+
+ atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
+ return count;
+}
+
+static ssize_t batadv_show_bat_algo(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
+}
+
+static void batadv_post_gw_deselect(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ batadv_gw_deselect(bat_priv);
+}
+
+static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int bytes_written;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case BATADV_GW_MODE_CLIENT:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_CLIENT_NAME);
+ break;
+ case BATADV_GW_MODE_SERVER:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_SERVER_NAME);
+ break;
+ default:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_OFF_NAME);
+ break;
+ }
+
+ return bytes_written;
+}
+
+static ssize_t batadv_store_gw_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ char *curr_gw_mode_str;
+ int gw_mode_tmp = -1;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strncmp(buff, BATADV_GW_MODE_OFF_NAME,
+ strlen(BATADV_GW_MODE_OFF_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_OFF;
+
+ if (strncmp(buff, BATADV_GW_MODE_CLIENT_NAME,
+ strlen(BATADV_GW_MODE_CLIENT_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_CLIENT;
+
+ if (strncmp(buff, BATADV_GW_MODE_SERVER_NAME,
+ strlen(BATADV_GW_MODE_SERVER_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_SERVER;
+
+ if (gw_mode_tmp < 0) {
+ batadv_info(net_dev,
+ "Invalid parameter for 'gw mode' setting received: %s\n",
+ buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
+ return count;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case BATADV_GW_MODE_CLIENT:
+ curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
+ break;
+ case BATADV_GW_MODE_SERVER:
+ curr_gw_mode_str = BATADV_GW_MODE_SERVER_NAME;
+ break;
+ default:
+ curr_gw_mode_str = BATADV_GW_MODE_OFF_NAME;
+ break;
+ }
+
+ batadv_info(net_dev, "Changing gw mode from: %s to: %s\n",
+ curr_gw_mode_str, buff);
+
+ batadv_gw_deselect(bat_priv);
+ atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+ return count;
+}
+
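+/* gw_bandwidth encodes both the download and upload rate in one value;
+ * batadv_gw_bandwidth_to_kbit() splits it into KBit figures, printed in
+ * MBit above the 2048 KBit threshold
+ */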
+static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int down, up;
+ int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
+
+ batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
+ return sprintf(buff, "%i%s/%i%s\n",
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
+}
+
+static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ return batadv_gw_bandwidth_set(net_dev, buff, count);
+}
+
+BATADV_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
+BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
+BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
+ batadv_store_vis_mode);
+static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
+static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
+ batadv_store_gw_mode);
+BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
+ INT_MAX, NULL);
+BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
+ NULL);
+BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
+ batadv_post_gw_deselect);
+static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
+ batadv_store_gw_bwidth);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
+#endif
+
+static struct batadv_attribute *batadv_mesh_attrs[] = {
+ &batadv_attr_aggregated_ogms,
+ &batadv_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &batadv_attr_bridge_loop_avoidance,
+#endif
+ &batadv_attr_fragmentation,
+ &batadv_attr_ap_isolation,
+ &batadv_attr_vis_mode,
+ &batadv_attr_routing_algo,
+ &batadv_attr_gw_mode,
+ &batadv_attr_orig_interval,
+ &batadv_attr_hop_penalty,
+ &batadv_attr_gw_sel_class,
+ &batadv_attr_gw_bandwidth,
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ &batadv_attr_log_level,
+#endif
+ NULL,
+};
+
+int batadv_sysfs_add_meshif(struct net_device *dev)
+{
+ struct kobject *batif_kobject = &dev->dev.kobj;
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_attribute **bat_attr;
+ int err;
+
+ bat_priv->mesh_obj = kobject_create_and_add(BATADV_SYSFS_IF_MESH_SUBDIR,
+ batif_kobject);
+ if (!bat_priv->mesh_obj) {
+ batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ BATADV_SYSFS_IF_MESH_SUBDIR);
+ goto out;
+ }
+
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(bat_priv->mesh_obj,
+ &((*bat_attr)->attr));
+ if (err) {
+ batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, BATADV_SYSFS_IF_MESH_SUBDIR,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
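+	/* roll back: removing files that were never created is assumed to
+	 * be a harmless no-op here
+	 */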
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+out:
+ return -ENOMEM;
+}
+
+void batadv_sysfs_del_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_attribute **bat_attr;
+
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+}
+
+static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ ssize_t length;
+ const char *ifname;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return 0;
+
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+ ifname = "none";
+ else
+ ifname = hard_iface->soft_iface->name;
+
+ length = sprintf(buff, "%s\n", ifname);
+
+ batadv_hardif_free_ref(hard_iface);
+
+ return length;
+}
+
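+/* bind the hard interface to the soft interface named in buff, or release
+ * it for "none"; rtnl_trylock() failure returns -ERESTARTSYS so the write
+ * can be retried instead of sleeping on the rtnl lock
+ */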
+static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ int status_tmp = -1;
+ int ret = count;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return count;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strlen(buff) >= IFNAMSIZ) {
+ pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
+ buff);
+ batadv_hardif_free_ref(hard_iface);
+ return -EINVAL;
+ }
+
+ if (strncmp(buff, "none", 4) == 0)
+ status_tmp = BATADV_IF_NOT_IN_USE;
+ else
+ status_tmp = BATADV_IF_I_WANT_YOU;
+
+ if (hard_iface->if_status == status_tmp)
+ goto out;
+
+ if ((hard_iface->soft_iface) &&
+ (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+ goto out;
+
+ if (!rtnl_trylock()) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ if (status_tmp == BATADV_IF_NOT_IN_USE) {
+ batadv_hardif_disable_interface(hard_iface);
+ goto unlock;
+ }
+
+ /* if the interface is already in use */
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ batadv_hardif_disable_interface(hard_iface);
+
+ ret = batadv_hardif_enable_interface(hard_iface, buff);
+
+unlock:
+ rtnl_unlock();
+out:
+ batadv_hardif_free_ref(hard_iface);
+ return ret;
+}
+
+static ssize_t batadv_show_iface_status(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ ssize_t length;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return 0;
+
+ switch (hard_iface->if_status) {
+ case BATADV_IF_TO_BE_REMOVED:
+ length = sprintf(buff, "disabling\n");
+ break;
+ case BATADV_IF_INACTIVE:
+ length = sprintf(buff, "inactive\n");
+ break;
+ case BATADV_IF_ACTIVE:
+ length = sprintf(buff, "active\n");
+ break;
+ case BATADV_IF_TO_BE_ACTIVATED:
+ length = sprintf(buff, "enabling\n");
+ break;
+ case BATADV_IF_NOT_IN_USE:
+ default:
+ length = sprintf(buff, "not in use\n");
+ break;
+ }
+
+ batadv_hardif_free_ref(hard_iface);
+
+ return length;
+}
+
+static BATADV_ATTR(mesh_iface, S_IRUGO | S_IWUSR, batadv_show_mesh_iface,
+ batadv_store_mesh_iface);
+static BATADV_ATTR(iface_status, S_IRUGO, batadv_show_iface_status, NULL);
+
+static struct batadv_attribute *batadv_batman_attrs[] = {
+ &batadv_attr_mesh_iface,
+ &batadv_attr_iface_status,
+ NULL,
+};
+
+int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
+{
+ struct kobject *hardif_kobject = &dev->dev.kobj;
+ struct batadv_attribute **bat_attr;
+ int err;
+
+ *hardif_obj = kobject_create_and_add(BATADV_SYSFS_IF_BAT_SUBDIR,
+ hardif_kobject);
+
+ if (!*hardif_obj) {
+ batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ BATADV_SYSFS_IF_BAT_SUBDIR);
+ goto out;
+ }
+
+ for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
+ if (err) {
+ batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, BATADV_SYSFS_IF_BAT_SUBDIR,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
+ for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
+out:
+ return -ENOMEM;
+}
+
+void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
+{
+ kobject_put(*hardif_obj);
+ *hardif_obj = NULL;
+}
+
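+/* emit a uevent on the primary soft interface carrying BATTYPE= and
+ * BATACTION= variables, plus BATDATA= for anything but "del" actions
+ */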
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data)
+{
+ int ret = -ENOMEM;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct kobject *bat_kobj;
+ char *uevent_env[4] = { NULL, NULL, NULL, NULL };
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ bat_kobj = &primary_if->soft_iface->dev.kobj;
+
+ uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
+ strlen(batadv_uev_type_str[type]) + 1,
+ GFP_ATOMIC);
+ if (!uevent_env[0])
+ goto out;
+
+ sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
+ batadv_uev_type_str[type]);
+
+ uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
+ strlen(batadv_uev_action_str[action]) + 1,
+ GFP_ATOMIC);
+ if (!uevent_env[1])
+ goto out;
+
+ sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
+ batadv_uev_action_str[action]);
+
+ /* If the event is DEL, ignore the data field */
+ if (action != BATADV_UEV_DEL) {
+ uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
+ strlen(data) + 1, GFP_ATOMIC);
+ if (!uevent_env[2])
+ goto out;
+
+ sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
+ }
+
+ ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
+out:
+ kfree(uevent_env[0]);
+ kfree(uevent_env[1]);
+ kfree(uevent_env[2]);
+
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
+
+ if (ret)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
+ batadv_uev_type_str[type],
+ batadv_uev_action_str[action],
+ (action == BATADV_UEV_DEL ? "NULL" : data), ret);
+ return ret;
+}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/sysfs.h
index fece77ae586..3fd1412b062 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,17 +15,15 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
#ifndef _NET_BATMAN_ADV_SYSFS_H_
#define _NET_BATMAN_ADV_SYSFS_H_
-#define SYSFS_IF_MESH_SUBDIR "mesh"
-#define SYSFS_IF_BAT_SUBDIR "batman_adv"
+#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
+#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
-struct bat_attribute {
+struct batadv_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
char *buf);
@@ -34,11 +31,12 @@ struct bat_attribute {
char *buf, size_t count);
};
-int sysfs_add_meshif(struct net_device *dev);
-void sysfs_del_meshif(struct net_device *dev);
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
-void sysfs_del_hardif(struct kobject **hardif_obj);
-int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
- enum uev_action action, const char *data);
+int batadv_sysfs_add_meshif(struct net_device *dev);
+void batadv_sysfs_del_meshif(struct net_device *dev);
+int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
+ struct net_device *dev);
+void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data);
#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 2ab83d7fb1f..a438f4b582f 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,44 +29,46 @@
#include <linux/crc16.h>
-static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node);
-static void tt_purge(struct work_struct *work);
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ struct batadv_orig_node *orig_node);
+static void batadv_tt_purge(struct work_struct *work);
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
/* returns 1 if they are the same mac addr */
-static int compare_tt(const struct hlist_node *node, const void *data2)
+static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct tt_common_entry,
+ const void *data1 = container_of(node, struct batadv_tt_common_entry,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
-static void tt_start_timer(struct bat_priv *bat_priv)
+static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
- queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
+ INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
msecs_to_jiffies(5000));
}
-static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
- const void *data)
+static struct batadv_tt_common_entry *
+batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
struct hlist_head *head;
struct hlist_node *node;
- struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
uint32_t index;
if (!hash)
return NULL;
- index = choose_orig(data, hash->size);
+ index = batadv_choose_orig(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
- if (!compare_eth(tt_common_entry, data))
+ if (!batadv_compare_eth(tt_common_entry, data))
continue;
if (!atomic_inc_not_zero(&tt_common_entry->refcount))
@@ -82,80 +82,87 @@ static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
return tt_common_entry_tmp;
}
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
- tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
if (tt_common_entry)
tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry, common);
+ struct batadv_tt_local_entry,
+ common);
return tt_local_entry;
}
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_tt_global_entry *
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
- tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
if (tt_common_entry)
tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry, common);
+ struct batadv_tt_global_entry,
+ common);
return tt_global_entry;
}
-static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+static void
+batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
{
if (atomic_dec_and_test(&tt_local_entry->common.refcount))
kfree_rcu(tt_local_entry, common.rcu);
}
-static void tt_global_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
- tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
- tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
- common);
+ tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry, common);
kfree(tt_global_entry);
}
-static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{
if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
- tt_global_del_orig_list(tt_global_entry);
+ batadv_tt_global_del_orig_list(tt_global_entry);
call_rcu(&tt_global_entry->common.rcu,
- tt_global_entry_free_rcu);
+ batadv_tt_global_entry_free_rcu);
}
}
-static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
- orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
- orig_node_free_ref(orig_entry->orig_node);
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+ batadv_orig_node_free_ref(orig_entry->orig_node);
kfree(orig_entry);
}
-static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+static void
+batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
/* to avoid race conditions, immediately decrease the tt counter */
atomic_dec(&orig_entry->orig_node->tt_size);
- call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
-static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
- uint8_t flags)
+static void batadv_tt_local_event(struct batadv_priv *bat_priv,
+ const uint8_t *addr, uint8_t flags)
{
- struct tt_change_node *tt_change_node;
+ struct batadv_tt_change_node *tt_change_node, *entry, *safe;
+ bool event_removed = false;
+ bool del_op_requested, del_op_entry;
tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
@@ -165,50 +172,82 @@ static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
tt_change_node->change.flags = flags;
memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
+ del_op_requested = flags & BATADV_TT_CLIENT_DEL;
+
+ /* check for ADD+DEL or DEL+ADD events */
spin_lock_bh(&bat_priv->tt_changes_list_lock);
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ list) {
+ if (!batadv_compare_eth(entry->change.addr, addr))
+ continue;
+
+ /* DEL+ADD in the same orig interval have no effect and can be
+ * removed to avoid silly behaviour on the receiver side. The
+ * other way around (ADD+DEL) can happen in case of roaming of
+ * a client still in the NEW state. Roaming of NEW clients is
+ * now possible due to automatic recognition of "temporary"
+ * clients
+ */
+ del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
+ if (!del_op_requested && del_op_entry)
+ goto del;
+ if (del_op_requested && !del_op_entry)
+ goto del;
+ continue;
+del:
+ list_del(&entry->list);
+ kfree(entry);
+ event_removed = true;
+ goto unlock;
+ }
+
/* track the change in the OGM interval list */
list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
- atomic_inc(&bat_priv->tt_local_changes);
+
+unlock:
spin_unlock_bh(&bat_priv->tt_changes_list_lock);
- atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+ if (event_removed)
+ atomic_dec(&bat_priv->tt_local_changes);
+ else
+ atomic_inc(&bat_priv->tt_local_changes);
}
-int tt_len(int changes_num)
+int batadv_tt_len(int changes_num)
{
- return changes_num * sizeof(struct tt_change);
+ return changes_num * sizeof(struct batadv_tt_change);
}
-static int tt_local_init(struct bat_priv *bat_priv)
+static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt_local_hash)
- return 1;
+ return 0;
- bat_priv->tt_local_hash = hash_new(1024);
+ bat_priv->tt_local_hash = batadv_hash_new(1024);
if (!bat_priv->tt_local_hash)
- return 0;
+ return -ENOMEM;
- return 1;
+ return 0;
}
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex)
+void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
int hash_added;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (tt_local_entry) {
tt_local_entry->last_seen = jiffies;
- /* possibly unset the TT_CLIENT_PENDING flag */
- tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
+ /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
+ tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
goto out;
}
@@ -216,40 +255,42 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (!tt_local_entry)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
- (uint8_t)atomic_read(&bat_priv->ttvn));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+ (uint8_t)atomic_read(&bat_priv->ttvn));
memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
- tt_local_entry->common.flags = NO_FLAGS;
- if (is_wifi_iface(ifindex))
- tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+ tt_local_entry->common.flags = BATADV_NO_FLAGS;
+ if (batadv_is_wifi_iface(ifindex))
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
atomic_set(&tt_local_entry->common.refcount, 2);
tt_local_entry->last_seen = jiffies;
/* the batman interface mac address should never be purged */
- if (compare_eth(addr, soft_iface->dev_addr))
- tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
+ if (batadv_compare_eth(addr, soft_iface->dev_addr))
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
/* The local entry has to be marked as NEW to avoid sending it in
* a full table response going out before the next ttvn increment
- * (consistency check) */
- tt_local_entry->common.flags |= TT_CLIENT_NEW;
+ * (consistency check)
+ */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
- hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
- &tt_local_entry->common,
- &tt_local_entry->common.hash_entry);
+ hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+ batadv_choose_orig,
+ &tt_local_entry->common,
+ &tt_local_entry->common.hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
goto out;
}
- tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
+ batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
/* remove address from global hash if present */
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
/* Check whether it is a roaming event! */
if (tt_global_entry) {
@@ -259,31 +300,85 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
hlist_for_each_entry_rcu(orig_entry, node, head, list) {
orig_entry->orig_node->tt_poss_change = true;
- send_roam_adv(bat_priv, tt_global_entry->common.addr,
- orig_entry->orig_node);
+ batadv_send_roam_adv(bat_priv,
+ tt_global_entry->common.addr,
+ orig_entry->orig_node);
}
rcu_read_unlock();
/* The global entry has to be marked as ROAMING and
* has to be kept for consistency purposes
*/
- tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
}
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+}
+
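+/* grow or shrink *packet_buff to new_packet_len bytes, preserving the
+ * first min_packet_len bytes; on allocation failure the old buffer and
+ * length are left untouched
+ */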
+static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len,
+ int new_packet_len)
+{
+ unsigned char *new_buff;
+
+ new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+
+ /* keep the old buffer if kmalloc fails */
+ if (new_buff) {
+ memcpy(new_buff, *packet_buff, min_packet_len);
+ kfree(*packet_buff);
+ *packet_buff = new_buff;
+ *packet_buff_len = new_packet_len;
+ }
+}
+
+static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len)
+{
+ struct batadv_hard_iface *primary_if;
+ int req_len;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ req_len = min_packet_len;
+ req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+
+ /* if we have too many changes for one packet, don't send any
+ * and wait for the tt table request which will be fragmented
+ */
+ if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
+ req_len = min_packet_len;
+
+ batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
+ min_packet_len, req_len);
+
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
}
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len)
+static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len)
{
- int count = 0, tot_changes = 0;
- struct tt_change_node *entry, *safe;
+ struct batadv_tt_change_node *entry, *safe;
+ int count = 0, tot_changes = 0, new_len;
+ unsigned char *tt_buff;
- if (buff_len > 0)
- tot_changes = buff_len / tt_len(1);
+ batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
+ packet_buff_len, min_packet_len);
+
+ new_len = *packet_buff_len - min_packet_len;
+ tt_buff = *packet_buff + min_packet_len;
+
+ if (new_len > 0)
+ tot_changes = new_len / batadv_tt_len(1);
spin_lock_bh(&bat_priv->tt_changes_list_lock);
atomic_set(&bat_priv->tt_local_changes, 0);
@@ -291,8 +386,8 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
list) {
if (count < tot_changes) {
- memcpy(buff + tt_len(count),
- &entry->change, sizeof(struct tt_change));
+ memcpy(tt_buff + batadv_tt_len(count),
+ &entry->change, sizeof(struct batadv_tt_change));
count++;
}
list_del(&entry->list);
@@ -305,37 +400,35 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
kfree(bat_priv->tt_buff);
bat_priv->tt_buff_len = 0;
bat_priv->tt_buff = NULL;
- /* We check whether this new OGM has no changes due to size
- * problems */
- if (buff_len > 0) {
- /**
- * if kmalloc() fails we will reply with the full table
+ /* check whether this new OGM has no changes due to size problems */
+ if (new_len > 0) {
+ /* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
- bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+ bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
if (bat_priv->tt_buff) {
- memcpy(bat_priv->tt_buff, buff, buff_len);
- bat_priv->tt_buff_len = buff_len;
+ memcpy(bat_priv->tt_buff, tt_buff, new_len);
+ bat_priv->tt_buff_len = new_len;
}
}
spin_unlock_bh(&bat_priv->tt_buff_lock);
- return tot_changes;
+ return count;
}
-int tt_local_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -343,7 +436,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -363,63 +456,94 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
tt_common_entry->addr,
(tt_common_entry->flags &
- TT_CLIENT_ROAM ? 'R' : '.'),
+ BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_NOPURGE ? 'P' : '.'),
+ BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_NEW ? 'N' : '.'),
+ BATADV_TT_CLIENT_NEW ? 'N' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_PENDING ? 'X' : '.'),
+ BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_WIFI ? 'W' : '.'));
+ BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static void tt_local_set_pending(struct bat_priv *bat_priv,
- struct tt_local_entry *tt_local_entry,
- uint16_t flags, const char *message)
+static void
+batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
+ struct batadv_tt_local_entry *tt_local_entry,
+ uint16_t flags, const char *message)
{
- tt_local_event(bat_priv, tt_local_entry->common.addr,
- tt_local_entry->common.flags | flags);
+ batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
+ tt_local_entry->common.flags | flags);
/* The local client has to be marked as "pending to be removed" but has
* to be kept in the table in order to send it in a full table
- * response issued before the next ttvn increment (consistency check) */
- tt_local_entry->common.flags |= TT_CLIENT_PENDING;
+ * response issued before the next ttvn increment (consistency check)
+ */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
- bat_dbg(DBG_TT, bat_priv,
- "Local tt entry (%pM) pending to be removed: %s\n",
- tt_local_entry->common.addr, message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local tt entry (%pM) pending to be removed: %s\n",
+ tt_local_entry->common.addr, message);
}
-void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
- const char *message, bool roaming)
+void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
+ const char *message, bool roaming)
{
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ uint16_t flags;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (!tt_local_entry)
goto out;
- tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
- (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
+ flags = BATADV_TT_CLIENT_DEL;
+ if (roaming)
+ flags |= BATADV_TT_CLIENT_ROAM;
+
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
}
-static void tt_local_purge(struct bat_priv *bat_priv)
+static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
+ struct hlist_head *head)
{
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
struct hlist_node *node, *node_tmp;
+
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+ hash_entry) {
+ tt_local_entry = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
+ continue;
+
+ /* entry already marked for deletion */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
+ continue;
+
+ if (!batadv_has_timed_out(tt_local_entry->last_seen,
+ BATADV_TT_LOCAL_TIMEOUT))
+ continue;
+
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry,
+ BATADV_TT_CLIENT_DEL, "timed out");
+ }
+}
+
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -429,36 +553,18 @@ static void tt_local_purge(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
- continue;
-
- /* entry already marked for deletion */
- if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
- continue;
-
- if (!has_timed_out(tt_local_entry->last_seen,
- TT_LOCAL_TIMEOUT))
- continue;
-
- tt_local_set_pending(bat_priv, tt_local_entry,
- TT_CLIENT_DEL, "timed out");
- }
+ batadv_tt_local_purge_list(bat_priv, head);
spin_unlock_bh(list_lock);
}
}
-static void tt_local_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -476,35 +582,35 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- tt_local_entry_free_ref(tt_local_entry);
+ tt_local = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
bat_priv->tt_local_hash = NULL;
}
-static int tt_global_init(struct bat_priv *bat_priv)
+static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt_global_hash)
- return 1;
+ return 0;
- bat_priv->tt_global_hash = hash_new(1024);
+ bat_priv->tt_global_hash = batadv_hash_new(1024);
if (!bat_priv->tt_global_hash)
- return 0;
+ return -ENOMEM;
- return 1;
+ return 0;
}
-static void tt_changes_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
- struct tt_change_node *entry, *safe;
+ struct batadv_tt_change_node *entry, *safe;
spin_lock_bh(&bat_priv->tt_changes_list_lock);
@@ -521,10 +627,11 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
/* find out if an orig_node is already in the list of a tt_global_entry.
* returns true if found, false otherwise
*/
-static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
- const struct orig_node *orig_node)
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node)
{
- struct tt_orig_list_entry *tmp_orig_entry;
+ struct batadv_tt_orig_list_entry *tmp_orig_entry;
const struct hlist_head *head;
struct hlist_node *node;
bool found = false;
@@ -541,11 +648,11 @@ static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
return found;
}
-static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- int ttvn)
+static void
+batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node, int ttvn)
{
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
if (!orig_entry)
@@ -564,91 +671,95 @@ static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
}
/* caller must hold orig_node refcount */
-int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
- bool wifi)
+int batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_addr, uint8_t flags,
+ uint8_t ttvn)
{
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
int ret = 0;
int hash_added;
+ struct batadv_tt_common_entry *common;
- tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
if (!tt_global_entry) {
- tt_global_entry = kzalloc(sizeof(*tt_global_entry),
- GFP_ATOMIC);
+ tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
if (!tt_global_entry)
goto out;
- memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+ common = &tt_global_entry->common;
+ memcpy(common->addr, tt_addr, ETH_ALEN);
- tt_global_entry->common.flags = NO_FLAGS;
+ common->flags = flags;
tt_global_entry->roam_at = 0;
- atomic_set(&tt_global_entry->common.refcount, 2);
+ atomic_set(&common->refcount, 2);
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
spin_lock_init(&tt_global_entry->list_lock);
- hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
- choose_orig, &tt_global_entry->common,
- &tt_global_entry->common.hash_entry);
+ hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+ batadv_compare_tt,
+ batadv_choose_orig, common,
+ &common->hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
goto out_remove;
}
- tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
+ batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
+ ttvn);
} else {
/* there is already a global entry, use this one. */
- /* If there is the TT_CLIENT_ROAM flag set, there is only one
- * originator left in the list and we previously received a
+ /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
+ * one originator left in the list and we previously received a
* delete + roaming change for this originator.
*
* We should first delete the old originator before adding the
* new one.
*/
- if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
- tt_global_del_orig_list(tt_global_entry);
- tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+ if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = 0;
}
- if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
- tt_global_add_orig_entry(tt_global_entry, orig_node,
- ttvn);
+ if (!batadv_tt_global_entry_has_orig(tt_global_entry,
+ orig_node))
+ batadv_tt_global_add_orig_entry(tt_global_entry,
+ orig_node, ttvn);
}
- if (wifi)
- tt_global_entry->common.flags |= TT_CLIENT_WIFI;
-
- bat_dbg(DBG_TT, bat_priv,
- "Creating new global tt entry: %pM (via %pM)\n",
- tt_global_entry->common.addr, orig_node->orig);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new global tt entry: %pM (via %pM)\n",
+ tt_global_entry->common.addr, orig_node->orig);
out_remove:
/* remove address from local hash if present */
- tt_local_remove(bat_priv, tt_global_entry->common.addr,
- "global tt received", roaming);
+ batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
+ "global tt received",
+ flags & BATADV_TT_CLIENT_ROAM);
ret = 1;
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
return ret;
}
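The rework above also changes the public signature: batadv_tt_global_add() now receives the client flags as one byte instead of separate roaming/wifi booleans, and derives the roaming argument for the local-table removal from that byte. A hypothetical call site, before and after:

/* before: per-attribute booleans */
tt_global_add(bat_priv, orig_node, addr, ttvn, roaming, wifi);

/* after: one flags byte, assembled by the (hypothetical) caller */
uint8_t flags = BATADV_NO_FLAGS;

if (roaming)
        flags |= BATADV_TT_CLIENT_ROAM;
if (wifi)
        flags |= BATADV_TT_CLIENT_WIFI;

batadv_tt_global_add(bat_priv, orig_node, addr, flags, ttvn);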
/* print all orig nodes that announce the address for this global entry.
* it is assumed that the caller holds rcu_read_lock();
*/
-static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
- struct seq_file *seq)
+static void
+batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct seq_file *seq)
{
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
uint16_t flags;
uint8_t last_ttvn;
@@ -662,25 +773,25 @@ static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
tt_global_entry->common.addr, orig_entry->ttvn,
orig_entry->orig_node->orig, last_ttvn,
- (flags & TT_CLIENT_ROAM ? 'R' : '.'),
- (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+ (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+ (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
}
}
-int tt_global_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -688,7 +799,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -707,87 +818,91 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- tt_global_print_entry(tt_global_entry, seq);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ batadv_tt_global_print_entry(tt_global, seq);
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
/* deletes the orig list of a tt_global_entry */
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
struct hlist_head *head;
struct hlist_node *node, *safe;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
hlist_del_rcu(node);
- tt_orig_list_entry_free_ref(orig_entry);
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
}
spin_unlock_bh(&tt_global_entry->list_lock);
}
-static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- const char *message)
+static void
+batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
struct hlist_head *head;
struct hlist_node *node, *safe;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
if (orig_entry->orig_node == orig_node) {
- bat_dbg(DBG_TT, bat_priv,
- "Deleting %pM from global tt entry %pM: %s\n",
- orig_node->orig, tt_global_entry->common.addr,
- message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting %pM from global tt entry %pM: %s\n",
+ orig_node->orig,
+ tt_global_entry->common.addr, message);
hlist_del_rcu(node);
- tt_orig_list_entry_free_ref(orig_entry);
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
}
}
spin_unlock_bh(&tt_global_entry->list_lock);
}
-static void tt_global_del_struct(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- const char *message)
+static void
+batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ const char *message)
{
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global_entry->common.addr, message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM: %s\n",
+ tt_global_entry->common.addr, message);
- hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
- tt_global_entry->common.addr);
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+ batadv_choose_orig, tt_global_entry->common.addr);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
}
/* If the client is to be deleted, we check if it is the last originator entry
- * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
- * otherwise we simply remove the originator scheduled for deletion.
+ * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
+ * timer, otherwise we simply remove the originator scheduled for deletion.
*/
-static void tt_global_del_roaming(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- const char *message)
+static void
+batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
bool last_entry = true;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
/* no local entry exists, case 1:
* Check if this is the last one or if other entries exist.
@@ -805,37 +920,37 @@ static void tt_global_del_roaming(struct bat_priv *bat_priv,
if (last_entry) {
/* it's the last one, mark it for roaming. */
- tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
} else
/* there is another entry, so we can simply delete this
* one and can still use the other one.
*/
- tt_global_del_orig_entry(bat_priv, tt_global_entry,
- orig_node, message);
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
}
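Case 1 above hinges on whether the deleted originator is the last one in the entry's orig_list. A simplified single-threaded sketch of that decision (plain pointers stand in for the kernel's RCU hlist, and the flag value is illustrative):

#include <stdio.h>

struct orig_entry { struct orig_entry *next; int id; };
struct global_entry {
        unsigned int flags;
        unsigned long roam_at;
        struct orig_entry *orig_list;
};

#define TT_CLIENT_ROAM (1u << 4)   /* stand-in value */

static void remove_orig(struct global_entry *g, int id)
{
        struct orig_entry **p = &g->orig_list;

        while (*p) {
                if ((*p)->id == id) {
                        *p = (*p)->next;   /* caller owns the memory */
                        return;
                }
                p = &(*p)->next;
        }
}

static void del_roaming(struct global_entry *g, int id, unsigned long now)
{
        if (g->orig_list && !g->orig_list->next) {
                /* last originator: keep the entry, flag it and start
                 * the roaming timer; it is purged on timeout */
                g->flags |= TT_CLIENT_ROAM;
                g->roam_at = now;
        } else {
                /* other originators remain: just drop this one */
                remove_orig(g, id);
        }
}

int main(void)
{
        struct orig_entry b = { 0, 2 }, a = { &b, 1 };
        struct global_entry g = { 0, 0, &a };

        del_roaming(&g, 1, 100);   /* two entries: plain removal */
        del_roaming(&g, 2, 200);   /* last entry: marked, kept */
        printf("flags 0x%x roam_at %lu\n", g.flags, g.roam_at);
        return 0;
}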
-static void tt_global_del(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *addr,
- const char *message, bool roaming)
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ const char *message, bool roaming)
{
- struct tt_global_entry *tt_global_entry = NULL;
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_local_entry *local_entry = NULL;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
if (!roaming) {
- tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
- message);
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
if (hlist_empty(&tt_global_entry->orig_list))
- tt_global_del_struct(bat_priv, tt_global_entry,
- message);
+ batadv_tt_global_del_struct(bat_priv, tt_global_entry,
+ message);
goto out;
}
@@ -844,41 +959,42 @@ static void tt_global_del(struct bat_priv *bat_priv,
* event, there are two possibilities:
* 1) the client roamed from node A to node B => if there
* is only one originator left for this client, we mark
- * it with TT_CLIENT_ROAM, we start a timer and we
+ * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
* wait for node B to claim it. In case of timeout
* the entry is purged.
*
* If there are other originators left, we directly delete
* the originator.
* 2) the client roamed to us => we can directly delete
- * the global entry, since it is useless now. */
-
- tt_local_entry = tt_local_hash_find(bat_priv,
- tt_global_entry->common.addr);
- if (tt_local_entry) {
+ * the global entry, since it is useless now.
+ */
+ local_entry = batadv_tt_local_hash_find(bat_priv,
+ tt_global_entry->common.addr);
+ if (local_entry) {
/* local entry exists, case 2: client roamed to us. */
- tt_global_del_orig_list(tt_global_entry);
- tt_global_del_struct(bat_priv, tt_global_entry, message);
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
} else
/* no local entry exists, case 1: check for roaming */
- tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
- message);
+ batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
+ orig_node, message);
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
- if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+ if (local_entry)
+ batadv_tt_local_entry_free_ref(local_entry);
}
-void tt_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const char *message)
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
- struct tt_global_entry *tt_global_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
+ struct batadv_tt_common_entry *tt_common_entry;
uint32_t i;
- struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -893,20 +1009,19 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, node, safe,
head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
-
- tt_global_del_orig_entry(bat_priv, tt_global_entry,
- orig_node, message);
-
- if (hlist_empty(&tt_global_entry->orig_list)) {
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global_entry->common.addr,
- message);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global->orig_list)) {
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM: %s\n",
+ tt_global->common.addr, message);
hlist_del_rcu(node);
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global);
}
}
spin_unlock_bh(list_lock);
@@ -914,12 +1029,36 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
orig_node->tt_initialised = false;
}
-static void tt_global_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
+ struct hlist_head *head)
{
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
+
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+ hash_entry) {
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
+ continue;
+ if (!batadv_has_timed_out(tt_global_entry->roam_at,
+ BATADV_TT_CLIENT_ROAM_TIMEOUT))
+ continue;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry (%pM): Roaming timeout\n",
+ tt_global_entry->common.addr);
+
+ hlist_del_rcu(node);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+ }
+}
+
+static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -929,35 +1068,18 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
- continue;
- if (!has_timed_out(tt_global_entry->roam_at,
- TT_CLIENT_ROAM_TIMEOUT))
- continue;
-
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry (%pM): Roaming timeout\n",
- tt_global_entry->common.addr);
-
- hlist_del_rcu(node);
- tt_global_entry_free_ref(tt_global_entry);
- }
+ batadv_tt_global_roam_purge_list(bat_priv, head);
spin_unlock_bh(list_lock);
}
}
-static void tt_global_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -975,56 +1097,60 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- tt_global_entry_free_ref(tt_global_entry);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ batadv_tt_global_entry_free_ref(tt_global);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
bat_priv->tt_global_hash = NULL;
}
-static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
- struct tt_global_entry *tt_global_entry)
+static bool
+_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
+ struct batadv_tt_global_entry *tt_global_entry)
{
bool ret = false;
- if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
- tt_global_entry->common.flags & TT_CLIENT_WIFI)
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
+ tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
ret = true;
return ret;
}
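_batadv_is_ap_isolated() encodes the whole AP-isolation policy: traffic is suppressed only when both endpoints were learned as wifi clients. A standalone restatement with asserts (the flag value is a stand-in):

#include <assert.h>
#include <stdbool.h>

#define TT_CLIENT_WIFI (1u << 4)   /* stand-in value */

static bool is_ap_isolated(unsigned int src_flags, unsigned int dst_flags)
{
        return (src_flags & TT_CLIENT_WIFI) && (dst_flags & TT_CLIENT_WIFI);
}

int main(void)
{
        assert(is_ap_isolated(TT_CLIENT_WIFI, TT_CLIENT_WIFI));   /* drop */
        assert(!is_ap_isolated(TT_CLIENT_WIFI, 0));               /* pass */
        assert(!is_ap_isolated(0, 0));                            /* pass */
        return 0;
}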
-struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *src, const uint8_t *addr)
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const uint8_t *src,
+ const uint8_t *addr)
{
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
int best_tq;
if (src && atomic_read(&bat_priv->ap_isolation)) {
- tt_local_entry = tt_local_hash_find(bat_priv, src);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
if (!tt_local_entry)
goto out;
}
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
/* check whether the clients should not communicate due to AP
- * isolation */
- if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
+ * isolation
+ */
+ if (tt_local_entry &&
+ _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
best_tq = 0;
@@ -1032,7 +1158,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
rcu_read_lock();
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, node, head, list) {
- router = orig_node_get_router(orig_entry->orig_node);
+ router = batadv_orig_node_get_router(orig_entry->orig_node);
if (!router)
continue;
@@ -1040,7 +1166,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
orig_node = orig_entry->orig_node;
best_tq = router->tq_avg;
}
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* found anything? */
if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
@@ -1048,21 +1174,21 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
rcu_read_unlock();
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return orig_node;
}
/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t tt_global_crc(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
uint16_t total = 0, total_one;
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
@@ -1072,30 +1198,29 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
- head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
+ hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+ tt_global = container_of(tt_common,
+ struct batadv_tt_global_entry,
+ common);
/* Roaming clients are in the global table for
* consistency only. They don't have to be
* taken into account while computing the
* global crc
*/
- if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+ if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
continue;
/* find out if this global entry is announced by this
* originator
*/
- if (!tt_global_entry_has_orig(tt_global_entry,
- orig_node))
+ if (!batadv_tt_global_entry_has_orig(tt_global,
+ orig_node))
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_global_entry->common.addr[j]);
+ tt_common->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -1105,11 +1230,11 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
}
/* Calculates the checksum of the local table */
-uint16_t tt_local_crc(struct bat_priv *bat_priv)
+static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
{
uint16_t total = 0, total_one;
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
@@ -1119,16 +1244,16 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
- head, hash_entry) {
+ hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
/* not yet committed clients must not be taken into
- * account while computing the CRC */
- if (tt_common_entry->flags & TT_CLIENT_NEW)
+ * account while computing the CRC
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_common_entry->addr[j]);
+ tt_common->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -1137,9 +1262,9 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
return total;
}
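Both CRC helpers use the same construction: a CRC16 per entry over the six MAC bytes, XOR-folded into the running total, so the table checksum does not depend on hash-bucket iteration order and any single-entry difference flips the result. A standalone demo (the bitwise crc16_byte() below is a stand-in for the kernel's table-driven lib/crc16):

#include <stdio.h>
#include <string.h>

/* CRC-16/ARC, reflected polynomial 0xA001, one byte at a time */
static unsigned short crc16_byte(unsigned short crc, unsigned char data)
{
        int i;

        crc ^= data;
        for (i = 0; i < 8; i++)
                crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        return crc;
}

static unsigned short table_crc(const unsigned char addr[][6], int n)
{
        unsigned short total = 0, total_one;
        int i, j;

        for (i = 0; i < n; i++) {
                total_one = 0;
                for (j = 0; j < 6; j++)
                        total_one = crc16_byte(total_one, addr[i][j]);
                total ^= total_one;   /* XOR fold: order independent */
        }
        return total;
}

int main(void)
{
        unsigned char a[2][6] = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 },
                                  { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 } };
        unsigned char b[2][6];

        memcpy(b[0], a[1], 6);   /* same clients, opposite order */
        memcpy(b[1], a[0], 6);

        printf("%04x == %04x\n", table_crc(a, 2), table_crc(b, 2));
        return 0;
}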
-static void tt_req_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
- struct tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node, *safe;
spin_lock_bh(&bat_priv->tt_req_list_lock);
@@ -1151,15 +1276,16 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_req_list_lock);
}
-static void tt_save_orig_buffer(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *tt_buff,
- uint8_t tt_num_changes)
+static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff,
+ uint8_t tt_num_changes)
{
- uint16_t tt_buff_len = tt_len(tt_num_changes);
+ uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
/* Replace the old buffer only if I received something in the
- * last OGM (the OGM could carry no changes) */
+ * last OGM (the OGM could carry no changes)
+ */
spin_lock_bh(&orig_node->tt_buff_lock);
if (tt_buff_len > 0) {
kfree(orig_node->tt_buff);
@@ -1173,13 +1299,14 @@ static void tt_save_orig_buffer(struct bat_priv *bat_priv,
spin_unlock_bh(&orig_node->tt_buff_lock);
}
-static void tt_req_purge(struct bat_priv *bat_priv)
+static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
- struct tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node, *safe;
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
+ if (batadv_has_timed_out(node->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT)) {
list_del(&node->list);
kfree(node);
}
@@ -1188,17 +1315,19 @@ static void tt_req_purge(struct bat_priv *bat_priv)
}
/* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise */
-static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+ * has already been issued for this orig_node, NULL otherwise
+ */
+static struct batadv_tt_req_node *
+batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+ struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
- if (compare_eth(tt_req_node_tmp, orig_node) &&
- !has_timed_out(tt_req_node_tmp->issued_at,
- TT_REQUEST_TIMEOUT))
+ if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
+ !batadv_has_timed_out(tt_req_node_tmp->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT))
goto unlock;
}
@@ -1216,63 +1345,67 @@ unlock:
}
/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_local_valid_entry(const void *entry_ptr,
+ const void *data_ptr)
{
- const struct tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
- if (tt_common_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
return 0;
return 1;
}
-static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_global_valid(const void *entry_ptr,
+ const void *data_ptr)
{
- const struct tt_common_entry *tt_common_entry = entry_ptr;
- const struct tt_global_entry *tt_global_entry;
- const struct orig_node *orig_node = data_ptr;
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_global_entry *tt_global_entry;
+ const struct batadv_orig_node *orig_node = data_ptr;
- if (tt_common_entry->flags & TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
return 0;
- tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
common);
- return tt_global_entry_has_orig(tt_global_entry, orig_node);
+ return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
}
-static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
- struct hashtable_t *hash,
- struct hard_iface *primary_if,
- int (*valid_cb)(const void *,
- const void *),
- void *cb_data)
+static struct sk_buff *
+batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
+ struct batadv_hashtable *hash,
+ struct batadv_hard_iface *primary_if,
+ int (*valid_cb)(const void *, const void *),
+ void *cb_data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_query_packet *tt_response;
- struct tt_change *tt_change;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_query_packet *tt_response;
+ struct batadv_tt_change *tt_change;
struct hlist_node *node;
struct hlist_head *head;
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
- ssize_t tt_query_size = sizeof(struct tt_query_packet);
+ ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
uint32_t i;
+ size_t len;
if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
tt_len = primary_if->soft_iface->mtu - tt_query_size;
- tt_len -= tt_len % sizeof(struct tt_change);
+ tt_len -= tt_len % sizeof(struct batadv_tt_change);
}
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
+ len = tt_query_size + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- tt_query_size + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
tt_response->ttvn = ttvn;
- tt_change = (struct tt_change *)(skb->data + tt_query_size);
+ tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
tt_count = 0;
rcu_read_lock();
@@ -1289,7 +1422,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
- tt_change->flags = NO_FLAGS;
+ tt_change->flags = BATADV_NO_FLAGS;
tt_count++;
tt_change++;
@@ -1298,72 +1431,78 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
rcu_read_unlock();
/* store in the message the number of entries we have successfully
- * copied */
+ * copied
+ */
tt_response->tt_data = htons(tt_count);
out:
return skb;
}
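The clamp at the top of batadv_tt_response_fill_table() keeps the single-packet (non-fragmenting) response within the interface MTU and rounds the payload down to a whole number of tt_change records, so the receiver never sees a truncated entry. The same arithmetic in isolation (sizes are illustrative, roughly matching the on-wire structs):

#include <stdio.h>

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int query_size = 19;    /* e.g. sizeof(tt_query_packet) */
        unsigned int change_size = 7;    /* e.g. sizeof(tt_change) */
        unsigned int tt_len = 4000;      /* requested payload */

        if (query_size + tt_len > mtu) {
                tt_len = mtu - query_size;        /* 1481 */
                tt_len -= tt_len % change_size;   /* 1477 = 211 * 7 */
        }
        printf("%u entries fit\n", tt_len / change_size);   /* 211 */
        return 0;
}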
-static int send_tt_request(struct bat_priv *bat_priv,
- struct orig_node *dst_orig_node,
- uint8_t ttvn, uint16_t tt_crc, bool full_table)
+static int batadv_send_tt_request(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *dst_orig_node,
+ uint8_t ttvn, uint16_t tt_crc,
+ bool full_table)
{
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_request;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if;
- struct tt_req_node *tt_req_node = NULL;
+ struct batadv_tt_query_packet *tt_request;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_tt_req_node *tt_req_node = NULL;
int ret = 1;
+ size_t tt_req_len;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* The new tt_req will be issued only if I'm not waiting for a
- * reply from the same orig_node yet */
- tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
+ * reply from the same orig_node yet
+ */
+ tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
if (!tt_req_node)
goto out;
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
+ skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- tt_request = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet));
+ tt_req_len = sizeof(*tt_request);
+ tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
- tt_request->header.packet_type = BAT_TT_QUERY;
- tt_request->header.version = COMPAT_VERSION;
+ tt_request->header.packet_type = BATADV_TT_QUERY;
+ tt_request->header.version = BATADV_COMPAT_VERSION;
memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
- tt_request->header.ttl = TTL;
+ tt_request->header.ttl = BATADV_TTL;
tt_request->ttvn = ttvn;
tt_request->tt_data = htons(tt_crc);
- tt_request->flags = TT_REQUEST;
+ tt_request->flags = BATADV_TT_REQUEST;
if (full_table)
- tt_request->flags |= TT_FULL_TABLE;
+ tt_request->flags |= BATADV_TT_FULL_TABLE;
- neigh_node = orig_node_get_router(dst_orig_node);
+ neigh_node = batadv_orig_node_get_router(dst_orig_node);
if (!neigh_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_REQUEST to %pM via %pM [%c]\n",
- dst_orig_node->orig, neigh_node->addr,
- (full_table ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_REQUEST to %pM via %pM [%c]\n",
+ dst_orig_node->orig, neigh_node->addr,
+ (full_table ? 'F' : '.'));
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (ret)
kfree_skb(skb);
if (ret && tt_req_node) {
@@ -1375,39 +1514,42 @@ out:
return ret;
}
-static bool send_other_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+static bool
+batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if = NULL;
+ struct batadv_orig_node *req_dst_orig_node = NULL;
+ struct batadv_orig_node *res_dst_orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
uint8_t orig_ttvn, req_ttvn, ttvn;
int ret = false;
unsigned char *tt_buff;
bool full_table;
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_response;
+ struct batadv_tt_query_packet *tt_response;
+ size_t len;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
- tt_request->src, tt_request->ttvn, tt_request->dst,
- (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
+ tt_request->src, tt_request->ttvn, tt_request->dst,
+ (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
+ req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
if (!req_dst_orig_node)
goto out;
- res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
+ res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
if (!res_dst_orig_node)
goto out;
- neigh_node = orig_node_get_router(res_dst_orig_node);
+ neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
if (!neigh_node)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1416,71 +1558,75 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
/* I don't have the requested data */
if (orig_ttvn != req_ttvn ||
- tt_request->tt_data != req_dst_orig_node->tt_crc)
+ tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
goto out;
/* If the full table has been explicitly requested */
- if (tt_request->flags & TT_FULL_TABLE ||
+ if (tt_request->flags & BATADV_TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
else
full_table = false;
/* In this version, fragmentation is not implemented, so
- * I'll send only one packet with as much TT entries as I can */
+ * I'll send only one packet with as many TT entries as I can
+ */
if (!full_table) {
spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
tt_len = req_dst_orig_node->tt_buff_len;
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
- tt_len + ETH_HLEN);
+ len = sizeof(*tt_response) + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet) + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+ len);
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
- tt_buff = skb->data + sizeof(struct tt_query_packet);
+ tt_buff = skb->data + sizeof(*tt_response);
/* Copy the last orig_node's OGM buffer */
memcpy(tt_buff, req_dst_orig_node->tt_buff,
req_dst_orig_node->tt_buff_len);
spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
} else {
- tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
- sizeof(struct tt_change);
+ tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
+ tt_len *= sizeof(struct batadv_tt_change);
ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
- skb = tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_global_hash,
- primary_if, tt_global_valid_entry,
- req_dst_orig_node);
+ skb = batadv_tt_response_fill_table(tt_len, ttvn,
+ bat_priv->tt_global_hash,
+ primary_if,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
if (!skb)
goto out;
- tt_response = (struct tt_query_packet *)skb->data;
+ tt_response = (struct batadv_tt_query_packet *)skb->data;
}
- tt_response->header.packet_type = BAT_TT_QUERY;
- tt_response->header.version = COMPAT_VERSION;
- tt_response->header.ttl = TTL;
+ tt_response->header.packet_type = BATADV_TT_QUERY;
+ tt_response->header.version = BATADV_COMPAT_VERSION;
+ tt_response->header.ttl = BATADV_TTL;
memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = TT_RESPONSE;
+ tt_response->flags = BATADV_TT_RESPONSE;
if (full_table)
- tt_response->flags |= TT_FULL_TABLE;
+ tt_response->flags |= BATADV_TT_FULL_TABLE;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
- res_dst_orig_node->orig, neigh_node->addr,
- req_dst_orig_node->orig, req_ttvn);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
+ res_dst_orig_node->orig, neigh_node->addr,
+ req_dst_orig_node->orig, req_ttvn);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@@ -1489,114 +1635,122 @@ unlock:
out:
if (res_dst_orig_node)
- orig_node_free_ref(res_dst_orig_node);
+ batadv_orig_node_free_ref(res_dst_orig_node);
if (req_dst_orig_node)
- orig_node_free_ref(req_dst_orig_node);
+ batadv_orig_node_free_ref(req_dst_orig_node);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (!ret)
kfree_skb(skb);
return ret;
}
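One hunk above is a behavioural fix, not just renaming: tt_data travels in network byte order, so the host-order tt_crc must go through htons() before the comparison, otherwise the check spuriously fails on little-endian machines and the response is never sent. A userspace illustration (on big-endian hosts both lines print "match"):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t crc_host = 0x1234;        /* host-order CRC */
        uint16_t wire = htons(crc_host);   /* as carried in tt_data */

        /* wrong on little-endian: compares 0x3412 against 0x1234 */
        printf("raw  : %s\n", wire == crc_host ? "match" : "mismatch");
        /* right: convert before comparing */
        printf("htons: %s\n", wire == htons(crc_host) ? "match" : "mismatch");
        return 0;
}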
-static bool send_my_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+
+static bool
+batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
uint8_t my_ttvn, req_ttvn, ttvn;
int ret = false;
unsigned char *tt_buff;
bool full_table;
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_response;
+ struct batadv_tt_query_packet *tt_response;
+ size_t len;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
- tt_request->src, tt_request->ttvn,
- (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
+ tt_request->src, tt_request->ttvn,
+ (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
req_ttvn = tt_request->ttvn;
- orig_node = orig_hash_find(bat_priv, tt_request->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
if (!orig_node)
goto out;
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* If the full table has been explicitly requested or the gap
- * is too big send the whole local translation table */
- if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
+ * is too big, send the whole local translation table
+ */
+ if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
!bat_priv->tt_buff)
full_table = true;
else
full_table = false;
/* In this version, fragmentation is not implemented, so
- * I'll send only one packet with as much TT entries as I can */
+ * I'll send only one packet with as many TT entries as I can
+ */
if (!full_table) {
spin_lock_bh(&bat_priv->tt_buff_lock);
tt_len = bat_priv->tt_buff_len;
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
- tt_len + ETH_HLEN);
+ len = sizeof(*tt_response) + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet) + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+ len);
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
- tt_buff = skb->data + sizeof(struct tt_query_packet);
+ tt_buff = skb->data + sizeof(*tt_response);
memcpy(tt_buff, bat_priv->tt_buff,
bat_priv->tt_buff_len);
spin_unlock_bh(&bat_priv->tt_buff_lock);
} else {
- tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
- sizeof(struct tt_change);
+ tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+ tt_len *= sizeof(struct batadv_tt_change);
ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
- skb = tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_local_hash,
- primary_if, tt_local_valid_entry,
- NULL);
+ skb = batadv_tt_response_fill_table(tt_len, ttvn,
+ bat_priv->tt_local_hash,
+ primary_if,
+ batadv_tt_local_valid_entry,
+ NULL);
if (!skb)
goto out;
- tt_response = (struct tt_query_packet *)skb->data;
+ tt_response = (struct batadv_tt_query_packet *)skb->data;
}
- tt_response->header.packet_type = BAT_TT_QUERY;
- tt_response->header.version = COMPAT_VERSION;
- tt_response->header.ttl = TTL;
+ tt_response->header.packet_type = BATADV_TT_QUERY;
+ tt_response->header.version = BATADV_COMPAT_VERSION;
+ tt_response->header.ttl = BATADV_TTL;
memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = TT_RESPONSE;
+ tt_response->flags = BATADV_TT_RESPONSE;
if (full_table)
- tt_response->flags |= TT_FULL_TABLE;
+ tt_response->flags |= BATADV_TT_FULL_TABLE;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_RESPONSE to %pM via %pM [%c]\n",
- orig_node->orig, neigh_node->addr,
- (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE to %pM via %pM [%c]\n",
+ orig_node->orig, neigh_node->addr,
+ (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@@ -1604,49 +1758,50 @@ unlock:
spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (!ret)
kfree_skb(skb);
/* This packet was for me, so it doesn't need to be re-routed */
return true;
}
-bool send_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- if (is_my_mac(tt_request->dst)) {
+ if (batadv_is_my_mac(tt_request->dst)) {
/* don't answer backbone gws! */
- if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
return true;
- return send_my_tt_response(bat_priv, tt_request);
+ return batadv_send_my_tt_response(bat_priv, tt_request);
} else {
- return send_other_tt_response(bat_priv, tt_request);
+ return batadv_send_other_tt_response(bat_priv, tt_request);
}
}
-static void _tt_update_changes(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct tt_change *tt_change,
- uint16_t tt_num_changes, uint8_t ttvn)
+static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tt_change *tt_change,
+ uint16_t tt_num_changes, uint8_t ttvn)
{
int i;
+ int roams;
for (i = 0; i < tt_num_changes; i++) {
- if ((tt_change + i)->flags & TT_CLIENT_DEL)
- tt_global_del(bat_priv, orig_node,
- (tt_change + i)->addr,
- "tt removed by changes",
- (tt_change + i)->flags & TT_CLIENT_ROAM);
- else
- if (!tt_global_add(bat_priv, orig_node,
- (tt_change + i)->addr, ttvn, false,
- (tt_change + i)->flags &
- TT_CLIENT_WIFI))
+ if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
+ roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_del(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ "tt removed by changes",
+ roams);
+ } else {
+ if (!batadv_tt_global_add(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ (tt_change + i)->flags, ttvn))
/* In case of a problem while storing a
* global_entry, we stop the updating
* procedure without committing the
@@ -1654,25 +1809,27 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
* corrupted data on tt_request
*/
return;
+ }
}
orig_node->tt_initialised = true;
}
-static void tt_fill_gtable(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response)
+static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response)
{
- struct orig_node *orig_node = NULL;
+ struct batadv_orig_node *orig_node = NULL;
- orig_node = orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
/* Purge the old table first.. */
- tt_global_del_orig(bat_priv, orig_node, "Received full table");
+ batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
- _tt_update_changes(bat_priv, orig_node,
- (struct tt_change *)(tt_response + 1),
- tt_response->tt_data, tt_response->ttvn);
+ _batadv_tt_update_changes(bat_priv, orig_node,
+ (struct batadv_tt_change *)(tt_response + 1),
+ ntohs(tt_response->tt_data),
+ tt_response->ttvn);
spin_lock_bh(&orig_node->tt_buff_lock);
kfree(orig_node->tt_buff);
@@ -1684,71 +1841,76 @@ static void tt_fill_gtable(struct bat_priv *bat_priv,
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-static void tt_update_changes(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change)
+static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint16_t tt_num_changes, uint8_t ttvn,
+ struct batadv_tt_change *tt_change)
{
- _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
- ttvn);
+ _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
+ tt_num_changes, ttvn);
- tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
- tt_num_changes);
+ batadv_tt_save_orig_buffer(bat_priv, orig_node,
+ (unsigned char *)tt_change, tt_num_changes);
atomic_set(&orig_node->last_ttvn, ttvn);
}
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
{
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
bool ret = false;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (!tt_local_entry)
goto out;
/* Check if the client has been logically deleted (but is kept for
- * consistency purpose) */
- if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
+ * consistency purposes)
+ */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
goto out;
ret = true;
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return ret;
}
-void handle_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response)
+void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response)
{
- struct tt_req_node *node, *safe;
- struct orig_node *orig_node = NULL;
+ struct batadv_tt_req_node *node, *safe;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_tt_change *tt_change;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
- tt_response->src, tt_response->ttvn, tt_response->tt_data,
- (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
+ tt_response->src, tt_response->ttvn,
+ ntohs(tt_response->tt_data),
+ (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
/* we should have never asked a backbone gw */
- if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
goto out;
- orig_node = orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
- if (tt_response->flags & TT_FULL_TABLE)
- tt_fill_gtable(bat_priv, tt_response);
- else
- tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
- tt_response->ttvn,
- (struct tt_change *)(tt_response + 1));
+ if (tt_response->flags & BATADV_TT_FULL_TABLE) {
+ batadv_tt_fill_gtable(bat_priv, tt_response);
+ } else {
+ tt_change = (struct batadv_tt_change *)(tt_response + 1);
+ batadv_tt_update_changes(bat_priv, orig_node,
+ ntohs(tt_response->tt_data),
+ tt_response->ttvn, tt_change);
+ }
/* Delete the tt_req_node from the pending tt_requests list */
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (!compare_eth(node->addr, tt_response->src))
+ if (!batadv_compare_eth(node->addr, tt_response->src))
continue;
list_del(&node->list);
kfree(node);
@@ -1756,31 +1918,36 @@ void handle_tt_response(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->tt_req_list_lock);
/* Recalculate the CRC for this orig_node and store it */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+ orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
/* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
+ * unset the flag
+ */
orig_node->tt_poss_change = false;
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-int tt_init(struct bat_priv *bat_priv)
+int batadv_tt_init(struct batadv_priv *bat_priv)
{
- if (!tt_local_init(bat_priv))
- return 0;
+ int ret;
- if (!tt_global_init(bat_priv))
- return 0;
+ ret = batadv_tt_local_init(bat_priv);
+ if (ret < 0)
+ return ret;
- tt_start_timer(bat_priv);
+ ret = batadv_tt_global_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ batadv_tt_start_timer(bat_priv);
return 1;
}
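The hash init helpers move from the old 1-on-success/0-on-failure convention to the usual kernel style of 0 or a negative errno, which lets callers propagate the error code unchanged (batadv_tt_init itself still reports overall success as 1 at this stage). A minimal userspace sketch of the new pattern:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int table_init(void **table)
{
        *table = malloc(1024);
        if (!*table)
                return -ENOMEM;   /* was: return 0 */
        return 0;                 /* was: return 1 */
}

static int tt_init(void **local, void **global)
{
        int ret;

        ret = table_init(local);
        if (ret < 0)
                return ret;       /* the errno propagates unchanged */
        return table_init(global);
}

int main(void)
{
        void *local = NULL, *global = NULL;

        printf("tt_init: %d\n", tt_init(&local, &global));   /* 0 = ok */
        free(local);
        free(global);
        return 0;
}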
-static void tt_roam_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
- struct tt_roam_node *node, *safe;
+ struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
@@ -1792,13 +1959,14 @@ static void tt_roam_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}
-static void tt_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
{
- struct tt_roam_node *node, *safe;
+ struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
- if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
+ if (!batadv_has_timed_out(node->first_time,
+ BATADV_ROAMING_MAX_TIME))
continue;
list_del(&node->list);
@@ -1811,24 +1979,27 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
* maximum number of possible roaming phases. In this case the ROAMING_ADV
* will not be sent.
*
- * returns true if the ROAMING_ADV can be sent, false otherwise */
-static bool tt_check_roam_count(struct bat_priv *bat_priv,
- uint8_t *client)
+ * returns true if the ROAMING_ADV can be sent, false otherwise
+ */
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
+ uint8_t *client)
{
- struct tt_roam_node *tt_roam_node;
+ struct batadv_tt_roam_node *tt_roam_node;
bool ret = false;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
-	/* The new tt_req will be issued only if I'm not waiting for a
-	 * reply from the same orig_node yet */
+	/* The new ROAMING_ADV will be issued only if the client has not
+	 * exceeded its roaming count within ROAMING_MAX_TIME
+	 */
list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
- if (!compare_eth(tt_roam_node->addr, client))
+ if (!batadv_compare_eth(tt_roam_node->addr, client))
continue;
- if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
+ if (batadv_has_timed_out(tt_roam_node->first_time,
+ BATADV_ROAMING_MAX_TIME))
continue;
- if (!atomic_dec_not_zero(&tt_roam_node->counter))
+ if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
/* Sorry, you roamed too many times! */
goto unlock;
ret = true;
@@ -1841,7 +2012,8 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
goto unlock;
tt_roam_node->first_time = jiffies;
- atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+ atomic_set(&tt_roam_node->counter,
+ BATADV_ROAMING_MAX_COUNT - 1);
memcpy(tt_roam_node->addr, client, ETH_ALEN);
list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
@@ -1853,97 +2025,103 @@ unlock:
return ret;
}
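
The suppression above hinges on batadv_atomic_dec_not_zero(): each roaming client carries a counter, initialised to BATADV_ROAMING_MAX_COUNT - 1, which every further roaming event decrements until it bottoms out at zero and the advertisement is dropped. A userspace sketch of that budget (plain int instead of atomic_t, so the kernel's concurrency guarantee is deliberately omitted):

#include <stdbool.h>
#include <stdio.h>

/* decrement *v unless it already reached zero; false = budget exhausted */
static bool dec_not_zero(int *v)
{
	if (*v == 0)
		return false;
	(*v)--;
	return true;
}

int main(void)
{
	int counter = 3;	/* stands in for BATADV_ROAMING_MAX_COUNT - 1 */
	int i;

	for (i = 1; i <= 5; i++)
		printf("roam %d: %s\n", i,
		       dec_not_zero(&counter) ? "advertise" : "suppressed");
	return 0;
}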
-static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node)
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ struct batadv_orig_node *orig_node)
{
- struct neigh_node *neigh_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
struct sk_buff *skb = NULL;
- struct roam_adv_packet *roam_adv_packet;
+ struct batadv_roam_adv_packet *roam_adv_packet;
int ret = 1;
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
+ size_t len = sizeof(*roam_adv_packet);
/* before going on we have to check whether the client has
- * already roamed to us too many times */
- if (!tt_check_roam_count(bat_priv, client))
+ * already roamed to us too many times
+ */
+ if (!batadv_tt_check_roam_count(bat_priv, client))
goto out;
- skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
+ skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
- sizeof(struct roam_adv_packet));
+ roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
- roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
- roam_adv_packet->header.version = COMPAT_VERSION;
- roam_adv_packet->header.ttl = TTL;
- primary_if = primary_if_get_selected(bat_priv);
+ roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
+ roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
+ roam_adv_packet->header.ttl = BATADV_TTL;
+ roam_adv_packet->reserved = 0;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
memcpy(roam_adv_packet->client, client, ETH_ALEN);
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
- orig_node->orig, client, neigh_node->addr);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
+ orig_node->orig, client, neigh_node->addr);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (ret)
kfree_skb(skb);
return;
}
-static void tt_purge(struct work_struct *work)
+static void batadv_tt_purge(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, tt_work);
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
- tt_local_purge(bat_priv);
- tt_global_roam_purge(bat_priv);
- tt_req_purge(bat_priv);
- tt_roam_purge(bat_priv);
+ delayed_work = container_of(work, struct delayed_work, work);
+ bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
- tt_start_timer(bat_priv);
+ batadv_tt_local_purge(bat_priv);
+ batadv_tt_global_roam_purge(bat_priv);
+ batadv_tt_req_purge(bat_priv);
+ batadv_tt_roam_purge(bat_priv);
+
+ batadv_tt_start_timer(bat_priv);
}
-void tt_free(struct bat_priv *bat_priv)
+void batadv_tt_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->tt_work);
- tt_local_table_free(bat_priv);
- tt_global_table_free(bat_priv);
- tt_req_list_free(bat_priv);
- tt_changes_list_free(bat_priv);
- tt_roam_list_free(bat_priv);
+ batadv_tt_local_table_free(bat_priv);
+ batadv_tt_global_table_free(bat_priv);
+ batadv_tt_req_list_free(bat_priv);
+ batadv_tt_changes_list_free(bat_priv);
+ batadv_tt_roam_list_free(bat_priv);
kfree(bat_priv->tt_buff);
}
/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries */
-static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
- bool enable)
+ * in the given hash table and returns the number of modified entries
+ */
+static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
+ uint16_t flags, bool enable)
{
uint32_t i;
uint16_t changed_num = 0;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
if (!hash)
goto out;
@@ -1971,12 +2149,12 @@ out:
return changed_num;
}
-/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
-static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
+/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
+static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_local_entry *tt_local;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1990,103 +2168,149 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
+ hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+ hash_entry) {
+ if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
continue;
- bat_dbg(DBG_TT, bat_priv,
- "Deleting local tt entry (%pM): pending\n",
- tt_common_entry->addr);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting local tt entry (%pM): pending\n",
+ tt_common->addr);
atomic_dec(&bat_priv->num_local_tt);
hlist_del_rcu(node);
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- tt_local_entry_free_ref(tt_local_entry);
+ tt_local = container_of(tt_common,
+ struct batadv_tt_local_entry,
+ common);
+ batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
}
}
-void tt_commit_changes(struct bat_priv *bat_priv)
+static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len)
{
- uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
- TT_CLIENT_NEW, false);
- /* all the reset entries have now to be effectively counted as local
- * entries */
+ uint16_t changed_num = 0;
+
+ if (atomic_read(&bat_priv->tt_local_changes) < 1)
+ return -ENOENT;
+
+ changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+ BATADV_TT_CLIENT_NEW, false);
+
+ /* all reset entries have to be counted as local entries */
atomic_add(changed_num, &bat_priv->num_local_tt);
- tt_local_purge_pending_clients(bat_priv);
+ batadv_tt_local_purge_pending_clients(bat_priv);
+ bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->ttvn);
- bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
- (uint8_t)atomic_read(&bat_priv->ttvn));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local changes committed, updating to ttvn %u\n",
+ (uint8_t)atomic_read(&bat_priv->ttvn));
bat_priv->tt_poss_change = false;
+
+ /* reset the sending counter */
+ atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+
+ return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
+ packet_buff_len, packet_min_len);
+}
+
+/* when calling this function (hard_iface == primary_if) has to be true */
+int batadv_tt_append_diff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff, int *packet_buff_len,
+ int packet_min_len)
+{
+ int tt_num_changes;
+
+ /* if at least one change happened */
+ tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
+ packet_buff_len,
+ packet_min_len);
+
+ /* if the changes have been sent often enough */
+ if ((tt_num_changes < 0) &&
+ (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+ batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
+ packet_min_len, packet_min_len);
+ tt_num_changes = 0;
+ }
+
+ return tt_num_changes;
}
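
batadv_tt_append_diff() thus has three outcomes: a positive count when fresh changes were packed, the negative commit result while an unchanged diff is still within its resend budget, and 0 once the budget is used up and the buffer is shrunk back to the bare packet. A condensed, runnable model of that decision (all helper names hypothetical):

#include <stdio.h>

struct tt_ctx { int append_budget; int pending_changes; };

static int commit_changes(struct tt_ctx *c)	/* negative when nothing changed */
{
	return c->pending_changes ? c->pending_changes : -1;
}

static int dec_not_zero(int *v)			/* 0 once the budget is gone */
{
	return *v ? (*v)-- : 0;
}

static void shrink_to_min(struct tt_ctx *c)	/* drop the stale diff */
{
	(void)c;
}

static int append_diff(struct tt_ctx *c)
{
	int n = commit_changes(c);

	/* nothing new and the previous diff was already sent often enough:
	 * stop appending it and report "no changes" as 0
	 */
	if (n < 0 && !dec_not_zero(&c->append_budget)) {
		shrink_to_min(c);
		n = 0;
	}
	return n;
}

int main(void)
{
	struct tt_ctx c = { .append_budget = 2, .pending_changes = 0 };
	int i;

	for (i = 0; i < 4; i++)
		printf("OGM %d -> %d\n", i, append_diff(&c));	/* -1 -1 0 0 */
	return 0;
}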
-bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst)
{
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
bool ret = false;
if (!atomic_read(&bat_priv->ap_isolation))
goto out;
- tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
if (!tt_local_entry)
goto out;
- tt_global_entry = tt_global_hash_find(bat_priv, src);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
if (!tt_global_entry)
goto out;
- if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
+ if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
ret = true;
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return ret;
}
-void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc)
+void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc)
{
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
bool full_table = true;
+ struct batadv_tt_change *tt_change;
/* don't care about a backbone gateway's updates. */
- if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
return;
/* orig table not initialised AND first diff is in the OGM OR the ttvn
- * increased by one -> we can apply the attached changes */
+ * increased by one -> we can apply the attached changes
+ */
if ((!orig_node->tt_initialised && ttvn == 1) ||
ttvn - orig_ttvn == 1) {
/* the OGM could not contain the changes due to their size or
- * because they have already been sent TT_OGM_APPEND_MAX times.
- * In this case send a tt request */
+ * because they have already been sent BATADV_TT_OGM_APPEND_MAX
+ * times.
+ * In this case send a tt request
+ */
if (!tt_num_changes) {
full_table = false;
goto request_table;
}
- tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
- (struct tt_change *)tt_buff);
+ tt_change = (struct batadv_tt_change *)tt_buff;
+ batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
+ ttvn, tt_change);
/* Even if we received the precomputed crc with the OGM, we
* prefer to recompute it to spot any possible inconsistency
- * in the global table */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+ * in the global table
+ */
+ orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
/* The ttvn alone is not enough to guarantee consistency
* because a single value could represent different states
@@ -2095,26 +2319,28 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
* consistent or not. E.g. a node could disconnect while its
* ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
* checking the CRC value is mandatory to detect the
- * inconsistency */
+ * inconsistency
+ */
if (orig_node->tt_crc != tt_crc)
goto request_table;
/* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
+ * unset the flag
+ */
orig_node->tt_poss_change = false;
} else {
/* if we missed more than one change or our tables are not
- * in sync anymore -> request fresh tt data */
-
+ * in sync anymore -> request fresh tt data
+ */
if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
orig_node->tt_crc != tt_crc) {
request_table:
- bat_dbg(DBG_TT, bat_priv,
- "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
- orig_node->orig, ttvn, orig_ttvn, tt_crc,
- orig_node->tt_crc, tt_num_changes);
- send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
- full_table);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
+ orig_node->orig, ttvn, orig_ttvn, tt_crc,
+ orig_node->tt_crc, tt_num_changes);
+ batadv_send_tt_request(bat_priv, orig_node, ttvn,
+ tt_crc, full_table);
return;
}
}
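
The else branch boils down to a three-step consistency test: an uninitialised table, a ttvn mismatch, or a CRC mismatch at an identical ttvn all force a full request, because (as the comment notes) an 8-bit version counter can wrap back to the same value while the table contents diverge. The same logic as a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

/* mirrors the request_table condition in batadv_tt_update_orig() */
static bool need_full_request(bool initialised, uint8_t ttvn,
			      uint8_t orig_ttvn, uint16_t crc,
			      uint16_t orig_crc)
{
	if (!initialised)
		return true;		/* never synced with this originator */
	if (ttvn != orig_ttvn)
		return true;		/* missed at least one diff */
	return crc != orig_crc;		/* same ttvn, different contents */
}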
@@ -2124,17 +2350,18 @@ request_table:
 * originator to another one. This entry is still kept for consistency
* purposes
*/
-bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ uint8_t *addr)
{
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
bool ret = false;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
- ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
- tt_global_entry_free_ref(tt_global_entry);
+ ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_entry_free_ref(tt_global_entry);
out:
return ret;
}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c43374dc364..ffa87355096 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -16,44 +15,50 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-int tt_len(int changes_num);
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len);
-int tt_init(struct bat_priv *bat_priv);
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex);
-void tt_local_remove(struct bat_priv *bat_priv,
- const uint8_t *addr, const char *message, bool roaming);
-int tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, int tt_buff_len);
-int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *addr, uint8_t ttvn, bool roaming,
- bool wifi);
-int tt_global_seq_print_text(struct seq_file *seq, void *offset);
-void tt_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const char *message);
-struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *src, const uint8_t *addr);
-uint16_t tt_local_crc(struct bat_priv *bat_priv);
-void tt_free(struct bat_priv *bat_priv);
-bool send_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request);
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
-void handle_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response);
-void tt_commit_changes(struct bat_priv *bat_priv);
-bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
-void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc);
-bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
+int batadv_tt_len(int changes_num);
+int batadv_tt_init(struct batadv_priv *bat_priv);
+void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex);
+void batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ const uint8_t *addr, const char *message,
+ bool roaming);
+int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, int tt_buff_len);
+int batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr, uint8_t flags,
+ uint8_t ttvn);
+int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const char *message);
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const uint8_t *src,
+ const uint8_t *addr);
+void batadv_tt_free(struct batadv_priv *bat_priv);
+bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
+void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst);
+void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc);
+int batadv_tt_append_diff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff, int *packet_buff_len,
+ int packet_min_len);
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ uint8_t *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 61308e8016f..12635fd2c3d 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,24 +15,20 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
-
#ifndef _NET_BATMAN_ADV_TYPES_H_
#define _NET_BATMAN_ADV_TYPES_H_
#include "packet.h"
#include "bitarray.h"
+#include <linux/kernel.h>
-#define BAT_HEADER_LEN (ETH_HLEN + \
- ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
- sizeof(struct unicast_packet) : \
- sizeof(struct bcast_packet))))
-
+#define BATADV_HEADER_LEN \
+ (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
+ sizeof(struct batadv_bcast_packet)))
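
Replacing the open-coded ternary with max() is why <linux/kernel.h> is now included; besides being shorter, the kernel macro evaluates each argument once and warns when the two operand types differ. A GNU-C sketch of the underlying pattern (statement expressions, so gcc/clang only; the in-tree definition may differ in detail):

#define my_max(x, y) ({				\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(void)(&_x == &_y);	/* compiler warning if types differ */	\
	_x > _y ? _x : _y;			\
})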
-struct hard_iface {
+struct batadv_hard_iface {
struct list_head list;
int16_t if_num;
char if_status;
@@ -50,7 +45,7 @@ struct hard_iface {
};
/**
- * orig_node - structure for orig_list maintaining nodes of mesh
+ * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
 * @primary_addr: host's primary interface address
* @last_seen: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
@@ -64,10 +59,10 @@ struct hard_iface {
* @candidates: how many candidates are available
* @selected: next bonding candidate
*/
-struct orig_node {
+struct batadv_orig_node {
uint8_t orig[ETH_ALEN];
uint8_t primary_addr[ETH_ALEN];
- struct neigh_node __rcu *router; /* rcu protected pointer */
+ struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
unsigned long *bcast_own;
uint8_t *bcast_own_sum;
unsigned long last_seen;
@@ -86,11 +81,12 @@ struct orig_node {
* If true, then I sent a Roaming_adv to this orig_node and I have to
* inspect every packet directed to it to check whether it is still
* the true destination or not. This flag will be reset to false as
- * soon as I receive a new TTVN from this orig_node */
+ * soon as I receive a new TTVN from this orig_node
+ */
bool tt_poss_change;
uint32_t last_real_seqno;
uint8_t last_ttl;
- DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
+ DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
uint32_t last_bcast_seqno;
struct hlist_head neigh_list;
struct list_head frag_list;
@@ -98,10 +94,11 @@ struct orig_node {
atomic_t refcount;
struct rcu_head rcu;
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
unsigned long last_frag_packet;
/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
- * neigh_node->real_bits, neigh_node->real_packet_count */
+ * neigh_node->real_bits, neigh_node->real_packet_count
+ */
spinlock_t ogm_cnt_lock;
/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
spinlock_t bcast_seqno_lock;
@@ -110,47 +107,63 @@ struct orig_node {
struct list_head bond_list;
};
-struct gw_node {
+struct batadv_gw_node {
struct hlist_node list;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
unsigned long deleted;
atomic_t refcount;
struct rcu_head rcu;
};
-/**
- * neigh_node
+/* batadv_neigh_node
* @last_seen: when last packet via this neighbor was received
*/
-struct neigh_node {
+struct batadv_neigh_node {
struct hlist_node list;
uint8_t addr[ETH_ALEN];
uint8_t real_packet_count;
- uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
+ uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
uint8_t tq_index;
uint8_t tq_avg;
uint8_t last_ttl;
struct list_head bonding_list;
unsigned long last_seen;
- DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
+ DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
atomic_t refcount;
struct rcu_head rcu;
- struct orig_node *orig_node;
- struct hard_iface *if_incoming;
+ struct batadv_orig_node *orig_node;
+ struct batadv_hard_iface *if_incoming;
spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
};
#ifdef CONFIG_BATMAN_ADV_BLA
-struct bcast_duplist_entry {
+struct batadv_bcast_duplist_entry {
uint8_t orig[ETH_ALEN];
uint16_t crc;
unsigned long entrytime;
};
#endif
-struct bat_priv {
+enum batadv_counters {
+ BATADV_CNT_FORWARD,
+ BATADV_CNT_FORWARD_BYTES,
+ BATADV_CNT_MGMT_TX,
+ BATADV_CNT_MGMT_TX_BYTES,
+ BATADV_CNT_MGMT_RX,
+ BATADV_CNT_MGMT_RX_BYTES,
+ BATADV_CNT_TT_REQUEST_TX,
+ BATADV_CNT_TT_REQUEST_RX,
+ BATADV_CNT_TT_RESPONSE_TX,
+ BATADV_CNT_TT_RESPONSE_RX,
+ BATADV_CNT_TT_ROAM_ADV_TX,
+ BATADV_CNT_TT_ROAM_ADV_RX,
+ BATADV_CNT_NUM,
+};
+
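
The new bat_counters array gives every CPU its own block of BATADV_CNT_NUM 64-bit slots, so hot-path increments such as the BATADV_CNT_TT_ROAM_ADV_TX bump earlier in this diff never contend on a shared cache line; totals are folded together only when read out. A sketch of the expected accessor pair (the real helpers live in main.h/main.c; their exact names and allocation layout are assumptions here):

/* hot path: bump one statistic on the local CPU */
static inline void counter_add(struct batadv_priv *bat_priv,
			       size_t idx, size_t count)
{
	this_cpu_add(bat_priv->bat_counters[idx], count);
}

/* slow path: fold all per-CPU slots into one total */
static uint64_t counter_sum(struct batadv_priv *bat_priv, size_t idx)
{
	uint64_t sum = 0;
	uint64_t *counters;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}
	return sum;
}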
+struct batadv_priv {
atomic_t mesh_state;
struct net_device_stats stats;
+ uint64_t __percpu *bat_counters; /* Per cpu counters */
atomic_t aggregated_ogms; /* boolean */
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
@@ -174,10 +187,11 @@ struct bat_priv {
* If true, then I received a Roaming_adv and I have to inspect every
* packet directed to me to check whether I am still the true
* destination or not. This flag will be reset to false as soon as I
- * increase my TTVN */
+ * increase my TTVN
+ */
bool tt_poss_change;
char num_ifaces;
- struct debug_log *debug_log;
+ struct batadv_debug_log *debug_log;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
@@ -185,20 +199,20 @@ struct bat_priv {
struct hlist_head gw_list;
 struct list_head tt_changes_list; /* tracks changes in an OGM interval */
struct list_head vis_send_list;
- struct hashtable_t *orig_hash;
- struct hashtable_t *tt_local_hash;
- struct hashtable_t *tt_global_hash;
+ struct batadv_hashtable *orig_hash;
+ struct batadv_hashtable *tt_local_hash;
+ struct batadv_hashtable *tt_global_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
- struct hashtable_t *claim_hash;
- struct hashtable_t *backbone_hash;
+ struct batadv_hashtable *claim_hash;
+ struct batadv_hashtable *backbone_hash;
#endif
struct list_head tt_req_list; /* list of pending tt_requests */
struct list_head tt_roam_list;
- struct hashtable_t *vis_hash;
+ struct batadv_hashtable *vis_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
- struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+ struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
- struct bla_claim_dst claim_dest;
+ struct batadv_bla_claim_dst claim_dest;
#endif
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
@@ -210,7 +224,7 @@ struct bat_priv {
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
atomic_t num_local_tt;
/* Checksum of the local table, recomputed before sending a new OGM */
- atomic_t tt_crc;
+ uint16_t tt_crc;
unsigned char *tt_buff;
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff */
@@ -218,29 +232,29 @@ struct bat_priv {
struct delayed_work orig_work;
struct delayed_work vis_work;
struct delayed_work bla_work;
- struct gw_node __rcu *curr_gw; /* rcu protected pointer */
+ struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
atomic_t gw_reselect;
- struct hard_iface __rcu *primary_if; /* rcu protected pointer */
- struct vis_info *my_vis_info;
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
+ struct batadv_vis_info *my_vis_info;
+ struct batadv_algo_ops *bat_algo_ops;
};
-struct socket_client {
+struct batadv_socket_client {
struct list_head queue_list;
unsigned int queue_len;
unsigned char index;
spinlock_t lock; /* protects queue_list, queue_len, index */
wait_queue_head_t queue_wait;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
};
-struct socket_packet {
+struct batadv_socket_packet {
struct list_head list;
size_t icmp_len;
- struct icmp_packet_rr icmp_packet;
+ struct batadv_icmp_packet_rr icmp_packet;
};
-struct tt_common_entry {
+struct batadv_tt_common_entry {
uint8_t addr[ETH_ALEN];
struct hlist_node hash_entry;
uint16_t flags;
@@ -248,31 +262,31 @@ struct tt_common_entry {
struct rcu_head rcu;
};
-struct tt_local_entry {
- struct tt_common_entry common;
+struct batadv_tt_local_entry {
+ struct batadv_tt_common_entry common;
unsigned long last_seen;
};
-struct tt_global_entry {
- struct tt_common_entry common;
+struct batadv_tt_global_entry {
+ struct batadv_tt_common_entry common;
struct hlist_head orig_list;
spinlock_t list_lock; /* protects the list */
unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
};
-struct tt_orig_list_entry {
- struct orig_node *orig_node;
+struct batadv_tt_orig_list_entry {
+ struct batadv_orig_node *orig_node;
uint8_t ttvn;
struct rcu_head rcu;
struct hlist_node list;
};
#ifdef CONFIG_BATMAN_ADV_BLA
-struct backbone_gw {
+struct batadv_backbone_gw {
uint8_t orig[ETH_ALEN];
short vid; /* used VLAN ID */
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
unsigned long lasttime; /* last time we heard of this backbone gw */
atomic_t request_sent;
atomic_t refcount;
@@ -280,10 +294,10 @@ struct backbone_gw {
uint16_t crc; /* crc checksum over all claims */
};
-struct claim {
+struct batadv_claim {
uint8_t addr[ETH_ALEN];
short vid;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
unsigned long lasttime; /* last time we heard of claim (locals only) */
struct rcu_head rcu;
atomic_t refcount;
@@ -291,29 +305,28 @@ struct claim {
};
#endif
-struct tt_change_node {
+struct batadv_tt_change_node {
struct list_head list;
- struct tt_change change;
+ struct batadv_tt_change change;
};
-struct tt_req_node {
+struct batadv_tt_req_node {
uint8_t addr[ETH_ALEN];
unsigned long issued_at;
struct list_head list;
};
-struct tt_roam_node {
+struct batadv_tt_roam_node {
uint8_t addr[ETH_ALEN];
atomic_t counter;
unsigned long first_time;
struct list_head list;
};
-/**
- * forw_packet - structure for forw_list maintaining packets to be
+/* forw_packet - structure for forw_list maintaining packets to be
 * sent/forwarded
*/
-struct forw_packet {
+struct batadv_forw_packet {
struct hlist_node list;
unsigned long send_time;
uint8_t own;
@@ -322,76 +335,76 @@ struct forw_packet {
uint32_t direct_link_flags;
uint8_t num_packets;
struct delayed_work delayed_work;
- struct hard_iface *if_incoming;
+ struct batadv_hard_iface *if_incoming;
};
/* While scanning for vis-entries of a particular vis-originator
* this list collects its interfaces to create a subgraph/cluster
* out of them later
*/
-struct if_list_entry {
+struct batadv_if_list_entry {
uint8_t addr[ETH_ALEN];
bool primary;
struct hlist_node list;
};
-struct debug_log {
- char log_buff[LOG_BUF_LEN];
+struct batadv_debug_log {
+ char log_buff[BATADV_LOG_BUF_LEN];
unsigned long log_start;
unsigned long log_end;
spinlock_t lock; /* protects log_buff, log_start and log_end */
wait_queue_head_t queue_wait;
};
-struct frag_packet_list_entry {
+struct batadv_frag_packet_list_entry {
struct list_head list;
uint16_t seqno;
struct sk_buff *skb;
};
-struct vis_info {
+struct batadv_vis_info {
unsigned long first_seen;
/* list of server-neighbors we received a vis-packet
- * from. we should not reply to them. */
+ * from. we should not reply to them.
+ */
struct list_head recv_list;
struct list_head send_list;
struct kref refcount;
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
- /* vis_info may follow here*/
+ /* vis_info may follow here */
} __packed;
-struct vis_info_entry {
+struct batadv_vis_info_entry {
uint8_t src[ETH_ALEN];
uint8_t dest[ETH_ALEN];
 uint8_t quality; /* quality == 0 marks a TT client entry */
} __packed;
-struct recvlist_node {
+struct batadv_recvlist_node {
struct list_head list;
uint8_t mac[ETH_ALEN];
};
-struct bat_algo_ops {
+struct batadv_algo_ops {
struct hlist_node list;
char *name;
/* init routing info when hard-interface is enabled */
- int (*bat_iface_enable)(struct hard_iface *hard_iface);
+ int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
/* de-init routing info when hard-interface is disabled */
- void (*bat_iface_disable)(struct hard_iface *hard_iface);
+ void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
/* (re-)init mac addresses of the protocol information
* belonging to this hard-interface
*/
- void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
+ void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
/* called when primary interface is selected / changed */
- void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
+ void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
/* prepare a new outgoing OGM for the send queue */
- void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
- int tt_num_changes);
+ void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
/* send scheduled OGM */
- void (*bat_ogm_emit)(struct forw_packet *forw_packet);
+ void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
};
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 74175c21085..00164645b3f 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,19 +29,20 @@
#include "hard-interface.h"
-static struct sk_buff *frag_merge_packet(struct list_head *head,
- struct frag_packet_list_entry *tfp,
- struct sk_buff *skb)
+static struct sk_buff *
+batadv_frag_merge_packet(struct list_head *head,
+ struct batadv_frag_packet_list_entry *tfp,
+ struct sk_buff *skb)
{
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_unicast_frag_packet *up;
struct sk_buff *tmp_skb;
- struct unicast_packet *unicast_packet;
+ struct batadv_unicast_packet *unicast_packet;
int hdr_len = sizeof(*unicast_packet);
int uni_diff = sizeof(*up) - hdr_len;
+ up = (struct batadv_unicast_frag_packet *)skb->data;
/* set skb to the first part and tmp_skb to the second part */
- if (up->flags & UNI_FRAG_HEAD) {
+ if (up->flags & BATADV_UNI_FRAG_HEAD) {
tmp_skb = tfp->skb;
} else {
tmp_skb = skb;
@@ -66,8 +65,9 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
kfree_skb(tmp_skb);
memmove(skb->data + uni_diff, skb->data, hdr_len);
- unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
- unicast_packet->header.packet_type = BAT_UNICAST;
+ unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
+ uni_diff);
+ unicast_packet->header.packet_type = BATADV_UNICAST;
return skb;
@@ -77,11 +77,13 @@ err:
return NULL;
}
-static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
+static void batadv_frag_create_entry(struct list_head *head,
+ struct sk_buff *skb)
{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_frag_packet_list_entry *tfp;
+ struct batadv_unicast_frag_packet *up;
+
+ up = (struct batadv_unicast_frag_packet *)skb->data;
 /* free slots and the oldest packets stand at the end */
tfp = list_entry((head)->prev, typeof(*tfp), list);
@@ -93,15 +95,15 @@ static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
return;
}
-static int frag_create_buffer(struct list_head *head)
+static int batadv_frag_create_buffer(struct list_head *head)
{
int i;
- struct frag_packet_list_entry *tfp;
+ struct batadv_frag_packet_list_entry *tfp;
- for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
+ for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
if (!tfp) {
- frag_list_free(head);
+ batadv_frag_list_free(head);
return -ENOMEM;
}
tfp->skb = NULL;
@@ -113,14 +115,15 @@ static int frag_create_buffer(struct list_head *head)
return 0;
}
-static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
- const struct unicast_frag_packet *up)
+static struct batadv_frag_packet_list_entry *
+batadv_frag_search_packet(struct list_head *head,
+ const struct batadv_unicast_frag_packet *up)
{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *tmp_up = NULL;
+ struct batadv_frag_packet_list_entry *tfp;
+ struct batadv_unicast_frag_packet *tmp_up = NULL;
uint16_t search_seqno;
- if (up->flags & UNI_FRAG_HEAD)
+ if (up->flags & BATADV_UNI_FRAG_HEAD)
search_seqno = ntohs(up->seqno)+1;
else
search_seqno = ntohs(up->seqno)-1;
@@ -133,12 +136,12 @@ static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
if (tfp->seqno == ntohs(up->seqno))
goto mov_tail;
- tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
+ tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
- if ((tmp_up->flags & UNI_FRAG_HEAD) !=
- (up->flags & UNI_FRAG_HEAD))
+ if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
+ (up->flags & BATADV_UNI_FRAG_HEAD))
return tfp;
else
goto mov_tail;
@@ -151,9 +154,9 @@ mov_tail:
return NULL;
}
-void frag_list_free(struct list_head *head)
+void batadv_frag_list_free(struct list_head *head)
{
- struct frag_packet_list_entry *pf, *tmp_pf;
+ struct batadv_frag_packet_list_entry *pf, *tmp_pf;
if (!list_empty(head)) {
@@ -172,64 +175,66 @@ void frag_list_free(struct list_head *head)
* or the skb could be reassembled (skb_new will point to the new packet and
* skb was freed)
*/
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb)
+int batadv_frag_reassemble_skb(struct sk_buff *skb,
+ struct batadv_priv *bat_priv,
+ struct sk_buff **new_skb)
{
- struct orig_node *orig_node;
- struct frag_packet_list_entry *tmp_frag_entry;
+ struct batadv_orig_node *orig_node;
+ struct batadv_frag_packet_list_entry *tmp_frag_entry;
int ret = NET_RX_DROP;
- struct unicast_frag_packet *unicast_packet =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_unicast_frag_packet *unicast_packet;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
*new_skb = NULL;
- orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
if (!orig_node)
goto out;
orig_node->last_frag_packet = jiffies;
if (list_empty(&orig_node->frag_list) &&
- frag_create_buffer(&orig_node->frag_list)) {
+ batadv_frag_create_buffer(&orig_node->frag_list)) {
pr_debug("couldn't create frag buffer\n");
goto out;
}
- tmp_frag_entry = frag_search_packet(&orig_node->frag_list,
- unicast_packet);
+ tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
+ unicast_packet);
if (!tmp_frag_entry) {
- frag_create_entry(&orig_node->frag_list, skb);
+ batadv_frag_create_entry(&orig_node->frag_list, skb);
ret = NET_RX_SUCCESS;
goto out;
}
- *new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry,
- skb);
+ *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
+ tmp_frag_entry, skb);
/* if not, merge failed */
if (*new_skb)
ret = NET_RX_SUCCESS;
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[])
+int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t dstaddr[])
{
- struct unicast_packet tmp_uc, *unicast_packet;
- struct hard_iface *primary_if;
+ struct batadv_unicast_packet tmp_uc, *unicast_packet;
+ struct batadv_hard_iface *primary_if;
struct sk_buff *frag_skb;
- struct unicast_frag_packet *frag1, *frag2;
+ struct batadv_unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(*unicast_packet);
int ucf_hdr_len = sizeof(*frag1);
int data_len = skb->len - uc_hdr_len;
int large_tail = 0, ret = NET_RX_DROP;
uint16_t seqno;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
@@ -238,38 +243,38 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
goto dropped;
skb_reserve(frag_skb, ucf_hdr_len);
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
- if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
- my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
+ if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
+ batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
goto drop_frag;
- frag1 = (struct unicast_frag_packet *)skb->data;
- frag2 = (struct unicast_frag_packet *)frag_skb->data;
+ frag1 = (struct batadv_unicast_frag_packet *)skb->data;
+ frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
frag1->header.ttl--;
- frag1->header.version = COMPAT_VERSION;
- frag1->header.packet_type = BAT_UNICAST_FRAG;
+ frag1->header.version = BATADV_COMPAT_VERSION;
+ frag1->header.packet_type = BATADV_UNICAST_FRAG;
memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(*frag2));
if (data_len & 1)
- large_tail = UNI_FRAG_LARGETAIL;
+ large_tail = BATADV_UNI_FRAG_LARGETAIL;
- frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
frag2->flags = large_tail;
seqno = atomic_add_return(2, &hard_iface->frag_seqno);
frag1->seqno = htons(seqno - 1);
frag2->seqno = htons(seqno);
- send_skb_packet(skb, hard_iface, dstaddr);
- send_skb_packet(frag_skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
ret = NET_RX_SUCCESS;
goto out;
@@ -279,52 +284,53 @@ dropped:
kfree_skb(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
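
Two details of the split are easy to miss: skb_split() leaves the first data_len / 2 payload bytes in the head fragment, so with an odd payload the tail is one byte larger and both headers carry BATADV_UNI_FRAG_LARGETAIL; and the fragment pair claims two adjacent sequence numbers from a single atomic_add_return(2, ...), letting the receiver pair seqno with seqno - 1. The arithmetic, as a runnable sketch with plain integers in place of atomics:

#include <stdio.h>

int main(void)
{
	int data_len = 1401;			/* payload after the unicast header */
	int head_len = data_len / 2;		/* 700: stays in the first skb */
	int tail_len = data_len - head_len;	/* 701: one byte larger when odd */
	int large_tail = data_len & 1;		/* -> BATADV_UNI_FRAG_LARGETAIL */
	static int frag_seqno = 100;		/* per-interface atomic_t in the kernel */
	int seqno = (frag_seqno += 2);		/* atomic_add_return(2, ...) */

	printf("head: %d bytes, seqno %d\n", head_len, seqno - 1);
	printf("tail: %d bytes, seqno %d, largetail=%d\n",
	       tail_len, seqno, large_tail);
	return 0;
}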
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
+int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
- struct neigh_node *neigh_node;
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = 1;
+ unsigned int dev_mtu;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
- orig_node = gw_get_selected_orig(bat_priv);
+ orig_node = batadv_gw_get_selected_orig(bat_priv);
if (orig_node)
goto find_router;
}
/* check for tt host - increases orig_node refcount.
- * returns NULL in case of AP isolation */
- orig_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
+ * returns NULL in case of AP isolation
+ */
+ orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
find_router:
- /**
- * find_router():
+ /* find_router():
* - if orig_node is NULL it returns NULL
* - increases neigh_nodes refcount if found.
*/
- neigh_node = find_router(bat_priv, orig_node, NULL);
+ neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
if (!neigh_node)
goto out;
- if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
+ if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
goto out;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = COMPAT_VERSION;
+ unicast_packet->header.version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->header.packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BATADV_UNICAST;
/* set unicast ttl */
- unicast_packet->header.ttl = TTL;
+ unicast_packet->header.ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -336,28 +342,29 @@ find_router:
* try to reroute it because the ttvn contained in the header is less
* than the current one
*/
- if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
+ dev_mtu = neigh_node->if_incoming->net_dev->mtu;
if (atomic_read(&bat_priv->fragmentation) &&
- data_len + sizeof(*unicast_packet) >
- neigh_node->if_incoming->net_dev->mtu) {
+ data_len + sizeof(*unicast_packet) > dev_mtu) {
/* send frag skb decreases ttl */
unicast_packet->header.ttl++;
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
goto out;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
if (ret == 1)
kfree_skb(skb);
return ret;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index a9faf6b1db1..1c46e2eb1ef 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_UNICAST_H_
@@ -24,33 +22,35 @@
#include "packet.h"
-#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
+#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
+#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[]);
+int batadv_frag_reassemble_skb(struct sk_buff *skb,
+ struct batadv_priv *bat_priv,
+ struct sk_buff **new_skb);
+void batadv_frag_list_free(struct list_head *head);
+int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
+int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t dstaddr[]);
-static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
+static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
{
- const struct unicast_frag_packet *unicast_packet;
+ const struct batadv_unicast_frag_packet *unicast_packet;
int uneven_correction = 0;
unsigned int merged_size;
- unicast_packet = (struct unicast_frag_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
- if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
- if (unicast_packet->flags & UNI_FRAG_HEAD)
+ if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
+ if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
uneven_correction = 1;
else
uneven_correction = -1;
}
merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
- merged_size += sizeof(struct unicast_packet) + uneven_correction;
+ merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
return merged_size <= mtu;
}
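
batadv_frag_can_reassemble() reverses that arithmetic from a single fragment: double its payload, add back the plain unicast header, and correct by +1 for a LARGETAIL head fragment (one byte short of half) or -1 for the tail (one byte over). Continuing the 1401-byte example, checked in a few lines:

#include <stdio.h>

int main(void)
{
	int uc_hdr = 10;	/* illustrative sizeof(struct batadv_unicast_packet) */
	int head_payload = 700;	/* LARGETAIL head fragment from the sketch above */
	int tail_payload = 701;	/* matching tail fragment */

	printf("%d\n", head_payload * 2 + uc_hdr + 1);	/* 1411 = 1401 + header */
	printf("%d\n", tail_payload * 2 + uc_hdr - 1);	/* 1411 again */
	return 0;
}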
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cec216fb77c..2a2ea068146 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -28,16 +26,19 @@
#include "hash.h"
#include "originator.h"
-#define MAX_VIS_PACKET_SIZE 1000
+#define BATADV_MAX_VIS_PACKET_SIZE 1000
-static void start_vis_timer(struct bat_priv *bat_priv);
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
/* free the info */
-static void free_info(struct kref *ref)
+static void batadv_free_info(struct kref *ref)
{
- struct vis_info *info = container_of(ref, struct vis_info, refcount);
- struct bat_priv *bat_priv = info->bat_priv;
- struct recvlist_node *entry, *tmp;
+ struct batadv_vis_info *info;
+ struct batadv_priv *bat_priv;
+ struct batadv_recvlist_node *entry, *tmp;
+
+ info = container_of(ref, struct batadv_vis_info, refcount);
+ bat_priv = info->bat_priv;
list_del_init(&info->send_list);
spin_lock_bh(&bat_priv->vis_list_lock);
@@ -52,29 +53,30 @@ static void free_info(struct kref *ref)
}
/* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(const struct hlist_node *node, const void *data2)
+static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
{
- const struct vis_info *d1, *d2;
- const struct vis_packet *p1, *p2;
+ const struct batadv_vis_info *d1, *d2;
+ const struct batadv_vis_packet *p1, *p2;
- d1 = container_of(node, struct vis_info, hash_entry);
+ d1 = container_of(node, struct batadv_vis_info, hash_entry);
d2 = data2;
- p1 = (struct vis_packet *)d1->skb_packet->data;
- p2 = (struct vis_packet *)d2->skb_packet->data;
- return compare_eth(p1->vis_orig, p2->vis_orig);
+ p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
+ p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
+ return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
}
-/* hash function to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static uint32_t vis_info_choose(const void *data, uint32_t size)
+/* hash function to choose an entry in a hash table of given size
+ * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+ */
+static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
{
- const struct vis_info *vis_info = data;
- const struct vis_packet *packet;
+ const struct batadv_vis_info *vis_info = data;
+ const struct batadv_vis_packet *packet;
const unsigned char *key;
uint32_t hash = 0;
size_t i;
- packet = (struct vis_packet *)vis_info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
key = packet->vis_orig;
for (i = 0; i < ETH_ALEN; i++) {
hash += key[i];
@@ -89,24 +91,24 @@ static uint32_t vis_info_choose(const void *data, uint32_t size)
return hash % size;
}
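
The function above is Bob Jenkins' one-at-a-time hash (the Wikipedia reference in the comment); the hunk elides the mixing steps between the byte loop and the final modulo. For reference, a standalone version keyed on a 6-byte MAC address, assuming the elided in-tree body matches the canonical algorithm:

#include <stddef.h>
#include <stdint.h>

static uint32_t one_at_a_time(const unsigned char *key, size_t len,
			      uint32_t buckets)
{
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		hash += key[i];
		hash += hash << 10;
		hash ^= hash >> 6;
	}
	hash += hash << 3;
	hash ^= hash >> 11;
	hash += hash << 15;

	return hash % buckets;	/* bucket choice, as in batadv_vis_info_choose() */
}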
-static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_vis_info *
+batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct hashtable_t *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct vis_info *vis_info, *vis_info_tmp = NULL;
+ struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
uint32_t index;
if (!hash)
return NULL;
- index = vis_info_choose(data, hash->size);
+ index = batadv_vis_info_choose(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
- if (!vis_info_cmp(node, data))
+ if (!batadv_vis_info_cmp(node, data))
continue;
vis_info_tmp = vis_info;
@@ -118,16 +120,17 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
}
/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list */
-static void vis_data_insert_interface(const uint8_t *interface,
- struct hlist_head *if_list,
- bool primary)
+ * does not already exist in the list
+ */
+static void batadv_vis_data_insert_interface(const uint8_t *interface,
+ struct hlist_head *if_list,
+ bool primary)
{
- struct if_list_entry *entry;
+ struct batadv_if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, if_list, list) {
- if (compare_eth(entry->addr, interface))
+ if (batadv_compare_eth(entry->addr, interface))
return;
}
@@ -140,195 +143,145 @@ static void vis_data_insert_interface(const uint8_t *interface,
hlist_add_head(&entry->list, if_list);
}
-static ssize_t vis_data_read_prim_sec(char *buff,
- const struct hlist_head *if_list)
+static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
+ const struct hlist_head *if_list)
{
- struct if_list_entry *entry;
+ struct batadv_if_list_entry *entry;
struct hlist_node *pos;
- size_t len = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
- len += sprintf(buff + len, "PRIMARY, ");
+ seq_printf(seq, "PRIMARY, ");
else
- len += sprintf(buff + len, "SEC %pM, ", entry->addr);
+ seq_printf(seq, "SEC %pM, ", entry->addr);
}
+}
- return len;
+/* read an entry */
+static ssize_t
+batadv_vis_data_read_entry(struct seq_file *seq,
+ const struct batadv_vis_info_entry *entry,
+ const uint8_t *src, bool primary)
+{
+ if (primary && entry->quality == 0)
+ return seq_printf(seq, "TT %pM, ", entry->dest);
+ else if (batadv_compare_eth(entry->src, src))
+ return seq_printf(seq, "TQ %pM %d, ", entry->dest,
+ entry->quality);
+
+ return 0;
}
-static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
+static void
+batadv_vis_data_insert_interfaces(struct hlist_head *list,
+ struct batadv_vis_packet *packet,
+ struct batadv_vis_info_entry *entries)
{
- struct if_list_entry *entry;
- struct hlist_node *pos;
- size_t count = 0;
+ int i;
- hlist_for_each_entry(entry, pos, if_list, list) {
- if (entry->primary)
- count += 9;
- else
- count += 23;
- }
+ for (i = 0; i < packet->entries; i++) {
+ if (entries[i].quality == 0)
+ continue;
- return count;
+ if (batadv_compare_eth(entries[i].src, packet->vis_orig))
+ continue;
+
+ batadv_vis_data_insert_interface(entries[i].src, list, false);
+ }
}
-/* read an entry */
-static ssize_t vis_data_read_entry(char *buff,
- const struct vis_info_entry *entry,
- const uint8_t *src, bool primary)
+static void batadv_vis_data_read_entries(struct seq_file *seq,
+ struct hlist_head *list,
+ struct batadv_vis_packet *packet,
+ struct batadv_vis_info_entry *entries)
{
- /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
- if (primary && entry->quality == 0)
- return sprintf(buff, "TT %pM, ", entry->dest);
- else if (compare_eth(entry->src, src))
- return sprintf(buff, "TQ %pM %d, ", entry->dest,
- entry->quality);
+ int i;
+ struct batadv_if_list_entry *entry;
+ struct hlist_node *pos;
- return 0;
+ hlist_for_each_entry(entry, pos, list, list) {
+ seq_printf(seq, "%pM,", entry->addr);
+
+ for (i = 0; i < packet->entries; i++)
+ batadv_vis_data_read_entry(seq, &entries[i],
+ entry->addr, entry->primary);
+
+ /* add primary/secondary records */
+ if (batadv_compare_eth(entry->addr, packet->vis_orig))
+ batadv_vis_data_read_prim_sec(seq, list);
+
+ seq_printf(seq, "\n");
+ }
}
-int vis_seq_print_text(struct seq_file *seq, void *offset)
+static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
+ const struct hlist_head *head)
{
- struct hard_iface *primary_if;
struct hlist_node *node;
+ struct batadv_vis_info *info;
+ struct batadv_vis_packet *packet;
+ uint8_t *entries_pos;
+ struct batadv_vis_info_entry *entries;
+ struct batadv_if_list_entry *entry;
+ struct hlist_node *pos, *n;
+
+ HLIST_HEAD(vis_if_list);
+
+ hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
+ entries_pos = (uint8_t *)packet + sizeof(*packet);
+ entries = (struct batadv_vis_info_entry *)entries_pos;
+
+ batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
+ true);
+ batadv_vis_data_insert_interfaces(&vis_if_list, packet,
+ entries);
+ batadv_vis_data_read_entries(seq, &vis_if_list, packet,
+ entries);
+
+ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct batadv_hard_iface *primary_if;
struct hlist_head *head;
- struct vis_info *info;
- struct vis_packet *packet;
- struct vis_info_entry *entries;
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->vis_hash;
- HLIST_HEAD(vis_if_list);
- struct if_list_entry *entry;
- struct hlist_node *pos, *n;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
uint32_t i;
- int j, ret = 0;
+ int ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
- size_t buff_pos, buf_size;
- char *buff;
- int compare;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- if (vis_server == VIS_TYPE_CLIENT_UPDATE)
+ if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
goto out;
- buf_size = 1;
- /* Estimate length */
spin_lock_bh(&bat_priv->vis_hash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(info, node, head, hash_entry) {
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(*packet));
-
- for (j = 0; j < packet->entries; j++) {
- if (entries[j].quality == 0)
- continue;
- compare =
- compare_eth(entries[j].src, packet->vis_orig);
- vis_data_insert_interface(entries[j].src,
- &vis_if_list,
- compare);
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buf_size += 18 + 26 * packet->entries;
-
- /* add primary/secondary records */
- if (compare_eth(entry->addr, packet->vis_orig))
- buf_size +=
- vis_data_count_prim_sec(&vis_if_list);
-
- buf_size += 1;
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
- list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- spin_unlock_bh(&bat_priv->vis_hash_lock);
- ret = -ENOMEM;
- goto out;
- }
- buff[0] = '\0';
- buff_pos = 0;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(info, node, head, hash_entry) {
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(*packet));
-
- for (j = 0; j < packet->entries; j++) {
- if (entries[j].quality == 0)
- continue;
- compare =
- compare_eth(entries[j].src, packet->vis_orig);
- vis_data_insert_interface(entries[j].src,
- &vis_if_list,
- compare);
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buff_pos += sprintf(buff + buff_pos, "%pM,",
- entry->addr);
-
- for (j = 0; j < packet->entries; j++)
- buff_pos += vis_data_read_entry(
- buff + buff_pos,
- &entries[j],
- entry->addr,
- entry->primary);
-
- /* add primary/secondary records */
- if (compare_eth(entry->addr, packet->vis_orig))
- buff_pos +=
- vis_data_read_prim_sec(buff + buff_pos,
- &vis_if_list);
-
- buff_pos += sprintf(buff + buff_pos, "\n");
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
- list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
- rcu_read_unlock();
+ batadv_vis_seq_print_text_bucket(seq, head);
}
-
spin_unlock_bh(&bat_priv->vis_hash_lock);
- seq_printf(seq, "%s", buff);
- kfree(buff);
-
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
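
This refactor is the heart of the vis.c change: the old two-pass scheme (walk the hash once to estimate a buffer size, kmalloc, walk again and sprintf into it) is replaced by seq_file, which sizes and retries its buffer itself. A minimal sketch of the pattern, with illustrative names only:

#include <linux/seq_file.h>
#include <linux/netdevice.h>

/* Sketch: with seq_file there is no manual size estimate. The core
 * may re-invoke the show callback with a larger buffer after an
 * overflow, so the callback must be idempotent.
 */
static int example_seq_show(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = seq->private;

	seq_printf(seq, "device: %s\n", net_dev->name);
	return 0;
}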
/* add the info packet to the send list, if it was not
- * already linked in. */
-static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
+ * already linked in.
+ */
+static void batadv_send_list_add(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
@@ -337,20 +290,21 @@ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
}
/* delete the info packet from the send list, if it was
- * linked in. */
-static void send_list_del(struct vis_info *info)
+ * linked in.
+ */
+static void batadv_send_list_del(struct batadv_vis_info *info)
{
if (!list_empty(&info->send_list)) {
list_del_init(&info->send_list);
- kref_put(&info->refcount, free_info);
+ kref_put(&info->refcount, batadv_free_info);
}
}
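
Both helpers follow the usual kref convention: list membership owns one reference, taken on insertion and dropped on removal, and the release callback frees the object once the last reference is gone. A hedged sketch of that idiom (illustrative structure, not one from this patch):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_info {
	struct kref refcount;
	struct list_head send_list;	/* assumed INIT_LIST_HEAD()ed */
};

static void example_free(struct kref *ref)
{
	kfree(container_of(ref, struct example_info, refcount));
}

static void example_list_add(struct list_head *head, struct example_info *info)
{
	if (list_empty(&info->send_list)) {
		kref_get(&info->refcount);	/* the list holds a reference */
		list_add_tail(&info->send_list, head);
	}
}

static void example_list_del(struct example_info *info)
{
	if (!list_empty(&info->send_list)) {
		list_del_init(&info->send_list);
		kref_put(&info->refcount, example_free);
	}
}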
/* tries to add one entry to the receive list. */
-static void recv_list_add(struct bat_priv *bat_priv,
- struct list_head *recv_list, const char *mac)
+static void batadv_recv_list_add(struct batadv_priv *bat_priv,
+ struct list_head *recv_list, const char *mac)
{
- struct recvlist_node *entry;
+ struct batadv_recvlist_node *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
@@ -363,14 +317,15 @@ static void recv_list_add(struct bat_priv *bat_priv,
}
/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct bat_priv *bat_priv,
- const struct list_head *recv_list, const char *mac)
+static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
+ const struct list_head *recv_list,
+ const char *mac)
{
- const struct recvlist_node *entry;
+ const struct batadv_recvlist_node *entry;
spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry(entry, recv_list, list) {
- if (compare_eth(entry->mac, mac)) {
+ if (batadv_compare_eth(entry->mac, mac)) {
spin_unlock_bh(&bat_priv->vis_list_lock);
return 1;
}
@@ -381,17 +336,21 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
* broken.. ). vis hash must be locked outside. is_new is set when the packet
- * is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len, int *is_new,
- int make_broadcast)
+ * is newer than old entries in the hash.
+ */
+static struct batadv_vis_info *
+batadv_add_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet, int vis_info_len,
+ int *is_new, int make_broadcast)
{
- struct vis_info *info, *old_info;
- struct vis_packet *search_packet, *old_packet;
- struct vis_info search_elem;
- struct vis_packet *packet;
+ struct batadv_vis_info *info, *old_info;
+ struct batadv_vis_packet *search_packet, *old_packet;
+ struct batadv_vis_info search_elem;
+ struct batadv_vis_packet *packet;
+ struct sk_buff *tmp_skb;
int hash_added;
+ size_t len;
+ size_t max_entries;
*is_new = 0;
/* sanity check */
@@ -402,20 +361,23 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
if (!search_elem.skb_packet)
return NULL;
- search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
- sizeof(*search_packet));
+ len = sizeof(*search_packet);
+ tmp_skb = search_elem.skb_packet;
+ search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = vis_hash_find(bat_priv, &search_elem);
+ old_info = batadv_vis_hash_find(bat_priv, &search_elem);
kfree_skb(search_elem.skb_packet);
if (old_info) {
- old_packet = (struct vis_packet *)old_info->skb_packet->data;
- if (!seq_after(ntohl(vis_packet->seqno),
- ntohl(old_packet->seqno))) {
+ tmp_skb = old_info->skb_packet;
+ old_packet = (struct batadv_vis_packet *)tmp_skb->data;
+ if (!batadv_seq_after(ntohl(vis_packet->seqno),
+ ntohl(old_packet->seqno))) {
if (old_packet->seqno == vis_packet->seqno) {
- recv_list_add(bat_priv, &old_info->recv_list,
- vis_packet->sender_orig);
+ batadv_recv_list_add(bat_priv,
+ &old_info->recv_list,
+ vis_packet->sender_orig);
return old_info;
} else {
/* newer packet is already in hash. */
@@ -423,52 +385,53 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
}
}
/* remove old entry */
- hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- old_info);
- send_list_del(old_info);
- kref_put(&old_info->refcount, free_info);
+ batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose, old_info);
+ batadv_send_list_del(old_info);
+ kref_put(&old_info->refcount, batadv_free_info);
}
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (!info)
return NULL;
- info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
- ETH_HLEN);
+ len = sizeof(*packet) + vis_info_len;
+ info->skb_packet = dev_alloc_skb(len + ETH_HLEN);
if (!info->skb_packet) {
kfree(info);
return NULL;
}
skb_reserve(info->skb_packet, ETH_HLEN);
- packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
- + vis_info_len);
+ packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
info->bat_priv = bat_priv;
- memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
+ memcpy(packet, vis_packet, len);
/* initialize and add new packet. */
*is_new = 1;
/* Make it a broadcast packet, if required */
if (make_broadcast)
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
+ memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
/* repair if entries is longer than packet. */
- if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
- packet->entries = vis_info_len / sizeof(struct vis_info_entry);
+ max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
+ if (packet->entries > max_entries)
+ packet->entries = max_entries;
- recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
+ batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
- hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- info, &info->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose, info,
+ &info->hash_entry);
if (hash_added != 0) {
/* did not work (for some reason) */
- kref_put(&info->refcount, free_info);
+ kref_put(&info->refcount, batadv_free_info);
info = NULL;
}
@@ -476,37 +439,38 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
}
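
Whether an incoming packet replaces the hashed one hinges on batadv_seq_after(), a wraparound-safe ordering test on 32-bit sequence numbers, the same serial-number-arithmetic trick behind time_after(). A self-contained sketch, assuming unsigned 32-bit seqnos:

#include <linux/types.h>

/* Sketch of a wraparound-safe "is a newer than b?" test for u32
 * sequence numbers: the signed difference orders values correctly
 * as long as they are less than half the number space apart, even
 * across the 0xffffffff -> 0 wrap.
 */
static bool example_seq_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}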
/* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len)
{
- struct vis_info *info;
+ struct batadv_vis_info *info;
int is_new, make_broadcast;
int vis_server = atomic_read(&bat_priv->vis_mode);
- make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
+ make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
spin_lock_bh(&bat_priv->vis_hash_lock);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, make_broadcast);
+ info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, make_broadcast);
if (!info)
goto end;
/* only if we are server ourselves and packet is newer than the one in
- * hash.*/
- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- send_list_add(bat_priv, info);
+ * hash.
+ */
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
+ batadv_send_list_add(bat_priv, info);
end:
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len)
{
- struct vis_info *info;
- struct vis_packet *packet;
+ struct batadv_vis_info *info;
+ struct batadv_vis_packet *packet;
int is_new;
int vis_server = atomic_read(&bat_priv->vis_mode);
int are_target = 0;
@@ -516,28 +480,28 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
return;
/* Are we the target for this VIS packet? */
- if (vis_server == VIS_TYPE_SERVER_SYNC &&
- is_my_mac(vis_packet->target_orig))
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
+ batadv_is_my_mac(vis_packet->target_orig))
are_target = 1;
spin_lock_bh(&bat_priv->vis_hash_lock);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, are_target);
+ info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, are_target);
if (!info)
goto end;
/* note that outdated packets will be dropped at this point. */
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send only if we're the target server or ... */
if (are_target && is_new) {
- packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- send_list_add(bat_priv, info);
+ packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
+ batadv_send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
- } else if (!is_my_mac(packet->target_orig)) {
- send_list_add(bat_priv, info);
+ } else if (!batadv_is_my_mac(packet->target_orig)) {
+ batadv_send_list_add(bat_priv, info);
}
end:
@@ -547,37 +511,38 @@ end:
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
- * Must be called with the originator hash locked */
-static int find_best_vis_server(struct bat_priv *bat_priv,
- struct vis_info *info)
+ * Must be called with the originator hash locked
+ */
+static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
- struct neigh_node *router;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct batadv_neigh_node *router;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct vis_packet *packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_vis_packet *packet;
int best_tq = -1;
uint32_t i;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
- if ((orig_node->flags & VIS_SERVER) &&
+ if ((orig_node->flags & BATADV_VIS_SERVER) &&
(router->tq_avg > best_tq)) {
best_tq = router->tq_avg;
memcpy(packet->target_orig, orig_node->orig,
ETH_ALEN);
}
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
rcu_read_unlock();
}
@@ -586,47 +551,52 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
}
/* Return true if the vis packet is full. */
-static bool vis_packet_full(const struct vis_info *info)
+static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
{
- const struct vis_packet *packet;
- packet = (struct vis_packet *)info->skb_packet->data;
+ const struct batadv_vis_packet *packet;
+ size_t num;
+
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
+ num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
- if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
- < packet->entries + 1)
+ if (num < packet->entries + 1)
return true;
return false;
}
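
batadv_vis_packet_full() now precomputes how many entries fit in the fixed packet budget and reports full once one more would not fit. The same arithmetic as a tiny worked example (the sizes are illustrative, not the real BATADV_MAX_VIS_PACKET_SIZE or entry layout):

#include <linux/types.h>

#define EXAMPLE_MAX_PACKET_SIZE 1000	/* illustrative byte budget */

struct example_entry {
	u8 src[6];
	u8 dest[6];
	u8 quality;
};	/* 13 bytes, so 1000 / 13 == 76 entries fit */

static bool example_packet_full(size_t entries)
{
	size_t num = EXAMPLE_MAX_PACKET_SIZE / sizeof(struct example_entry);

	return num < entries + 1;	/* full if one more will not fit */
}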
/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(struct bat_priv *bat_priv)
+ * returns 0 on success, -1 if no packet could be generated
+ */
+static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct neigh_node *router;
- struct vis_info *info = bat_priv->my_vis_info;
- struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
- struct vis_info_entry *entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router;
+ struct batadv_vis_info *info = bat_priv->my_vis_info;
+ struct batadv_vis_packet *packet;
+ struct batadv_vis_info_entry *entry;
+ struct batadv_tt_common_entry *tt_common_entry;
int best_tq = -1;
uint32_t i;
info->first_seen = jiffies;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
- packet->header.ttl = TTL;
+ memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
+ packet->header.ttl = BATADV_TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
packet->entries = 0;
+ packet->reserved = 0;
skb_trim(info->skb_packet, sizeof(*packet));
- if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
- best_tq = find_best_vis_server(bat_priv, info);
+ if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
+ best_tq = batadv_find_best_vis_server(bat_priv, info);
if (best_tq < 0)
- return -1;
+ return best_tq;
}
for (i = 0; i < hash->size; i++) {
@@ -634,21 +604,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
- if (!compare_eth(router->addr, orig_node->orig))
+ if (!batadv_compare_eth(router->addr, orig_node->orig))
goto next;
- if (router->if_incoming->if_status != IF_ACTIVE)
+ if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
goto next;
if (router->tq_avg < 1)
goto next;
/* fill one entry into buffer. */
- entry = (struct vis_info_entry *)
+ entry = (struct batadv_vis_info_entry *)
skb_put(info->skb_packet, sizeof(*entry));
memcpy(entry->src,
router->if_incoming->net_dev->dev_addr,
@@ -658,9 +628,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
packet->entries++;
next:
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
- if (vis_packet_full(info))
+ if (batadv_vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
@@ -674,7 +644,7 @@ next:
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node, head,
hash_entry) {
- entry = (struct vis_info_entry *)
+ entry = (struct batadv_vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
@@ -682,7 +652,7 @@ next:
entry->quality = 0; /* 0 means TT */
packet->entries++;
- if (vis_packet_full(info))
+ if (batadv_vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
@@ -696,14 +666,15 @@ unlock:
}
/* free old vis packets. Must be called with this vis_hash_lock
- * held */
-static void purge_vis_packets(struct bat_priv *bat_priv)
+ * held
+ */
+static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
{
uint32_t i;
- struct hashtable_t *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct vis_info *info;
+ struct batadv_vis_info *info;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -714,31 +685,32 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
if (info == bat_priv->my_vis_info)
continue;
- if (has_timed_out(info->first_seen, VIS_TIMEOUT)) {
+ if (batadv_has_timed_out(info->first_seen,
+ BATADV_VIS_TIMEOUT)) {
hlist_del(node);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
}
}
}
-static void broadcast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
+static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct neigh_node *router;
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_neigh_node *router;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct vis_packet *packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_vis_packet *packet;
struct sk_buff *skb;
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
uint32_t i;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
for (i = 0; i < hash->size; i++) {
@@ -747,18 +719,19 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
/* if it's a vis server and reachable, send it. */
- if (!(orig_node->flags & VIS_SERVER))
+ if (!(orig_node->flags & BATADV_VIS_SERVER))
continue;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
/* don't send it if we already received the packet from
- * this node. */
- if (recv_list_is_in(bat_priv, &info->recv_list,
- orig_node->orig)) {
- neigh_node_free_ref(router);
+ * this node.
+ */
+ if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
+ orig_node->orig)) {
+ batadv_neigh_node_free_ref(router);
continue;
}
@@ -766,57 +739,59 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
hard_iface = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface,
+ dstaddr);
}
rcu_read_unlock();
}
}
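
Note that the loop transmits skb_clone()d copies: each recipient consumes a clone while the master skb stays attached to the vis_info for later rounds. A hedged sketch of that idiom (send_one() is a hypothetical stand-in for the real transmit helper):

#include <linux/skbuff.h>

/* Hypothetical stand-in for a transmit helper; a real one would hand
 * the skb to a device. Either way it consumes the clone.
 */
static void send_one(struct sk_buff *skb)
{
	kfree_skb(skb);
}

static void example_send_clones(struct sk_buff *master, int ndst)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < ndst; i++) {
		skb = skb_clone(master, GFP_ATOMIC);	/* shares the data */
		if (skb)
			send_one(skb);
	}
}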
-static void unicast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
+static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct orig_node *orig_node;
- struct neigh_node *router = NULL;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router = NULL;
struct sk_buff *skb;
- struct vis_packet *packet;
+ struct batadv_vis_packet *packet;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
- orig_node = orig_hash_find(bat_priv, packet->target_orig);
+ orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
+/* only send one vis packet. called from batadv_send_vis_packets() */
+static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct hard_iface *primary_if;
- struct vis_packet *packet;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_vis_packet *packet;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
if (packet->header.ttl < 2) {
pr_debug("Error - can't send vis packet: ttl exceeded\n");
goto out;
@@ -826,31 +801,31 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
packet->header.ttl--;
if (is_broadcast_ether_addr(packet->target_orig))
- broadcast_vis_packet(bat_priv, info);
+ batadv_broadcast_vis_packet(bat_priv, info);
else
- unicast_vis_packet(bat_priv, info);
+ batadv_unicast_vis_packet(bat_priv, info);
packet->header.ttl++; /* restore TTL */
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/* called from timer; send (and maybe generate) vis packet. */
-static void send_vis_packets(struct work_struct *work)
+static void batadv_send_vis_packets(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, vis_work);
- struct vis_info *info;
+ struct batadv_priv *bat_priv;
+ struct batadv_vis_info *info;
+ bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
spin_lock_bh(&bat_priv->vis_hash_lock);
- purge_vis_packets(bat_priv);
+ batadv_purge_vis_packets(bat_priv);
- if (generate_vis_packet(bat_priv) == 0) {
+ if (batadv_generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- send_list_add(bat_priv, bat_priv->my_vis_info);
+ batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
}
while (!list_empty(&bat_priv->vis_send_list)) {
@@ -860,98 +835,103 @@ static void send_vis_packets(struct work_struct *work)
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);
- send_vis_packet(bat_priv, info);
+ batadv_send_vis_packet(bat_priv, info);
spin_lock_bh(&bat_priv->vis_hash_lock);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
- start_vis_timer(bat_priv);
+ batadv_start_vis_timer(bat_priv);
}
/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(struct bat_priv *bat_priv)
+ * initialized (e.g. bat0 is initialized, interfaces have been added)
+ */
+int batadv_vis_init(struct batadv_priv *bat_priv)
{
- struct vis_packet *packet;
+ struct batadv_vis_packet *packet;
int hash_added;
+ unsigned int len;
+ unsigned long first_seen;
+ struct sk_buff *tmp_skb;
if (bat_priv->vis_hash)
- return 1;
+ return 0;
spin_lock_bh(&bat_priv->vis_hash_lock);
- bat_priv->vis_hash = hash_new(256);
+ bat_priv->vis_hash = batadv_hash_new(256);
if (!bat_priv->vis_hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
- bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+ bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
if (!bat_priv->my_vis_info)
goto err;
- bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
- MAX_VIS_PACKET_SIZE +
- ETH_HLEN);
+ len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
+ bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
if (!bat_priv->my_vis_info->skb_packet)
goto free_info;
skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
- packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
- sizeof(*packet));
+ tmp_skb = bat_priv->my_vis_info->skb_packet;
+ packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
/* prefill the vis info */
- bat_priv->my_vis_info->first_seen = jiffies -
- msecs_to_jiffies(VIS_INTERVAL);
+ first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
+ bat_priv->my_vis_info->first_seen = first_seen;
INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
kref_init(&bat_priv->my_vis_info->refcount);
bat_priv->my_vis_info->bat_priv = bat_priv;
- packet->header.version = COMPAT_VERSION;
- packet->header.packet_type = BAT_VIS;
- packet->header.ttl = TTL;
+ packet->header.version = BATADV_COMPAT_VERSION;
+ packet->header.packet_type = BATADV_VIS;
+ packet->header.ttl = BATADV_TTL;
packet->seqno = 0;
+ packet->reserved = 0;
packet->entries = 0;
INIT_LIST_HEAD(&bat_priv->vis_send_list);
- hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- bat_priv->my_vis_info,
- &bat_priv->my_vis_info->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose,
+ bat_priv->my_vis_info,
+ &bat_priv->my_vis_info->hash_entry);
if (hash_added != 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
- kref_put(&bat_priv->my_vis_info->refcount, free_info);
+ kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
goto err;
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
- start_vis_timer(bat_priv);
- return 1;
+ batadv_start_vis_timer(bat_priv);
+ return 0;
free_info:
kfree(bat_priv->my_vis_info);
bat_priv->my_vis_info = NULL;
err:
spin_unlock_bh(&bat_priv->vis_hash_lock);
- vis_quit(bat_priv);
- return 0;
+ batadv_vis_quit(bat_priv);
+ return -ENOMEM;
}
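
The return contract also changes here: the old vis_init() returned 1 on success and 0 on failure, while batadv_vis_init() follows the usual kernel 0/-errno convention that its callers now check. A sketch of that convention, with illustrative names:

#include <linux/slab.h>
#include <linux/errno.h>

static int example_init(void)
{
	void *buf = kmalloc(64, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;	/* negative errno on failure */

	kfree(buf);
	return 0;		/* zero on success */
}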
/* Decrease the reference count on a hash item info */
-static void free_info_ref(struct hlist_node *node, void *arg)
+static void batadv_free_info_ref(struct hlist_node *node, void *arg)
{
- struct vis_info *info;
+ struct batadv_vis_info *info;
- info = container_of(node, struct vis_info, hash_entry);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ info = container_of(node, struct batadv_vis_info, hash_entry);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
/* shutdown vis-server */
-void vis_quit(struct bat_priv *bat_priv)
+void batadv_vis_quit(struct batadv_priv *bat_priv)
{
if (!bat_priv->vis_hash)
return;
@@ -960,16 +940,16 @@ void vis_quit(struct bat_priv *bat_priv)
spin_lock_bh(&bat_priv->vis_hash_lock);
/* properly remove, kill timers ... */
- hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
+ batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
bat_priv->vis_hash = NULL;
bat_priv->my_vis_info = NULL;
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */
-static void start_vis_timer(struct bat_priv *bat_priv)
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
- queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
- msecs_to_jiffies(VIS_INTERVAL));
+ INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+ msecs_to_jiffies(BATADV_VIS_INTERVAL));
}
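
The timer driving all of this is a delayed_work item that re-arms itself: batadv_send_vis_packets() runs, then calls batadv_start_vis_timer() to queue the next round. A minimal self-rearming worker, assuming a dedicated workqueue created at init (names illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define EXAMPLE_INTERVAL_MS 5000	/* illustrative period */

static struct workqueue_struct *example_wq;	/* assumed created at init */
static struct delayed_work example_work;

static void example_worker(struct work_struct *work)
{
	/* ... do the periodic job ... */

	/* re-arm for the next interval */
	queue_delayed_work(example_wq, &example_work,
			   msecs_to_jiffies(EXAMPLE_INTERVAL_MS));
}

static void example_start_timer(void)
{
	INIT_DELAYED_WORK(&example_work, example_worker);
	queue_delayed_work(example_wq, &example_work,
			   msecs_to_jiffies(EXAMPLE_INTERVAL_MS));
}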
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index ee2e46e5347..84e716ed896 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,23 +15,22 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_VIS_H_
#define _NET_BATMAN_ADV_VIS_H_
-#define VIS_TIMEOUT 200000 /* timeout of vis packets
- * in miliseconds */
+/* timeout of vis packets in milliseconds */
+#define BATADV_VIS_TIMEOUT 200000
-int vis_seq_print_text(struct seq_file *seq, void *offset);
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-int vis_init(struct bat_priv *bat_priv);
-void vis_quit(struct bat_priv *bat_priv);
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len);
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len);
+int batadv_vis_init(struct batadv_priv *bat_priv);
+void batadv_vis_quit(struct batadv_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f5..fa6d94a4602 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
- hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+ a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 00000000000..4ff0bf3ba9a
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/a2mp.h>
+
+/* A2MP build & send command helper functions */
+static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+{
+ struct a2mp_cmd *cmd;
+ int plen;
+
+ plen = sizeof(*cmd) + len;
+ cmd = kzalloc(plen, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->code = code;
+ cmd->ident = ident;
+ cmd->len = cpu_to_le16(len);
+
+ memcpy(cmd->data, data, len);
+
+ return cmd;
+}
+
+static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
+ void *data)
+{
+ struct l2cap_chan *chan = mgr->a2mp_chan;
+ struct a2mp_cmd *cmd;
+ u16 total_len = len + sizeof(*cmd);
+ struct kvec iv;
+ struct msghdr msg;
+
+ cmd = __a2mp_build(code, ident, len, data);
+ if (!cmd)
+ return;
+
+ iv.iov_base = cmd;
+ iv.iov_len = total_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.msg_iov = (struct iovec *) &iv;
+ msg.msg_iovlen = 1;
+
+ l2cap_chan_send(chan, &msg, total_len, 0);
+
+ kfree(cmd);
+}
+
+static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
+{
+ cl->id = 0;
+ cl->type = 0;
+ cl->status = 1;
+}
+
+/* hci_dev_list shall be locked */
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+{
+ int i = 0;
+ struct hci_dev *hdev;
+
+ __a2mp_cl_bredr(cl);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ /* Iterate through AMP controllers */
+ if (hdev->id == HCI_BREDR_ID)
+ continue;
+
+ /* Starting from second entry */
+ if (++i >= num_ctrl)
+ return;
+
+ cl[i].id = hdev->id;
+ cl[i].type = hdev->amp_type;
+ cl[i].status = hdev->amp_status;
+ }
+}
+
+/* Processing A2MP messages */
+static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cmd_rej *rej = (void *) skb->data;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rej))
+ return -EINVAL;
+
+ BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+
+ skb_pull(skb, sizeof(*rej));
+
+ return 0;
+}
+
+static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_req *req = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_discov_rsp *rsp;
+ u16 ext_feat;
+ u8 num_ctrl;
+
+ if (len < sizeof(*req))
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(*req));
+
+ ext_feat = le16_to_cpu(req->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+
+ /* for now, just check that the packet is not broken */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ read_lock(&hci_dev_list_lock);
+
+ num_ctrl = __hci_num_ctrl();
+ len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+ rsp = kmalloc(len, GFP_ATOMIC);
+ if (!rsp) {
+ read_unlock(&hci_dev_list_lock);
+ return -ENOMEM;
+ }
+
+ rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp->ext_feat = 0;
+
+ __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+
+ read_unlock(&hci_dev_list_lock);
+
+ a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+
+ kfree(rsp);
+ return 0;
+}
+
+static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cl *cl = (void *) skb->data;
+
+ while (skb->len >= sizeof(*cl)) {
+ BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO send A2MP_CHANGE_RSP */
+
+ return 0;
+}
+
+static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_req *req = (void *) skb->data;
+ struct a2mp_info_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ hdev = hci_dev_get(req->id);
+ if (hdev && hdev->amp_type != HCI_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->amp_type == HCI_BREDR) {
+ struct a2mp_amp_assoc_rsp rsp;
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+ goto clean;
+ }
+
+ /* Placeholder for HCI Read AMP Assoc */
+
+clean:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev || hdev->amp_type != HCI_AMP) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ /* TODO process physlink create */
+
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+send_rsp:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+ hdev = hci_dev_get(req->local_id);
+ if (!hdev) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ /* TODO Disconnect Phys Link here */
+
+ hci_dev_put(hdev);
+
+send_rsp:
+ a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+/* Handle A2MP signalling */
+static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct a2mp_cmd *hdr = (void *) skb->data;
+ struct amp_mgr *mgr = chan->data;
+ int err = 0;
+
+ amp_mgr_get(mgr);
+
+ while (skb->len >= sizeof(*hdr)) {
+ struct a2mp_cmd *hdr = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+
+ BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+
+ skb_pull(skb, sizeof(*hdr));
+
+ if (len > skb->len || !hdr->ident) {
+ err = -EINVAL;
+ break;
+ }
+
+ mgr->ident = hdr->ident;
+
+ switch (hdr->code) {
+ case A2MP_COMMAND_REJ:
+ a2mp_command_rej(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_REQ:
+ err = a2mp_discover_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_NOTIFY:
+ err = a2mp_change_notify(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_REQ:
+ err = a2mp_getinfo_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_REQ:
+ err = a2mp_getampassoc_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CREATEPHYSLINK_REQ:
+ err = a2mp_createphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCONNPHYSLINK_REQ:
+ err = a2mp_discphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_RSP:
+ case A2MP_DISCOVER_RSP:
+ case A2MP_GETINFO_RSP:
+ case A2MP_GETAMPASSOC_RSP:
+ case A2MP_CREATEPHYSLINK_RSP:
+ case A2MP_DISCONNPHYSLINK_RSP:
+ err = a2mp_cmd_rsp(mgr, skb, hdr);
+ break;
+
+ default:
+ BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (err) {
+ struct a2mp_cmd_rej rej;
+ rej.reason = __constant_cpu_to_le16(0);
+
+ BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+
+ a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+ &rej);
+ }
+
+ /* Always free the skb and return a success code to prevent
+ sending an L2CAP Disconnect over the A2MP channel */
+ kfree_skb(skb);
+
+ amp_mgr_put(mgr);
+
+ return 0;
+}
+
+static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+{
+ l2cap_chan_destroy(chan);
+}
+
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+{
+ struct amp_mgr *mgr = chan->data;
+
+ if (!mgr)
+ return;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(state));
+
+ chan->state = state;
+
+ switch (state) {
+ case BT_CLOSED:
+ if (mgr)
+ amp_mgr_put(mgr);
+ break;
+ }
+}
+
+static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ return bt_skb_alloc(len, GFP_KERNEL);
+}
+
+static struct l2cap_ops a2mp_chan_ops = {
+ .name = "L2CAP A2MP channel",
+ .recv = a2mp_chan_recv_cb,
+ .close = a2mp_chan_close_cb,
+ .state_change = a2mp_chan_state_change_cb,
+ .alloc_skb = a2mp_chan_alloc_skb_cb,
+
+ /* Not implemented for A2MP */
+ .new_connection = l2cap_chan_no_new_connection,
+ .teardown = l2cap_chan_no_teardown,
+ .ready = l2cap_chan_no_ready,
+};
+
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan;
+ int err;
+
+ chan = l2cap_chan_create();
+ if (!chan)
+ return NULL;
+
+ BT_DBG("chan %p", chan);
+
+ chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ chan->ops = &a2mp_chan_ops;
+
+ l2cap_chan_set_defaults(chan);
+ chan->remote_max_tx = chan->max_tx;
+ chan->remote_tx_win = chan->tx_win;
+
+ chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ chan->mode = L2CAP_MODE_ERTM;
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0) {
+ l2cap_chan_del(chan, 0);
+ return NULL;
+ }
+
+ chan->conf_state = 0;
+
+ l2cap_chan_add(conn, chan);
+
+ chan->remote_mps = chan->omtu;
+ chan->mps = chan->omtu;
+
+ chan->state = BT_CONNECTED;
+
+ return chan;
+}
+
+/* AMP Manager functions */
+void amp_mgr_get(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ kref_get(&mgr->kref);
+}
+
+static void amp_mgr_destroy(struct kref *kref)
+{
+ struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+
+ BT_DBG("mgr %p", mgr);
+
+ kfree(mgr);
+}
+
+int amp_mgr_put(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ return kref_put(&mgr->kref, &amp_mgr_destroy);
+}
+
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
+{
+ struct amp_mgr *mgr;
+ struct l2cap_chan *chan;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ BT_DBG("conn %p mgr %p", conn, mgr);
+
+ mgr->l2cap_conn = conn;
+
+ chan = a2mp_chan_open(conn);
+ if (!chan) {
+ kfree(mgr);
+ return NULL;
+ }
+
+ mgr->a2mp_chan = chan;
+ chan->data = mgr;
+
+ conn->hcon->amp_mgr = mgr;
+
+ kref_init(&mgr->kref);
+
+ return mgr;
+}
+
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ mgr = amp_mgr_create(conn);
+ if (!mgr) {
+ BT_ERR("Could not create AMP manager");
+ return NULL;
+ }
+
+ BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+
+ return mgr->a2mp_chan;
+}
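
The command helpers in this new file follow a simple framing pattern: allocate header plus payload in one buffer, fill the a2mp_cmd header, copy the payload behind it, then push the whole thing through the L2CAP channel as a single kvec. A hedged sketch of the framing step, with the struct layout assumed from this patch and illustrative names:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/* One allocation holds the fixed header and the variable payload via
 * a flexible array member, mirroring __a2mp_build() above.
 */
struct example_cmd {
	u8     code;
	u8     ident;
	__le16 len;
	u8     data[];
} __packed;

static struct example_cmd *example_build(u8 code, u8 ident, u16 len,
					 const void *data)
{
	struct example_cmd *cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);

	if (!cmd)
		return NULL;

	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(len);	/* little-endian on the wire */
	memcpy(cmd->data, data, len);

	return cmd;
}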
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc..f7db5792ec6 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
#include <asm/ioctls.h>
-#include <linux/kmod.h>
#include <net/bluetooth/bluetooth.h>
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d65675..4a6620bc157 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
*/
#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
#include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
#include <linux/file.h>
-
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
};
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
BNEP_COMPRESSED
};
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d9..98f86f91d47 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
+#include <linux/export.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
return ETH_P_802_2;
}
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
u16 proto = bnep_net_eth_proto(skb);
struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810..5e5f5b410e0 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
#include "bnep.h"
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed973..5ad7da21747 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
/* Bluetooth HCI connection handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/a2mp.h>
static void hci_le_connect(struct hci_conn *conn)
{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
- cp.scan_interval = cpu_to_le16(0x0060);
- cp.scan_window = cpu_to_le16(0x0030);
+ cp.scan_interval = __constant_cpu_to_le16(0x0060);
+ cp.scan_window = __constant_cpu_to_le16(0x0030);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = cpu_to_le16(0x0028);
- cp.conn_interval_max = cpu_to_le16(0x0038);
- cp.supervision_timeout = cpu_to_le16(0x002a);
- cp.min_ce_len = cpu_to_le16(0x0000);
- cp.max_ce_len = cpu_to_le16(0x0000);
+ cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
+ cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
+ cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
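
The systematic swap to __constant_cpu_to_le16()/__constant_cpu_to_le32() on literal values lets the byte swap be folded at compile time on big-endian builds and keeps sparse happy about constant endian conversions; the wire format is unchanged. Roughly the shape of such a constant swap (a sketch, not the kernel macro):

#include <linux/types.h>

/* Rough shape of a compile-time 16-bit swap for constants; the real
 * __constant_cpu_to_le16() additionally becomes a no-op on
 * little-endian builds.
 */
#define EXAMPLE_CONST_SWAB16(x) ((u16)((((x) & 0x00ffU) << 8) | \
				       (((x) & 0xff00U) >> 8)))

static const u16 example_be_value = EXAMPLE_CONST_SWAB16(0x0060);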
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
+ __constant_cpu_to_le16(0x8000);
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -120,7 +107,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
@@ -133,7 +120,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_DISCONN;
@@ -147,7 +134,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -165,7 +152,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
cp.handle = cpu_to_le16(handle);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.voice_setting = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
- u16 latency, u16 to_multiplier)
+ u16 latency, u16 to_multiplier)
{
struct hci_cp_le_conn_update cp;
struct hci_dev *hdev = conn->hdev;
@@ -197,20 +184,19 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
cp.conn_interval_max = cpu_to_le16(max);
cp.conn_latency = cpu_to_le16(latency);
cp.supervision_timeout = cpu_to_le16(to_multiplier);
- cp.min_ce_len = cpu_to_le16(0x0001);
- cp.max_ce_len = cpu_to_le16(0x0001);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0001);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0001);
hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_conn_update);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
- __u8 ltk[16])
+ __u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_start_enc cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
memset(&cp, 0, sizeof(cp));
@@ -221,18 +207,17 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_start_enc);
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
struct hci_conn *sco = conn->link;
- BT_DBG("%p", conn);
-
if (!sco)
return;
+ BT_DBG("hcon %p", conn);
+
if (!status) {
if (lmp_esco_capable(conn->hdev))
hci_setup_sync(sco, conn->handle);
@@ -247,10 +232,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
static void hci_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
- disc_work.work);
+ disc_work.work);
__u8 reason;
- BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
+ BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
if (atomic_read(&conn->refcnt))
return;
@@ -281,7 +266,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
struct hci_cp_sniff_subrate cp;
cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
+ cp.max_latency = __constant_cpu_to_le16(0);
+ cp.min_remote_timeout = __constant_cpu_to_le16(0);
+ cp.min_local_timeout = __constant_cpu_to_le16(0);
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
}
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
cp.handle = cpu_to_le16(conn->handle);
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
+ cp.attempt = __constant_cpu_to_le16(4);
+ cp.timeout = __constant_cpu_to_le16(1);
hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
}
}
@@ -316,7 +301,7 @@ static void hci_conn_idle(unsigned long arg)
{
struct hci_conn *conn = (void *) arg;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
hci_conn_enter_sniff_mode(conn);
}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
struct hci_dev *hdev = conn->hdev;
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
- &conn->dst);
+ &conn->dst);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
- (unsigned long) conn);
+ (unsigned long) conn);
atomic_set(&conn->refcnt, 0);
@@ -397,7 +382,7 @@ int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
+ BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
del_timer(&conn->idle_timer);
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
}
}
-
hci_chan_list_flush(conn);
+ if (conn->amp_mgr)
+ amp_mgr_put(conn->amp_mgr);
+
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -454,7 +441,9 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
read_lock(&hci_dev_list_lock);
list_for_each_entry(d, &hci_dev_list, list) {
- if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
+ if (!test_bit(HCI_UP, &d->flags) ||
+ test_bit(HCI_RAW, &d->flags) ||
+ d->dev_type != HCI_BREDR)
continue;
/* Simple routing:
@@ -495,6 +484,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
if (type == LE_LINK) {
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (!le) {
+ le = hci_conn_hash_lookup_state(hdev, LE_LINK,
+ BT_CONNECT);
+ if (le)
+ return ERR_PTR(-EBUSY);
+
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);
@@ -545,7 +539,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
hci_conn_hold(sco);
if (acl->state == BT_CONNECTED &&
- (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
@@ -560,24 +554,22 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
return sco;
}
-EXPORT_SYMBOL(hci_connect);
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
}
-EXPORT_SYMBOL(hci_conn_check_link_mode);
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->pending_sec_level > sec_level)
sec_level = conn->pending_sec_level;
@@ -600,7 +592,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
if (conn->key_type != 0xff)
set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
@@ -611,21 +603,21 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = cpu_to_le16(conn->handle);
cp.encrypt = 0x01;
hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
}
}
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
/* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
@@ -648,8 +640,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* An unauthenticated combination key has sufficient security for
security levels 1 and 2. */
if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM ||
- sec_level == BT_SECURITY_LOW))
+ (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
goto encrypt;
/* A combination key always has sufficient security for the security
@@ -657,8 +648,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
is generated using maximum PIN code length (16).
For pre 2.1 units. */
if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level != BT_SECURITY_HIGH ||
- conn->pin_length == 16))
+ (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
goto encrypt;
auth:
@@ -680,7 +670,7 @@ EXPORT_SYMBOL(hci_conn_security);
/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (sec_level != BT_SECURITY_HIGH)
return 1; /* Accept if non-secure is required */
@@ -695,23 +685,22 @@ EXPORT_SYMBOL(hci_conn_check_secure);
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
return 0;
}
-EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
@@ -732,7 +721,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
@@ -752,7 +741,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
timer:
if (hdev->idle_timeout > 0)
mod_timer(&conn->idle_timer,
- jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connections on the device */
@@ -802,7 +791,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
- register struct hci_conn *c;
+ struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
@@ -906,7 +895,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
struct hci_dev *hdev = conn->hdev;
struct hci_chan *chan;
- BT_DBG("%s conn %p", hdev->name, conn);
+ BT_DBG("%s hcon %p", hdev->name, conn);
chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
if (!chan)
@@ -925,7 +914,7 @@ int hci_chan_del(struct hci_chan *chan)
struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+ BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
list_del_rcu(&chan->list);
@@ -941,7 +930,7 @@ void hci_chan_list_flush(struct hci_conn *conn)
{
struct hci_chan *chan, *n;
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
list_for_each_entry_safe(chan, n, &conn->chan_list, list)
hci_chan_del(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647..d4de5db18d5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,34 +25,14 @@
/* Bluetooth HCI core. */
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/rfkill.h>
-#include <linux/timer.h>
-#include <linux/crypto.h>
-#include <net/sock.h>
+#include <linux/export.h>
+#include <linux/idr.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/rfkill.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define AUTO_OFF_TIMEOUT 2000
-
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
@@ -65,6 +45,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
+
/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
@@ -76,7 +59,7 @@ static void hci_notify(struct hci_dev *hdev, int event)
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
- BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
+ BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
/* If this is the init phase, check if the completed command matches
* the last init command, and if not just return.
@@ -124,8 +107,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
/* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int __hci_request(struct hci_dev *hdev,
+ void (*req)(struct hci_dev *hdev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -166,8 +150,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
return err;
}
-static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int hci_request(struct hci_dev *hdev,
+ void (*req)(struct hci_dev *hdev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -201,12 +186,6 @@ static void bredr_init(struct hci_dev *hdev)
/* Mandatory initialization */
- /* Reset */
- if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
- }
-
/* Read Local Supported Features */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
@@ -235,7 +214,7 @@ static void bredr_init(struct hci_dev *hdev)
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
/* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
+ param = __constant_cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -247,9 +226,6 @@ static void amp_init(struct hci_dev *hdev)
{
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
- /* Reset */
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
@@ -275,6 +251,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
}
skb_queue_purge(&hdev->driver_init);
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+ hci_reset_req(hdev, 0);
+
switch (hdev->dev_type) {
case HCI_BREDR:
bredr_init(hdev);
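Together with the two removals above, the reset moves out of the per-transport bredr_init()/amp_init() helpers (where amp_init() issued it unconditionally) into the common init path, and the quirk is renamed to match what it selects: controllers without HCI_QUIRK_RESET_ON_CLOSE are reset when opened, while flagged controllers skip that and are reset on close instead, as the hci_dev_do_close() hunk below shows. A driver would opt in before registration along these lines (illustrative only):

	set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);	/* reset at close, not at open */
	hci_register_dev(hdev);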
@@ -417,7 +397,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
INIT_LIST_HEAD(&cache->resolve);
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
{
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
@@ -478,7 +459,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
list_for_each_entry(p, &cache->resolve, list) {
if (p->name_state != NAME_PENDING &&
- abs(p->data.rssi) >= abs(ie->data.rssi))
+ abs(p->data.rssi) >= abs(ie->data.rssi))
break;
pos = &p->list;
}
@@ -503,7 +484,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
*ssp = true;
if (ie->name_state == NAME_NEEDED &&
- data->rssi != ie->data.rssi) {
+ data->rssi != ie->data.rssi) {
ie->data.rssi = data->rssi;
hci_inquiry_cache_update_resolve(hdev, ie);
}
@@ -527,7 +508,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
update:
if (name_known && ie->name_state != NAME_KNOWN &&
- ie->name_state != NAME_PENDING) {
+ ie->name_state != NAME_PENDING) {
ie->name_state = NAME_KNOWN;
list_del(&ie->list);
}
@@ -605,8 +586,7 @@ int hci_inquiry(void __user *arg)
hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
- inquiry_cache_empty(hdev) ||
- ir.flags & IREQ_CACHE_FLUSH) {
+ inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
@@ -620,7 +600,9 @@ int hci_inquiry(void __user *arg)
goto done;
}
- /* for unlimited number of responses we will use buffer with 255 entries */
+ /* for an unlimited number of responses we will use a buffer
+ * with 255 entries
+ */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
@@ -641,7 +623,7 @@ int hci_inquiry(void __user *arg)
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
- ir.num_rsp))
+ ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
@@ -701,12 +683,11 @@ int hci_dev_open(__u16 dev)
set_bit(HCI_INIT, &hdev->flags);
hdev->init_last_cmd = 0;
- ret = __hci_request(hdev, hci_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
if (lmp_host_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
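These call sites, like the HCISET* ioctl handlers and the power-off work below, drop the msecs_to_jiffies() wrappers, which is only correct if the timeout macros are now jiffies-valued themselves. Their definitions are outside this diff; presumably something on the order of (the values here are assumptions):

	#define HCI_INIT_TIMEOUT	msecs_to_jiffies(10000)
	#define HCI_CMD_TIMEOUT		msecs_to_jiffies(2000)
	#define HCI_AUTO_OFF_TIMEOUT	msecs_to_jiffies(2000)	/* replaces local AUTO_OFF_TIMEOUT */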
@@ -791,10 +772,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_RAW, &hdev->flags) &&
- test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+ test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
- __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(250));
+ __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -883,8 +863,7 @@ int hci_dev_reset(__u16 dev)
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
- ret = __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
done:
hci_req_unlock(hdev);
@@ -924,7 +903,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -936,23 +915,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
if (err)
break;
}
err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETSCAN:
err = hci_request(hdev, hci_scan_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKPOL:
err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -1102,8 +1081,7 @@ static void hci_power_on(struct work_struct *work)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- schedule_delayed_work(&hdev->power_off,
- msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
mgmt_index_added(hdev);
@@ -1112,7 +1090,7 @@ static void hci_power_on(struct work_struct *work)
static void hci_power_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
- power_off.work);
+ power_off.work);
BT_DBG("%s", hdev->name);
@@ -1193,7 +1171,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
- u8 key_type, u8 old_key_type)
+ u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
@@ -1234,7 +1212,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
list_for_each_entry(k, &hdev->long_term_keys, list) {
if (k->ediv != ediv ||
- memcmp(rand, k->rand, sizeof(k->rand)))
+ memcmp(rand, k->rand, sizeof(k->rand)))
continue;
return k;
@@ -1242,7 +1220,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type)
@@ -1251,12 +1228,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
list_for_each_entry(k, &hdev->long_term_keys, list)
if (addr_type == k->bdaddr_type &&
- bacmp(bdaddr, &k->bdaddr) == 0)
+ bacmp(bdaddr, &k->bdaddr) == 0)
return k;
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk_by_addr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1259,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
* combination key for legacy pairing even when there's no
* previous key */
if (type == HCI_LK_CHANGED_COMBINATION &&
- (!conn || conn->remote_auth == 0xff) &&
- old_key_type == 0xff) {
+ (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
type = HCI_LK_COMBINATION;
if (conn)
conn->key_type = type;
}
bacpy(&key->bdaddr, bdaddr);
- memcpy(key->val, val, 16);
+ memcpy(key->val, val, HCI_LINK_KEY_SIZE);
key->pin_len = pin_len;
if (type == HCI_LK_CHANGED_COMBINATION)
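In the hunk above, the bare 16 in the memcpy() gains a name. The definition is not part of this diff; the obvious reading (an assumption) is:

	#define HCI_LINK_KEY_SIZE	16	/* BR/EDR link keys are 16 bytes */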
@@ -1383,11 +1358,19 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
}
/* HCI command timer function */
-static void hci_cmd_timer(unsigned long arg)
+static void hci_cmd_timeout(unsigned long arg)
{
struct hci_dev *hdev = (void *) arg;
- BT_ERR("%s command tx timeout", hdev->name);
+ if (hdev->sent_cmd) {
+ struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
+
+ BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+ } else {
+ BT_ERR("%s command tx timeout", hdev->name);
+ }
+
atomic_set(&hdev->cmd_cnt, 1);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
@@ -1540,6 +1523,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
memset(&cp, 0, sizeof(cp));
cp.enable = 1;
+ cp.filter_dup = 1;
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
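Besides enabling the scan, the request now sets filter_dup, asking the controller to suppress duplicate advertising reports so the host is not interrupted for every repetition of the same advertisement. The command as assembled above:

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;		/* start LE scanning */
	cp.filter_dup = 1;	/* controller de-duplicates advertising reports */
	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);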
@@ -1684,7 +1668,7 @@ struct hci_dev *hci_alloc_dev(void)
init_waitqueue_head(&hdev->req_wait_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
hci_init_sysfs(hdev);
discovery_init(hdev);
@@ -1707,41 +1691,39 @@ EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
- struct list_head *head, *p;
int id, error;
if (!hdev->open || !hdev->close)
return -EINVAL;
- write_lock(&hci_dev_list_lock);
-
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
- head = &hci_dev_list;
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- int nid = list_entry(p, struct hci_dev, list)->id;
- if (nid > id)
- break;
- if (nid == id)
- id++;
- head = p;
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+ break;
+ case HCI_AMP:
+ id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+ break;
+ default:
+ return -EINVAL;
}
+ if (id < 0)
+ return id;
+
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- list_add(&hdev->list, head);
-
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
+ WQ_MEM_RECLAIM, 1);
if (!hdev->workqueue) {
error = -ENOMEM;
goto err;
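The open-coded list walk for a free index is replaced by an IDA. ida_simple_get(ida, start, end, gfp) returns the lowest free ID at or above start (end == 0 means no upper bound) and ida_simple_remove() recycles it, which the error path below and hci_unregister_dev() now do. The floor of 1 for AMP devices keeps index 0 free so it can double as the AMP controller ID. A self-contained sketch of the same pattern:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	int id = ida_simple_get(&example_ida, 1, 0, GFP_KERNEL); /* lowest free id >= 1 */
	if (id < 0)
		return id;			/* -ENOMEM or -ENOSPC */
	/* ... use the id ... */
	ida_simple_remove(&example_ida, id);	/* return it to the pool */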
@@ -1752,7 +1734,8 @@ int hci_register_dev(struct hci_dev *hdev)
goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
- RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+ RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
+ hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
@@ -1760,8 +1743,11 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
set_bit(HCI_SETUP, &hdev->dev_flags);
+
+ if (hdev->dev_type != HCI_AMP)
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
err_wqueue:
destroy_workqueue(hdev->workqueue);
err:
+ ida_simple_remove(&hci_index_ida, hdev->id);
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
- int i;
+ int i, id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+ id = hdev->id;
+
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
hci_dev_put(hdev);
+
+ ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
- && !test_bit(HCI_INIT, &hdev->flags))) {
+ && !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
- int count, __u8 index)
+ int count, __u8 index)
{
int len = 0;
int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
- index >= NUM_REASSEMBLY)
+ index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
- STREAM_REASSEMBLY);
+ STREAM_REASSEMBLY);
if (rem < 0)
return rem;
@@ -2096,7 +2087,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
struct hci_command_hdr *hdr;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
@@ -2138,7 +2129,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
- struct sk_buff *skb, __u16 flags)
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
@@ -2208,7 +2199,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+ BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_queue_tail(&conn->data_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
-static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
+static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
-static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
- hdev->name, batostr(&c->dst));
- hci_acl_disconn(c, 0x13);
+ hdev->name, batostr(&c->dst));
+ hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
}
}
rcu_read_unlock();
}
-static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
- int *quote)
+static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
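In the stalled-connection hunk above, the magic disconnect reason 0x13 becomes a named constant. 0x13 is the HCI error code "Remote User Terminated Connection", so the definition elsewhere in the tree is presumably:

	#define HCI_ERROR_REMOTE_USER_TERM	0x13	/* assumed; matches the old literal */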
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
skb->priority = HCI_PRIO_MAX - 1;
BT_DBG("chan %p skb %p promoted to %d", chan, skb,
- skb->priority);
+ skb->priority);
}
if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
-static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than the maximum
* link supervision timeout (40.9 seconds) */
if (!cnt && time_after(jiffies, hdev->acl_last_tx +
- msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
+ HCI_ACL_TX_TIMEOUT))
hci_link_tx_to(hdev, ACL_LINK);
}
}
-static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
unsigned int cnt = hdev->acl_cnt;
struct hci_chan *chan;
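As with the request timeouts in hci_core.c above, HCI_ACL_TX_TIMEOUT is now used directly as a jiffies value. The stall check keeps the time_after() form, which remains correct across jiffies wraparound where a plain comparison would not:

	if (!cnt && time_after(jiffies, hdev->acl_last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, ACL_LINK);	/* assumed ~45 s, > 40.9 s supervision */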
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->acl_cnt &&
- (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static inline void hci_sched_acl_blk(struct hci_dev *hdev)
+static void hci_sched_acl_blk(struct hci_dev *hdev)
{
unsigned int cnt = hdev->block_cnt;
struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->block_cnt > 0 &&
- (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
return;
hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static void hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
}
/* Schedule SCO */
-static inline void hci_sched_sco(struct hci_dev *hdev)
+static void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
}
}
-static inline void hci_sched_esco(struct hci_dev *hdev)
+static void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
if (!hci_conn_num(hdev, ESCO_LINK))
return;
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
+ &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
-static inline void hci_sched_le(struct hci_dev *hdev)
+static void hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
/* LE tx timeout must be longer than the maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
- time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
hci_link_tx_to(hdev, LE_LINK);
}
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
struct sk_buff *skb;
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
- hdev->sco_cnt, hdev->le_cnt);
+ hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
-static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
hdev->stat.acl_rx++;
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
-static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2749,7 +2741,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle = __le16_to_cpu(hdr->handle);
- BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
+ BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
hdev->stat.sco_rx++;
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
@@ -2829,7 +2821,8 @@ static void hci_cmd_work(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
- BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
+ BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
+ atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
/* Send queued commands */
if (atomic_read(&hdev->cmd_cnt)) {
@@ -2847,7 +2840,7 @@ static void hci_cmd_work(struct work_struct *work)
del_timer(&hdev->cmd_timer);
else
mod_timer(&hdev->cmd_timer,
- jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
+ jiffies + HCI_CMD_TIMEOUT);
} else {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94ad124a4ea..41ff978a33f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
/* Bluetooth HCI event handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -49,7 +36,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status) {
hci_dev_lock(hdev);
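The 0x%x → 0x%2.2x (and, for opcodes and handles, 0x%4.4x) conversions repeated through this file pin the printed width to the field size, so one-byte status codes and two-byte values line up in the logs:

	BT_DBG("%s status 0x%2.2x", hdev->name, status);	/* prints 0x0c, not 0xc */
	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);	/* prints 0x0c03, not 0xc03 */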
@@ -73,7 +60,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -85,7 +72,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
@@ -105,7 +93,7 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_role_discovery *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -128,7 +116,7 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -148,7 +136,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -166,11 +154,12 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -178,12 +167,13 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
hdev->link_policy = __le16_to_cpu(rp->policy);
}
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
@@ -199,7 +189,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
clear_bit(HCI_RESET, &hdev->flags);
@@ -217,7 +207,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
@@ -239,7 +229,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_name *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -253,7 +243,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
@@ -279,7 +269,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
@@ -303,7 +293,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
int old_pscan, old_iscan;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (hdev->discov_timeout > 0) {
int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- to);
+ to);
}
} else if (old_iscan)
mgmt_discoverable(hdev, 0);
@@ -350,7 +340,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(hdev->dev_class, rp->dev_class, 3);
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -366,7 +356,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
@@ -388,7 +378,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
__u16 setting;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -400,19 +390,20 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
-static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_voice_setting(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
__u16 setting;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -428,7 +419,7 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
@@ -438,7 +429,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
@@ -448,7 +439,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
return 1;
if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
- hdev->lmp_subver == 0x0757)
+ hdev->lmp_subver == 0x0757)
return 1;
if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
}
if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
- hdev->lmp_subver == 0x1805)
+ hdev->lmp_subver == 0x1805)
return 1;
return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
if (hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
- if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ if (lmp_ssp_capable(hdev)) {
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
u8 mode = 0x01;
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
@@ -606,7 +597,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -617,9 +608,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
- BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
- hdev->manufacturer,
- hdev->hci_ver, hdev->hci_rev);
+ BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
+ hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
if (test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
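The raw feature-bit test in hci_setup() above gives way to a helper. By analogy with the other lmp_*_capable() macros, its definition (an assumption; it is not part of this diff) would be roughly:

	#define lmp_ssp_capable(dev)	((dev)->features[6] & LMP_SIMPLE_PAIR)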
@@ -646,11 +636,12 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
-static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_commands(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_commands *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -664,11 +655,12 @@ done:
hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
-static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0], hdev->features[1],
+ hdev->features[2], hdev->features[3],
+ hdev->features[4], hdev->features[5],
+ hdev->features[6], hdev->features[7]);
}
static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,11 +728,11 @@ static void hci_set_le_support(struct hci_dev *hdev)
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -762,11 +754,11 @@ done:
}
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -780,7 +772,7 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -798,16 +790,15 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
hdev->acl_cnt = hdev->acl_pkts;
hdev->sco_cnt = hdev->sco_pkts;
- BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
- hdev->acl_mtu, hdev->acl_pkts,
- hdev->sco_mtu, hdev->sco_pkts);
+ BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
@@ -816,11 +807,11 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_data_block_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
hdev->block_cnt = hdev->num_blocks;
BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
- hdev->block_cnt, hdev->block_len);
+ hdev->block_cnt, hdev->block_len);
hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
@@ -841,17 +832,17 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -871,11 +862,11 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
}
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}
@@ -884,27 +875,27 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
hdev->inq_tx_power = rp->tx_power;
@@ -916,7 +907,7 @@ static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}
@@ -927,7 +918,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_cp_pin_code_reply *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -953,13 +944,13 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ rp->status);
hci_dev_unlock(hdev);
}
@@ -969,7 +960,7 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
{
struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -988,7 +979,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1000,11 +991,11 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1019,7 +1010,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1031,11 +1022,11 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1047,11 +1038,11 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
@@ -1063,7 +1054,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
@@ -1076,12 +1067,12 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_cp_le_set_scan_enable *cp;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
@@ -1136,7 +1127,7 @@ static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -1148,7 +1139,7 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -1156,13 +1147,13 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
-static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_cp_write_le_host_supported *sent;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
if (!sent)
@@ -1176,15 +1167,15 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
}
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_bit(HCI_INIT, &hdev->flags))
+ !test_bit(HCI_INIT, &hdev->flags))
mgmt_le_enable_complete(hdev, sent->le, status);
hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
-static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1203,12 +1194,12 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
-static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
@@ -1218,7 +1209,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
+ BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1249,7 +1240,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1260,7 +1251,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1283,7 +1274,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1310,7 +1301,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH or if MITM protection is requested */
- if (!hci_conn_ssp_enabled(conn) &&
- conn->pending_sec_level != BT_SECURITY_HIGH &&
- !(conn->auth_type & 0x01))
+ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+ conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
}
-static inline int hci_resolve_name(struct hci_dev *hdev,
+static int hci_resolve_name(struct hci_dev *hdev,
struct inquiry_entry *e)
{
struct hci_cp_remote_name_req cp;
@@ -1423,7 +1413,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful, wait for the name req complete event before
* checking for the need to do authentication */
@@ -1462,7 +1452,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1489,7 +1479,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1517,7 +1507,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1528,7 +1518,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1551,7 +1541,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1578,7 +1568,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1627,7 +1617,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
struct hci_cp_le_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
if (!cp)
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
- conn);
+ conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1665,16 +1655,16 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
-static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct discovery_state *discov = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("%s status %d", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1708,7 +1698,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_unlock(hdev);
}
-static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
hci_conn_check_pending(hdev);
}
-static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
int mask = hdev->link_mode;
- BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
- batostr(&ev->bdaddr), ev->link_type);
+ BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
+ ev->link_type);
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
if ((mask & HCI_LM_ACCEPT) &&
- !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
- conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
@@ -1897,12 +1888,12 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
}
}
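The cpu_to_le*() to __constant_cpu_to_le*() switches follow the byteorder convention of this era: for a literal argument the __constant_ form lets the swap be folded at compile time, while values known only at run time keep the plain helper. The pattern, as a sketch:

__le16 fixed   = __constant_cpu_to_le16(0xffff);   /* literal: folded at build time */
__le16 dynamic = cpu_to_le16(hdev->voice_setting); /* runtime value: swapped at run time */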
-static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_disconn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn->state = BT_CLOSED;
if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
- (conn->type == ACL_LINK || conn->type == LE_LINK)) {
+ (conn->type == ACL_LINK || conn->type == LE_LINK)) {
if (ev->status != 0)
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, ev->status);
+ conn->dst_type, ev->status);
else
mgmt_device_disconnected(hdev, &conn->dst, conn->type,
conn->dst_type);
@@ -1934,12 +1925,12 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_auth_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (!ev->status) {
if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+ test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
BT_INFO("re-auth of legacy device is not possible.");
} else {
conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
} else {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
} else {
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_name *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2039,12 +2030,12 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_encrypt_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2082,12 +2073,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2104,12 +2096,13 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
-static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_features *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
cp.handle = ev->handle;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
goto unlock;
}
@@ -2153,17 +2146,18 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
__u16 opcode;
@@ -2370,7 +2364,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
}
}
-static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;
@@ -2451,7 +2445,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
@@ -2465,12 +2459,12 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_unlock(hdev);
}
-static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
}
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
+ ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
}
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
- ev->num_hndl);
+ ev->num_hndl);
for (i = 0; i < ev->num_hndl; i++) {
struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,12 +2600,12 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
+ &conn->flags)) {
if (conn->mode == HCI_CM_ACTIVE)
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_unlock(hdev);
}
-static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pin_code_req *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
u8 secure;
@@ -2672,7 +2666,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_req *ev = (void *) skb->data;
struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
key = hci_find_link_key(hdev, &ev->bdaddr);
if (!key) {
BT_DBG("%s link key not found for %s", hdev->name,
- batostr(&ev->bdaddr));
+ batostr(&ev->bdaddr));
goto not_found;
}
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
- batostr(&ev->bdaddr));
+ batostr(&ev->bdaddr));
if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
- key->type == HCI_LK_DEBUG_COMBINATION) {
+ key->type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
if (key->type == HCI_LK_UNAUTH_COMBINATION &&
- conn->auth_type != 0xff &&
- (conn->auth_type & 0x01)) {
+ conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
BT_DBG("%s ignoring unauthenticated key", hdev->name);
goto not_found;
}
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
- conn->pending_sec_level == BT_SECURITY_HIGH) {
- BT_DBG("%s ignoring key unauthenticated for high \
- security", hdev->name);
+ conn->pending_sec_level == BT_SECURITY_HIGH) {
+ BT_DBG("%s ignoring key unauthenticated for high security",
+ hdev->name);
goto not_found;
}
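The joined string literal above fixes a real logging bug: a backslash continuation inside a string literal keeps the next line's indentation in the string, so the old message logged as "ignoring key unauthenticated for high <tabs>security". A minimal user-space reproduction:

#include <stdio.h>

int main(void)
{
	printf("high \
		security\n"); /* prints "high \t\tsecurity" */
	return 0;
}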
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
}
bacpy(&cp.bdaddr, &ev->bdaddr);
- memcpy(cp.link_key, key->val, 16);
+ memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
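Replacing the bare 16 with HCI_LINK_KEY_SIZE ties the memcpy() length to the definition of cp.link_key, so the two cannot drift apart if the key size is ever expressed differently.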
@@ -2736,7 +2729,7 @@ not_found:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2760,17 +2753,17 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
- ev->key_type, pin_len);
+ ev->key_type, pin_len);
hci_dev_unlock(hdev);
}
-static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_clock_offset *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2788,12 +2781,12 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_dev_unlock(hdev);
}
-static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pkt_type_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_unlock(hdev);
}
-static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
-static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_ext_features *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2929,12 +2924,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2984,19 +2980,20 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}
-static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3049,7 +3046,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+ BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
__le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -3087,7 +3084,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline u8 hci_get_auth_req(struct hci_conn *conn)
+static u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3106,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
return conn->auth_type;
}
-static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3125,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
- (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3136,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
- if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
- hci_find_remote_oob_data(hdev, &conn->dst))
+ if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+ (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
cp.oob_data = 0x01;
else
cp.oob_data = 0x00;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
} else {
struct hci_cp_io_capability_neg_reply cp;
@@ -3151,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_reply *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3180,8 +3177,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3209,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
/* If no side requires MITM protection; auto-accept */
if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
+ (!rem_mitm || conn->io_capability == 0x03)) {
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
@@ -3227,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
BT_DBG("Auto-accept of user confirmation with %ums delay",
- hdev->auto_accept_delay);
+ hdev->auto_accept_delay);
if (hdev->auto_accept_delay > 0) {
int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3236,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
@@ -3248,8 +3245,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_user_passkey_req *ev = (void *) skb->data;
@@ -3263,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3291,7 +3289,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -3307,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
hci_dev_unlock(hdev);
}
-static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
@@ -3329,28 +3328,41 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
- &cp);
+ &cp);
} else {
struct hci_cp_remote_oob_data_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
- &cp);
+ &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
+ if (ev->status) {
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn)
+ goto unlock;
+
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
+ hci_proto_connect_cfm(conn, ev->status);
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
+ goto unlock;
+ }
+
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3363,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn->dst_type = ev->bdaddr_type;
}
- if (ev->status) {
- mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, ev->status);
- hci_proto_connect_cfm(conn, ev->status);
- conn->state = BT_CLOSED;
- hci_conn_del(conn);
- goto unlock;
- }
-
if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
conn->dst_type, 0, NULL, 0, NULL);
@@ -3389,8 +3392,7 @@ unlock:
hci_dev_unlock(hdev);
}
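Ordering note on the hci_le_conn_complete_evt() change: the failure path now runs before any lookup-or-add on ev->bdaddr and finds the pending connection by state (LE_LINK, BT_CONNECT) instead, presumably because a failed LE Connection Complete event need not carry a usable peer address; the old code could allocate a fresh hci_conn only to delete it again a few lines later.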
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
void *ptr = &skb->data[1];
@@ -3411,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = (void *) skb->data;
struct hci_cp_le_ltk_reply cp;
@@ -3420,7 +3421,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
+ BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -3455,7 +3456,7 @@ not_found:
hci_dev_unlock(hdev);
}
-static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -3644,7 +3645,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_DBG("%s event 0x%x", hdev->name, event);
+ BT_DBG("%s event 0x%2.2x", hdev->name, event);
break;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426..a7f04de03d7 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
/* Bluetooth HCI sockets. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
flt = &hci_pi(sk)->filter;
if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+ 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
+ &flt->type_mask))
continue;
if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+ int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
if (!hci_test_bit(evt, &flt->event_mask))
continue;
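Dropping the register qualifier is a no-op at run time; it is only a hint, and contemporary compilers make their own allocation decisions, so the keyword is shed along with the line rewrap.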
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_mon_hdr *hdr;
/* Create a private copy with headroom */
- skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+ skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
+ GFP_ATOMIC);
if (!skb_copy)
continue;
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
}
/* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ unsigned long arg)
{
struct hci_dev *hdev = hci_pi(sk)->hdev;
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
}
}
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
}
}
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
{
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
return err;
}
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
return 0;
}
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
- put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+ &incoming);
}
if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+ struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
u16 ocf = hci_opcode_ocf(opcode);
if (((ogf > HCI_SFLT_MAX_OGF) ||
- !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
- !capable(CAP_NET_RAW)) {
+ !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+ &hci_sec_filter.ocf_mask[ogf])) &&
+ !capable(CAP_NET_RAW)) {
err = -EPERM;
goto drop;
}
@@ -891,7 +882,8 @@ drop:
goto done;
}
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
return err;
}
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eaf..a20e61c3653 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
/* Bluetooth HCI driver model support. */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
}
}
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0], conn->features[1],
- conn->features[2], conn->features[3],
- conn->features[4], conn->features[5],
- conn->features[6], conn->features[7]);
+ conn->features[0], conn->features[1],
+ conn->features[2], conn->features[3],
+ conn->features[4], conn->features[5],
+ conn->features[6], conn->features[7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
}
}
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_bus(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
return sprintf(buf, "%s\n", name);
}
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_class(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "0x%.2x%.2x%.2x\n",
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
}
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0], hdev->features[1],
+ hdev->features[2], hdev->features[3],
+ hdev->features[4], hdev->features[5],
+ hdev->features[6], hdev->features[7]);
}
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
return count;
}
-static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
-static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
return count;
}
-static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
-static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
- show_idle_timeout, store_idle_timeout);
+ show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
- show_sniff_max_interval, store_sniff_max_interval);
+ show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
- show_sniff_min_interval, store_sniff_min_interval);
+ show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
&dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
memcpy(&data5, &uuid[14], 2);
seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
- ntohl(data0), ntohs(data1), ntohs(data2),
- ntohs(data3), ntohl(data4), ntohs(data5));
+ ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
+ ntohl(data4), ntohs(data5));
}
static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
+ auto_accept_delay_set, "%llu\n");
void hci_init_sysfs(struct hci_dev *hdev)
{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
return 0;
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
+ hdev, &inquiry_cache_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs,
- hdev, &blacklist_fops);
+ hdev, &blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
- &auto_accept_delay_fops);
+ &auto_accept_delay_fops);
return 0;
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b39..ccd985da651 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
*/
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
}
static int __hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
+ unsigned char hdr, unsigned char *data,
+ int size)
{
struct sk_buff *skb;
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
return 0;
}
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
+static int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
{
int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
}
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1..18b3f6892a3 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
#include "hidp.h"
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4554e80d16a..a8964db04bf 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
#include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/a2mp.h>
bool disable_ertm;
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
struct l2cap_chan *chan, int err);
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event);
+
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
state_to_string(state));
chan->state = state;
- chan->ops->state_change(chan->data, state);
+ chan->ops->state_change(chan, state);
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
+ if (!delayed_work_pending(&chan->monitor_timer) &&
+ chan->retrans_timeout) {
+ l2cap_set_timer(chan, &chan->retrans_timer,
+ msecs_to_jiffies(chan->retrans_timeout));
+ }
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+ __clear_retrans_timer(chan);
+ if (chan->monitor_timeout) {
+ l2cap_set_timer(chan, &chan->monitor_timer,
+ msecs_to_jiffies(chan->monitor_timeout));
+ }
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
+{
+ struct sk_buff *skb;
+
+ skb_queue_walk(head, skb) {
+ if (bt_cb(skb)->control.txseq == seq)
+ return skb;
+ }
+
+ return NULL;
+}
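The two timer helpers above encode ERTM's rule that at most one supervision timer runs at a time: the retransmission timer is armed only while no monitor timer is pending, and arming the monitor timer first cancels any outstanding retransmission timer. l2cap_ertm_seq_in_queue() is the lookup those paths use to map a TxSeq back to the queued skb.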
+
/* ---- L2CAP sequence number lists ---- */
/* For ERTM, ordered lists of sequence numbers must be tracked for
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
atomic_set(&chan->refcnt, 1);
+ /* This flag is cleared in l2cap_chan_ready() */
+ set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
BT_DBG("chan %p", chan);
return chan;
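CONF_NOT_COMPLETE is set at creation and, per the comment, cleared in l2cap_chan_ready(); the l2cap_chan_del() hunk later in this patch tests it to skip ERTM/streaming teardown for channels that never finished configuration, replacing the old CONF_OUTPUT_DONE && CONF_INPUT_DONE pair.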
@@ -412,6 +431,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -430,7 +450,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
case L2CAP_CHAN_CONN_ORIENTED:
if (conn->hcon->type == LE_LINK) {
/* LE connection */
- chan->omtu = L2CAP_LE_DEFAULT_MTU;
+ chan->omtu = L2CAP_DEFAULT_MTU;
chan->scid = L2CAP_CID_LE_DATA;
chan->dcid = L2CAP_CID_LE_DATA;
} else {
@@ -447,6 +467,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
chan->omtu = L2CAP_DEFAULT_MTU;
break;
+ case L2CAP_CHAN_CONN_FIX_A2MP:
+ chan->scid = L2CAP_CID_A2MP;
+ chan->dcid = L2CAP_CID_A2MP;
+ chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+ chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+ break;
+
default:
/* Raw socket can send/recv signalling messages only */
chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +493,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
mutex_unlock(&conn->chan_lock);
}
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
- struct sock *parent = bt_sk(sk)->parent;
__clear_chan_timer(chan);
@@ -490,34 +515,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_chan_put(chan);
chan->conn = NULL;
- hci_conn_put(conn->hcon);
- }
-
- lock_sock(sk);
-
- __l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (err)
- __l2cap_chan_set_err(chan, err);
- if (parent) {
- bt_accept_unlink(sk);
- parent->sk_data_ready(parent, 0);
- } else
- sk->sk_state_change(sk);
+ if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+ hci_conn_put(conn->hcon);
+ }
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, err);
- if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
- test_bit(CONF_INPUT_DONE, &chan->conf_state)))
+ if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
return;
- skb_queue_purge(&chan->tx_q);
-
- if (chan->mode == L2CAP_MODE_ERTM) {
- struct srej_list *l, *tmp;
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
__clear_retrans_timer(chan);
__clear_monitor_timer(chan);
__clear_ack_timer(chan);
@@ -526,30 +539,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_seq_list_free(&chan->srej_list);
l2cap_seq_list_free(&chan->retrans_list);
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- list_del(&l->list);
- kfree(l);
- }
- }
-}
-static void l2cap_chan_cleanup_listen(struct sock *parent)
-{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL))) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
- l2cap_chan_lock(chan);
- __clear_chan_timer(chan);
- l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
+ /* fall through */
- chan->ops->close(chan->data);
+ case L2CAP_MODE_STREAMING:
+ skb_queue_purge(&chan->tx_q);
+ break;
}
+
+ return;
}
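With the teardown callback, l2cap_chan_del() no longer touches struct sock directly, which is what lets a socket-less A2MP channel share this code path. The callback table it leans on looks roughly like the following (a sketch abridged from the l2cap.h of this series; the real field list may differ):

struct l2cap_ops {
	char *name;

	struct l2cap_chan *(*new_connection)(struct l2cap_chan *chan);
	int  (*recv)(struct l2cap_chan *chan, struct sk_buff *skb);
	void (*teardown)(struct l2cap_chan *chan, int err);
	void (*close)(struct l2cap_chan *chan);
	void (*state_change)(struct l2cap_chan *chan, int state);
	void (*ready)(struct l2cap_chan *chan);
};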
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +560,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
switch (chan->state) {
case BT_LISTEN:
- lock_sock(sk);
- l2cap_chan_cleanup_listen(sk);
-
- __l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, 0);
break;
case BT_CONNECTED:
@@ -595,7 +589,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
sizeof(rsp), &rsp);
}
@@ -609,9 +603,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
break;
default:
- lock_sock(sk);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, 0);
break;
}
}
@@ -627,7 +620,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
default:
return HCI_AT_NO_BONDING;
}
- } else if (chan->psm == cpu_to_le16(0x0001)) {
+ } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
if (chan->sec_level == BT_SECURITY_LOW)
chan->sec_level = BT_SECURITY_SDP;
@@ -773,9 +766,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
__unpack_extended_control(get_unaligned_le32(skb->data),
&bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
} else {
__unpack_enhanced_control(get_unaligned_le16(skb->data),
&bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
}
}
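With the skb_pull() calls added here, __unpack_control() leaves skb->data at the start of the payload, so the receive paths no longer need to skip the 2- or 4-byte control field themselves.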
@@ -830,66 +825,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
}
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
+static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- struct l2cap_conn *conn = chan->conn;
- int count, hlen;
-
- if (chan->state != BT_CONNECTED)
- return;
-
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
+ return L2CAP_EXT_HDR_SIZE;
else
- hlen = L2CAP_ENH_HDR_SIZE;
+ return L2CAP_ENH_HDR_SIZE;
+}
+
+static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
+ u32 control)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ int hlen = __ertm_hdr_size(chan);
if (chan->fcs == L2CAP_FCS_CRC16)
hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%8.8x", chan, control);
-
- count = min_t(unsigned int, conn->mtu, hlen);
+ skb = bt_skb_alloc(hlen, GFP_KERNEL);
- control |= __set_sframe(chan);
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
-
- if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= __set_ctrl_poll(chan);
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
- return;
+ return ERR_PTR(-ENOMEM);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
skb->priority = HCI_PRIO_MAX;
- l2cap_do_send(chan, skb);
+ return skb;
}
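Note the FCS change inside the new PDU builder: the CRC is now computed over the bytes actually queued (skb->data for skb->len) rather than over "count - L2CAP_FCS_SIZE", a length derived from the connection MTU; the two only agreed when the MTU never clamped an S-frame, so the new form is both simpler and correct by construction.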
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
+static void l2cap_send_sframe(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
+ struct sk_buff *skb;
+ u32 control_field;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (!control->sframe)
+ return;
+
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+ !control->poll)
+ control->final = 1;
+
+ if (control->super == L2CAP_SUPER_RR)
+ clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ else if (control->super == L2CAP_SUPER_RNR)
set_bit(CONN_RNR_SENT, &chan->conn_state);
- } else
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= __set_reqseq(chan, chan->buffer_seq);
+ if (control->super != L2CAP_SUPER_SREJ) {
+ chan->last_acked_seq = control->reqseq;
+ __clear_ack_timer(chan);
+ }
- l2cap_send_sframe(chan, control);
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
+ control->final, control->poll, control->super);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(chan, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(chan, skb);
+}
+
+static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p, poll %d", chan, poll);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.poll = poll;
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ control.super = L2CAP_SUPER_RNR;
+ else
+ control.super = L2CAP_SUPER_RR;
+
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
}
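Callers now describe an S-frame in a struct l2cap_ctrl and leave the enhanced/extended bit packing to l2cap_send_sframe(), as l2cap_send_rr_or_rnr() shows. A hypothetical SREJ sender under the same API would read:

struct l2cap_ctrl control;

memset(&control, 0, sizeof(control));
control.sframe = 1;
control.super = L2CAP_SUPER_SREJ;
control.reqseq = missing_txseq; /* hypothetical: the sequence number to selectively reject */
l2cap_send_sframe(chan, &control);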
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
@@ -914,25 +945,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
+ /* This clears all conf flags, including CONF_NOT_COMPLETE */
chan->conf_state = 0;
__clear_chan_timer(chan);
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
+ chan->state = BT_CONNECTED;
- release_sock(sk);
+ chan->ops->ready(chan);
}
static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +972,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
l2cap_send_conn_req(chan);
} else {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1014,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
__clear_ack_timer(chan);
}
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ __l2cap_state_change(chan, BT_DISCONN);
+ return;
+ }
+
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1077,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
if (parent)
parent->sk_data_ready(parent, 0);
} else {
__l2cap_state_change(chan, BT_CONFIG);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
}
release_sock(sk);
} else {
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
}
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1174,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
lock_sock(parent);
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
- goto clean;
- }
-
- chan = pchan->ops->new_connection(pchan->data);
+ chan = pchan->ops->new_connection(pchan);
if (!chan)
goto clean;
@@ -1171,10 +1189,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
l2cap_chan_add(conn, chan);
- __set_chan_timer(chan, sk->sk_sndtimeo);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- parent->sk_data_ready(parent, 0);
+ l2cap_chan_ready(chan);
clean:
release_sock(parent);
@@ -1198,6 +1213,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
if (conn->hcon->type == LE_LINK) {
if (smp_conn_security(conn, chan->sec_level))
l2cap_chan_ready(chan);
@@ -1270,7 +1290,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
}
@@ -1444,21 +1464,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
- lock_sock(sk);
-
- switch (sk->sk_state) {
+ switch (chan->state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
err = 0;
- release_sock(sk);
goto done;
case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
- release_sock(sk);
goto done;
case BT_OPEN:
@@ -1468,13 +1484,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
default:
err = -EBADFD;
- release_sock(sk);
goto done;
}
/* Set destination address and psm */
+ lock_sock(sk);
bacpy(&bt_sk(sk)->dst, dst);
-
release_sock(sk);
chan->psm = psm;
@@ -1576,23 +1591,20 @@ int __l2cap_wait_ack(struct sock *sk)
static void l2cap_monitor_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- monitor_timer.work);
+ monitor_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- if (chan->retry_count >= chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
+ if (!chan->conn) {
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return;
}
- chan->retry_count++;
- __set_monitor_timer(chan);
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
@@ -1600,234 +1612,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
static void l2cap_retrans_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- retrans_timer.work);
+ retrans_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- chan->retry_count = 1;
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
+static void l2cap_streaming_send(struct l2cap_chan *chan,
+ struct sk_buff_head *skbs)
{
struct sk_buff *skb;
+ struct l2cap_ctrl *control;
- while ((skb = skb_peek(&chan->tx_q)) &&
- chan->unacked_frames) {
- if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
- break;
+ BT_DBG("chan %p, skbs %p", chan, skbs);
- skb = skb_dequeue(&chan->tx_q);
- kfree_skb(skb);
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
- chan->unacked_frames--;
- }
+ while (!skb_queue_empty(&chan->tx_q)) {
- if (!chan->unacked_frames)
- __clear_retrans_timer(chan);
-}
+ skb = skb_dequeue(&chan->tx_q);
-static void l2cap_streaming_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
- u32 control;
- u16 fcs;
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
+
+ control->reqseq = 0;
+ control->txseq = chan->next_tx_seq;
- while ((skb = skb_dequeue(&chan->tx_q))) {
- control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
- control |= __set_txseq(chan, chan->next_tx_seq);
- control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
- __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
+ __pack_control(chan, control, skb);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- skb->data + skb->len - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
l2cap_do_send(chan, skb);
+ BT_DBG("Sent txseq %u", control->txseq);
+
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->frames_sent++;
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
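+/* Transmit queued i-frames while the remote tx window has room and the
+ * channel is in the XMIT state. The original skb stays on tx_q for
+ * possible retransmission; a clone is handed to the HCI layer.
+ */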
+static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
+ struct l2cap_ctrl *control;
+ int sent = 0;
- skb = skb_peek(&chan->tx_q);
- if (!skb)
- return;
+ BT_DBG("chan %p", chan);
- while (bt_cb(skb)->control.txseq != tx_seq) {
- if (skb_queue_is_last(&chan->tx_q, skb))
- return;
+ if (chan->state != BT_CONNECTED)
+ return -ENOTCONN;
- skb = skb_queue_next(&chan->tx_q, skb);
- }
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
- if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
- chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- return;
- }
+ while (chan->tx_send_head &&
+ chan->unacked_frames < chan->remote_tx_win &&
+ chan->tx_state == L2CAP_TX_STATE_XMIT) {
+
+ skb = chan->tx_send_head;
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->control.retries++;
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+ control->final = 1;
+
+ control->reqseq = chan->buffer_seq;
+ chan->last_acked_seq = chan->buffer_seq;
+ control->txseq = chan->next_tx_seq;
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
+ __pack_control(chan, control, skb);
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, tx_seq);
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
+ /* Clone after data has been modified. Data is assumed to be
+ * read-only (for locking purposes) on cloned sk_buffs.
+ */
+ tx_skb = skb_clone(skb, GFP_KERNEL);
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
+ if (!tx_skb)
+ break;
+
+ __set_retrans_timer(chan);
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->unacked_frames++;
+ chan->frames_sent++;
+ sent++;
+
+ if (skb_queue_is_last(&chan->tx_q, skb))
+ chan->tx_send_head = NULL;
+ else
+ chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+
+ l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent txseq %u", control->txseq);
}
- l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
+ chan->unacked_frames, skb_queue_len(&chan->tx_q));
+
+ return sent;
}
-static int l2cap_ertm_send(struct l2cap_chan *chan)
+static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
- struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
- int nsent = 0;
+ struct l2cap_ctrl control;
+ struct sk_buff *skb;
+ struct sk_buff *tx_skb;
+ u16 seq;
- if (chan->state != BT_CONNECTED)
- return -ENOTCONN;
+ BT_DBG("chan %p", chan);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- return 0;
+ return;
- while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
+ while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&chan->retrans_list);
- if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
- chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- break;
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ seq);
+ continue;
}
- tx_skb = skb_clone(skb, GFP_ATOMIC);
-
bt_cb(skb)->control.retries++;
+ control = bt_cb(skb)->control;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
+ if (chan->max_tx != 0 &&
+ bt_cb(skb)->control.retries > chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+ control.reqseq = chan->buffer_seq;
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
+ control.final = 1;
+ else
+ control.final = 0;
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, chan->next_tx_seq);
- control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
+ if (skb_cloned(skb)) {
+ /* Cloned sk_buffs are read-only, so we need a
+ * writable copy
+ */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ }
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
+ if (!tx_skb) {
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+
+ /* Update skb contents */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ }
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs, skb->data +
- tx_skb->len - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
+ put_unaligned_le16(fcs, skb_put(tx_skb,
+ L2CAP_FCS_SIZE));
}
l2cap_do_send(chan, tx_skb);
- __set_retrans_timer(chan);
-
- bt_cb(skb)->control.txseq = chan->next_tx_seq;
+ BT_DBG("Resent txseq %d", control.txseq);
- chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
-
- if (bt_cb(skb)->control.retries == 1) {
- chan->unacked_frames++;
-
- if (!nsent++)
- __clear_ack_timer(chan);
- }
-
- chan->frames_sent++;
-
- if (skb_queue_is_last(&chan->tx_q, skb))
- chan->tx_send_head = NULL;
- else
- chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+ chan->last_acked_seq = chan->buffer_seq;
}
-
- return nsent;
}
-static int l2cap_retransmit_frames(struct l2cap_chan *chan)
+static void l2cap_retransmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- int ret;
+ BT_DBG("chan %p, control %p", chan, control);
- if (!skb_queue_empty(&chan->tx_q))
- chan->tx_send_head = chan->tx_q.next;
-
- chan->next_tx_seq = chan->expected_ack_seq;
- ret = l2cap_ertm_send(chan);
- return ret;
+ l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
+ l2cap_ertm_resend(chan);
}
-static void __l2cap_send_ack(struct l2cap_chan *chan)
+static void l2cap_retransmit_all(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- u32 control = 0;
+ struct sk_buff *skb;
- control |= __set_reqseq(chan, chan->buffer_seq);
+ BT_DBG("chan %p, control %p", chan, control);
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- l2cap_send_sframe(chan, control);
- return;
- }
+ if (control->poll)
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+
+ l2cap_seq_list_clear(&chan->retrans_list);
- if (l2cap_ertm_send(chan) > 0)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
+ if (chan->unacked_frames) {
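+ /* Walk tx_q to the first unacked frame (the frame matching
+ * reqseq, or tx_send_head), then queue everything from there
+ * up to tx_send_head for retransmission.
+ */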
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.txseq == control->reqseq ||
+ skb == chan->tx_send_head)
+ break;
+ }
+
+ skb_queue_walk_from(&chan->tx_q, skb) {
+ if (skb == chan->tx_send_head)
+ break;
+
+ l2cap_seq_list_append(&chan->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(chan);
+ }
}
static void l2cap_send_ack(struct l2cap_chan *chan)
{
- __clear_ack_timer(chan);
- __l2cap_send_ack(chan);
-}
+ struct l2cap_ctrl control;
+ u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
+ int threshold;
-static void l2cap_send_srejtail(struct l2cap_chan *chan)
-{
- struct srej_list *tail;
- u32 control;
+ BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+ chan, chan->last_acked_seq, chan->buffer_seq);
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_ctrl_final(chan);
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
- tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= __set_reqseq(chan, tail->tx_seq);
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ chan->rx_state == L2CAP_RX_STATE_RECV) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RNR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ } else {
+ if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
+ l2cap_ertm_send(chan);
+ /* If any i-frames were sent, they included an ack */
+ if (chan->buffer_seq == chan->last_acked_seq)
+ frames_to_ack = 0;
+ }
- l2cap_send_sframe(chan, control);
+ /* Ack now if the window is 3/4 full.
+ * Calculate without mul or div
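+ * e.g. with ack_win = 8: 8 + (8 << 1) = 24, and 24 >> 2 = 6,
+ * which is 3/4 of the window.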
+ */
+ threshold = chan->ack_win;
+ threshold += threshold << 1;
+ threshold >>= 2;
+
+ BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
+ threshold);
+
+ if (frames_to_ack >= threshold) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ frames_to_ack = 0;
+ }
+
+ if (frames_to_ack)
+ __set_ack_timer(chan);
+ }
}
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1876,15 +1947,15 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
+ BT_DBG("chan %p len %zu priority %u", chan, len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -1910,15 +1981,15 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d", chan, (int)len);
+ BT_DBG("chan %p len %zu", chan, len);
count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
@@ -1943,23 +2014,20 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u16 sdulen)
+ struct msghdr *msg, size_t len,
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d", chan, (int)len);
+ BT_DBG("chan %p len %zu", chan, len);
if (!conn)
return ERR_PTR(-ENOTCONN);
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
- else
- hlen = L2CAP_ENH_HDR_SIZE;
+ hlen = __ertm_hdr_size(chan);
if (sdulen)
hlen += L2CAP_SDULEN_SIZE;
@@ -1979,7 +2047,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
+ /* Control header is populated later */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1990,9 +2062,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
return ERR_PTR(err);
}
- if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
-
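+ /* The FCS, when enabled, is now computed and appended at
+ * transmit time, so just record the channel's FCS setting here.
+ */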
+ bt_cb(skb)->control.fcs = chan->fcs;
bt_cb(skb)->control.retries = 0;
return skb;
}
@@ -2004,10 +2074,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
struct sk_buff *skb;
u16 sdu_len;
size_t pdu_len;
- int err = 0;
u8 sar;
- BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* It is critical that ERTM PDUs fit in a single HCI fragment,
* so fragmented skbs are not used. The HCI layer's handling
@@ -2020,7 +2089,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
- pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+ if (chan->fcs)
+ pdu_len -= L2CAP_FCS_SIZE;
+
+ pdu_len -= __ertm_hdr_size(chan);
/* Remote device may have requested smaller PDUs */
pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2060,7 +2132,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
}
}
- return err;
+ return 0;
}
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2122,17 +2194,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
if (err)
break;
- if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
- chan->tx_send_head = seg_queue.next;
- skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
-
if (chan->mode == L2CAP_MODE_ERTM)
- err = l2cap_ertm_send(chan);
+ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
else
- l2cap_streaming_send(chan);
+ l2cap_streaming_send(chan, &seg_queue);
- if (err >= 0)
- err = len;
+ err = len;
/* If the skbs were not queued for sending, they'll still be in
* seg_queue and need to be purged.
@@ -2148,6 +2215,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
return err;
}
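+/* Send an SREJ s-frame for each seq between expected_tx_seq and the
+ * out-of-sequence txseq that is not already buffered in srej_q, and
+ * record each request in srej_list so the replies can be matched up.
+ */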
+static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ for (seq = chan->expected_tx_seq; seq != txseq;
+ seq = __next_seq(chan, seq)) {
+ if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ }
+ }
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+}
+
+static void l2cap_send_srej_tail(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+ control.reqseq = chan->srej_list.tail;
+ l2cap_send_sframe(chan, &control);
+}
+
+static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = chan->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&chan->srej_list);
+ if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
+ break;
+
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ } while (chan->srej_list.head != initial_head);
+}
+
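+/* Drop every tx_q frame acknowledged by reqseq (from expected_ack_seq
+ * up to, but not including, reqseq) and stop the retransmission timer
+ * once no unacked frames remain.
+ */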
+static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
+{
+ struct sk_buff *acked_skb;
+ u16 ackseq;
+
+ BT_DBG("chan %p, reqseq %u", chan, reqseq);
+
+ if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
+ return;
+
+ BT_DBG("expected_ack_seq %u, unacked_frames %u",
+ chan->expected_ack_seq, chan->unacked_frames);
+
+ for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(chan, ackseq)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, &chan->tx_q);
+ kfree_skb(acked_skb);
+ chan->unacked_frames--;
+ }
+ }
+
+ chan->expected_ack_seq = reqseq;
+
+ if (chan->unacked_frames == 0)
+ __clear_retrans_timer(chan);
+
+ BT_DBG("unacked_frames %u", chan->unacked_frames);
+}
+
+static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ chan->expected_tx_seq = chan->buffer_seq;
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+}
+
+static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ l2cap_ertm_send(chan);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ __clear_ack_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RETRANS_TO:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RECV_FBIT:
+ /* Nothing to process */
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_EV_RECV_FBIT:
+ if (control && control->final) {
+ __clear_monitor_timer(chan);
+ if (chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ chan->retry_count = 0;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
+ }
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_EV_MONITOR_TO:
+ if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
+ l2cap_send_rr_or_rnr(chan, 1);
+ __set_monitor_timer(chan);
+ chan->retry_count++;
+ } else {
+ l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
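+/* ERTM transmit state machine. In XMIT, data requests are sent right
+ * away; in WAIT_F, outgoing data is queued until the peer answers our
+ * poll with the F-bit set. Timeouts, local busy transitions and
+ * received reqseq/F-bit values are fed in as events.
+ */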
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ chan, control, skbs, event, chan->tx_state);
+
+ switch (chan->tx_state) {
+ case L2CAP_TX_STATE_XMIT:
+ l2cap_tx_state_xmit(chan, control, skbs, event);
+ break;
+ case L2CAP_TX_STATE_WAIT_F:
+ l2cap_tx_state_wait_f(chan, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+}
+
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
+static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
@@ -2170,7 +2527,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (!nskb)
continue;
- if (chan->ops->recv(chan->data, nskb))
+ if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
@@ -2178,16 +2535,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
}
/* ---- L2CAP signalling commands ---- */
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data)
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
+ u8 ident, u16 dlen, void *data)
{
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
int len, count;
- BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
- conn, code, ident, dlen);
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
@@ -2200,9 +2557,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
if (conn->hcon->type == LE_LINK)
- lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
else
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
@@ -2270,7 +2627,7 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
break;
}
- BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
return len;
}
@@ -2278,7 +2635,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
struct l2cap_conf_opt *opt = *ptr;
- BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
opt->type = type;
opt->len = len;
@@ -2314,8 +2671,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
efs.stype = chan->local_stype;
efs.msdu = cpu_to_le16(chan->local_msdu);
efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
- efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+ efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
break;
case L2CAP_MODE_STREAMING:
@@ -2338,20 +2695,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
static void l2cap_ack_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- ack_timer.work);
+ ack_timer.work);
+ u16 frames_to_ack;
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- __l2cap_send_ack(chan);
+ frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
- l2cap_chan_unlock(chan);
+ if (frames_to_ack)
+ l2cap_send_rr_or_rnr(chan, 0);
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
-static inline int l2cap_ertm_init(struct l2cap_chan *chan)
+int l2cap_ertm_init(struct l2cap_chan *chan)
{
int err;
@@ -2360,7 +2721,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
- chan->num_acked = 0;
chan->frames_sent = 0;
chan->last_acked_seq = 0;
chan->sdu = NULL;
@@ -2381,12 +2741,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
- INIT_LIST_HEAD(&chan->srej_l);
err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
if (err < 0)
return err;
- return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ if (err < 0)
+ l2cap_seq_list_free(&chan->srej_list);
+
+ return err;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2424,6 +2787,7 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
L2CAP_DEFAULT_TX_WINDOW);
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
}
+ chan->ack_win = chan->tx_win;
}
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
@@ -2512,6 +2876,7 @@ done:
break;
case L2CAP_MODE_STREAMING:
+ l2cap_txwin_setup(chan);
rfc.mode = L2CAP_MODE_STREAMING;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
@@ -2542,7 +2907,7 @@ done:
}
req->dcid = cpu_to_le16(chan->dcid);
- req->flags = cpu_to_le16(0);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2762,7 +3127,7 @@ done:
}
rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(0x0000);
+ rsp->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2812,10 +3177,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
break;
case L2CAP_CONF_EWS:
- chan->tx_win = min_t(u16, val,
- L2CAP_DEFAULT_EXT_WINDOW);
+ chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win);
break;
case L2CAP_CONF_EFS:
@@ -2844,6 +3208,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->local_msdu = le16_to_cpu(efs.msdu);
@@ -2861,7 +3228,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
}
req->dcid = cpu_to_le16(chan->dcid);
- req->flags = cpu_to_le16(0x0000);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2888,8 +3255,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, chan->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
@@ -2905,7 +3272,17 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC or extended window size option.
+ */
+ u16 txwin_ext = chan->ack_win;
+ struct l2cap_conf_rfc rfc = {
+ .mode = chan->mode,
+ .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+ .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+ .max_pdu_size = cpu_to_le16(chan->imtu),
+ .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
+ };
BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
@@ -2915,32 +3292,27 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
- if (type != L2CAP_CONF_RFC)
- continue;
-
- if (olen != sizeof(rfc))
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
break;
-
- memcpy(&rfc, (void *)val, olen);
- goto done;
+ case L2CAP_CONF_EWS:
+ txwin_ext = val;
+ break;
+ }
}
- /* Use sane default values in case a misbehaving remote device
- * did not send an RFC option.
- */
- rfc.mode = chan->mode;
- rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- rfc.max_pdu_size = cpu_to_le16(chan->imtu);
-
- BT_ERR("Expected RFC option was not found, using defaults");
-
-done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
+ else
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
break;
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2993,7 +3365,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
- if (psm != cpu_to_le16(0x0001) &&
+ if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
!hci_conn_check_link_mode(conn->hcon)) {
conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
@@ -3002,25 +3374,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
result = L2CAP_CR_NO_MEM;
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ /* Check if we already have channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid))
goto response;
- }
- chan = pchan->ops->new_connection(pchan->data);
+ chan = pchan->ops->new_connection(pchan);
if (!chan)
goto response;
sk = chan->sk;
- /* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(conn, scid)) {
- sock_set_flag(sk, SOCK_ZAPPED);
- chan->ops->close(chan->data);
- goto response;
- }
-
hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src);
@@ -3074,7 +3437,7 @@ sendresp:
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
- info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
@@ -3196,7 +3559,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
- rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
rej.scid = cpu_to_le16(chan->scid);
rej.dcid = cpu_to_le16(chan->dcid);
@@ -3218,11 +3581,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
memcpy(chan->conf_req + chan->conf_len, req->data, len);
chan->conf_len += len;
- if (flags & 0x0001) {
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0001), rsp);
+ L2CAP_CONF_SUCCESS, flags), rsp);
goto unlock;
}
@@ -3245,8 +3608,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED);
-
if (chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING)
err = l2cap_ertm_init(chan);
@@ -3278,7 +3639,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ L2CAP_CONF_SUCCESS, flags), rsp);
}
unlock:
@@ -3369,7 +3730,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
goto done;
}
- if (flags & 0x01)
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION)
goto done;
set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3377,7 +3738,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED);
if (chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING)
err = l2cap_ertm_init(chan);
@@ -3431,7 +3791,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3465,7 +3825,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3486,8 +3846,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
u8 buf[8];
u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
@@ -3507,15 +3867,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
else
l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
- rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
- rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
+ rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
}
@@ -3555,7 +3915,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
conn->info_ident = l2cap_get_ident(conn);
@@ -3598,7 +3958,7 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
psm = le16_to_cpu(req->psm);
scid = le16_to_cpu(req->scid);
- BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+ BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
/* Placeholder: Always reject */
rsp.dcid = 0;
@@ -3621,11 +3981,11 @@ static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
}
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid, u16 result)
+ u16 icid, u16 result)
{
struct l2cap_move_chan_rsp rsp;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
rsp.icid = cpu_to_le16(icid);
rsp.result = cpu_to_le16(result);
@@ -3634,12 +3994,13 @@ static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
}
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
- struct l2cap_chan *chan, u16 icid, u16 result)
+ struct l2cap_chan *chan,
+ u16 icid, u16 result)
{
struct l2cap_move_chan_cfm cfm;
u8 ident;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
ident = l2cap_get_ident(conn);
if (chan)
@@ -3652,18 +4013,19 @@ static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
}
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
+ u16 icid)
{
struct l2cap_move_chan_cfm_rsp rsp;
- BT_DBG("icid %d", icid);
+ BT_DBG("icid 0x%4.4x", icid);
rsp.icid = cpu_to_le16(icid);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_req *req = data;
u16 icid = 0;
@@ -3674,7 +4036,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
icid = le16_to_cpu(req->icid);
- BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+ BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
if (!enable_hs)
return -EINVAL;
@@ -3686,7 +4048,8 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_rsp *rsp = data;
u16 icid, result;
@@ -3697,7 +4060,7 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
icid = le16_to_cpu(rsp->icid);
result = le16_to_cpu(rsp->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
/* Placeholder: Always unconfirmed */
l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
@@ -3706,7 +4069,8 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_cfm *cfm = data;
u16 icid, result;
@@ -3717,7 +4081,7 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
icid = le16_to_cpu(cfm->icid);
result = le16_to_cpu(cfm->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
@@ -3725,7 +4089,8 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_cfm_rsp *rsp = data;
u16 icid;
@@ -3735,7 +4100,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
icid = le16_to_cpu(rsp->icid);
- BT_DBG("icid %d", icid);
+ BT_DBG("icid 0x%4.4x", icid);
return 0;
}
@@ -3790,9 +4155,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
err = l2cap_check_conn_param(min, max, latency, to_multiplier);
if (err)
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
sizeof(rsp), &rsp);
@@ -3940,7 +4305,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */
- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
@@ -3972,65 +4337,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
return 0;
}
-static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
+static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- u32 control = 0;
+ struct l2cap_ctrl control;
- chan->frames_sent = 0;
+ BT_DBG("chan %p", chan);
- control |= __set_reqseq(chan, chan->buffer_seq);
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.final = 1;
+ control.reqseq = chan->buffer_seq;
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- l2cap_send_sframe(chan, control);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
+ control.super = L2CAP_SUPER_RNR;
+ l2cap_send_sframe(chan, &control);
}
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- l2cap_retransmit_frames(chan);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+ chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ /* Send pending i-frames */
l2cap_ertm_send(chan);
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
- chan->frames_sent == 0) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- }
-}
-
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
-{
- struct sk_buff *next_skb;
- int tx_seq_offset, next_tx_seq_offset;
-
- bt_cb(skb)->control.txseq = tx_seq;
- bt_cb(skb)->control.sar = sar;
-
- next_skb = skb_peek(&chan->srej_q);
-
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
-
- while (next_skb) {
- if (bt_cb(next_skb)->control.txseq == tx_seq)
- return -EINVAL;
-
- next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->control.txseq, chan->buffer_seq);
-
- if (next_tx_seq_offset > tx_seq_offset) {
- __skb_queue_before(&chan->srej_q, next_skb, skb);
- return 0;
- }
-
- if (skb_queue_is_last(&chan->srej_q, next_skb))
- next_skb = NULL;
- else
- next_skb = skb_queue_next(&chan->srej_q, next_skb);
+ test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SUPER_RR;
+ l2cap_send_sframe(chan, &control);
}
-
- __skb_queue_tail(&chan->srej_q, skb);
-
- return 0;
}
static void append_skb_frag(struct sk_buff *skb,
@@ -4052,16 +4390,17 @@ static void append_skb_frag(struct sk_buff *skb,
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
+ struct l2cap_ctrl *control)
{
int err = -EINVAL;
- switch (__get_ctrl_sar(chan, control)) {
+ switch (control->sar) {
case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
- err = chan->ops->recv(chan->data, skb);
+ err = chan->ops->recv(chan, skb);
break;
case L2CAP_SAR_START:
@@ -4111,7 +4450,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
if (chan->sdu->len != chan->sdu_len)
break;
- err = chan->ops->recv(chan->data, chan->sdu);
+ err = chan->ops->recv(chan, chan->sdu);
if (!err) {
/* Reassembly complete */
@@ -4133,448 +4472,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
return err;
}
-static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
- BT_DBG("chan %p, Enter local busy", chan);
+ u8 event;
- set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- l2cap_seq_list_clear(&chan->srej_list);
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
- __set_ack_timer(chan);
+ event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
+ l2cap_tx(chan, NULL, NULL, event);
}
-static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
+static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
- u32 control;
-
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- goto done;
+ int err = 0;
+ /* Pass sequential frames to l2cap_reassemble_sdu()
+ * until a gap is encountered.
+ */
- control = __set_reqseq(chan, chan->buffer_seq);
- control |= __set_ctrl_poll(chan);
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- chan->retry_count = 1;
+ BT_DBG("chan %p", chan);
- __clear_retrans_timer(chan);
- __set_monitor_timer(chan);
+ while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ struct sk_buff *skb;
+ BT_DBG("Searching for skb with txseq %d (queue len %d)",
+ chan->buffer_seq, skb_queue_len(&chan->srej_q));
- set_bit(CONN_WAIT_F, &chan->conn_state);
+ skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
-done:
- clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ if (!skb)
+ break;
- BT_DBG("chan %p, Exit local busy", chan);
-}
+ skb_unlink(skb, &chan->srej_q);
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+ if (err)
+ break;
+ }
-void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
-{
- if (chan->mode == L2CAP_MODE_ERTM) {
- if (busy)
- l2cap_ertm_enter_local_busy(chan);
- else
- l2cap_ertm_exit_local_busy(chan);
+ if (skb_queue_empty(&chan->srej_q)) {
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_send_ack(chan);
}
+
+ return err;
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
+static void l2cap_handle_srej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
struct sk_buff *skb;
- u32 control;
- while ((skb = skb_peek(&chan->srej_q)) &&
- !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- int err;
+ BT_DBG("chan %p, control %p", chan, control);
- if (bt_cb(skb)->control.txseq != tx_seq)
- break;
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
+ }
- skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
- err = l2cap_reassemble_sdu(chan, skb, control);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- break;
- }
+ if (skb == NULL) {
+ BT_DBG("Seq %d not available for retransmission",
+ control->reqseq);
+ return;
+ }
- chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
- tx_seq = __next_seq(chan, tx_seq);
+ if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
}
-}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
-{
- struct srej_list *l, *tmp;
- u32 control;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- list_del(&l->list);
- kfree(l);
- return;
+ if (control->poll) {
+ l2cap_pass_to_tx(chan, control);
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_retransmit(chan, control);
+ l2cap_ertm_send(chan);
+
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
+ } else {
+ l2cap_pass_to_tx_fbit(chan, control);
+
+ if (control->final) {
+ if (chan->srej_save_reqseq != control->reqseq ||
+ !test_and_clear_bit(CONN_SREJ_ACT,
+ &chan->conn_state))
+ l2cap_retransmit(chan, control);
+ } else {
+ l2cap_retransmit(chan, control);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
}
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, l->tx_seq);
- l2cap_send_sframe(chan, control);
- list_del(&l->list);
- list_add_tail(&l->list, &chan->srej_l);
}
}
-static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
+static void l2cap_handle_rej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- struct srej_list *new;
- u32 control;
-
- while (tx_seq != chan->expected_tx_seq) {
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, chan->expected_tx_seq);
- l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
- l2cap_send_sframe(chan, control);
+ struct sk_buff *skb;
- new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
- if (!new)
- return -ENOMEM;
+ BT_DBG("chan %p, control %p", chan, control);
- new->tx_seq = chan->expected_tx_seq;
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
+ }
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- list_add_tail(&new->list, &chan->srej_l);
+ if (chan->max_tx && skb &&
+ bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
}
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ l2cap_pass_to_tx(chan, control);
- return 0;
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
+ l2cap_retransmit_all(chan, control);
+ } else {
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
+ set_bit(CONN_REJ_ACT, &chan->conn_state);
+ }
}
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
+static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
- u16 tx_seq = __get_txseq(chan, rx_control);
- u16 req_seq = __get_reqseq(chan, rx_control);
- u8 sar = __get_ctrl_sar(chan, rx_control);
- int tx_seq_offset, expected_tx_seq_offset;
- int num_to_ack = (chan->tx_win/6) + 1;
- int err = 0;
+ BT_DBG("chan %p, txseq %d", chan, txseq);
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
- tx_seq, rx_control);
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
+ chan->expected_tx_seq);
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
- }
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - after SREJ");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - in window after SREJ sent");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ }
- chan->expected_ack_seq = req_seq;
- l2cap_drop_acked_frames(chan);
+ if (chan->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_TXSEQ_EXPECTED_SREJ;
+ }
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
+ if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_TXSEQ_DUPLICATE_SREJ;
+ }
- /* invalid tx_seq */
- if (tx_seq_offset >= chan->tx_win) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - not requested");
+ return L2CAP_TXSEQ_UNEXPECTED_SREJ;
+ }
}
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- goto drop;
+ if (chan->expected_tx_seq == txseq) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_TXSEQ_EXPECTED;
+ }
}
- if (tx_seq == chan->expected_tx_seq)
- goto expected;
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) <
+ __seq_offset(chan, chan->expected_tx_seq,
+ chan->last_acked_seq)) {
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_TXSEQ_DUPLICATE;
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+ * Fortunately, this is impossible with a tx window that's
+ * less than half of the maximum sequence number, which allows
+ * invalid frames to be safely ignored.
+ *
+ * With tx window sizes greater than half of the tx window
+ * maximum, the frame is invalid and cannot be ignored. This
+ * causes a disconnect.
+ */
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- struct srej_list *first;
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_TXSEQ_UNEXPECTED;
+ }
+}
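
The classifier above leans entirely on modular sequence arithmetic. A minimal sketch of the offset computation and the half-window safety rule, assuming the standard 6-bit sequence space (tx_win_max = 63); the helper name is hypothetical, not part of the patch:

/* Hypothetical stand-in for __seq_offset(): distance from 'last' to
 * 'seq' in a sequence space of size seq_max + 1 (a power of two here,
 * so a mask suffices).
 */
static u16 seq_offset_sketch(u16 seq, u16 last, u16 seq_max)
{
	return (seq - last) & seq_max;
}

/* Double-poll safety rule used above: if tx_win <= (seq_max + 1) / 2,
 * a stale retransmission can never alias new in-window data, so an
 * out-of-window txseq may be silently ignored instead of forcing a
 * disconnect.
 */
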
- first = list_first_entry(&chan->srej_l,
- struct srej_list, list);
- if (tx_seq == first->tx_seq) {
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
- l2cap_check_srej_gap(chan, tx_seq);
+static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ bool skb_in_use = false;
- list_del(&first->list);
- kfree(first);
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
- if (list_empty(&chan->srej_l)) {
- chan->buffer_seq = chan->buffer_seq_srej;
- clear_bit(CONN_SREJ_SENT, &chan->conn_state);
- l2cap_send_ack(chan);
- BT_DBG("chan %p, Exit SREJ_SENT", chan);
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, control->txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
}
- } else {
- struct srej_list *l;
- /* duplicated tx_seq */
- if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
- goto drop;
+ chan->expected_tx_seq = __next_seq(chan,
+ control->txseq);
+
+ chan->buffer_seq = chan->expected_tx_seq;
+ skb_in_use = true;
+
+ err = l2cap_reassemble_sdu(chan, skb, control);
+ if (err)
+ break;
- list_for_each_entry(l, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- l2cap_resend_srejframe(chan, tx_seq);
- return 0;
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
}
}
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ l2cap_send_ack(chan);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ /* Can't issue SREJ frames in the local busy state.
+ * Drop this frame; it will be seen as missing
+ * when local busy is exited.
+ */
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding unexpected seq %d",
+ control->txseq);
+ break;
}
- }
- } else {
- expected_tx_seq_offset = __seq_offset(chan,
- chan->expected_tx_seq, chan->buffer_seq);
- /* duplicated tx_seq */
- if (tx_seq_offset < expected_tx_seq_offset)
- goto drop;
-
- set_bit(CONN_SREJ_SENT, &chan->conn_state);
+ /* There was a gap in the sequence, so an SREJ
+ * must be sent for each missing frame. The
+ * current frame is stored for later use.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
- BT_DBG("chan %p, Enter SREJ", chan);
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
+ l2cap_send_srej(chan, control->txseq);
- INIT_LIST_HEAD(&chan->srej_l);
- chan->buffer_seq_srej = chan->buffer_seq;
+ chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- __skb_queue_head_init(&chan->srej_q);
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
- /* Set P-bit only if there are some I-frames to ack. */
- if (__clear_ack_timer(chan))
- set_bit(CONN_SEND_PBIT, &chan->conn_state);
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ l2cap_send_i_or_rr_or_rnr(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
+ l2cap_ertm_send(chan);
}
- }
- return 0;
-
-expected:
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->control.txseq = tx_seq;
- bt_cb(skb)->control.sar = sar;
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control && control->poll) {
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_rr_or_rnr(chan, 0);
+ }
+ __clear_retrans_timer(chan);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ default:
+ break;
}
- err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
-
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- return err;
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
}
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- }
+ return err;
+}
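
One convention worth noting in the handler above: an incoming skb is freed exactly once on the common exit path unless a case queued it, which is what the skb_in_use flag tracks. The ownership rule, condensed into a hypothetical helper:

/* Sketch of the skb ownership convention: handlers that queue the
 * frame set in_use; the dispatcher frees everything else, once.
 */
static void rx_consume_sketch(struct sk_buff *skb, bool in_use)
{
	if (skb && !in_use)
		kfree_skb(skb);	/* single owner, single free */
}
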
+static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ u16 txseq = control->txseq;
+ bool skb_in_use = false;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ /* Keep frame for reassembly later */
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+ break;
+ case L2CAP_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&chan->srej_list);
- chan->num_acked = (chan->num_acked + 1) % num_to_ack;
- if (chan->num_acked == num_to_ack - 1)
- l2cap_send_ack(chan);
- else
- __set_ack_timer(chan);
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
- return 0;
+ err = l2cap_rx_queued_iframes(chan);
+ if (err)
+ break;
-drop:
- kfree_skb(skb);
- return 0;
-}
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ /* Got a frame that can't be reassembled yet.
+ * Save it for later, and send SREJs to cover
+ * the missing frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED_SREJ:
+ /* This frame was requested with an SREJ, but
+ * some expected retransmitted frames are
+ * missing. Request retransmission of missing
+ * SREJ'd frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = true;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej_list(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE_SREJ:
+ /* We've already queued this frame. Drop this copy. */
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ /* Expecting a later sequence number, so this frame
+ * was already received. Ignore it completely.
+ */
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
-{
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
- __get_reqseq(chan, rx_control), rx_control);
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
- chan->expected_ack_seq = __get_reqseq(chan, rx_control);
- l2cap_drop_acked_frames(chan);
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames) {
+ __set_retrans_timer(chan);
+ }
- if (__is_ctrl_poll(chan, rx_control)) {
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_srej_tail(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
__set_retrans_timer(chan);
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- l2cap_send_srejtail(chan);
+ l2cap_send_ack(chan);
+ }
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control->poll) {
+ l2cap_send_srej_tail(chan);
} else {
- l2cap_send_i_or_rr_or_rnr(chan);
+ struct l2cap_ctrl rr_control;
+ memset(&rr_control, 0, sizeof(rr_control));
+ rr_control.sframe = 1;
+ rr_control.super = L2CAP_SUPER_RR;
+ rr_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &rr_control);
}
- } else if (__is_ctrl_final(chan, rx_control)) {
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- } else {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ }
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- else
- l2cap_ertm_send(chan);
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
}
+
+ return err;
}
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
+static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
- u16 tx_seq = __get_reqseq(chan, rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
+ /* Make sure reqseq is for a packet that has been sent but not acked */
+ u16 unacked;
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- } else {
- l2cap_retransmit_frames(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state))
- set_bit(CONN_REJ_ACT, &chan->conn_state);
- }
+ unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
+ return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
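
A quick worked example of this window check, assuming 6-bit sequence numbers (mask 63), shows how wraparound is handled:

/* Worked example (values assumed for illustration):
 *   next_tx_seq = 3, expected_ack_seq = 60
 *   unacked = (3 - 60) & 63 = 7
 *   reqseq = 61: (3 - 61) & 63 = 6  <= 7 -> valid ack
 *   reqseq = 10: (3 - 10) & 63 = 57 >  7 -> invalid, disconnect
 */
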
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
-{
- u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (__is_ctrl_poll(chan, rx_control)) {
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- l2cap_retransmit_one_frame(chan, tx_seq);
+static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
- l2cap_ertm_send(chan);
+ BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+ control, skb, event, chan->rx_state);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ if (__valid_reqseq(chan, control->reqseq)) {
+ switch (chan->rx_state) {
+ case L2CAP_RX_STATE_RECV:
+ err = l2cap_rx_state_recv(chan, control, skb, event);
+ break;
+ case L2CAP_RX_STATE_SREJ_SENT:
+ err = l2cap_rx_state_srej_sent(chan, control, skb,
+ event);
+ break;
+ default:
+ /* shut it down */
+ break;
}
- } else if (__is_ctrl_final(chan, rx_control)) {
- if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
- chan->srej_save_reqseq == tx_seq)
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- else
- l2cap_retransmit_one_frame(chan, tx_seq);
} else {
- l2cap_retransmit_one_frame(chan, tx_seq);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
+ BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
+ control->reqseq, chan->next_tx_seq,
+ chan->expected_ack_seq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
}
+
+ return err;
}
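
The switch above is a small two-state machine keyed on chan->rx_state. Expressed as a handler table the same dispatch would look like this; a sketch only, the patch keeps the explicit switch:

typedef int (*l2cap_rx_handler_t)(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb, u8 event);

/* Illustrative alternative to the switch; not part of the patch. */
static const l2cap_rx_handler_t rx_state_handlers[] = {
	[L2CAP_RX_STATE_RECV]      = l2cap_rx_state_recv,
	[L2CAP_RX_STATE_SREJ_SENT] = l2cap_rx_state_srej_sent,
};
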
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
+static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb)
{
- u16 tx_seq = __get_reqseq(chan, rx_control);
+ int err = 0;
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
+ if (l2cap_classify_txseq(chan, control->txseq) ==
+ L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
- if (__is_ctrl_poll(chan, rx_control))
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+ __next_seq(chan, chan->buffer_seq));
- if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- __clear_retrans_timer(chan);
- if (__is_ctrl_poll(chan, rx_control))
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
- return;
- }
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
- if (__is_ctrl_poll(chan, rx_control)) {
- l2cap_send_srejtail(chan);
+ l2cap_reassemble_sdu(chan, skb, control);
} else {
- rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, rx_control);
- }
-}
-
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
-{
- BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
+ if (chan->sdu) {
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ }
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
}
- switch (__get_ctrl_super(chan, rx_control)) {
- case L2CAP_SUPER_RR:
- l2cap_data_channel_rrframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_REJ:
- l2cap_data_channel_rejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_SREJ:
- l2cap_data_channel_srejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_RNR:
- l2cap_data_channel_rnrframe(chan, rx_control);
- break;
- }
+ chan->last_acked_seq = control->txseq;
+ chan->expected_tx_seq = __next_seq(chan, control->txseq);
- kfree_skb(skb);
- return 0;
+ return err;
}
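
Streaming mode carries no retransmission state, so any gap simply abandons the partially reassembled SDU and resynchronizes on the next frame. That reset policy, condensed (hypothetical helper name; kfree_skb() tolerates NULL):

static void stream_resync_sketch(struct l2cap_chan *chan)
{
	kfree_skb(chan->sdu);		/* drop any partial SDU */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;		/* reassembly restarts cleanly */
}
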
-static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
- u32 control;
- u16 req_seq;
- int len, next_tx_seq_offset, req_seq_offset;
+ struct l2cap_ctrl *control = &bt_cb(skb)->control;
+ u16 len;
+ u8 event;
__unpack_control(chan, skb);
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
len = skb->len;
/*
* We can just drop the corrupted I-frame here.
* Receiver will miss it and start proper recovery
- * procedures and ask retransmission.
+ * procedures and ask for retransmission.
*/
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+ if (!control->sframe && control->sar == L2CAP_SAR_START)
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4585,34 +5085,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
goto drop;
}
- req_seq = __get_reqseq(chan, control);
-
- req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+ if (!control->sframe) {
+ int err;
- next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
- chan->expected_ack_seq);
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
- /* check for invalid req-seq */
- if (req_seq_offset > next_tx_seq_offset) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- if (!__is_sframe(chan, control)) {
- if (len < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
goto drop;
+
+ if (chan->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_EV_RECV_IFRAME;
+ err = l2cap_rx(chan, control, skb, event);
+ } else {
+ err = l2cap_stream_rx(chan, control, skb);
}
- l2cap_data_channel_iframe(chan, control, skb);
+ if (err)
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
} else {
+ const u8 rx_func_to_event[4] = {
+ L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
+ };
+
+ /* Only I-frames are expected in streaming mode */
+ if (chan->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
if (len != 0) {
BT_ERR("%d", len);
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
}
- l2cap_data_channel_sframe(chan, control, skb);
+ /* Validate F and P bits */
+ if (control->final && (control->poll ||
+ chan->tx_state != L2CAP_TX_STATE_WAIT_F))
+ goto drop;
+
+ event = rx_func_to_event[control->super];
+ if (l2cap_rx(chan, control, skb, event))
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
}
return 0;
@@ -4622,19 +5145,27 @@ drop:
return 0;
}
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- u32 control;
- u16 tx_seq;
- int len;
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return 0;
+ if (cid == L2CAP_CID_A2MP) {
+ chan = a2mp_channel_create(conn, skb);
+ if (!chan) {
+ kfree_skb(skb);
+ return;
+ }
+
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
+ }
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4652,49 +5183,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!chan->ops->recv(chan, skb))
goto done;
break;
case L2CAP_MODE_ERTM:
- l2cap_ertm_data_rcv(chan, skb);
-
- goto done;
-
case L2CAP_MODE_STREAMING:
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
- len = skb->len;
-
- if (l2cap_check_fcs(chan, skb))
- goto drop;
-
- if (__is_sar_start(chan, control))
- len -= L2CAP_SDULEN_SIZE;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= L2CAP_FCS_SIZE;
-
- if (len > chan->mps || len < 0 || __is_sframe(chan, control))
- goto drop;
-
- tx_seq = __get_txseq(chan, control);
-
- if (chan->expected_tx_seq != tx_seq) {
- /* Frame(s) missing - must discard partial SDU */
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
- chan->sdu_last_frag = NULL;
- chan->sdu_len = 0;
-
- /* TODO: Notify userland of missing data */
- }
-
- chan->expected_tx_seq = __next_seq(chan, tx_seq);
-
- if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
-
+ l2cap_data_rcv(chan, skb);
goto done;
default:
@@ -4707,11 +5202,10 @@ drop:
done:
l2cap_chan_unlock(chan);
-
- return 0;
}
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
@@ -4727,17 +5221,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
kfree_skb(skb);
-
- return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
- struct sk_buff *skb)
+static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
@@ -4753,13 +5245,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
kfree_skb(skb);
-
- return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4787,7 +5277,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
case L2CAP_CID_CONN_LESS:
psm = get_unaligned((__le16 *) skb->data);
- skb_pull(skb, 2);
+ skb_pull(skb, L2CAP_PSMLEN_SIZE);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -4898,7 +5388,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!conn)
return 0;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
if (hcon->type == LE_LINK) {
if (!status && encrypt)
@@ -4911,7 +5401,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
- BT_DBG("chan->scid %d", chan->scid);
+ BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ state_to_string(chan->state));
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
@@ -4981,6 +5472,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
rsp.status = cpu_to_le16(stat);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
sizeof(rsp), &rsp);
+
+ if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ res == L2CAP_CR_SUCCESS) {
+ char buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf),
+ buf);
+ chan->num_conf_req++;
+ }
}
l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d4..a4bb27e8427 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
/* Bluetooth L2CAP sockets. */
-#include <linux/security.h>
#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (err < 0)
goto done;
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+ __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
chan->sec_level = BT_SECURITY_SDP;
bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
return err;
}
+static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+{
+ switch (chan->scid) {
+ case L2CAP_CID_LE_DATA:
+ if (mtu < L2CAP_LE_MIN_MTU)
+ return false;
+ break;
+
+ default:
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ return false;
+ }
+
+ return true;
+}
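
The minimums compared against here are, assuming this era's l2cap.h (worth double-checking against the tree):

/* Spec minimums assumed for illustration:
 *   L2CAP_LE_MIN_MTU      = 23  (LE ATT fixed channel)
 *   L2CAP_DEFAULT_MIN_MTU = 48  (BR/EDR dynamic channels)
 */
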
+
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
+ if (!l2cap_valid_mtu(chan, opts.imtu)) {
+ err = -EINVAL;
+ break;
+ }
+
chan->mode = opts.mode;
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
return err;
}
-static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
+static void l2cap_sock_cleanup_listen(struct sock *parent)
{
- struct sock *sk, *parent = data;
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL))) {
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_lock(chan);
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
+
+ l2cap_sock_kill(sk);
+ }
+}
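
The loop above is the standard drain-and-kill pattern for a listening Bluetooth socket: dequeue every not-yet-accepted child, close its channel under the channel lock, then kill the orphan socket. Generalized sketch (the callback type is an assumption):

/* Generic drain sketch: close every pending, unaccepted child. */
static void drain_accept_queue_sketch(struct sock *parent,
				      void (*close_child)(struct sock *))
{
	struct sock *sk;

	while ((sk = bt_accept_dequeue(parent, NULL)))
		close_child(sk);
}
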
+
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk, *parent = chan->data;
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ return NULL;
+ }
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
return l2cap_pi(sk)->chan;
}
-static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
- struct sock *sk = data;
+ struct sock *sk = chan->data;
struct l2cap_pinfo *pi = l2cap_pi(sk);
lock_sock(sk);
@@ -925,16 +970,57 @@ done:
return err;
}
-static void l2cap_sock_close_cb(void *data)
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
{
- struct sock *sk = data;
+ struct sock *sk = chan->data;
l2cap_sock_kill(sk);
}
-static void l2cap_sock_state_change_cb(void *data, int state)
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
{
- struct sock *sk = data;
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ switch (chan->state) {
+ case BT_OPEN:
+ case BT_BOUND:
+ case BT_CLOSED:
+ break;
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ break;
+ default:
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ sk->sk_err = err;
+
+ if (parent) {
+ bt_accept_unlink(sk);
+ parent->sk_data_ready(parent, 0);
+ } else {
+ sk->sk_state_change(sk);
+ }
+
+ break;
+ }
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+{
+ struct sock *sk = chan->data;
sk->sk_state = state;
}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
return skb;
}
+static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
.recv = l2cap_sock_recv_cb,
.close = l2cap_sock_close_cb,
+ .teardown = l2cap_sock_teardown_cb,
.state_change = l2cap_sock_state_change_cb,
+ .ready = l2cap_sock_ready_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};
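
Because the ops callbacks are now typed against struct l2cap_chan * instead of void *, other channel frontends (such as the A2MP channel added elsewhere in this series) can plug in their own table with full type checking. A sketch with placeholder handlers, not functions from this patch:

static struct l2cap_ops a2mp_chan_ops_sketch = {
	.name  = "A2MP channel (sketch)",
	.recv  = my_a2mp_recv,	/* int (*)(struct l2cap_chan *, struct sk_buff *) */
	.close = my_a2mp_close,	/* void (*)(struct l2cap_chan *) */
};
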
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f3..e1c97527e16 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
#define pr_fmt(fmt) "Bluetooth: " fmt
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <asm/errno.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5e3362ea0..ad6613d17ca 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
/* Bluetooth HCI Management interface */
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -212,7 +210,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -243,7 +241,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -689,14 +687,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
{
struct pending_cmd *cmd;
- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return NULL;
cmd->opcode = opcode;
cmd->index = hdev->id;
- cmd->param = kmalloc(len, GFP_ATOMIC);
+ cmd->param = kmalloc(len, GFP_KERNEL);
if (!cmd->param) {
kfree(cmd);
return NULL;
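
The GFP_ATOMIC to GFP_KERNEL conversions throughout this file are safe because mgmt commands run in process context, where the allocator may sleep and reclaim memory; GFP_KERNEL therefore fails far less often. The rule of thumb, as a sketch:

/* Allocation-context rule of thumb (illustrative):
 *   process context, sleeping allowed  -> kmalloc(len, GFP_KERNEL)
 *   softirq/atomic context, no sleep   -> kmalloc(len, GFP_ATOMIC)
 */
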
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
}
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
- void (*cb)(struct pending_cmd *cmd, void *data),
+ void (*cb)(struct pending_cmd *cmd,
+ void *data),
void *data)
{
struct list_head *p, *n;
@@ -813,7 +812,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
struct sk_buff *skb;
struct mgmt_hdr *hdr;
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_BUSY);
goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
MGMT_STATUS_BUSY);
goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
scan = 0;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
- hdev->discov_timeout > 0)
+ hdev->discov_timeout > 0)
cancel_delayed_work(&hdev->discov_off);
}
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
bool changed = false;
if (!!cp->val != test_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags)) {
+ &hdev->dev_flags)) {
change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
changed = true;
}
@@ -1269,7 +1268,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+ uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
if (!uuid) {
err = -ENOMEM;
goto failed;
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_remove_uuid *cp = data;
struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
}
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_load_link_keys *cp = data;
u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
- len, expected_len);
+ len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
- key_count);
+ key_count);
hci_dev_lock(hdev);
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->disconnect) {
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
+ &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- &cp->addr.bdaddr);
+ &cp->addr.bdaddr);
} else {
conn = NULL;
}
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1611,7 +1611,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
}
dc.handle = cpu_to_le16(conn->handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
+ dc.reason = HCI_ERROR_REMOTE_USER_TERM;
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1667,7 +1667,7 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
}
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
- rp = kmalloc(rp_len, GFP_ATOMIC);
+ rp = kmalloc(rp_len, GFP_KERNEL);
if (!rp) {
err = -ENOMEM;
goto unlock;
@@ -1778,29 +1778,6 @@ failed:
return err;
}
-static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_cp_pin_code_neg_reply *cp = data;
- int err;
-
- BT_DBG("");
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_NOT_POWERED);
- goto failed;
- }
-
- err = send_pin_code_neg_reply(sk, hdev, cp);
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1813,7 +1790,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
- hdev->io_capability);
+ hdev->io_capability);
hci_dev_unlock(hdev);
@@ -1821,7 +1798,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
0);
}
-static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct pending_cmd *cmd;
@@ -1927,8 +1904,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (IS_ERR(conn)) {
+ int status;
+
+ if (PTR_ERR(conn) == -EBUSY)
+ status = MGMT_STATUS_BUSY;
+ else
+ status = MGMT_STATUS_CONNECT_FAILED;
+
err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_CONNECT_FAILED, &rp,
+ status, &rp,
sizeof(rp));
goto unlock;
}
@@ -1959,7 +1943,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = conn;
if (conn->state == BT_CONNECTED &&
- hci_conn_security(conn, sec_level, auth_type))
+ hci_conn_security(conn, sec_level, auth_type))
pairing_complete(cmd, 0);
err = 0;
@@ -2076,6 +2060,18 @@ done:
return err;
}
+static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_pin_code_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ HCI_OP_PIN_CODE_NEG_REPLY, 0);
+}
+
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -2256,7 +2252,7 @@ unlock:
}
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
+ void *data, u16 len)
{
struct mgmt_cp_remove_remote_oob_data *cp = data;
u8 status;
@@ -2425,7 +2421,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
case DISCOVERY_RESOLVING:
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
- NAME_PENDING);
+ NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id,
@@ -2600,8 +2596,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
- /* 22.5 msec page scan interval */
- acp.interval = __constant_cpu_to_le16(0x0024);
+ /* 160 msec page scan interval */
+ acp.interval = __constant_cpu_to_le16(0x0100);
} else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */
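
The corrected comment follows directly from the HCI units: page scan interval is measured in 0.625 ms baseband slots.

/* Worked conversion, 0.625 ms per slot:
 *   0x0024 =  36 slots ->  36 * 0.625 = 22.5 ms (old value)
 *   0x0100 = 256 slots -> 256 * 0.625 = 160 ms  (new value)
 */
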
@@ -2647,7 +2643,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
sizeof(struct mgmt_ltk_info);
if (expected_len != len) {
BT_ERR("load_keys: expected %u bytes, got %u bytes",
- len, expected_len);
+ len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
EINVAL);
}
@@ -2772,7 +2768,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
- mgmt_handlers[opcode].func == NULL) {
+ mgmt_handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode,
MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2780,7 +2776,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if ((hdev && opcode < MGMT_OP_READ_INFO) ||
- (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+ (!hdev && opcode >= MGMT_OP_READ_INFO)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_INDEX);
goto done;
@@ -2789,7 +2785,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
handler = &mgmt_handlers[opcode];
if ((handler->var_len && len < handler->data_len) ||
- (!handler->var_len && len != handler->data_len)) {
+ (!handler->var_len && len != handler->data_len)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_PARAMS);
goto done;
@@ -2973,7 +2969,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
- memcpy(ev.key.val, key->val, 16);
+ memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3108,7 +3104,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_remove(cmd);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
+ hdev);
return err;
}
@@ -3198,7 +3194,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type)
+ u8 link_type, u8 addr_type)
{
struct mgmt_ev_user_passkey_request ev;
@@ -3212,8 +3208,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status,
- u8 opcode)
+ u8 link_type, u8 addr_type, u8 status,
+ u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
@@ -3244,7 +3240,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
+ status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3258,7 +3255,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
+ status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -3537,9 +3535,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
- ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
if (!ssp)
- ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
if (eir_len > 0)
memcpy(ev->eir, eir, eir_len);
@@ -3549,7 +3547,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
dev_class, 3);
ev->eir_len = cpu_to_le16(eir_len);
-
ev_size = sizeof(*ev) + eir_len;
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e..c75107ef892 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
*/
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <linux/slab.h>
-
-#include <net/sock.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
-static inline void rfcomm_schedule(void)
+static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
wake_up_process(rfcomm_thread);
}
-static inline void rfcomm_session_put(struct rfcomm_session *s)
+static void rfcomm_session_put(struct rfcomm_session *s)
{
if (atomic_dec_and_test(&s->refcnt))
rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
return err;
}
-static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+static int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
/* Send data queued for the DLC.
* Return number of frames left in the queue.
*/
-static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
+static int rfcomm_process_tx(struct rfcomm_dlc *d)
{
struct sk_buff *skb;
int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
}
-static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+static void rfcomm_process_dlcs(struct rfcomm_session *s)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
}
}
-static inline void rfcomm_process_rx(struct rfcomm_session *s)
+static void rfcomm_process_rx(struct rfcomm_session *s)
{
struct socket *sock = s->sock;
struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
}
}
-static inline void rfcomm_accept_connection(struct rfcomm_session *s)
+static void rfcomm_accept_connection(struct rfcomm_session *s)
{
struct socket *sock = s->sock, *nsock;
int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
sock_release(nsock);
}
-static inline void rfcomm_check_connection(struct rfcomm_session *s)
+static void rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
}
}
-static inline void rfcomm_process_sessions(void)
+static void rfcomm_process_sessions(void)
{
struct list_head *p, *n;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb86..7e1e59645c0 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
* RFCOMM sockets.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14ae..cb960773c00 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <linux/capability.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
return NULL;
}
-static inline struct rfcomm_dev *rfcomm_dev_get(int id)
+static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
tty_port_put(&dev->port);
}
-static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
tty_port_get(&dev->port);
atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a..40bbe25dcff 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
/* Bluetooth SCO sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
return conn;
}
-static inline struct sock *sco_chan_get(struct sco_conn *conn)
+static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
return 0;
}
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ struct sock *parent)
{
int err = 0;
@@ -228,7 +211,7 @@ done:
return err;
}
-static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
return len;
}
-static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ sk->sk_lingertime);
}
release_sock(sk);
return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
- BTPROTO_SCO, GFP_ATOMIC);
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
@@ -907,7 +890,7 @@ done:
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- register struct sock *sk;
+ struct sock *sk;
struct hlist_node *node;
int lm = 0;
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
- !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst), sk->sk_state);
+ batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
}
if (bt_debugfs) {
- sco_debugfs = debugfs_create_file("sco", 0444,
- bt_debugfs, NULL, &sco_debugfs_fops);
+ sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+ NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 37df4e9b389..16ef0dc85a0 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/b128ops.h>
#define SMP_TIMEOUT msecs_to_jiffies(30000)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 929e48aed44..33348453760 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -127,9 +127,9 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
const struct br_cpu_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
do {
- start = u64_stats_fetch_begin(&bstats->syncp);
+ start = u64_stats_fetch_begin_bh(&bstats->syncp);
memcpy(&tmp, bstats, sizeof(tmp));
- } while (u64_stats_fetch_retry(&bstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
sum.tx_bytes += tmp.tx_bytes;
sum.tx_packets += tmp.tx_packets;
sum.rx_bytes += tmp.rx_bytes;
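
The _bh variants matter here because the per-cpu counters are also written from softirq context; the reader retries until it observes a consistent snapshot. The generic reader pattern, sketched (the stats pointer is an assumption):

/* Generic u64_stats reader loop (sketch). */
unsigned int start;
u64 tx_packets, tx_bytes;

do {
	start      = u64_stats_fetch_begin_bh(&stats->syncp);
	tx_packets = stats->tx_packets;
	tx_bytes   = stats->tx_bytes;
} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
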
@@ -246,10 +246,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
if (!np)
goto out;
- np->dev = p->dev;
- strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
-
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, p->dev);
if (err) {
kfree(np);
goto out;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b66581208cb..241743417f4 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -540,10 +540,11 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
if (mdb->size >= max) {
max *= 2;
- if (unlikely(max >= br->hash_max)) {
- br_warn(br, "Multicast hash table maximum "
- "reached, disabling snooping: %s, %d\n",
- port ? port->dev->name : br->dev->name, max);
+ if (unlikely(max > br->hash_max)) {
+ br_warn(br, "Multicast hash table maximum of %d "
+ "reached, disabling snooping: %s\n",
+ br->hash_max,
+ port ? port->dev->name : br->dev->name);
err = -E2BIG;
disable:
br->multicast_disabled = 1;
@@ -1160,7 +1161,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
goto out;
}
mld = (struct mld_msg *) icmp6_hdr(skb);
- max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay));
+ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
} else if (skb->len >= sizeof(*mld2q)) {
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e41456bd3cc..68e8f364bbf 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,7 +111,13 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
-static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void fake_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -120,7 +126,9 @@ static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
return NULL;
}
-static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
return NULL;
}
@@ -134,6 +142,7 @@ static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
+ .redirect = fake_redirect,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
.mtu = fake_mtu,
@@ -373,19 +382,29 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
- neigh = dst_get_neighbour_noref(dst);
- if (neigh->hh.hh_len) {
- neigh_hh_bridge(&neigh->hh, skb);
- skb->dev = nf_bridge->physindev;
- return br_handle_frame_finish(skb);
- } else {
- /* the neighbour function below overwrites the complete
- * MAC header, so we save the Ethernet source address and
- * protocol number. */
- skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
- /* tell br_dev_xmit to continue with forwarding */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
- return neigh->output(neigh, skb);
+ neigh = dst_neigh_lookup_skb(dst, skb);
+ if (neigh) {
+ int ret;
+
+ if (neigh->hh.hh_len) {
+ neigh_hh_bridge(&neigh->hh, skb);
+ skb->dev = nf_bridge->physindev;
+ ret = br_handle_frame_finish(skb);
+ } else {
+ /* the neighbour function below overwrites the complete
+ * MAC header, so we save the Ethernet source address and
+ * protocol number.
+ */
+ skb_copy_from_linear_data_offset(skb,
+ -(ETH_HLEN-ETH_ALEN),
+ skb->nf_bridge->data,
+ ETH_HLEN-ETH_ALEN);
+ /* tell br_dev_xmit to continue with forwarding */
+ nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ ret = neigh->output(neigh, skb);
+ }
+ neigh_release(neigh);
+ return ret;
}
free_skb:
kfree_skb(skb);
@@ -764,9 +783,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
- pf = PF_INET;
+ pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
- pf = PF_INET6;
+ pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@@ -778,13 +797,13 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
nf_bridge->mask |= BRNF_PKT_TYPE;
}
- if (pf == PF_INET && br_parse_ip_options(skb))
+ if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
- if (pf == PF_INET)
+ if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
@@ -871,9 +890,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
- pf = PF_INET;
+ pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
- pf = PF_INET6;
+ pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@@ -886,7 +905,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
- if (pf == PF_INET)
+ if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
@@ -919,49 +938,49 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_local_in,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1,
},
{
.hook = br_nf_forward_arp,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_LAST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_FIRST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
},
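
Note on the PF_* to NFPROTO_* conversion above: it is purely cosmetic at runtime, since the NFPROTO_* constants are defined to the matching address-family values (NFPROTO_IPV4 == AF_INET, NFPROTO_BRIDGE == AF_BRIDGE, and so on); it simply makes the netfilter protocol-family namespace explicit. A sketch of a hook registration in the new style, assuming a hypothetical demo_hook() with the nf_hookfn signature of this kernel series:

    static struct nf_hook_ops demo_ops __read_mostly = {
            .hook     = demo_hook,
            .owner    = THIS_MODULE,
            .pf       = NFPROTO_IPV4,        /* was PF_INET */
            .hooknum  = NF_INET_PRE_ROUTING,
            .priority = NF_IP_PRI_FIRST,
    };
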
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 6229b62749e..13b36bdc76a 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -27,7 +27,7 @@ struct brport_attribute {
};
#define BRPORT_ATTR(_name,_mode,_show,_store) \
-struct brport_attribute brport_attr_##_name = { \
+const struct brport_attribute brport_attr_##_name = { \
.attr = {.name = __stringify(_name), \
.mode = _mode }, \
.show = _show, \
@@ -164,7 +164,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
store_multicast_router);
#endif
-static struct brport_attribute *brport_attrs[] = {
+static const struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
&brport_attr_port_id,
@@ -241,7 +241,7 @@ const struct sysfs_ops brport_sysfs_ops = {
int br_sysfs_addif(struct net_bridge_port *p)
{
struct net_bridge *br = p->br;
- struct brport_attribute **a;
+ const struct brport_attribute **a;
int err;
err = sysfs_create_link(&p->kobj, &br->dev->dev.kobj,
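
Note on the const-ification above: once the attributes themselves are declared const, every pointer used to walk the table must carry the qualifier too, which is why brport_attrs[] and the iterator in br_sysfs_addif() change type together; dropping the qualifier anywhere in the chain would trigger a discarded-qualifiers warning. A condensed sketch of the kind of walk this enables (loop body illustrative):

    const struct brport_attribute **a;

    for (a = brport_attrs; *a; a++)
            err = sysfs_create_file(&p->kobj, &((*a)->attr));
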
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 5449294bdd5..19063473c71 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -145,19 +145,24 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (!ub->skb) {
if (!(ub->skb = ulog_alloc_skb(size)))
- goto alloc_failure;
+ goto unlock;
} else if (size > skb_tailroom(ub->skb)) {
ulog_send(group);
if (!(ub->skb = ulog_alloc_skb(size)))
- goto alloc_failure;
+ goto unlock;
}
- nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
- size - NLMSG_ALIGN(sizeof(*nlh)));
+ nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
+ size - NLMSG_ALIGN(sizeof(*nlh)), 0);
+ if (!nlh) {
+ kfree_skb(ub->skb);
+ ub->skb = NULL;
+ goto unlock;
+ }
ub->qlen++;
- pm = NLMSG_DATA(nlh);
+ pm = nlmsg_data(nlh);
/* Fill in the ulog data */
pm->version = EBT_ULOG_VERSION;
@@ -209,14 +214,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
unlock:
spin_unlock_bh(lock);
-
- return;
-
-nlmsg_failure:
- pr_debug("error during NLMSG_PUT. This should "
- "not happen, please report to author.\n");
-alloc_failure:
- goto unlock;
}
/* this function is registered with the netfilter core */
@@ -285,6 +282,9 @@ static int __init ebt_ulog_init(void)
{
int ret;
int i;
+ struct netlink_kernel_cfg cfg = {
+ .groups = EBT_ULOG_MAXNLGROUPS,
+ };
if (nlbufsiz >= 128*1024) {
pr_warning("Netlink buffer has to be <= 128kB,"
@@ -299,8 +299,7 @@ static int __init ebt_ulog_init(void)
}
ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
- EBT_ULOG_MAXNLGROUPS, NULL, NULL,
- THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!ebtulognl)
ret = -ENOMEM;
else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
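
Two netlink API changes meet in the ebt_ulog hunks above: nlmsg_put() returns NULL on failure instead of jumping to an implicit nlmsg_failure label the way the old NLMSG_PUT() macro did, and netlink_kernel_create() now takes its parameters bundled in a struct netlink_kernel_cfg. A condensed sketch of both (payload_len is a placeholder):

    struct netlink_kernel_cfg cfg = {
            .groups = EBT_ULOG_MAXNLGROUPS,
    };
    struct sock *nlsk;
    struct nlmsghdr *nlh;

    nlsk = netlink_kernel_create(&init_net, NETLINK_NFLOG,
                                 THIS_MODULE, &cfg);

    nlh = nlmsg_put(skb, 0, qlen, 0, payload_len, 0);
    if (!nlh) {                     /* explicit failure, no hidden goto */
            kfree_skb(skb);
            return;
    }
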
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 8c83c175b03..1ae1d9cb278 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -90,11 +90,8 @@ static int caifd_refcnt_read(struct caif_device_entry *e)
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
- struct caif_device_entry_list *caifdevs;
struct caif_device_entry *caifd;
- caifdevs = caif_device_list(dev_net(dev));
-
caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
if (!caifd)
return NULL;
@@ -131,6 +128,11 @@ void caif_flow_cb(struct sk_buff *skb)
rcu_read_lock();
caifd = caif_get(skb->dev);
+
+ WARN_ON(caifd == NULL);
+ if (caifd == NULL) {
+ rcu_read_unlock();
+ return;
+ }
+
caifd_hold(caifd);
rcu_read_unlock();
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 78f1cdad5b3..095259f8390 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -141,7 +141,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
err = sk_filter(sk, skb);
if (err)
return err;
- if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 047cd0eec02..44f270fc2d0 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -175,15 +175,17 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
{
+ struct cfpkt *pkt;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt)
- return;
+
if (!dn) {
pr_debug("not able to send enum request\n");
return;
}
+ pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt)
+ return;
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
init_info(cfpkt_info(pkt), cfctrl);
cfpkt_info(pkt)->dev_info->id = physlinkid;
@@ -302,18 +304,17 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cflayer *client)
{
int ret;
+ struct cfpkt *pkt;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt)
- return -ENOMEM;
-
if (!dn) {
pr_debug("not able to send link-down request\n");
return -ENODEV;
}
-
+ pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt)
+ return -ENOMEM;
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
cfpkt_addbdy(pkt, channelid);
init_info(cfpkt_info(pkt), cfctrl);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 0ce2ad0696d..821022a7214 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -41,6 +41,7 @@
*/
#include <linux/module.h>
+#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
@@ -220,30 +221,46 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
* -ENOBUFS on full driver queue (see net_xmit_errno())
* -ENOMEM when local loopback failed at calling skb_clone()
* -EPERM when trying to send on a non-CAN interface
+ * -EMSGSIZE CAN frame size is bigger than CAN interface MTU
* -EINVAL when the skb->data does not contain a valid CAN frame
*/
int can_send(struct sk_buff *skb, int loop)
{
struct sk_buff *newskb = NULL;
- struct can_frame *cf = (struct can_frame *)skb->data;
- int err;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ int err = -EINVAL;
+
+ if (skb->len == CAN_MTU) {
+ skb->protocol = htons(ETH_P_CAN);
+ if (unlikely(cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->len == CANFD_MTU) {
+ skb->protocol = htons(ETH_P_CANFD);
+ if (unlikely(cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
- if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
- kfree_skb(skb);
- return -EINVAL;
+ /*
+ * Make sure the CAN frame can pass the selected CAN netdevice.
+ * As structs can_frame and canfd_frame are similar, we can provide
+ * CAN FD frames to legacy CAN drivers as long as the length is <= 8
+ */
+ if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
+ err = -EMSGSIZE;
+ goto inval_skb;
}
- if (skb->dev->type != ARPHRD_CAN) {
- kfree_skb(skb);
- return -EPERM;
+ if (unlikely(skb->dev->type != ARPHRD_CAN)) {
+ err = -EPERM;
+ goto inval_skb;
}
- if (!(skb->dev->flags & IFF_UP)) {
- kfree_skb(skb);
- return -ENETDOWN;
+ if (unlikely(!(skb->dev->flags & IFF_UP))) {
+ err = -ENETDOWN;
+ goto inval_skb;
}
- skb->protocol = htons(ETH_P_CAN);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -300,6 +317,10 @@ int can_send(struct sk_buff *skb, int loop)
can_stats.tx_frames_delta++;
return 0;
+
+inval_skb:
+ kfree_skb(skb);
+ return err;
}
EXPORT_SYMBOL(can_send);
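
Note on can_send() above: the frame flavour is now derived from the skb length alone. CAN_MTU (sizeof(struct can_frame), 16 bytes) selects classic CAN and ETH_P_CAN; CANFD_MTU (sizeof(struct canfd_frame), 72 bytes) selects CAN FD and ETH_P_CANFD; anything else is -EINVAL. The -EMSGSIZE check then lets a CAN FD sized skb pass through a legacy device (dev->mtu == CAN_MTU) as long as the payload fits a classic frame, because the two structs share their layout up to the data[] member. Condensed sketch of the dispatch (fragment of the function above):

    if (skb->len == CAN_MTU)                /* 16 bytes: classic CAN */
            skb->protocol = htons(ETH_P_CAN);
    else if (skb->len == CANFD_MTU)         /* 72 bytes: CAN FD */
            skb->protocol = htons(ETH_P_CANFD);
    else
            goto inval_skb;                 /* -EINVAL */

    /* FD-sized skb on a legacy device: ok only if payload <= 8 bytes */
    if (skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)
            return -EMSGSIZE;
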
@@ -334,8 +355,8 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
* relevant bits for the filter.
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
- * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
- * there is a special filterlist and a special rx path filter handling.
+ * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
+ * frames there is a special filterlist and a special rx path filter handling.
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
@@ -347,7 +368,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
- /* filter for error frames in extra filterlist */
+ /* filter for error message frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) {
/* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
@@ -408,7 +429,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* <received_can_id> & mask == can_id & mask
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
- * filter for error frames (CAN_ERR_FLAG bit set in mask).
+ * filter for error message frames (CAN_ERR_FLAG bit set in mask).
*
* The provided pointer to the sk_buff is guaranteed to be valid as long as
* the callback function is running. The callback function must *not* free
@@ -578,7 +599,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return 0;
if (can_id & CAN_ERR_FLAG) {
- /* check for error frame entries only */
+ /* check for error message frame entries only */
hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
if (can_id & r->mask) {
deliver(skb, r);
@@ -632,24 +653,11 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return matches;
}
-static int can_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
+static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
struct dev_rcv_lists *d;
- struct can_frame *cf = (struct can_frame *)skb->data;
int matches;
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
- skb->len != sizeof(struct can_frame) ||
- cf->can_dlc > 8,
- "PF_CAN: dropped non conform skbuf: "
- "dev type %d, len %d, can_dlc %d\n",
- dev->type, skb->len, cf->can_dlc))
- goto drop;
-
/* update statistics */
can_stats.rx_frames++;
can_stats.rx_frames_delta++;
@@ -673,7 +681,49 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
can_stats.matches++;
can_stats.matches_delta++;
}
+}
+static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ goto drop;
+
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN,
+ "PF_CAN: dropped non conform CAN skbuf: "
+ "dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len))
+ goto drop;
+
+ can_receive(skb, dev);
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ goto drop;
+
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN,
+ "PF_CAN: dropped non conform CAN FD skbuf: "
+ "dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len))
+ goto drop;
+
+ can_receive(skb, dev);
return NET_RX_SUCCESS;
drop:
@@ -807,10 +857,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
static struct packet_type can_packet __read_mostly = {
.type = cpu_to_be16(ETH_P_CAN),
- .dev = NULL,
.func = can_rcv,
};
+static struct packet_type canfd_packet __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CANFD),
+ .func = canfd_rcv,
+};
+
static const struct net_proto_family can_family_ops = {
.family = PF_CAN,
.create = can_create,
@@ -824,6 +878,12 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
static __init int can_init(void)
{
+ /* check for correct padding to be able to use the structs similarly */
+ BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
+ offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, data) !=
+ offsetof(struct canfd_frame, data));
+
printk(banner);
memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
@@ -846,6 +906,7 @@ static __init int can_init(void)
sock_register(&can_family_ops);
register_netdevice_notifier(&can_netdev_notifier);
dev_add_pack(&can_packet);
+ dev_add_pack(&canfd_packet);
return 0;
}
@@ -860,6 +921,7 @@ static __exit void can_exit(void)
can_remove_proc();
/* protocol unregister */
+ dev_remove_pack(&canfd_packet);
dev_remove_pack(&can_packet);
unregister_netdevice_notifier(&can_netdev_notifier);
sock_unregister(PF_CAN);
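
With two ethertypes in play, af_can now registers two struct packet_type entries; can_rcv() and canfd_rcv() differ only in which MTU/DLC combination they accept before handing the skb to the shared can_receive(), and the BUILD_BUG_ON() in can_init() proves at compile time that the shared handling is layout-safe. A minimal sketch of registering an additional tap for classic CAN frames (hypothetical handler, not part of this patch):

    static int demo_can_tap(struct sk_buff *skb, struct net_device *dev,
                            struct packet_type *pt,
                            struct net_device *orig_dev)
    {
            /* a real handler would inspect the frame here */
            kfree_skb(skb);
            return NET_RX_SUCCESS;
    }

    static struct packet_type demo_can_pt __read_mostly = {
            .type = cpu_to_be16(ETH_P_CAN),
            .func = demo_can_tap,
    };

    /* dev_add_pack(&demo_can_pt) at init, dev_remove_pack() at exit */
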
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fd882dbadad..1dccb4c3389 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -104,6 +104,9 @@ struct s_pstats {
unsigned long rcv_entries_max;
};
+/* receive filters subscribed for 'all' CAN devices */
+extern struct dev_rcv_lists can_rx_alldev_list;
+
/* function prototypes for the CAN networklayer procfs (proc.c) */
extern void can_init_proc(void);
extern void can_remove_proc(void);
diff --git a/net/can/gw.c b/net/can/gw.c
index b41acf25668..b54d5e695b0 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -444,11 +444,14 @@ static int cgw_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
+static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ u32 pid, u32 seq, int flags)
{
struct cgw_frame_mod mb;
struct rtcanmsg *rtcan;
- struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
if (!nlh)
return -EMSGSIZE;
@@ -462,15 +465,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
if (gwj->handled_frames) {
if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
if (gwj->dropped_frames) {
if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
/* check non default settings of attributes */
@@ -480,8 +479,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.and;
if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.or) {
@@ -489,8 +486,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.or;
if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.xor) {
@@ -498,8 +493,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.xor;
if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.set) {
@@ -507,26 +500,18 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.set;
if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
&gwj->mod.csum.crc8) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + \
- NLA_ALIGN(CGW_CS_CRC8_LEN);
}
if (gwj->mod.csumfunc.xor) {
if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
&gwj->mod.csum.xor) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + \
- NLA_ALIGN(CGW_CS_XOR_LEN);
}
if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
@@ -535,23 +520,16 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
&gwj->ccgw.filter) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN +
- NLA_ALIGN(sizeof(struct can_filter));
}
if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
- return skb->len;
+ return nlmsg_end(skb, nlh);
cancel:
nlmsg_cancel(skb, nlh);
@@ -571,7 +549,8 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
if (idx < s_idx)
goto cont;
- if (cgw_put_job(skb, gwj) < 0)
+ if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
break;
cont:
idx++;
@@ -583,6 +562,18 @@ cont:
return skb->len;
}
+static const struct nla_policy cgw_policy[CGW_MAX+1] = {
+ [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) },
+ [CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) },
+ [CGW_SRC_IF] = { .type = NLA_U32 },
+ [CGW_DST_IF] = { .type = NLA_U32 },
+ [CGW_FILTER] = { .len = sizeof(struct can_filter) },
+};
+
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
u8 gwtype, void *gwtypeattr)
@@ -595,14 +586,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
/* initialize modification & checksum data space */
memset(mod, 0, sizeof(*mod));
- err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
+ err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
+ cgw_policy);
if (err < 0)
return err;
/* check for AND/OR/XOR/SET modifications */
- if (tb[CGW_MOD_AND] &&
- nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_AND]) {
nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.and, &mb.cf);
@@ -618,8 +609,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_and_data;
}
- if (tb[CGW_MOD_OR] &&
- nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_OR]) {
nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.or, &mb.cf);
@@ -635,8 +625,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_or_data;
}
- if (tb[CGW_MOD_XOR] &&
- nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_XOR]) {
nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.xor, &mb.cf);
@@ -652,8 +641,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_xor_data;
}
- if (tb[CGW_MOD_SET] &&
- nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_SET]) {
nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.set, &mb.cf);
@@ -672,11 +660,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
/* check for checksum operations after CAN frame modifications */
if (modidx) {
- if (tb[CGW_CS_CRC8] &&
- nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
-
- struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
- nla_data(tb[CGW_CS_CRC8]);
+ if (tb[CGW_CS_CRC8]) {
+ struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx);
@@ -699,11 +684,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->csumfunc.crc8 = cgw_csum_crc8_neg;
}
- if (tb[CGW_CS_XOR] &&
- nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
-
- struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
- nla_data(tb[CGW_CS_XOR]);
+ if (tb[CGW_CS_XOR]) {
+ struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx);
@@ -735,8 +717,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
memset(ccgw, 0, sizeof(*ccgw));
/* check for can_filter in attributes */
- if (tb[CGW_FILTER] &&
- nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
+ if (tb[CGW_FILTER])
nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
sizeof(struct can_filter));
@@ -746,13 +727,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
return err;
- if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
- nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
- sizeof(u32));
-
- if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
- nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
- sizeof(u32));
+ ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
+ ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
/* both indices set to 0 for flushing all routing entries */
if (!ccgw->src_idx && !ccgw->dst_idx)
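
Note on the cgw_policy conversion above: with a struct nla_policy passed to nlmsg_parse(), type and minimum-length checks happen centrally during parsing, so the per-attribute "tb[X] && nla_len(tb[X]) == LEN" guards can all be dropped. A condensed sketch of the pattern (attribute choice illustrative):

    static const struct nla_policy demo_policy[CGW_MAX + 1] = {
            [CGW_SRC_IF] = { .type = NLA_U32 },
    };

    static int demo_parse(struct nlmsghdr *nlh)
    {
            struct nlattr *tb[CGW_MAX + 1];
            int err;

            err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
                              demo_policy);
            if (err < 0)
                    return err;
            if (tb[CGW_SRC_IF])     /* length/type already validated */
                    pr_info("src if %u\n", nla_get_u32(tb[CGW_SRC_IF]));
            return 0;
    }
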
diff --git a/net/can/proc.c b/net/can/proc.c
index ba873c36d2f..3b6dd318049 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -83,9 +83,6 @@ static const char rx_list_name[][8] = {
[RX_EFF] = "rx_eff",
};
-/* receive filters subscribed for 'all' CAN devices */
-extern struct dev_rcv_lists can_rx_alldev_list;
-
/*
* af_can statistics stuff
*/
diff --git a/net/can/raw.c b/net/can/raw.c
index 46cca3a91d1..3e9c89356a9 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -82,6 +82,7 @@ struct raw_sock {
struct notifier_block notifier;
int loopback;
int recv_own_msgs;
+ int fd_frames;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
@@ -119,6 +120,14 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!ro->recv_own_msgs && oskb->sk == sk)
return;
+ /* do not pass frames with DLC > 8 to a legacy socket */
+ if (!ro->fd_frames) {
+ struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
+
+ if (unlikely(cfd->len > CAN_MAX_DLEN))
+ return;
+ }
+
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
if (!skb)
@@ -291,6 +300,7 @@ static int raw_init(struct sock *sk)
/* set default loopback behaviour */
ro->loopback = 1;
ro->recv_own_msgs = 0;
+ ro->fd_frames = 0;
/* set notifier */
ro->notifier.notifier_call = raw_notifier;
@@ -569,6 +579,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
+ case CAN_RAW_FD_FRAMES:
+ if (optlen != sizeof(ro->fd_frames))
+ return -EINVAL;
+
+ if (copy_from_user(&ro->fd_frames, optval, optlen))
+ return -EFAULT;
+
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -627,6 +646,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->recv_own_msgs;
break;
+ case CAN_RAW_FD_FRAMES:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = &ro->fd_frames;
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -662,8 +687,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
} else
ifindex = ro->ifindex;
- if (size != sizeof(struct can_frame))
- return -EINVAL;
+ if (ro->fd_frames) {
+ if (unlikely(size != CANFD_MTU && size != CAN_MTU))
+ return -EINVAL;
+ } else {
+ if (unlikely(size != CAN_MTU))
+ return -EINVAL;
+ }
dev = dev_get_by_index(&init_net, ifindex);
if (!dev)
@@ -705,7 +735,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
+ struct raw_sock *ro = raw_sk(sk);
struct sk_buff *skb;
+ int rxmtu;
int err = 0;
int noblock;
@@ -716,10 +748,20 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
return err;
- if (size < skb->len)
+ /*
+ * when serving a legacy socket the DLC <= 8 is already checked inside
+ * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
+ * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
+ */
+ if (!ro->fd_frames)
+ rxmtu = CAN_MTU;
+ else
+ rxmtu = skb->len;
+
+ if (size < rxmtu)
msg->msg_flags |= MSG_TRUNC;
else
- size = skb->len;
+ size = rxmtu;
err = memcpy_toiovec(msg->msg_iov, skb->data, size);
if (err < 0) {
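
From userspace, the new CAN_RAW_FD_FRAMES option is a per-socket switch: once enabled, write() accepts both CAN_MTU and CANFD_MTU sized buffers, and recvmsg() delivers full-length CAN FD frames instead of filtering them out in raw_rcv(). Minimal usage sketch:

    int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
    int enable = 1;

    setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES,
               &enable, sizeof(enable));
    /* write(s, &fd_frame, CANFD_MTU) is now accepted */
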
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index ba4323bce0e..69e38db28e5 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
+#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
@@ -460,27 +461,23 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
client->auth_err = 0;
client->extra_mon_dispatch = NULL;
- client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT |
+ client->supported_features = CEPH_FEATURES_SUPPORTED_DEFAULT |
supported_features;
- client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT |
+ client->required_features = CEPH_FEATURES_REQUIRED_DEFAULT |
required_features;
/* msgr */
if (ceph_test_opt(client, MYIP))
myaddr = &client->options->my_addr;
- client->msgr = ceph_messenger_create(myaddr,
- client->supported_features,
- client->required_features);
- if (IS_ERR(client->msgr)) {
- err = PTR_ERR(client->msgr);
- goto fail;
- }
- client->msgr->nocrc = ceph_test_opt(client, NOCRC);
+ ceph_messenger_init(&client->msgr, myaddr,
+ client->supported_features,
+ client->required_features,
+ ceph_test_opt(client, NOCRC));
/* subsystems */
err = ceph_monc_init(&client->monc, client);
if (err < 0)
- goto fail_msgr;
+ goto fail;
err = ceph_osdc_init(&client->osdc, client);
if (err < 0)
goto fail_monc;
@@ -489,8 +486,6 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
fail_monc:
ceph_monc_stop(&client->monc);
-fail_msgr:
- ceph_messenger_destroy(client->msgr);
fail:
kfree(client);
return ERR_PTR(err);
@@ -501,6 +496,8 @@ void ceph_destroy_client(struct ceph_client *client)
{
dout("destroy_client %p\n", client);
+ atomic_set(&client->msgr.stopping, 1);
+
/* unmount */
ceph_osdc_stop(&client->osdc);
@@ -508,8 +505,6 @@ void ceph_destroy_client(struct ceph_client *client)
ceph_debugfs_client_cleanup(client);
- ceph_messenger_destroy(client->msgr);
-
ceph_destroy_options(client->options);
kfree(client);
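
Note on the messenger lifecycle change above: embedding the messenger in struct ceph_client (ceph_messenger_init() on the existing field instead of ceph_messenger_create()) removes an allocation that could fail and the matching destroy step; teardown shrinks to setting the msgr.stopping flag before the subsystems are stopped, and the messenger's storage goes away with the client's kfree(). The structural change, sketched:

    struct ceph_client {
            /* ... */
            struct ceph_messenger msgr; /* was: struct ceph_messenger *msgr */
            /* ... */
    };
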
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index d7edc24333b..35fce755ce1 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -306,7 +306,6 @@ static int crush_choose(const struct crush_map *map,
int item = 0;
int itemtype;
int collide, reject;
- const unsigned int orig_tries = 5; /* attempts before we fall back to search */
dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
bucket->id, x, outpos, numrep);
@@ -351,8 +350,9 @@ static int crush_choose(const struct crush_map *map,
reject = 1;
goto reject;
}
- if (flocal >= (in->size>>1) &&
- flocal > orig_tries)
+ if (map->choose_local_fallback_tries > 0 &&
+ flocal >= (in->size>>1) &&
+ flocal > map->choose_local_fallback_tries)
item = bucket_perm_choose(in, x, r);
else
item = crush_bucket_choose(in, x, r);
@@ -422,13 +422,14 @@ reject:
ftotal++;
flocal++;
- if (collide && flocal < 3)
+ if (collide && flocal <= map->choose_local_tries)
/* retry locally a few times */
retry_bucket = 1;
- else if (flocal <= in->size + orig_tries)
+ else if (map->choose_local_fallback_tries > 0 &&
+ flocal <= in->size + map->choose_local_fallback_tries)
/* exhaustive bucket search */
retry_bucket = 1;
- else if (ftotal < 20)
+ else if (ftotal <= map->choose_total_tries)
/* then retry descent */
retry_descent = 1;
else
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index b780cb7947d..9da7fdd3cd8 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -466,6 +466,7 @@ void ceph_key_destroy(struct key *key) {
struct ceph_crypto_key *ckey = key->payload.data;
ceph_crypto_key_destroy(ckey);
+ kfree(ckey);
}
struct key_type key_type_ceph = {
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index 1919d1550d7..3572dc518bc 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -16,7 +16,8 @@ struct ceph_crypto_key {
static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
- kfree(key->key);
+ if (key)
+ kfree(key->key);
}
extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 10255e81be7..b9796750034 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -29,6 +29,74 @@
* the sender.
*/
+/*
+ * We track the state of the socket on a given connection using
+ * values defined below. The transition to a new socket state is
+ * handled by a function which verifies we aren't coming from an
+ * unexpected state.
+ *
+ * NEW (transient initial state)
+ *     -> CLOSED via con_sock_state_init()
+ *
+ * CLOSED (initialized, but no socket and no TCP connection)
+ *     -> CONNECTING via con_sock_state_connecting()
+ *
+ * CONNECTING (socket created, TCP connect initiated)
+ *     -> CONNECTED via con_sock_state_connected()
+ *     -> CLOSING via con_sock_state_closing()
+ *
+ * CONNECTED (TCP connection established)
+ *     -> CLOSING via con_sock_state_closing()
+ *     -> CLOSED via con_sock_state_closed()
+ *
+ * CLOSING (socket event received; awaiting close)
+ *     -> CLOSED via con_sock_state_closed()
+ *
+ * State values for ceph_connection->sock_state; NEW is assumed to be 0.
+ */
+
+#define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
+#define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
+#define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
+#define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
+#define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
+
+/*
+ * connection states
+ */
+#define CON_STATE_CLOSED 1 /* -> PREOPEN */
+#define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
+#define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
+#define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
+#define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
+#define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
+
+/*
+ * ceph_connection flag bits
+ */
+#define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
+ * messages on errors */
+#define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
+#define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
+#define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
+#define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
+
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
@@ -147,72 +215,130 @@ void ceph_msgr_flush(void)
}
EXPORT_SYMBOL(ceph_msgr_flush);
+/* Connection socket state transition functions */
+
+static void con_sock_state_init(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSED);
+}
+
+static void con_sock_state_connecting(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CONNECTING);
+}
+
+static void con_sock_state_connected(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CONNECTED);
+}
+
+static void con_sock_state_closing(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
+ old_state != CON_SOCK_STATE_CONNECTED &&
+ old_state != CON_SOCK_STATE_CLOSING))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSING);
+}
+
+static void con_sock_state_closed(struct ceph_connection *con)
+{
+ int old_state;
+
+ old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
+ if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
+ old_state != CON_SOCK_STATE_CLOSING &&
+ old_state != CON_SOCK_STATE_CONNECTING &&
+ old_state != CON_SOCK_STATE_CLOSED))
+ printk("%s: unexpected old state %d\n", __func__, old_state);
+ dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
+ CON_SOCK_STATE_CLOSED);
+}
/*
* socket callback functions
*/
/* data available on socket, or listen socket received a connect */
-static void ceph_data_ready(struct sock *sk, int count_unused)
+static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
struct ceph_connection *con = sk->sk_user_data;
+ if (atomic_read(&con->msgr->stopping)) {
+ return;
+ }
if (sk->sk_state != TCP_CLOSE_WAIT) {
- dout("ceph_data_ready on %p state = %lu, queueing work\n",
+ dout("%s on %p state = %lu, queueing work\n", __func__,
con, con->state);
queue_con(con);
}
}
/* socket has buffer space for writing */
-static void ceph_write_space(struct sock *sk)
+static void ceph_sock_write_space(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
/* only queue to workqueue if there is data we want to write,
* and there is sufficient space in the socket buffer to accept
- * more data. clear SOCK_NOSPACE so that ceph_write_space()
+ * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
* doesn't get called again until try_write() fills the socket
* buffer. See net/ipv4/tcp_input.c:tcp_check_space()
* and net/core/stream.c:sk_stream_write_space().
*/
- if (test_bit(WRITE_PENDING, &con->state)) {
+ if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
- dout("ceph_write_space %p queueing write work\n", con);
+ dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
}
} else {
- dout("ceph_write_space %p nothing to write\n", con);
+ dout("%s %p nothing to write\n", __func__, con);
}
}
/* socket's state has changed */
-static void ceph_state_change(struct sock *sk)
+static void ceph_sock_state_change(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
- dout("ceph_state_change %p state = %lu sk_state = %u\n",
+ dout("%s %p state = %lu sk_state = %u\n", __func__,
con, con->state, sk->sk_state);
- if (test_bit(CLOSED, &con->state))
- return;
-
switch (sk->sk_state) {
case TCP_CLOSE:
- dout("ceph_state_change TCP_CLOSE\n");
+ dout("%s TCP_CLOSE\n", __func__);
case TCP_CLOSE_WAIT:
- dout("ceph_state_change TCP_CLOSE_WAIT\n");
- if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
- if (test_bit(CONNECTING, &con->state))
- con->error_msg = "connection failed";
- else
- con->error_msg = "socket closed";
- queue_con(con);
- }
+ dout("%s TCP_CLOSE_WAIT\n", __func__);
+ con_sock_state_closing(con);
+ set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+ queue_con(con);
break;
case TCP_ESTABLISHED:
- dout("ceph_state_change TCP_ESTABLISHED\n");
+ dout("%s TCP_ESTABLISHED\n", __func__);
+ con_sock_state_connected(con);
queue_con(con);
break;
default: /* Everything else is uninteresting */
@@ -228,9 +354,9 @@ static void set_sock_callbacks(struct socket *sock,
{
struct sock *sk = sock->sk;
sk->sk_user_data = con;
- sk->sk_data_ready = ceph_data_ready;
- sk->sk_write_space = ceph_write_space;
- sk->sk_state_change = ceph_state_change;
+ sk->sk_data_ready = ceph_sock_data_ready;
+ sk->sk_write_space = ceph_sock_write_space;
+ sk->sk_state_change = ceph_sock_state_change;
}
@@ -262,6 +388,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
+ con_sock_state_connecting(con);
ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
O_NONBLOCK);
if (ret == -EINPROGRESS) {
@@ -277,7 +404,6 @@ static int ceph_tcp_connect(struct ceph_connection *con)
return ret;
}
con->sock = sock;
-
return 0;
}
@@ -333,16 +459,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
*/
static int con_close_socket(struct ceph_connection *con)
{
- int rc;
+ int rc = 0;
dout("con_close_socket on %p sock %p\n", con, con->sock);
- if (!con->sock)
- return 0;
- set_bit(SOCK_CLOSED, &con->state);
- rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
- sock_release(con->sock);
- con->sock = NULL;
- clear_bit(SOCK_CLOSED, &con->state);
+ if (con->sock) {
+ rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
+ sock_release(con->sock);
+ con->sock = NULL;
+ }
+
+ /*
+ * Forcibly clear the SOCK_CLOSED flag. It gets set
+ * independent of the connection mutex, and we could have
+ * received a socket close event before we had the chance to
+ * shut the socket down.
+ */
+ clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
+
+ con_sock_state_closed(con);
return rc;
}
@@ -353,6 +487,10 @@ static int con_close_socket(struct ceph_connection *con)
static void ceph_msg_remove(struct ceph_msg *msg)
{
list_del_init(&msg->list_head);
+ BUG_ON(msg->con == NULL);
+ msg->con->ops->put(msg->con);
+ msg->con = NULL;
+
ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
@@ -372,8 +510,11 @@ static void reset_connection(struct ceph_connection *con)
ceph_msg_remove_list(&con->out_sent);
if (con->in_msg) {
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
+ con->ops->put(con);
}
con->connect_seq = 0;
@@ -391,32 +532,44 @@ static void reset_connection(struct ceph_connection *con)
*/
void ceph_con_close(struct ceph_connection *con)
{
+ mutex_lock(&con->mutex);
dout("con_close %p peer %s\n", con,
ceph_pr_addr(&con->peer_addr.in_addr));
- set_bit(CLOSED, &con->state); /* in case there's queued work */
- clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
- clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
- clear_bit(KEEPALIVE_PENDING, &con->state);
- clear_bit(WRITE_PENDING, &con->state);
- mutex_lock(&con->mutex);
+ con->state = CON_STATE_CLOSED;
+
+ clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
+ clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ clear_bit(CON_FLAG_BACKOFF, &con->flags);
+
reset_connection(con);
con->peer_global_seq = 0;
cancel_delayed_work(&con->work);
+ con_close_socket(con);
mutex_unlock(&con->mutex);
- queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
/*
* Reopen a closed connection, with a new peer address.
*/
-void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
+void ceph_con_open(struct ceph_connection *con,
+ __u8 entity_type, __u64 entity_num,
+ struct ceph_entity_addr *addr)
{
+ mutex_lock(&con->mutex);
dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
- set_bit(OPENING, &con->state);
- clear_bit(CLOSED, &con->state);
+
+ BUG_ON(con->state != CON_STATE_CLOSED);
+ con->state = CON_STATE_PREOPEN;
+
+ con->peer_name.type = (__u8) entity_type;
+ con->peer_name.num = cpu_to_le64(entity_num);
+
memcpy(&con->peer_addr, addr, sizeof(*addr));
con->delay = 0; /* reset backoff memory */
+ mutex_unlock(&con->mutex);
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
@@ -430,42 +583,26 @@ bool ceph_con_opened(struct ceph_connection *con)
}
/*
- * generic get/put
- */
-struct ceph_connection *ceph_con_get(struct ceph_connection *con)
-{
- int nref = __atomic_add_unless(&con->nref, 1, 0);
-
- dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);
-
- return nref ? con : NULL;
-}
-
-void ceph_con_put(struct ceph_connection *con)
-{
- int nref = atomic_dec_return(&con->nref);
-
- BUG_ON(nref < 0);
- if (nref == 0) {
- BUG_ON(con->sock);
- kfree(con);
- }
- dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
-}
-
-/*
* initialize a new connection.
*/
-void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
+void ceph_con_init(struct ceph_connection *con, void *private,
+ const struct ceph_connection_operations *ops,
+ struct ceph_messenger *msgr)
{
dout("con_init %p\n", con);
memset(con, 0, sizeof(*con));
- atomic_set(&con->nref, 1);
+ con->private = private;
+ con->ops = ops;
con->msgr = msgr;
+
+ con_sock_state_init(con);
+
mutex_init(&con->mutex);
INIT_LIST_HEAD(&con->out_queue);
INIT_LIST_HEAD(&con->out_sent);
INIT_DELAYED_WORK(&con->work, con_work);
+
+ con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
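
Note on the new ceph_con_init()/ceph_con_open() signatures above: the connection's owner and dispatch table are wired up once at initialization time, and the peer identity travels with the open call, so callers no longer poke peer_name fields themselves. Sketch of a caller in the new style (the osd-side names are assumptions for illustration):

    ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
    /* ... later, when the peer address is known ... */
    ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                  &osdc->osdmap->osd_addr[osd->o_osd]);
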
@@ -486,14 +623,14 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
return ret;
}
-static void ceph_con_out_kvec_reset(struct ceph_connection *con)
+static void con_out_kvec_reset(struct ceph_connection *con)
{
con->out_kvec_left = 0;
con->out_kvec_bytes = 0;
con->out_kvec_cur = &con->out_kvec[0];
}
-static void ceph_con_out_kvec_add(struct ceph_connection *con,
+static void con_out_kvec_add(struct ceph_connection *con,
size_t size, void *data)
{
int index;
@@ -507,6 +644,53 @@ static void ceph_con_out_kvec_add(struct ceph_connection *con,
con->out_kvec_bytes += size;
}
+#ifdef CONFIG_BLOCK
+static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+{
+ if (!bio) {
+ *iter = NULL;
+ *seg = 0;
+ return;
+ }
+ *iter = bio;
+ *seg = bio->bi_idx;
+}
+
+static void iter_bio_next(struct bio **bio_iter, int *seg)
+{
+ if (*bio_iter == NULL)
+ return;
+
+ BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+
+ (*seg)++;
+ if (*seg == (*bio_iter)->bi_vcnt)
+ init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
+}
+#endif
+
+static void prepare_write_message_data(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->out_msg;
+
+ BUG_ON(!msg);
+ BUG_ON(!msg->hdr.data_len);
+
+ /* initialize page iterator */
+ con->out_msg_pos.page = 0;
+ if (msg->pages)
+ con->out_msg_pos.page_pos = msg->page_alignment;
+ else
+ con->out_msg_pos.page_pos = 0;
+#ifdef CONFIG_BLOCK
+ if (msg->bio)
+ init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
+#endif
+ con->out_msg_pos.data_pos = 0;
+ con->out_msg_pos.did_page_crc = false;
+ con->out_more = 1; /* data + footer will follow */
+}
+
/*
* Prepare footer for currently outgoing message, and finish things
* off. Assumes out_kvec* are already valid.. we just add on to the end.
@@ -516,6 +700,8 @@ static void prepare_write_message_footer(struct ceph_connection *con)
struct ceph_msg *m = con->out_msg;
int v = con->out_kvec_left;
+ m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+
dout("prepare_write_message_footer %p\n", con);
con->out_kvec_is_msg = true;
con->out_kvec[v].iov_base = &m->footer;
@@ -534,7 +720,7 @@ static void prepare_write_message(struct ceph_connection *con)
struct ceph_msg *m;
u32 crc;
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
con->out_kvec_is_msg = true;
con->out_msg_done = false;
@@ -542,14 +728,16 @@ static void prepare_write_message(struct ceph_connection *con)
* TCP packet that's a good thing. */
if (con->in_seq > con->in_seq_acked) {
con->in_seq_acked = con->in_seq;
- ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
+ con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
- ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
+ con_out_kvec_add(con, sizeof (con->out_temp_ack),
&con->out_temp_ack);
}
+ BUG_ON(list_empty(&con->out_queue));
m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
con->out_msg = m;
+ BUG_ON(m->con != con);
/* put message on sent list */
ceph_msg_get(m);
@@ -576,18 +764,18 @@ static void prepare_write_message(struct ceph_connection *con)
BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
/* tag + hdr + front + middle */
- ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
- ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
- ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+ con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+ con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
+ con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
if (m->middle)
- ceph_con_out_kvec_add(con, m->middle->vec.iov_len,
+ con_out_kvec_add(con, m->middle->vec.iov_len,
m->middle->vec.iov_base);
/* fill in crc (except data pages), footer */
crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
con->out_msg->hdr.crc = cpu_to_le32(crc);
- con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
+ con->out_msg->footer.flags = 0;
crc = crc32c(0, m->front.iov_base, m->front.iov_len);
con->out_msg->footer.front_crc = cpu_to_le32(crc);
@@ -597,28 +785,19 @@ static void prepare_write_message(struct ceph_connection *con)
con->out_msg->footer.middle_crc = cpu_to_le32(crc);
} else
con->out_msg->footer.middle_crc = 0;
- con->out_msg->footer.data_crc = 0;
- dout("prepare_write_message front_crc %u data_crc %u\n",
+ dout("%s front_crc %u middle_crc %u\n", __func__,
le32_to_cpu(con->out_msg->footer.front_crc),
le32_to_cpu(con->out_msg->footer.middle_crc));
/* is there a data payload? */
- if (le32_to_cpu(m->hdr.data_len) > 0) {
- /* initialize page iterator */
- con->out_msg_pos.page = 0;
- if (m->pages)
- con->out_msg_pos.page_pos = m->page_alignment;
- else
- con->out_msg_pos.page_pos = 0;
- con->out_msg_pos.data_pos = 0;
- con->out_msg_pos.did_page_crc = false;
- con->out_more = 1; /* data + footer will follow */
- } else {
+ con->out_msg->footer.data_crc = 0;
+ if (m->hdr.data_len)
+ prepare_write_message_data(con);
+ else
/* no, queue up footer too and be done */
prepare_write_message_footer(con);
- }
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -630,16 +809,16 @@ static void prepare_write_ack(struct ceph_connection *con)
con->in_seq_acked, con->in_seq);
con->in_seq_acked = con->in_seq;
- ceph_con_out_kvec_reset(con);
+ con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
+ con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
- ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
+ con_out_kvec_add(con, sizeof (con->out_temp_ack),
&con->out_temp_ack);
con->out_more = 1; /* more will follow.. eventually.. */
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -648,9 +827,9 @@ static void prepare_write_ack(struct ceph_connection *con)
static void prepare_write_keepalive(struct ceph_connection *con)
{
dout("prepare_write_keepalive %p\n", con);
- ceph_con_out_kvec_reset(con);
- ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
- set_bit(WRITE_PENDING, &con->state);
+ con_out_kvec_reset(con);
+ con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
/*
@@ -665,27 +844,21 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
if (!con->ops->get_authorizer) {
con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
con->out_connect.authorizer_len = 0;
-
return NULL;
}
/* Can't hold the mutex while getting authorizer */
-
mutex_unlock(&con->mutex);
-
auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
-
mutex_lock(&con->mutex);
if (IS_ERR(auth))
return auth;
- if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
+ if (con->state != CON_STATE_NEGOTIATING)
return ERR_PTR(-EAGAIN);
con->auth_reply_buf = auth->authorizer_reply_buf;
con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
-
-
return auth;
}
@@ -694,12 +867,12 @@ static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection
*/
static void prepare_write_banner(struct ceph_connection *con)
{
- ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
- ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
+ con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
+ con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
&con->msgr->my_enc_addr);
con->out_more = 0;
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
static int prepare_write_connect(struct ceph_connection *con)
@@ -742,14 +915,15 @@ static int prepare_write_connect(struct ceph_connection *con)
con->out_connect.authorizer_len = auth ?
cpu_to_le32(auth->authorizer_buf_len) : 0;
- ceph_con_out_kvec_add(con, sizeof (con->out_connect),
+ con_out_kvec_reset(con);
+ con_out_kvec_add(con, sizeof (con->out_connect),
&con->out_connect);
if (auth && auth->authorizer_buf_len)
- ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
+ con_out_kvec_add(con, auth->authorizer_buf_len,
auth->authorizer_buf);
con->out_more = 0;
- set_bit(WRITE_PENDING, &con->state);
+ set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
return 0;
}
@@ -797,30 +971,34 @@ out:
return ret; /* done! */
}
-#ifdef CONFIG_BLOCK
-static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
+static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
+ size_t len, size_t sent, bool in_trail)
{
- if (!bio) {
- *iter = NULL;
- *seg = 0;
- return;
- }
- *iter = bio;
- *seg = bio->bi_idx;
-}
+ struct ceph_msg *msg = con->out_msg;
-static void iter_bio_next(struct bio **bio_iter, int *seg)
-{
- if (*bio_iter == NULL)
- return;
+ BUG_ON(!msg);
+ BUG_ON(!sent);
- BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
+ con->out_msg_pos.data_pos += sent;
+ con->out_msg_pos.page_pos += sent;
+ if (sent < len)
+ return;
- (*seg)++;
- if (*seg == (*bio_iter)->bi_vcnt)
- init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
-}
+ BUG_ON(sent != len);
+ con->out_msg_pos.page_pos = 0;
+ con->out_msg_pos.page++;
+ con->out_msg_pos.did_page_crc = false;
+ if (in_trail)
+ list_move_tail(&page->lru,
+ &msg->trail->head);
+ else if (msg->pagelist)
+ list_move_tail(&page->lru,
+ &msg->pagelist->head);
+#ifdef CONFIG_BLOCK
+ else if (msg->bio)
+ iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
+}
/*
* Write as much message data payload as we can. If we finish, queue
@@ -837,41 +1015,36 @@ static int write_partial_msg_pages(struct ceph_connection *con)
bool do_datacrc = !con->msgr->nocrc;
int ret;
int total_max_write;
- int in_trail = 0;
- size_t trail_len = (msg->trail ? msg->trail->length : 0);
+ bool in_trail = false;
+ const size_t trail_len = (msg->trail ? msg->trail->length : 0);
+ const size_t trail_off = data_len - trail_len;
dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
- con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
+ con, msg, con->out_msg_pos.page, msg->nr_pages,
con->out_msg_pos.page_pos);
-#ifdef CONFIG_BLOCK
- if (msg->bio && !msg->bio_iter)
- init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
-#endif
-
+ /*
+ * Iterate through each page that contains data to be
+ * written, and send as much as possible for each.
+ *
+ * If we are calculating the data crc (the default), we will
+ * need to map the page. If we have no pages, they have
+ * been revoked, so use the zero page.
+ */
while (data_len > con->out_msg_pos.data_pos) {
struct page *page = NULL;
int max_write = PAGE_SIZE;
int bio_offset = 0;
- total_max_write = data_len - trail_len -
- con->out_msg_pos.data_pos;
-
- /*
- * if we are calculating the data crc (the default), we need
- * to map the page. if our pages[] has been revoked, use the
- * zero page.
- */
-
- /* have we reached the trail part of the data? */
- if (con->out_msg_pos.data_pos >= data_len - trail_len) {
- in_trail = 1;
+ in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
+ if (!in_trail)
+ total_max_write = trail_off - con->out_msg_pos.data_pos;
+ if (in_trail) {
total_max_write = data_len - con->out_msg_pos.data_pos;
page = list_first_entry(&msg->trail->head,
struct page, lru);
- max_write = PAGE_SIZE;
} else if (msg->pages) {
page = msg->pages[con->out_msg_pos.page];
} else if (msg->pagelist) {
@@ -894,15 +1067,14 @@ static int write_partial_msg_pages(struct ceph_connection *con)
if (do_datacrc && !con->out_msg_pos.did_page_crc) {
void *base;
- u32 crc;
- u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
+ u32 crc = le32_to_cpu(msg->footer.data_crc);
char *kaddr;
kaddr = kmap(page);
BUG_ON(kaddr == NULL);
base = kaddr + con->out_msg_pos.page_pos + bio_offset;
- crc = crc32c(tmpcrc, base, len);
- con->out_msg->footer.data_crc = cpu_to_le32(crc);
+ crc = crc32c(crc, base, len);
+ msg->footer.data_crc = cpu_to_le32(crc);
con->out_msg_pos.did_page_crc = true;
}
ret = ceph_tcp_sendpage(con->sock, page,
@@ -915,31 +1087,15 @@ static int write_partial_msg_pages(struct ceph_connection *con)
if (ret <= 0)
goto out;
- con->out_msg_pos.data_pos += ret;
- con->out_msg_pos.page_pos += ret;
- if (ret == len) {
- con->out_msg_pos.page_pos = 0;
- con->out_msg_pos.page++;
- con->out_msg_pos.did_page_crc = false;
- if (in_trail)
- list_move_tail(&page->lru,
- &msg->trail->head);
- else if (msg->pagelist)
- list_move_tail(&page->lru,
- &msg->pagelist->head);
-#ifdef CONFIG_BLOCK
- else if (msg->bio)
- iter_bio_next(&msg->bio_iter, &msg->bio_seg);
-#endif
- }
+ out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
}
dout("write_partial_msg_pages %p msg %p done\n", con, msg);
/* prepare and queue up footer, too */
if (!do_datacrc)
- con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
- ceph_con_out_kvec_reset(con);
+ msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
+ con_out_kvec_reset(con);
prepare_write_message_footer(con);
ret = 1;
out:
@@ -1351,20 +1507,14 @@ static int process_banner(struct ceph_connection *con)
ceph_pr_addr(&con->msgr->inst.addr.in_addr));
}
- set_bit(NEGOTIATING, &con->state);
- prepare_read_connect(con);
return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
reset_connection(con);
- set_bit(CLOSED, &con->state); /* in case there's queued work */
-
- mutex_unlock(&con->mutex);
- if (con->ops->bad_proto)
- con->ops->bad_proto(con);
- mutex_lock(&con->mutex);
+ BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ con->state = CON_STATE_CLOSED;
}
static int process_connect(struct ceph_connection *con)
@@ -1407,7 +1557,6 @@ static int process_connect(struct ceph_connection *con)
return -1;
}
con->auth_retry = 1;
- ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1428,7 +1577,6 @@ static int process_connect(struct ceph_connection *con)
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
reset_connection(con);
- ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1440,8 +1588,7 @@ static int process_connect(struct ceph_connection *con)
if (con->ops->peer_reset)
con->ops->peer_reset(con);
mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state))
+ if (con->state != CON_STATE_NEGOTIATING)
return -EAGAIN;
break;
@@ -1454,7 +1601,6 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->out_connect.connect_seq),
le32_to_cpu(con->in_reply.connect_seq));
con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
- ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1471,7 +1617,6 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.global_seq));
get_global_seq(con->msgr,
le32_to_cpu(con->in_reply.global_seq));
- ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
return ret;
@@ -1489,7 +1634,10 @@ static int process_connect(struct ceph_connection *con)
fail_protocol(con);
return -1;
}
- clear_bit(CONNECTING, &con->state);
+
+ BUG_ON(con->state != CON_STATE_NEGOTIATING);
+ con->state = CON_STATE_OPEN;
+
con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
con->connect_seq++;
con->peer_features = server_feat;
@@ -1501,7 +1649,9 @@ static int process_connect(struct ceph_connection *con)
le32_to_cpu(con->in_reply.connect_seq));
if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
- set_bit(LOSSYTX, &con->state);
+ set_bit(CON_FLAG_LOSSYTX, &con->flags);
+
+ con->delay = 0; /* reset backoff memory */
prepare_read_tag(con);
break;
@@ -1587,10 +1737,7 @@ static int read_partial_message_section(struct ceph_connection *con,
return 1;
}
-static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip);
-
+static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
static int read_partial_message_pages(struct ceph_connection *con,
struct page **pages,
@@ -1633,9 +1780,6 @@ static int read_partial_message_bio(struct ceph_connection *con,
void *p;
int ret, left;
- if (IS_ERR(bv))
- return PTR_ERR(bv);
-
left = min((int)(data_len - con->in_msg_pos.data_pos),
(int)(bv->bv_len - con->in_msg_pos.page_pos));
@@ -1672,7 +1816,6 @@ static int read_partial_message(struct ceph_connection *con)
int ret;
unsigned int front_len, middle_len, data_len;
bool do_datacrc = !con->msgr->nocrc;
- int skip;
u64 seq;
u32 crc;
@@ -1723,10 +1866,13 @@ static int read_partial_message(struct ceph_connection *con)
/* allocate message? */
if (!con->in_msg) {
+ int skip = 0;
+
dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
con->in_hdr.front_len, con->in_hdr.data_len);
- skip = 0;
- con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
+ ret = ceph_con_in_msg_alloc(con, &skip);
+ if (ret < 0)
+ return ret;
if (skip) {
/* skip this message */
dout("alloc_msg said skip message\n");
@@ -1737,11 +1883,9 @@ static int read_partial_message(struct ceph_connection *con)
con->in_seq++;
return 0;
}
- if (!con->in_msg) {
- con->error_msg =
- "error allocating memory for incoming message";
- return -ENOMEM;
- }
+
+ BUG_ON(!con->in_msg);
+ BUG_ON(con->in_msg->con != con);
m = con->in_msg;
m->front.iov_len = 0; /* haven't read it yet */
if (m->middle)
@@ -1753,6 +1897,11 @@ static int read_partial_message(struct ceph_connection *con)
else
con->in_msg_pos.page_pos = 0;
con->in_msg_pos.data_pos = 0;
+
+#ifdef CONFIG_BLOCK
+ if (m->bio)
+ init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
+#endif
}
/* front */
@@ -1769,10 +1918,6 @@ static int read_partial_message(struct ceph_connection *con)
if (ret <= 0)
return ret;
}
-#ifdef CONFIG_BLOCK
- if (m->bio && !m->bio_iter)
- init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
-#endif
/* (page) data */
while (con->in_msg_pos.data_pos < data_len) {
@@ -1783,7 +1928,7 @@ static int read_partial_message(struct ceph_connection *con)
return ret;
#ifdef CONFIG_BLOCK
} else if (m->bio) {
-
+ BUG_ON(!m->bio_iter);
ret = read_partial_message_bio(con,
&m->bio_iter, &m->bio_seg,
data_len, do_datacrc);
@@ -1837,8 +1982,11 @@ static void process_message(struct ceph_connection *con)
{
struct ceph_msg *msg;
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
msg = con->in_msg;
con->in_msg = NULL;
+ con->ops->put(con);
/* if first message, set peer_name */
if (con->peer_name.type == 0)
@@ -1858,7 +2006,6 @@ static void process_message(struct ceph_connection *con)
con->ops->dispatch(con, msg);
mutex_lock(&con->mutex);
- prepare_read_tag(con);
}
@@ -1870,22 +2017,19 @@ static int try_write(struct ceph_connection *con)
{
int ret = 1;
- dout("try_write start %p state %lu nref %d\n", con, con->state,
- atomic_read(&con->nref));
+ dout("try_write start %p state %lu\n", con, con->state);
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
/* open the socket first? */
- if (con->sock == NULL) {
- ceph_con_out_kvec_reset(con);
+ if (con->state == CON_STATE_PREOPEN) {
+ BUG_ON(con->sock);
+ con->state = CON_STATE_CONNECTING;
+
+ con_out_kvec_reset(con);
prepare_write_banner(con);
- ret = prepare_write_connect(con);
- if (ret < 0)
- goto out;
prepare_read_banner(con);
- set_bit(CONNECTING, &con->state);
- clear_bit(NEGOTIATING, &con->state);
BUG_ON(con->in_msg);
con->in_tag = CEPH_MSGR_TAG_READY;
@@ -1932,7 +2076,7 @@ more_kvec:
}
do_next:
- if (!test_bit(CONNECTING, &con->state)) {
+ if (con->state == CON_STATE_OPEN) {
/* is anything else pending? */
if (!list_empty(&con->out_queue)) {
prepare_write_message(con);
@@ -1942,14 +2086,15 @@ do_next:
prepare_write_ack(con);
goto more;
}
- if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
+ if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
+ &con->flags)) {
prepare_write_keepalive(con);
goto more;
}
}
/* Nothing to do! */
- clear_bit(WRITE_PENDING, &con->state);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
dout("try_write nothing else to write.\n");
ret = 0;
out:
@@ -1966,38 +2111,42 @@ static int try_read(struct ceph_connection *con)
{
int ret = -1;
- if (!con->sock)
- return 0;
-
- if (test_bit(STANDBY, &con->state))
+more:
+ dout("try_read start on %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
return 0;
- dout("try_read start on %p\n", con);
+ BUG_ON(!con->sock);
-more:
dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
con->in_base_pos);
- /*
- * process_connect and process_message drop and re-take
- * con->mutex. make sure we handle a racing close or reopen.
- */
- if (test_bit(CLOSED, &con->state) ||
- test_bit(OPENING, &con->state)) {
- ret = -EAGAIN;
+ if (con->state == CON_STATE_CONNECTING) {
+ dout("try_read connecting\n");
+ ret = read_partial_banner(con);
+ if (ret <= 0)
+ goto out;
+ ret = process_banner(con);
+ if (ret < 0)
+ goto out;
+
+ BUG_ON(con->state != CON_STATE_CONNECTING);
+ con->state = CON_STATE_NEGOTIATING;
+
+ /* Banner is good, exchange connection info */
+ ret = prepare_write_connect(con);
+ if (ret < 0)
+ goto out;
+ prepare_read_connect(con);
+
+ /* Send connection info before awaiting response */
goto out;
}
- if (test_bit(CONNECTING, &con->state)) {
- if (!test_bit(NEGOTIATING, &con->state)) {
- dout("try_read connecting\n");
- ret = read_partial_banner(con);
- if (ret <= 0)
- goto out;
- ret = process_banner(con);
- if (ret < 0)
- goto out;
- }
+ if (con->state == CON_STATE_NEGOTIATING) {
+ dout("try_read negotiating\n");
ret = read_partial_connect(con);
if (ret <= 0)
goto out;
@@ -2007,6 +2156,8 @@ more:
goto more;
}
+ BUG_ON(con->state != CON_STATE_OPEN);
+
if (con->in_base_pos < 0) {
/*
* skipping + discarding content.
@@ -2040,7 +2191,8 @@ more:
prepare_read_ack(con);
break;
case CEPH_MSGR_TAG_CLOSE:
- set_bit(CLOSED, &con->state); /* fixme */
+ con_close_socket(con);
+ con->state = CON_STATE_CLOSED;
goto out;
default:
goto bad_tag;
@@ -2063,6 +2215,8 @@ more:
if (con->in_tag == CEPH_MSGR_TAG_READY)
goto more;
process_message(con);
+ if (con->state == CON_STATE_OPEN)
+ prepare_read_tag(con);
goto more;
}
if (con->in_tag == CEPH_MSGR_TAG_ACK) {
@@ -2091,12 +2245,6 @@ bad_tag:
*/
static void queue_con(struct ceph_connection *con)
{
- if (test_bit(DEAD, &con->state)) {
- dout("queue_con %p ignoring: DEAD\n",
- con);
- return;
- }
-
if (!con->ops->get(con)) {
dout("queue_con %p ref count 0\n", con);
return;
@@ -2121,7 +2269,26 @@ static void con_work(struct work_struct *work)
mutex_lock(&con->mutex);
restart:
- if (test_and_clear_bit(BACKOFF, &con->state)) {
+ if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
+ switch (con->state) {
+ case CON_STATE_CONNECTING:
+ con->error_msg = "connection failed";
+ break;
+ case CON_STATE_NEGOTIATING:
+ con->error_msg = "negotiation failed";
+ break;
+ case CON_STATE_OPEN:
+ con->error_msg = "socket closed";
+ break;
+ default:
+ dout("unrecognized con state %d\n", (int)con->state);
+ con->error_msg = "unrecognized con state";
+ BUG();
+ }
+ goto fault;
+ }
+
+ if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
dout("con_work %p backing off\n", con);
if (queue_delayed_work(ceph_msgr_wq, &con->work,
round_jiffies_relative(con->delay))) {
@@ -2135,35 +2302,35 @@ restart:
}
}
- if (test_bit(STANDBY, &con->state)) {
+ if (con->state == CON_STATE_STANDBY) {
dout("con_work %p STANDBY\n", con);
goto done;
}
- if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
- dout("con_work CLOSED\n");
- con_close_socket(con);
+ if (con->state == CON_STATE_CLOSED) {
+ dout("con_work %p CLOSED\n", con);
+ BUG_ON(con->sock);
goto done;
}
- if (test_and_clear_bit(OPENING, &con->state)) {
- /* reopen w/ new peer */
+ if (con->state == CON_STATE_PREOPEN) {
dout("con_work OPENING\n");
- con_close_socket(con);
+ BUG_ON(con->sock);
}
- if (test_and_clear_bit(SOCK_CLOSED, &con->state))
- goto fault;
-
ret = try_read(con);
if (ret == -EAGAIN)
goto restart;
- if (ret < 0)
+ if (ret < 0) {
+ con->error_msg = "socket error on read";
goto fault;
+ }
ret = try_write(con);
if (ret == -EAGAIN)
goto restart;
- if (ret < 0)
+ if (ret < 0) {
+ con->error_msg = "socket error on write";
goto fault;
+ }
done:
mutex_unlock(&con->mutex);
@@ -2172,7 +2339,6 @@ done_unlocked:
return;
fault:
- mutex_unlock(&con->mutex);
ceph_fault(con); /* error/fault path */
goto done_unlocked;
}
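
The con_work() hunk above completes the move from one shared bitmask to an exclusive con->state value plus a separate con->flags word for the orthogonal bits. A minimal user-space sketch of that split (state and flag names mirror the patch; the struct and helper are simplified stand-ins, not the kernel types):

#include <stdio.h>

enum con_state {                        /* exclusive: exactly one at a time */
        CON_STATE_CLOSED,
        CON_STATE_PREOPEN,
        CON_STATE_CONNECTING,
        CON_STATE_NEGOTIATING,
        CON_STATE_OPEN,
        CON_STATE_STANDBY,
};

#define CON_FLAG_LOSSYTX           (1u << 0)  /* orthogonal: any subset */
#define CON_FLAG_KEEPALIVE_PENDING (1u << 1)
#define CON_FLAG_WRITE_PENDING     (1u << 2)
#define CON_FLAG_SOCK_CLOSED       (1u << 3)
#define CON_FLAG_BACKOFF           (1u << 4)

struct conn {
        enum con_state state;
        unsigned int flags;
};

/* Mirrors the con_work() switch: a closed socket means something
 * different depending on which exclusive state we were in. */
static const char *sock_closed_error(const struct conn *c)
{
        switch (c->state) {
        case CON_STATE_CONNECTING:  return "connection failed";
        case CON_STATE_NEGOTIATING: return "negotiation failed";
        case CON_STATE_OPEN:        return "socket closed";
        default:                    return "unrecognized con state";
        }
}

int main(void)
{
        struct conn c = { .state = CON_STATE_NEGOTIATING, .flags = 0 };

        c.flags |= CON_FLAG_SOCK_CLOSED;        /* set by the socket callback */
        if (c.flags & CON_FLAG_SOCK_CLOSED) {
                c.flags &= ~CON_FLAG_SOCK_CLOSED;
                printf("fault: %s\n", sock_closed_error(&c));
        }
        return 0;
}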
@@ -2183,26 +2349,31 @@ fault:
* exponential backoff
*/
static void ceph_fault(struct ceph_connection *con)
+ __releases(con->mutex)
{
pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
dout("fault %p state %lu to peer %s\n",
con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
- if (test_bit(LOSSYTX, &con->state)) {
- dout("fault on LOSSYTX channel\n");
- goto out;
- }
-
- mutex_lock(&con->mutex);
- if (test_bit(CLOSED, &con->state))
- goto out_unlock;
+ BUG_ON(con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN);
con_close_socket(con);
+ if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
+ dout("fault on LOSSYTX channel, marking CLOSED\n");
+ con->state = CON_STATE_CLOSED;
+ goto out_unlock;
+ }
+
if (con->in_msg) {
+ BUG_ON(con->in_msg->con != con);
+ con->in_msg->con = NULL;
ceph_msg_put(con->in_msg);
con->in_msg = NULL;
+ con->ops->put(con);
}
/* Requeue anything that hasn't been acked */
@@ -2211,12 +2382,13 @@ static void ceph_fault(struct ceph_connection *con)
/* If there are no messages queued or keepalive pending, place
* the connection in a STANDBY state */
if (list_empty(&con->out_queue) &&
- !test_bit(KEEPALIVE_PENDING, &con->state)) {
+ !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
- clear_bit(WRITE_PENDING, &con->state);
- set_bit(STANDBY, &con->state);
+ clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
+ con->state = CON_STATE_STANDBY;
} else {
/* retry after a delay. */
+ con->state = CON_STATE_PREOPEN;
if (con->delay == 0)
con->delay = BASE_DELAY_INTERVAL;
else if (con->delay < MAX_DELAY_INTERVAL)
@@ -2237,13 +2409,12 @@ static void ceph_fault(struct ceph_connection *con)
* that when con_work restarts we schedule the
* delay then.
*/
- set_bit(BACKOFF, &con->state);
+ set_bit(CON_FLAG_BACKOFF, &con->flags);
}
}
out_unlock:
mutex_unlock(&con->mutex);
-out:
/*
* in case we faulted due to authentication, invalidate our
* current tickets so that we can get new ones.
@@ -2260,18 +2431,14 @@ out:
/*
- * create a new messenger instance
+ * initialize a new messenger instance
*/
-struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
- u32 supported_features,
- u32 required_features)
+void ceph_messenger_init(struct ceph_messenger *msgr,
+ struct ceph_entity_addr *myaddr,
+ u32 supported_features,
+ u32 required_features,
+ bool nocrc)
{
- struct ceph_messenger *msgr;
-
- msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
- if (msgr == NULL)
- return ERR_PTR(-ENOMEM);
-
msgr->supported_features = supported_features;
msgr->required_features = required_features;
@@ -2284,30 +2451,23 @@ struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
msgr->inst.addr.type = 0;
get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
encode_my_addr(msgr);
+ msgr->nocrc = nocrc;
- dout("messenger_create %p\n", msgr);
- return msgr;
-}
-EXPORT_SYMBOL(ceph_messenger_create);
+ atomic_set(&msgr->stopping, 0);
-void ceph_messenger_destroy(struct ceph_messenger *msgr)
-{
- dout("destroy %p\n", msgr);
- kfree(msgr);
- dout("destroyed messenger %p\n", msgr);
+ dout("%s %p\n", __func__, msgr);
}
-EXPORT_SYMBOL(ceph_messenger_destroy);
+EXPORT_SYMBOL(ceph_messenger_init);
static void clear_standby(struct ceph_connection *con)
{
/* come back from STANDBY? */
- if (test_and_clear_bit(STANDBY, &con->state)) {
- mutex_lock(&con->mutex);
+ if (con->state == CON_STATE_STANDBY) {
dout("clear_standby %p and ++connect_seq\n", con);
+ con->state = CON_STATE_PREOPEN;
con->connect_seq++;
- WARN_ON(test_bit(WRITE_PENDING, &con->state));
- WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
- mutex_unlock(&con->mutex);
+ WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
+ WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
}
}
@@ -2316,21 +2476,24 @@ static void clear_standby(struct ceph_connection *con)
*/
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
- if (test_bit(CLOSED, &con->state)) {
- dout("con_send %p closed, dropping %p\n", con, msg);
- ceph_msg_put(msg);
- return;
- }
-
/* set src+dst */
msg->hdr.src = con->msgr->inst.name;
-
BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
-
msg->needs_out_seq = true;
- /* queue */
mutex_lock(&con->mutex);
+
+ if (con->state == CON_STATE_CLOSED) {
+ dout("con_send %p closed, dropping %p\n", con, msg);
+ ceph_msg_put(msg);
+ mutex_unlock(&con->mutex);
+ return;
+ }
+
+ BUG_ON(msg->con != NULL);
+ msg->con = con->ops->get(con);
+ BUG_ON(msg->con == NULL);
+
BUG_ON(!list_empty(&msg->list_head));
list_add_tail(&msg->list_head, &con->out_queue);
dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
@@ -2339,12 +2502,13 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
le32_to_cpu(msg->hdr.front_len),
le32_to_cpu(msg->hdr.middle_len),
le32_to_cpu(msg->hdr.data_len));
+
+ clear_standby(con);
mutex_unlock(&con->mutex);
/* if there wasn't anything waiting to send before, queue
* new work */
- clear_standby(con);
- if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
@@ -2352,24 +2516,34 @@ EXPORT_SYMBOL(ceph_con_send);
/*
* Revoke a message that was previously queued for send
*/
-void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
+void ceph_msg_revoke(struct ceph_msg *msg)
{
+ struct ceph_connection *con = msg->con;
+
+ if (!con)
+ return; /* Message not in our possession */
+
mutex_lock(&con->mutex);
if (!list_empty(&msg->list_head)) {
- dout("con_revoke %p msg %p - was on queue\n", con, msg);
+ dout("%s %p msg %p - was on queue\n", __func__, con, msg);
list_del_init(&msg->list_head);
- ceph_msg_put(msg);
+ BUG_ON(msg->con == NULL);
+ msg->con->ops->put(msg->con);
+ msg->con = NULL;
msg->hdr.seq = 0;
+
+ ceph_msg_put(msg);
}
if (con->out_msg == msg) {
- dout("con_revoke %p msg %p - was sending\n", con, msg);
+ dout("%s %p msg %p - was sending\n", __func__, con, msg);
con->out_msg = NULL;
if (con->out_kvec_is_msg) {
con->out_skip = con->out_kvec_bytes;
con->out_kvec_is_msg = false;
}
- ceph_msg_put(msg);
msg->hdr.seq = 0;
+
+ ceph_msg_put(msg);
}
mutex_unlock(&con->mutex);
}
@@ -2377,17 +2551,27 @@ void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
/*
* Revoke a message that we may be reading data into
*/
-void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
+void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
+ struct ceph_connection *con;
+
+ BUG_ON(msg == NULL);
+ if (!msg->con) {
+ dout("%s msg %p null con\n", __func__, msg);
+
+ return; /* Message not in our possession */
+ }
+
+ con = msg->con;
mutex_lock(&con->mutex);
- if (con->in_msg && con->in_msg == msg) {
+ if (con->in_msg == msg) {
unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
/* skip rest of message */
- dout("con_revoke_pages %p msg %p revoked\n", con, msg);
- con->in_base_pos = con->in_base_pos -
+ dout("%s %p msg %p revoked\n", __func__, con, msg);
+ con->in_base_pos = con->in_base_pos -
sizeof(struct ceph_msg_header) -
front_len -
middle_len -
@@ -2398,8 +2582,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
} else {
- dout("con_revoke_pages %p msg %p pages %p no-op\n",
- con, con->in_msg, msg);
+ dout("%s %p in_msg %p msg %p no-op\n",
+ __func__, con, con->in_msg, msg);
}
mutex_unlock(&con->mutex);
}
@@ -2410,9 +2594,11 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
void ceph_con_keepalive(struct ceph_connection *con)
{
dout("con_keepalive %p\n", con);
+ mutex_lock(&con->mutex);
clear_standby(con);
- if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
- test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ mutex_unlock(&con->mutex);
+ if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
+ test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
@@ -2431,6 +2617,8 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
if (m == NULL)
goto out;
kref_init(&m->kref);
+
+ m->con = NULL;
INIT_LIST_HEAD(&m->list_head);
m->hdr.tid = 0;
@@ -2526,46 +2714,77 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
}
/*
- * Generic message allocator, for incoming messages.
+ * Allocate a message for receiving an incoming message on a
+ * connection, and save the result in con->in_msg. Uses the
+ * connection's private alloc_msg op if available.
+ *
+ * Returns 0 on success, or a negative error code.
+ *
+ * On success, if we set *skip = 1:
+ * - the next message should be skipped and ignored.
+ * - con->in_msg == NULL
+ * or if we set *skip = 0:
+ * - con->in_msg is non-null.
+ * On error (ENOMEM, EAGAIN, ...),
+ * - con->in_msg == NULL
*/
-static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
- struct ceph_msg_header *hdr,
- int *skip)
+static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
+ struct ceph_msg_header *hdr = &con->in_hdr;
int type = le16_to_cpu(hdr->type);
int front_len = le32_to_cpu(hdr->front_len);
int middle_len = le32_to_cpu(hdr->middle_len);
- struct ceph_msg *msg = NULL;
- int ret;
+ int ret = 0;
+
+ BUG_ON(con->in_msg != NULL);
if (con->ops->alloc_msg) {
+ struct ceph_msg *msg;
+
mutex_unlock(&con->mutex);
msg = con->ops->alloc_msg(con, hdr, skip);
mutex_lock(&con->mutex);
- if (!msg || *skip)
- return NULL;
+ if (con->state != CON_STATE_OPEN) {
+ ceph_msg_put(msg);
+ return -EAGAIN;
+ }
+ con->in_msg = msg;
+ if (con->in_msg) {
+ con->in_msg->con = con->ops->get(con);
+ BUG_ON(con->in_msg->con == NULL);
+ }
+ if (*skip) {
+ con->in_msg = NULL;
+ return 0;
+ }
+ if (!con->in_msg) {
+ con->error_msg =
+ "error allocating memory for incoming message";
+ return -ENOMEM;
+ }
}
- if (!msg) {
- *skip = 0;
- msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
- if (!msg) {
+ if (!con->in_msg) {
+ con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ if (!con->in_msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
- return NULL;
+ return -ENOMEM;
}
- msg->page_alignment = le16_to_cpu(hdr->data_off);
+ con->in_msg->con = con->ops->get(con);
+ BUG_ON(con->in_msg->con == NULL);
+ con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
}
- memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
+ memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
- if (middle_len && !msg->middle) {
- ret = ceph_alloc_middle(con, msg);
+ if (middle_len && !con->in_msg->middle) {
+ ret = ceph_alloc_middle(con, con->in_msg);
if (ret < 0) {
- ceph_msg_put(msg);
- return NULL;
+ ceph_msg_put(con->in_msg);
+ con->in_msg = NULL;
}
}
- return msg;
+ return ret;
}
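
To make the rules in the comment above concrete, here is a stand-alone C model of the ceph_con_in_msg_alloc() contract: return 0 with *skip set when the message should be dropped, 0 with in_msg set on success, and a negative errno with in_msg left NULL on failure. conn, message, and alloc_cb are hypothetical stand-ins, not the ceph types:

#include <errno.h>
#include <stdlib.h>

struct message { int type; };

struct conn {
        struct message *in_msg;
        struct message *(*alloc_cb)(struct conn *c, int *skip);
};

static int con_in_msg_alloc(struct conn *c, int *skip)
{
        struct message *m;

        *skip = 0;
        if (c->alloc_cb)
                m = c->alloc_cb(c, skip);       /* may set *skip = 1 */
        else
                m = malloc(sizeof(*m));         /* generic fallback path */

        if (*skip) {
                free(m);                        /* usually NULL when skipping */
                c->in_msg = NULL;
                return 0;
        }
        if (!m)
                return -ENOMEM;                 /* in_msg left NULL on error */
        c->in_msg = m;
        return 0;
}

int main(void)
{
        struct conn c = { 0 };
        int skip;

        if (con_in_msg_alloc(&c, &skip) == 0 && !skip)
                free(c.in_msg);
        return 0;
}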
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index d0649a9655b..105d533b55f 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -106,9 +106,9 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
monc->pending_auth = 1;
monc->m_auth->front.iov_len = len;
monc->m_auth->hdr.front_len = cpu_to_le32(len);
- ceph_con_revoke(monc->con, monc->m_auth);
+ ceph_msg_revoke(monc->m_auth);
ceph_msg_get(monc->m_auth); /* keep our ref */
- ceph_con_send(monc->con, monc->m_auth);
+ ceph_con_send(&monc->con, monc->m_auth);
}
/*
@@ -117,8 +117,11 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
static void __close_session(struct ceph_mon_client *monc)
{
dout("__close_session closing mon%d\n", monc->cur_mon);
- ceph_con_revoke(monc->con, monc->m_auth);
- ceph_con_close(monc->con);
+ ceph_msg_revoke(monc->m_auth);
+ ceph_msg_revoke_incoming(monc->m_auth_reply);
+ ceph_msg_revoke(monc->m_subscribe);
+ ceph_msg_revoke_incoming(monc->m_subscribe_ack);
+ ceph_con_close(&monc->con);
monc->cur_mon = -1;
monc->pending_auth = 0;
ceph_auth_reset(monc->auth);
@@ -142,9 +145,8 @@ static int __open_session(struct ceph_mon_client *monc)
monc->want_next_osdmap = !!monc->want_next_osdmap;
dout("open_session mon%d opening\n", monc->cur_mon);
- monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
- monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
- ceph_con_open(monc->con,
+ ceph_con_open(&monc->con,
+ CEPH_ENTITY_TYPE_MON, monc->cur_mon,
&monc->monmap->mon_inst[monc->cur_mon].addr);
/* initiate authentication handshake */
@@ -226,8 +228,8 @@ static void __send_subscribe(struct ceph_mon_client *monc)
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- ceph_con_revoke(monc->con, msg);
- ceph_con_send(monc->con, ceph_msg_get(msg));
+ ceph_msg_revoke(msg);
+ ceph_con_send(&monc->con, ceph_msg_get(msg));
monc->sub_sent = jiffies | 1; /* never 0 */
}
@@ -247,7 +249,7 @@ static void handle_subscribe_ack(struct ceph_mon_client *monc,
if (monc->hunting) {
pr_info("mon%d %s session established\n",
monc->cur_mon,
- ceph_pr_addr(&monc->con->peer_addr.in_addr));
+ ceph_pr_addr(&monc->con.peer_addr.in_addr));
monc->hunting = false;
}
dout("handle_subscribe_ack after %d seconds\n", seconds);
@@ -439,6 +441,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
m = NULL;
} else {
dout("get_generic_reply %lld got %p\n", tid, req->reply);
+ *skip = 0;
m = ceph_msg_get(req->reply);
/*
* we don't need to track the connection reading into
@@ -461,7 +464,7 @@ static int do_generic_request(struct ceph_mon_client *monc,
req->request->hdr.tid = cpu_to_le64(req->tid);
__insert_generic_request(monc, req);
monc->num_generic_requests++;
- ceph_con_send(monc->con, ceph_msg_get(req->request));
+ ceph_con_send(&monc->con, ceph_msg_get(req->request));
mutex_unlock(&monc->mutex);
err = wait_for_completion_interruptible(&req->completion);
@@ -684,8 +687,9 @@ static void __resend_generic_request(struct ceph_mon_client *monc)
for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_mon_generic_request, node);
- ceph_con_revoke(monc->con, req->request);
- ceph_con_send(monc->con, ceph_msg_get(req->request));
+ ceph_msg_revoke(req->request);
+ ceph_msg_revoke_incoming(req->reply);
+ ceph_con_send(&monc->con, ceph_msg_get(req->request));
}
}
@@ -705,7 +709,7 @@ static void delayed_work(struct work_struct *work)
__close_session(monc);
__open_session(monc); /* continue hunting */
} else {
- ceph_con_keepalive(monc->con);
+ ceph_con_keepalive(&monc->con);
__validate_auth(monc);
@@ -760,19 +764,12 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
goto out;
/* connection */
- monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
- if (!monc->con)
- goto out_monmap;
- ceph_con_init(monc->client->msgr, monc->con);
- monc->con->private = monc;
- monc->con->ops = &mon_con_ops;
-
/* authentication */
monc->auth = ceph_auth_init(cl->options->name,
cl->options->key);
if (IS_ERR(monc->auth)) {
err = PTR_ERR(monc->auth);
- goto out_con;
+ goto out_monmap;
}
monc->auth->want_keys =
CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
@@ -801,6 +798,9 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
if (!monc->m_auth)
goto out_auth_reply;
+ ceph_con_init(&monc->con, monc, &mon_con_ops,
+ &monc->client->msgr);
+
monc->cur_mon = -1;
monc->hunting = true;
monc->sub_renew_after = jiffies;
@@ -824,8 +824,6 @@ out_subscribe_ack:
ceph_msg_put(monc->m_subscribe_ack);
out_auth:
ceph_auth_destroy(monc->auth);
-out_con:
- monc->con->ops->put(monc->con);
out_monmap:
kfree(monc->monmap);
out:
@@ -841,10 +839,6 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
mutex_lock(&monc->mutex);
__close_session(monc);
- monc->con->private = NULL;
- monc->con->ops->put(monc->con);
- monc->con = NULL;
-
mutex_unlock(&monc->mutex);
/*
@@ -888,8 +882,8 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
} else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
dout("authenticated, starting session\n");
- monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
- monc->client->msgr->inst.name.num =
+ monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
+ monc->client->msgr.inst.name.num =
cpu_to_le64(monc->auth->global_id);
__send_subscribe(monc);
@@ -1000,6 +994,8 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
m = ceph_msg_new(type, front_len, GFP_NOFS, false);
+ if (!m)
+ return NULL; /* ENOMEM--return skip == 0 */
break;
}
@@ -1029,7 +1025,7 @@ static void mon_fault(struct ceph_connection *con)
if (!monc->hunting)
pr_info("mon%d %s session lost, "
"hunting for new mon\n", monc->cur_mon,
- ceph_pr_addr(&monc->con->peer_addr.in_addr));
+ ceph_pr_addr(&monc->con.peer_addr.in_addr));
__close_session(monc);
if (!monc->hunting) {
@@ -1044,9 +1040,23 @@ out:
mutex_unlock(&monc->mutex);
}
+/*
+ * We can ignore refcounting on the connection struct, as all references
+ * will come from the messenger workqueue, which is drained prior to
+ * mon_client destruction.
+ */
+static struct ceph_connection *con_get(struct ceph_connection *con)
+{
+ return con;
+}
+
+static void con_put(struct ceph_connection *con)
+{
+}
+
static const struct ceph_connection_operations mon_con_ops = {
- .get = ceph_con_get,
- .put = ceph_con_put,
+ .get = con_get,
+ .put = con_put,
.dispatch = dispatch,
.fault = mon_fault,
.alloc_msg = mon_alloc_msg,
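
mon_client now embeds its ceph_connection instead of heap-allocating it, which is why the con_get()/con_put() ops above can be no-ops: the messenger workqueue is drained before the enclosing client is freed. A simplified sketch of that ownership model (the types here are illustrative only):

#include <stdio.h>

struct connection {
        struct connection *(*get)(struct connection *);
        void (*put)(struct connection *);
};

struct mon_client {
        struct connection con;  /* embedded: lives and dies with the client */
};

/* Refcounting can be skipped because every reference comes from the
 * messenger workqueue, which is drained before the client goes away. */
static struct connection *con_get(struct connection *con) { return con; }
static void con_put(struct connection *con) { (void)con; }

int main(void)
{
        struct mon_client monc = {
                .con = { .get = con_get, .put = con_put },
        };
        struct connection *ref = monc.con.get(&monc.con);

        monc.con.put(ref);
        printf("embedded con at %p\n", (void *)&monc.con);
        return 0;
}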
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 11d5f4196a7..ddec1c10ac8 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(0, pool->front_len, gfp_mask, true);
+ msg = ceph_msg_new(pool->type, pool->front_len, gfp_mask, true);
if (!msg) {
dout("msgpool_alloc %s failed\n", pool->name);
} else {
@@ -32,10 +32,11 @@ static void msgpool_free(void *element, void *arg)
ceph_msg_put(msg);
}
-int ceph_msgpool_init(struct ceph_msgpool *pool,
+int ceph_msgpool_init(struct ceph_msgpool *pool, int type,
int front_len, int size, bool blocking, const char *name)
{
dout("msgpool %s init\n", name);
+ pool->type = type;
pool->front_len = front_len;
pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
if (!pool->pool)
@@ -61,7 +62,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
WARN_ON(1);
/* try to alloc a fresh message */
- return ceph_msg_new(0, front_len, GFP_NOFS, false);
+ return ceph_msg_new(pool->type, front_len, GFP_NOFS, false);
}
msg = mempool_alloc(pool->pool, GFP_NOFS);
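
The msgpool change records the message type once at pool init, so both mempool-backed and freshly allocated messages carry a real header type instead of 0. A compact sketch of the idea, with an invented type value:

#include <stdio.h>
#include <stdlib.h>

struct msg { int type; size_t front_len; };

struct msgpool {
        int type;               /* stamped on every message from this pool */
        size_t front_len;
};

static struct msg *msgpool_get(struct msgpool *pool)
{
        struct msg *m = malloc(sizeof(*m));

        if (!m)
                return NULL;
        m->type = pool->type;   /* previously hard-coded to 0 */
        m->front_len = pool->front_len;
        return m;
}

int main(void)
{
        struct msgpool op_pool = { .type = 42 /* e.g. an OSD op type */,
                                   .front_len = 4096 };
        struct msg *m = msgpool_get(&op_pool);

        if (m) {
                printf("msg type %d len %zu\n", m->type, m->front_len);
                free(m);
        }
        return 0;
}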
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index ca59e66c978..42119c05e82 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -140,10 +140,9 @@ void ceph_osdc_release_request(struct kref *kref)
if (req->r_request)
ceph_msg_put(req->r_request);
if (req->r_con_filling_msg) {
- dout("release_request revoking pages %p from con %p\n",
+ dout("%s revoking pages %p from con %p\n", __func__,
req->r_pages, req->r_con_filling_msg);
- ceph_con_revoke_message(req->r_con_filling_msg,
- req->r_reply);
+ ceph_msg_revoke_incoming(req->r_reply);
req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
}
if (req->r_reply)
@@ -214,10 +213,13 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
kref_init(&req->r_kref);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
+ rb_init_node(&req->r_node);
INIT_LIST_HEAD(&req->r_unsafe_item);
INIT_LIST_HEAD(&req->r_linger_item);
INIT_LIST_HEAD(&req->r_linger_osd);
INIT_LIST_HEAD(&req->r_req_lru_item);
+ INIT_LIST_HEAD(&req->r_osd_item);
+
req->r_flags = flags;
WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
@@ -243,6 +245,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
}
ceph_pagelist_init(req->r_trail);
}
+
/* create request message; allow space for oid */
msg_size += MAX_OBJ_NAME_SIZE;
if (snapc)
@@ -256,7 +259,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
return NULL;
}
- msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
memset(msg->front.iov_base, 0, msg->front.iov_len);
req->r_request = msg;
@@ -624,7 +626,7 @@ static void osd_reset(struct ceph_connection *con)
/*
* Track open sessions with osds.
*/
-static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
+static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
struct ceph_osd *osd;
@@ -634,15 +636,13 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
atomic_set(&osd->o_ref, 1);
osd->o_osdc = osdc;
+ osd->o_osd = onum;
INIT_LIST_HEAD(&osd->o_requests);
INIT_LIST_HEAD(&osd->o_linger_requests);
INIT_LIST_HEAD(&osd->o_osd_lru);
osd->o_incarnation = 1;
- ceph_con_init(osdc->client->msgr, &osd->o_con);
- osd->o_con.private = osd;
- osd->o_con.ops = &osd_con_ops;
- osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
+ ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
INIT_LIST_HEAD(&osd->o_keepalive_item);
return osd;
@@ -688,7 +688,7 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
static void remove_all_osds(struct ceph_osd_client *osdc)
{
- dout("__remove_old_osds %p\n", osdc);
+ dout("%s %p\n", __func__, osdc);
mutex_lock(&osdc->request_mutex);
while (!RB_EMPTY_ROOT(&osdc->osds)) {
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
@@ -752,7 +752,8 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
ret = -EAGAIN;
} else {
ceph_con_close(&osd->o_con);
- ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+ ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+ &osdc->osdmap->osd_addr[osd->o_osd]);
osd->o_incarnation++;
}
return ret;
@@ -853,7 +854,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
if (req->r_osd) {
/* make sure the original request isn't in flight. */
- ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+ ceph_msg_revoke(req->r_request);
list_del_init(&req->r_osd_item);
if (list_empty(&req->r_osd->o_requests) &&
@@ -880,7 +881,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
static void __cancel_request(struct ceph_osd_request *req)
{
if (req->r_sent && req->r_osd) {
- ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+ ceph_msg_revoke(req->r_request);
req->r_sent = 0;
}
}
@@ -890,7 +891,9 @@ static void __register_linger_request(struct ceph_osd_client *osdc,
{
dout("__register_linger_request %p\n", req);
list_add_tail(&req->r_linger_item, &osdc->req_linger);
- list_add_tail(&req->r_linger_osd, &req->r_osd->o_linger_requests);
+ if (req->r_osd)
+ list_add_tail(&req->r_linger_osd,
+ &req->r_osd->o_linger_requests);
}
static void __unregister_linger_request(struct ceph_osd_client *osdc,
@@ -998,18 +1001,18 @@ static int __map_request(struct ceph_osd_client *osdc,
req->r_osd = __lookup_osd(osdc, o);
if (!req->r_osd && o >= 0) {
err = -ENOMEM;
- req->r_osd = create_osd(osdc);
+ req->r_osd = create_osd(osdc, o);
if (!req->r_osd) {
list_move(&req->r_req_lru_item, &osdc->req_notarget);
goto out;
}
dout("map_request osd %p is osd%d\n", req->r_osd, o);
- req->r_osd->o_osd = o;
- req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
__insert_osd(osdc, req->r_osd);
- ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+ ceph_con_open(&req->r_osd->o_con,
+ CEPH_ENTITY_TYPE_OSD, o,
+ &osdc->osdmap->osd_addr[o]);
}
if (req->r_osd) {
@@ -1304,8 +1307,9 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
mutex_lock(&osdc->request_mutex);
- for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ for (p = rb_first(&osdc->requests); p; ) {
req = rb_entry(p, struct ceph_osd_request, r_node);
+ p = rb_next(p);
err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
@@ -1313,10 +1317,23 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
dout("%p tid %llu maps to no osd\n", req, req->r_tid);
needmap++; /* request a newer map */
} else if (err > 0) {
- dout("%p tid %llu requeued on osd%d\n", req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- if (!req->r_linger)
+ if (!req->r_linger) {
+ dout("%p tid %llu requeued on osd%d\n", req,
+ req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
req->r_flags |= CEPH_OSD_FLAG_RETRY;
+ }
+ }
+ if (req->r_linger && list_empty(&req->r_linger_item)) {
+ /*
+ * register as a linger so that we will
+ * re-submit below and get a new tid
+ */
+ dout("%p tid %llu restart on osd%d\n",
+ req, req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+ __register_linger_request(osdc, req);
+ __unregister_request(osdc, req);
}
}
@@ -1391,7 +1408,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
epoch, maplen);
newmap = osdmap_apply_incremental(&p, next,
osdc->osdmap,
- osdc->client->msgr);
+ &osdc->client->msgr);
if (IS_ERR(newmap)) {
err = PTR_ERR(newmap);
goto bad;
@@ -1839,11 +1856,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
if (!osdc->req_mempool)
goto out;
- err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
+ err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
+ OSD_OP_FRONT_LEN, 10, true,
"osd_op");
if (err < 0)
goto out_mempool;
- err = ceph_msgpool_init(&osdc->msgpool_op_reply,
+ err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
OSD_OPREPLY_FRONT_LEN, 10, true,
"osd_op_reply");
if (err < 0)
@@ -2019,15 +2037,15 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
if (!req) {
*skip = 1;
m = NULL;
- pr_info("get_reply unknown tid %llu from osd%d\n", tid,
- osd->o_osd);
+ dout("get_reply unknown tid %llu from osd%d\n", tid,
+ osd->o_osd);
goto out;
}
if (req->r_con_filling_msg) {
- dout("get_reply revoking msg %p from old con %p\n",
+ dout("%s revoking msg %p from old con %p\n", __func__,
req->r_reply, req->r_con_filling_msg);
- ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
+ ceph_msg_revoke_incoming(req->r_reply);
req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
req->r_con_filling_msg = NULL;
}
@@ -2080,6 +2098,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
int type = le16_to_cpu(hdr->type);
int front = le32_to_cpu(hdr->front_len);
+ *skip = 0;
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_WATCH_NOTIFY:
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 81e3b84a77e..3124b71a888 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -135,6 +135,21 @@ bad:
return -EINVAL;
}
+static int skip_name_map(void **p, void *end)
+{
+ int len;
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ int strlen;
+ *p += sizeof(u32);
+ ceph_decode_32_safe(p, end, strlen, bad);
+ *p += strlen;
+ }
+ return 0;
+bad:
+ return -EINVAL;
+}
+
static struct crush_map *crush_decode(void *pbyval, void *end)
{
struct crush_map *c;
@@ -143,6 +158,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
void **p = &pbyval;
void *start = pbyval;
u32 magic;
+ u32 num_name_maps;
dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
@@ -150,6 +166,11 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
if (c == NULL)
return ERR_PTR(-ENOMEM);
+ /* set tunables to default values */
+ c->choose_local_tries = 2;
+ c->choose_local_fallback_tries = 5;
+ c->choose_total_tries = 19;
+
ceph_decode_need(p, end, 4*sizeof(u32), bad);
magic = ceph_decode_32(p);
if (magic != CRUSH_MAGIC) {
@@ -297,7 +318,25 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
}
/* ignore trailing name maps. */
+ for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
+ err = skip_name_map(p, end);
+ if (err < 0)
+ goto done;
+ }
+
+ /* tunables */
+ ceph_decode_need(p, end, 3*sizeof(u32), done);
+ c->choose_local_tries = ceph_decode_32(p);
+ c->choose_local_fallback_tries = ceph_decode_32(p);
+ c->choose_total_tries = ceph_decode_32(p);
+ dout("crush decode tunable choose_local_tries = %d",
+ c->choose_local_tries);
+ dout("crush decode tunable choose_local_fallback_tries = %d",
+ c->choose_local_fallback_tries);
+ dout("crush decode tunable choose_total_tries = %d",
+ c->choose_total_tries);
+done:
dout("crush_decode success\n");
return c;
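
The tunables hunk uses a decode pattern worth spelling out: set the defaults first, then treat the trailing fields as optional by bailing out to done when the buffer runs out, so maps from older encoders still decode. A stand-alone sketch of the same style (the buffer layout is invented for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct tunables {
        uint32_t choose_local_tries;
        uint32_t choose_total_tries;
};

static int decode_u32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
        if (end - *p < 4)
                return -1;              /* not enough bytes left */
        memcpy(v, *p, 4);
        *p += 4;
        return 0;
}

static void decode(const uint8_t *p, const uint8_t *end, struct tunables *t)
{
        /* defaults first: old encoders simply omit the trailing fields */
        t->choose_local_tries = 2;
        t->choose_total_tries = 19;

        if (decode_u32(&p, end, &t->choose_local_tries))
                return;                 /* the "goto done" in the kernel code */
        decode_u32(&p, end, &t->choose_total_tries);
}

int main(void)
{
        uint8_t buf[4] = { 7, 0, 0, 0 };        /* only the first tunable */
        struct tunables t;

        decode(buf, buf + sizeof(buf), &t);
        printf("local=%u total=%u\n", t.choose_local_tries,
               t.choose_total_tries);          /* local=7 total=19 */
        return 0;
}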
@@ -488,15 +527,16 @@ static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
ceph_decode_32_safe(p, end, pool, bad);
ceph_decode_32_safe(p, end, len, bad);
dout(" pool %d len %d\n", pool, len);
+ ceph_decode_need(p, end, len, bad);
pi = __lookup_pg_pool(&map->pg_pools, pool);
if (pi) {
+ char *name = kstrndup(*p, len, GFP_NOFS);
+
+ if (!name)
+ return -ENOMEM;
kfree(pi->name);
- pi->name = kmalloc(len + 1, GFP_NOFS);
- if (pi->name) {
- memcpy(pi->name, *p, len);
- pi->name[len] = '\0';
- dout(" name is %s\n", pi->name);
- }
+ pi->name = name;
+ dout(" name is %s\n", pi->name);
}
*p += len;
}
@@ -666,6 +706,9 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
ceph_decode_copy(p, &pgid, sizeof(pgid));
n = ceph_decode_32(p);
+ err = -EINVAL;
+ if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
+ goto bad;
ceph_decode_need(p, end, n * sizeof(u32), bad);
err = -ENOMEM;
pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
@@ -889,6 +932,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
(void) __remove_pg_mapping(&map->pg_temp, pgid);
/* insert */
+ if (pglen > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) {
+ err = -EINVAL;
+ goto bad;
+ }
pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
if (!pg) {
err = -ENOMEM;
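
Both osdmap hunks add the same guard: reject n before computing sizeof(*pg) + n * sizeof(u32), since that expression can wrap in 32-bit arithmetic and yield an undersized allocation. The check in isolation, with a simplified mapping struct:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct pg_mapping { uint32_t len; /* followed by len u32 osd ids */ };

static void *alloc_mapping(uint32_t n)
{
        /* reject any n large enough that the size computation would wrap */
        if (n > (UINT32_MAX - sizeof(struct pg_mapping)) / sizeof(uint32_t))
                return NULL;
        return malloc(sizeof(struct pg_mapping) + n * sizeof(uint32_t));
}

int main(void)
{
        void *p = alloc_mapping(16);

        printf("n=16 -> %s\n", p ? "ok" : "rejected");
        free(p);
        printf("n=UINT32_MAX -> %s\n",
               alloc_mapping(UINT32_MAX) ? "ok" : "rejected");
        return 0;
}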
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 13cb409a7bb..665cd23020f 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -72,8 +72,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
}
EXPORT_SYMBOL(ceph_pagelist_append);
-/**
- * Allocate enough pages for a pagelist to append the given amount
+/* Allocate enough pages for a pagelist to append the given amount
* of data without allocating.
* Returns: 0 on success, -ENOMEM on error.
*/
@@ -95,9 +94,7 @@ int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
}
EXPORT_SYMBOL(ceph_pagelist_reserve);
-/**
- * Free any pages that have been preallocated.
- */
+/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
while (!list_empty(&pl->free_list)) {
@@ -112,9 +109,7 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
-/**
- * Create a truncation point.
- */
+/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
@@ -124,8 +119,7 @@ void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);
-/**
- * Truncate a pagelist to the given point. Move extra pages to reserve.
+/* Truncate a pagelist to the given point. Move extra pages to reserve.
* This won't sleep.
* Returns: 0 on success,
* -EINVAL if the pagelist doesn't match the trunc point pagelist
diff --git a/net/compat.c b/net/compat.c
index 1b96281892d..74ed1d7a84a 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -221,6 +221,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
+ struct compat_timeval ctv;
+ struct compat_timespec cts[3];
int cmlen;
if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
@@ -229,8 +231,6 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
}
if (!COMPAT_USE_64BIT_TIME) {
- struct compat_timeval ctv;
- struct compat_timespec cts[3];
if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
struct timeval *tv = (struct timeval *)data;
ctv.tv_sec = tv->tv_sec;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ae6acf6a3de..0337e2b7686 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -248,7 +248,6 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */
- trace_kfree_skb(skb, skb_free_datagram_locked);
__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
diff --git a/net/core/dev.c b/net/core/dev.c
index 1cb0d8a6aa6..0cb3fe8d8e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1172,6 +1172,7 @@ static int __dev_open(struct net_device *dev)
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
return ret;
@@ -1632,6 +1633,8 @@ static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ return -ENOMEM;
atomic_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
@@ -1691,7 +1694,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
-/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+/**
+ * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
* @dev: Network device
* @txq: number of queues available
*
@@ -1793,6 +1797,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
+/**
+ * netif_get_num_default_rss_queues - default number of RSS queues
+ *
+ * This routine should set an upper limit on the number of RSS queues
+ * used by default by multiqueue devices.
+ */
+int netif_get_num_default_rss_queues(void)
+{
+ return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+}
+EXPORT_SYMBOL(netif_get_num_default_rss_queues);
+
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
@@ -2459,6 +2475,23 @@ static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
/**
+ * dev_loopback_xmit - loop back @skb
+ * @skb: buffer to transmit
+ */
+int dev_loopback_xmit(struct sk_buff *skb)
+{
+ skb_reset_mac_header(skb);
+ __skb_pull(skb, skb_network_offset(skb));
+ skb->pkt_type = PACKET_LOOPBACK;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ WARN_ON(!skb_dst(skb));
+ skb_dst_force(skb);
+ netif_rx_ni(skb);
+ return 0;
+}
+EXPORT_SYMBOL(dev_loopback_xmit);
+
+/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
*
@@ -3123,6 +3156,23 @@ void netdev_rx_handler_unregister(struct net_device *dev)
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+/*
+ * Limit the use of PFMEMALLOC reserves to those protocols that implement
+ * the special handling of PFMEMALLOC skbs.
+ */
+static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
+{
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_ARP):
+ case __constant_htons(ETH_P_IP):
+ case __constant_htons(ETH_P_IPV6):
+ case __constant_htons(ETH_P_8021Q):
+ return true;
+ default:
+ return false;
+ }
+}
+
static int __netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
@@ -3132,17 +3182,28 @@ static int __netif_receive_skb(struct sk_buff *skb)
bool deliver_exact = false;
int ret = NET_RX_DROP;
__be16 type;
+ unsigned long pflags = current->flags;
net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
+ /*
+ * PFMEMALLOC skbs are special, they should
+ * - be delivered to SOCK_MEMALLOC sockets only
+ * - stay away from userspace
+ * - have bounded memory usage
+ *
+ * Use PF_MEMALLOC as this saves us from propagating the allocation
+ * context down to all allocation sites.
+ */
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ current->flags |= PF_MEMALLOC;
+
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
- return NET_RX_DROP;
+ goto out;
- if (!skb->skb_iif)
- skb->skb_iif = skb->dev->ifindex;
orig_dev = skb->dev;
skb_reset_network_header(skb);
@@ -3154,13 +3215,14 @@ static int __netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
another_round:
+ skb->skb_iif = skb->dev->ifindex;
__this_cpu_inc(softnet_data.processed);
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
skb = vlan_untag(skb);
if (unlikely(!skb))
- goto out;
+ goto unlock;
}
#ifdef CONFIG_NET_CLS_ACT
@@ -3170,6 +3232,9 @@ another_round:
}
#endif
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb))
+ goto skip_taps;
+
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (!ptype->dev || ptype->dev == skb->dev) {
if (pt_prev)
@@ -3178,13 +3243,18 @@ another_round:
}
}
+skip_taps:
#ifdef CONFIG_NET_CLS_ACT
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
- goto out;
+ goto unlock;
ncls:
#endif
+ if (sk_memalloc_socks() && skb_pfmemalloc(skb)
+ && !skb_pfmemalloc_protocol(skb))
+ goto drop;
+
rx_handler = rcu_dereference(skb->dev->rx_handler);
if (vlan_tx_tag_present(skb)) {
if (pt_prev) {
@@ -3194,7 +3264,7 @@ ncls:
if (vlan_do_receive(&skb, !rx_handler))
goto another_round;
else if (unlikely(!skb))
- goto out;
+ goto unlock;
}
if (rx_handler) {
@@ -3204,7 +3274,7 @@ ncls:
}
switch (rx_handler(&skb)) {
case RX_HANDLER_CONSUMED:
- goto out;
+ goto unlock;
case RX_HANDLER_ANOTHER:
goto another_round;
case RX_HANDLER_EXACT:
@@ -3232,8 +3302,12 @@ ncls:
}
if (pt_prev) {
- ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ ret = -ENOMEM;
+ else
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
+drop:
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
/* Jamal, now you will not be able to escape explaining
@@ -3242,8 +3316,10 @@ ncls:
ret = NET_RX_DROP;
}
-out:
+unlock:
rcu_read_unlock();
+out:
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
return ret;
}
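
The receive-path hunks bracket __netif_receive_skb() with a save and restore of the task's PF_MEMALLOC bit rather than threading an allocation context through every call site. A plain C sketch of the restore-only-the-masked-bit idiom (the flag value and helper here are stand-ins for the kernel's):

#include <stdio.h>

#define PF_MEMALLOC 0x00000800u         /* illustrative bit value */

static unsigned int current_flags;      /* stands in for current->flags */

static void tsk_restore_flags(unsigned int *flags, unsigned int orig,
                              unsigned int mask)
{
        *flags = (*flags & ~mask) | (orig & mask);
}

static void netif_receive(int pfmemalloc_skb)
{
        unsigned int pflags = current_flags;    /* save on entry */

        if (pfmemalloc_skb)
                current_flags |= PF_MEMALLOC;   /* allocs may use reserves */

        /* ... deliver the packet; nested allocations see the flag ... */

        tsk_restore_flags(&current_flags, pflags, PF_MEMALLOC);
}

int main(void)
{
        netif_receive(1);
        printf("PF_MEMALLOC after return: %u\n",
               !!(current_flags & PF_MEMALLOC));        /* prints 0 */
        return 0;
}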
@@ -4767,6 +4843,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
err = ops->ndo_set_mac_address(dev, sa);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
@@ -5545,6 +5622,7 @@ int register_netdevice(struct net_device *dev)
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
@@ -5646,7 +5724,7 @@ int netdev_refcnt_read(const struct net_device *dev)
}
EXPORT_SYMBOL(netdev_refcnt_read);
-/*
+/**
* netdev_wait_allrefs - wait until all references are gone.
*
* This is called when unregistering network devices.
diff --git a/net/core/dst.c b/net/core/dst.c
index 43d94cedbf7..069d51d2941 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -94,7 +94,7 @@ loop:
* But we do not have a state "obsoleted, but
* referenced by parent", so this is correct.
*/
- if (dst->obsolete > 1)
+ if (dst->obsolete > 0)
continue;
___dst_free(dst);
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(dst_discard);
const u32 dst_default_metrics[RTAX_MAX];
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, int flags)
+ int initial_ref, int initial_obsolete, unsigned short flags)
{
struct dst_entry *dst;
@@ -171,7 +171,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst_init_metrics(dst, dst_default_metrics, true);
dst->expires = 0UL;
dst->path = dst;
- RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
@@ -188,6 +187,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->__use = 0;
dst->lastuse = jiffies;
dst->flags = flags;
+ dst->pending_confirm = 0;
dst->next = NULL;
if (!(flags & DST_NOCOUNT))
dst_entries_add(ops, 1);
@@ -202,7 +202,7 @@ static void ___dst_free(struct dst_entry *dst)
*/
if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
dst->input = dst->output = dst_discard;
- dst->obsolete = 2;
+ dst->obsolete = DST_OBSOLETE_DEAD;
}
void __dst_free(struct dst_entry *dst)
@@ -224,19 +224,12 @@ EXPORT_SYMBOL(__dst_free);
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
struct dst_entry *child;
- struct neighbour *neigh;
smp_rmb();
again:
- neigh = rcu_dereference_protected(dst->_neighbour, 1);
child = dst->child;
- if (neigh) {
- RCU_INIT_POINTER(dst->_neighbour, NULL);
- neigh_release(neigh);
- }
-
if (!(dst->flags & DST_NOCOUNT))
dst_entries_add(dst->ops, -1);
@@ -360,19 +353,9 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
if (!unregister) {
dst->input = dst->output = dst_discard;
} else {
- struct neighbour *neigh;
-
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
- rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
- if (neigh && neigh->dev == dev) {
- neigh->dev = dst->dev;
- dev_hold(dst->dev);
- dev_put(dev);
- }
- rcu_read_unlock();
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9c2afb48027..cbf033dcaf1 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -729,6 +729,40 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return dev->ethtool_ops->set_wol(dev, &wol);
}
+static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_eee edata;
+ int rc;
+
+ if (!dev->ethtool_ops->get_eee)
+ return -EOPNOTSUPP;
+
+ memset(&edata, 0, sizeof(struct ethtool_eee));
+ edata.cmd = ETHTOOL_GEEE;
+ rc = dev->ethtool_ops->get_eee(dev, &edata);
+
+ if (rc)
+ return rc;
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_eee edata;
+
+ if (!dev->ethtool_ops->set_eee)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_eee(dev, &edata);
+}
+
static int ethtool_nway_reset(struct net_device *dev)
{
if (!dev->ethtool_ops->nway_reset)
@@ -1409,6 +1443,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GSET:
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
+ case ETHTOOL_GLINK:
case ETHTOOL_GCOALESCE:
case ETHTOOL_GRINGPARAM:
case ETHTOOL_GPAUSEPARAM:
@@ -1417,6 +1452,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GSG:
case ETHTOOL_GSSET_INFO:
case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GSTATS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:
case ETHTOOL_GUFO:
@@ -1429,8 +1465,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
+ case ETHTOOL_GRXFHINDIR:
case ETHTOOL_GFEATURES:
+ case ETHTOOL_GCHANNELS:
case ETHTOOL_GET_TS_INFO:
+ case ETHTOOL_GEEE:
break;
default:
if (!capable(CAP_NET_ADMIN))
@@ -1471,6 +1510,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_set_value_void(dev, useraddr,
dev->ethtool_ops->set_msglevel);
break;
+ case ETHTOOL_GEEE:
+ rc = ethtool_get_eee(dev, useraddr);
+ break;
+ case ETHTOOL_SEEE:
+ rc = ethtool_set_eee(dev, useraddr);
+ break;
case ETHTOOL_NWAY_RST:
rc = ethtool_nway_reset(dev);
break;
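
The new EEE handlers follow the usual ethtool ioctl shape: probe for the driver op, zero the struct, set cmd, then shuttle the struct across the user boundary. A user-space mock of the get path (memcpy stands in for copy_to_user, and the command value is invented):

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct eee { unsigned int cmd; unsigned int eee_enabled; };

struct dev_ops {
        int (*get_eee)(struct eee *e);
};

static int drv_get_eee(struct eee *e) { e->eee_enabled = 1; return 0; }

static int mock_ethtool_get_eee(const struct dev_ops *ops, void *useraddr)
{
        struct eee edata;
        int rc;

        if (!ops->get_eee)
                return -EOPNOTSUPP;     /* driver does not support EEE */

        memset(&edata, 0, sizeof(edata));
        edata.cmd = 0x44;               /* hypothetical GEEE command value */
        rc = ops->get_eee(&edata);
        if (rc)
                return rc;

        memcpy(useraddr, &edata, sizeof(edata));  /* copy_to_user stand-in */
        return 0;
}

int main(void)
{
        struct dev_ops ops = { .get_eee = drv_get_eee };
        struct eee user_buf;

        if (mock_ethtool_get_eee(&ops, &user_buf) == 0)
                printf("eee_enabled=%u\n", user_buf.eee_enabled);
        return 0;
}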
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 72cceb79d0d..ab7db83236c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -151,6 +151,8 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
list_del_rcu(&rule->list);
+ if (ops->delete)
+ ops->delete(rule);
fib_rule_put(rule);
}
}
@@ -499,6 +501,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
notify_rule_change(RTM_DELRULE, rule, ops, nlh,
NETLINK_CB(skb).pid);
+ if (ops->delete)
+ ops->delete(rule);
fib_rule_put(rule);
flush_route_cache(ops);
rules_ops_put(ops);
diff --git a/net/core/filter.c b/net/core/filter.c
index d4ce2dc712e..907efd27ec7 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -83,6 +83,14 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
int err;
struct sk_filter *filter;
+ /*
+ * If the skb was allocated from pfmemalloc reserves, only
+ * allow SOCK_MEMALLOC sockets to use it as this socket is
+ * helping free memory
+ */
+ if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+ return -ENOMEM;
+
err = security_sock_rcv_skb(sk, skb);
if (err)
return err;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a225089df5b..466820b6e34 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -4,6 +4,7 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
@@ -55,8 +56,8 @@ ipv6:
return false;
ip_proto = iph->nexthdr;
- flow->src = iph->saddr.s6_addr32[3];
- flow->dst = iph->daddr.s6_addr32[3];
+ flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+ flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
nhoff += sizeof(struct ipv6hdr);
break;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d81d026138f..117afaf5126 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -474,8 +474,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);
-struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
- struct net_device *dev)
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref)
{
u32 hash_val;
int key_len = tbl->key_len;
@@ -535,14 +535,16 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
- neigh_hold(n1);
+ if (want_ref)
+ neigh_hold(n1);
rc = n1;
goto out_tbl_unlock;
}
}
n->dead = 0;
- neigh_hold(n);
+ if (want_ref)
+ neigh_hold(n);
rcu_assign_pointer(n->next,
rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock)));
@@ -558,7 +560,7 @@ out_neigh_release:
neigh_release(n);
goto out;
}
-EXPORT_SYMBOL(neigh_create);
+EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
@@ -1199,10 +1201,23 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
write_unlock_bh(&neigh->lock);
rcu_read_lock();
- /* On shaper/eql skb->dst->neighbour != neigh :( */
- if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
- n1 = n2;
+
+ /* Why not just use 'neigh' as-is? The problem is that
+ * things such as shaper, eql, and sch_teql can end up
+ * using alternative, different, neigh objects to output
+ * the packet in the output path. So what we need to do
+ * here is re-lookup the top-level neigh in the path so
+ * we can reinject the packet there.
+ */
+ n2 = NULL;
+ if (dst) {
+ n2 = dst_neigh_lookup_skb(dst, skb);
+ if (n2)
+ n1 = n2;
+ }
n1->output(n1, skb);
+ if (n2)
+ neigh_release(n2);
rcu_read_unlock();
write_lock_bh(&neigh->lock);
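
__neigh_create() gains a want_ref flag so callers that do not need a reference can skip an atomic hold; neigh_create() presumably remains a thin wrapper that passes true. A sketch of the wrapper pattern with trivial refcounting:

#include <stdio.h>

struct neigh {
        int refcnt;
};

static struct neigh table_entry = { .refcnt = 1 };

static struct neigh *neigh_create_common(int want_ref)
{
        struct neigh *n = &table_entry; /* table insert/lookup elided */

        if (want_ref)
                n->refcnt++;            /* the neigh_hold() some callers skip */
        return n;
}

/* The old entry point keeps its take-a-reference semantics. */
static struct neigh *neigh_create_wrapper(void)
{
        return neigh_create_common(1);
}

int main(void)
{
        struct neigh *n = neigh_create_wrapper();

        printf("refcnt=%d\n", n->refcnt);       /* 2: table ref + caller ref */
        return 0;
}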
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fdf9e61d065..72607174ea5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -417,72 +417,6 @@ static struct attribute_group netstat_group = {
.name = "statistics",
.attrs = netstat_attrs,
};
-
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
-/* helper function that does all the locking etc for wireless stats */
-static ssize_t wireless_show(struct device *d, char *buf,
- ssize_t (*format)(const struct iw_statistics *,
- char *))
-{
- struct net_device *dev = to_net_dev(d);
- const struct iw_statistics *iw;
- ssize_t ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
- if (dev_isalive(dev)) {
- iw = get_wireless_stats(dev);
- if (iw)
- ret = (*format)(iw, buf);
- }
- rtnl_unlock();
-
- return ret;
-}
-
-/* show function template for wireless fields */
-#define WIRELESS_SHOW(name, field, format_string) \
-static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
-{ \
- return sprintf(buf, format_string, iw->field); \
-} \
-static ssize_t show_iw_##name(struct device *d, \
- struct device_attribute *attr, char *buf) \
-{ \
- return wireless_show(d, buf, format_iw_##name); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
-
-WIRELESS_SHOW(status, status, fmt_hex);
-WIRELESS_SHOW(link, qual.qual, fmt_dec);
-WIRELESS_SHOW(level, qual.level, fmt_dec);
-WIRELESS_SHOW(noise, qual.noise, fmt_dec);
-WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
-WIRELESS_SHOW(crypt, discard.code, fmt_dec);
-WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
-WIRELESS_SHOW(misc, discard.misc, fmt_dec);
-WIRELESS_SHOW(retries, discard.retries, fmt_dec);
-WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
-
-static struct attribute *wireless_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_link.attr,
- &dev_attr_level.attr,
- &dev_attr_noise.attr,
- &dev_attr_nwid.attr,
- &dev_attr_crypt.attr,
- &dev_attr_fragment.attr,
- &dev_attr_retries.attr,
- &dev_attr_misc.attr,
- &dev_attr_beacon.attr,
- NULL
-};
-
-static struct attribute_group wireless_group = {
- .name = "wireless",
- .attrs = wireless_attrs,
-};
-#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
@@ -1463,14 +1397,6 @@ int netdev_register_kobject(struct net_device *net)
groups++;
*groups++ = &netstat_group;
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
- if (net->ieee80211_ptr)
- *groups++ = &wireless_group;
-#ifdef CONFIG_WIRELESS_EXT
- else if (net->wireless_handlers)
- *groups++ = &wireless_group;
-#endif
-#endif
#endif /* CONFIG_SYSFS */
error = device_add(dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index f9f40b932e4..b4c90e42b44 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -715,14 +715,16 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
}
EXPORT_SYMBOL(netpoll_parse_options);
-int __netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
- struct net_device *ndev = np->dev;
struct netpoll_info *npinfo;
const struct net_device_ops *ops;
unsigned long flags;
int err;
+ np->dev = ndev;
+ strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
np_err(np, "%s doesn't support polling, aborting\n",
@@ -851,13 +853,11 @@ int netpoll_setup(struct netpoll *np)
np_info(np, "local IP %pI4\n", &np->local_ip);
}
- np->dev = ndev;
-
/* fill up the skb queue */
refill_skbs();
rtnl_lock();
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, ndev);
rtnl_unlock();
if (err)
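
__netpoll_setup() now binds the netpoll to the device itself instead of trusting the caller to have filled np->dev and np->dev_name beforehand; stacked-device users simply pass the device in. A hedged sketch of a caller after this change:

    /* e.g. a bonding/bridge slave attach path, sketched */
    static int slave_netpoll_setup(struct netpoll *np,
                                   struct net_device *slave_dev)
    {
            /* np->dev and np->dev_name are set inside the helper now */
            return __netpoll_setup(np, slave_dev);
    }
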
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index b2e9caa1ad1..ed0c0431fcd 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -25,6 +25,8 @@
#include <net/sock.h>
#include <net/netprio_cgroup.h>
+#include <linux/fdtable.h>
+
#define PRIOIDX_SZ 128
static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -232,7 +234,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
/*
*Separate the devname from the associated priority
- *and advance the priostr poitner to the priority value
+ *and advance the priostr pointer to the priority value
*/
*priostr = '\0';
priostr++;
@@ -272,6 +274,56 @@ out_free_devname:
return ret;
}
+void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ struct task_struct *p;
+ char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
+
+ if (!tmp) {
+ pr_warn("Unable to attach cgrp due to alloc failure!\n");
+ return;
+ }
+
+ cgroup_taskset_for_each(p, cgrp, tset) {
+ unsigned int fd;
+ struct fdtable *fdt;
+ struct files_struct *files;
+
+ task_lock(p);
+ files = p->files;
+ if (!files) {
+ task_unlock(p);
+ continue;
+ }
+
+ rcu_read_lock();
+ fdt = files_fdtable(files);
+ for (fd = 0; fd < fdt->max_fds; fd++) {
+ char *path;
+ struct file *file;
+ struct socket *sock;
+ unsigned long s;
+ int rv, err = 0;
+
+ file = fcheck_files(files, fd);
+ if (!file)
+ continue;
+
+ path = d_path(&file->f_path, tmp, PAGE_SIZE);
+ rv = sscanf(path, "socket:[%lu]", &s);
+ if (rv <= 0)
+ continue;
+
+ sock = sock_from_file(file, &err);
+ if (!err)
+ sock_update_netprioidx(sock->sk, p);
+ }
+ rcu_read_unlock();
+ task_unlock(p);
+ }
+ kfree(tmp);
+}
+
static struct cftype ss_files[] = {
{
.name = "prioidx",
@@ -289,6 +341,7 @@ struct cgroup_subsys net_prio_subsys = {
.name = "net_prio",
.create = cgrp_create,
.destroy = cgrp_destroy,
+ .attach = net_prio_attach,
#ifdef CONFIG_NETPRIO_CGROUP
.subsys_id = net_prio_subsys_id,
#endif
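
The attach handler identifies sockets among a task's open files by the "socket:[inode]" name that sockfs gives them. Stripped to its core, the detection works like this (sketch; the IS_ERR() check is an addition for safety and is not in the hunk above):

    char buf[64];
    unsigned long ino;
    char *path = d_path(&file->f_path, buf, sizeof(buf));

    if (!IS_ERR(path) && sscanf(path, "socket:[%lu]", &ino) == 1)
            ;  /* the file is a socket; refresh its cgroup prioidx */

With this callback in place, moving a task between net_prio cgroups retroactively updates the priority index of sockets it already has open, not just those created afterwards.
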
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 21318d15bbc..2c5a0a06c4c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -541,19 +541,6 @@ static const int rta_max[RTM_NR_FAMILIES] =
[RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
};
-void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
-{
- struct rtattr *rta;
- int size = RTA_LENGTH(attrlen);
-
- rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
- rta->rta_type = attrtype;
- rta->rta_len = size;
- memcpy(RTA_DATA(rta), data, attrlen);
- memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
-}
-EXPORT_SYMBOL(__rta_fill);
-
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
struct sock *rtnl = net->rtnl;
@@ -628,7 +615,7 @@ nla_put_failure:
EXPORT_SYMBOL(rtnetlink_put_metrics);
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
- u32 ts, u32 tsage, long expires, u32 error)
+ long expires, u32 error)
{
struct rta_cacheinfo ci = {
.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
@@ -636,13 +623,15 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
.rta_clntref = atomic_read(&(dst->__refcnt)),
.rta_error = error,
.rta_id = id,
- .rta_ts = ts,
- .rta_tsage = tsage,
};
- if (expires)
- ci.rta_expires = jiffies_to_clock_t(expires);
+ if (expires) {
+ unsigned long clock;
+ clock = jiffies_to_clock_t(abs(expires));
+ clock = min_t(unsigned long, clock, INT_MAX);
+ ci.rta_expires = (expires > 0) ? clock : -clock;
+ }
return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
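
Dropping the always-zero ts/tsage arguments also lets rtnl_put_cacheinfo() encode expired entries: a negative expires is converted on its absolute value, clamped, and sign-restored instead of being mangled by the old unconditional conversion. A worked example, assuming USER_HZ == 100:

    long expires = -3 * HZ;  /* entry expired three seconds ago */
    /* abs() -> 3*HZ; jiffies_to_clock_t(3*HZ) == 300; clamp to
     * INT_MAX; sign restored: ci.rta_expires ends up as -300 */
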
@@ -674,6 +663,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
}
}
+static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
+{
+ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
+ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
+}
+
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
@@ -682,7 +677,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
- (dev->flags & ~ifm->ifi_change);
+ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
@@ -786,6 +781,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(ext_filter_mask
@@ -904,6 +901,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u32(skb, IFLA_GROUP, dev->group) ||
nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+ nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
+#ifdef CONFIG_RPS
+ nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
+#endif
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
(dev->master &&
@@ -1121,6 +1122,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
[IFLA_EXT_MASK] = { .type = NLA_U32 },
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
+ [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1378,6 +1381,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
send_addr_notify = 1;
modified = 1;
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
if (tb[IFLA_MTU]) {
@@ -1639,17 +1643,22 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
{
int err;
struct net_device *dev;
- unsigned int num_queues = 1;
+ unsigned int num_tx_queues = 1;
+ unsigned int num_rx_queues = 1;
- if (ops->get_tx_queues) {
- err = ops->get_tx_queues(src_net, tb);
- if (err < 0)
- goto err;
- num_queues = err;
- }
+ if (tb[IFLA_NUM_TX_QUEUES])
+ num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
+ else if (ops->get_num_tx_queues)
+ num_tx_queues = ops->get_num_tx_queues();
+
+ if (tb[IFLA_NUM_RX_QUEUES])
+ num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
+ else if (ops->get_num_rx_queues)
+ num_rx_queues = ops->get_num_rx_queues();
err = -ENOMEM;
- dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
+ dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
+ num_tx_queues, num_rx_queues);
if (!dev)
goto err;
@@ -2189,7 +2198,7 @@ skip:
}
/**
- * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table.
+ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
* @nlh: netlink message header
* @dev: netdevice
*
@@ -2366,8 +2375,13 @@ static struct notifier_block rtnetlink_dev_notifier = {
static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
- sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
- rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .groups = RTNLGRP_MAX,
+ .input = rtnetlink_rcv,
+ .cb_mutex = &rtnl_mutex,
+ };
+
+ sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
if (!sk)
return -ENOMEM;
net->rtnl = sk;
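
netlink_kernel_create() now takes a config struct instead of a growing positional argument list; the fields used here are roughly the following (an assumption about the API of this series):

    struct netlink_kernel_cfg {
            unsigned int  groups;              /* multicast group count */
            void        (*input)(struct sk_buff *skb);
            struct mutex *cb_mutex;
            /* ... further optional members */
    };

Unset members default to zero, so simple users such as the sock_diag conversion below only fill in .input.
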
diff --git a/net/core/scm.c b/net/core/scm.c
index 611c5efd4cb..8f6ccfd68ef 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -109,25 +109,9 @@ void __scm_destroy(struct scm_cookie *scm)
if (fpl) {
scm->fp = NULL;
- if (current->scm_work_list) {
- list_add_tail(&fpl->list, current->scm_work_list);
- } else {
- LIST_HEAD(work_list);
-
- current->scm_work_list = &work_list;
-
- list_add(&fpl->list, &work_list);
- while (!list_empty(&work_list)) {
- fpl = list_first_entry(&work_list, struct scm_fp_list, list);
-
- list_del(&fpl->list);
- for (i=fpl->count-1; i>=0; i--)
- fput(fpl->fp[i]);
- kfree(fpl);
- }
-
- current->scm_work_list = NULL;
- }
+ for (i=fpl->count-1; i>=0; i--)
+ fput(fpl->fp[i]);
+ kfree(fpl);
}
}
EXPORT_SYMBOL(__scm_destroy);
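
The deleted worklist existed to bound recursion: fput() of an AF_UNIX socket file could drop its last reference, purge queued SCM_RIGHTS skbs, and re-enter __scm_destroy(). With the 3.6 rework that defers final fput() cleanup to task_work (an assumption about the motivating change, which lives outside net/), the plain loop is safe:

    /* the old hazard, in sketch form:
     * __scm_destroy() -> fput(unix socket file) -> last ref drops
     *   -> flush queued skbs carrying SCM_RIGHTS
     *     -> __scm_destroy() -> ... unbounded recursion
     */
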
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d124306b81f..fe00d120816 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -145,6 +145,43 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
BUG();
}
+
+/*
+ * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
+ * the caller if emergency pfmemalloc reserves are being used. If it is and
+ * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
+ * may be used. Otherwise, the packet data may be discarded until enough
+ * memory is free
+ */
+#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
+ __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
+void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
+ bool *pfmemalloc)
+{
+ void *obj;
+ bool ret_pfmemalloc = false;
+
+ /*
+ * Try a regular allocation, when that fails and we're not entitled
+ * to the reserves, fail.
+ */
+ obj = kmalloc_node_track_caller(size,
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ if (obj || !(gfp_pfmemalloc_allowed(flags)))
+ goto out;
+
+ /* Try again but now we are using pfmemalloc reserves */
+ ret_pfmemalloc = true;
+ obj = kmalloc_node_track_caller(size, flags, node);
+
+out:
+ if (pfmemalloc)
+ *pfmemalloc = ret_pfmemalloc;
+
+ return obj;
+}
+
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the
* [BEEP] leaks.
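
Callers get the taint back through the out-parameter and must record it on the skb so sk_filter() can enforce the SOCK_MEMALLOC rule later; usage mirrors the __alloc_skb() hunk below (condensed sketch):

    bool pfmemalloc;
    void *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);

    if (!data)
            goto nodata;
    /* remember whether the emergency reserves were tapped */
    skb->pfmemalloc = pfmemalloc;
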
@@ -155,26 +192,33 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* __alloc_skb - allocate a network buffer
* @size: size to allocate
* @gfp_mask: allocation mask
- * @fclone: allocate from fclone cache instead of head cache
- * and allocate a cloned (child) skb
+ * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
+ * instead of head cache and allocate a cloned (child) skb.
+ * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
+ * allocations in case the data is required for writeback
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
+ * tail room of at least size bytes. The object has a reference count
+ * of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
*/
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
- int fclone, int node)
+ int flags, int node)
{
struct kmem_cache *cache;
struct skb_shared_info *shinfo;
struct sk_buff *skb;
u8 *data;
+ bool pfmemalloc;
- cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+ cache = (flags & SKB_ALLOC_FCLONE)
+ ? skbuff_fclone_cache : skbuff_head_cache;
+
+ if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
+ gfp_mask |= __GFP_MEMALLOC;
/* Get the HEAD */
skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
@@ -189,7 +233,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
*/
size = SKB_DATA_ALIGN(size);
size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- data = kmalloc_node_track_caller(size, gfp_mask, node);
+ data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
if (!data)
goto nodata;
/* kmalloc(size) might give us more room than requested.
@@ -207,6 +251,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
memset(skb, 0, offsetof(struct sk_buff, tail));
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
+ skb->pfmemalloc = pfmemalloc;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -222,7 +267,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
atomic_set(&shinfo->dataref, 1);
kmemcheck_annotate_variable(shinfo->destructor_arg);
- if (fclone) {
+ if (flags & SKB_ALLOC_FCLONE) {
struct sk_buff *child = skb + 1;
atomic_t *fclone_ref = (atomic_t *) (child + 1);
@@ -232,6 +277,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
atomic_set(fclone_ref, 1);
child->fclone = SKB_FCLONE_UNAVAILABLE;
+ child->pfmemalloc = pfmemalloc;
}
out:
return skb;
@@ -296,17 +342,13 @@ EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
struct page *page;
unsigned int offset;
+ unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
-/**
- * netdev_alloc_frag - allocate a page fragment
- * @fragsz: fragment size
- *
- * Allocates a frag from a page for receive buffer.
- * Uses GFP_ATOMIC allocations.
- */
-void *netdev_alloc_frag(unsigned int fragsz)
+#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+
+static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct netdev_alloc_cache *nc;
void *data = NULL;
@@ -316,21 +358,42 @@ void *netdev_alloc_frag(unsigned int fragsz)
nc = &__get_cpu_var(netdev_alloc_cache);
if (unlikely(!nc->page)) {
refill:
- nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ nc->page = alloc_page(gfp_mask);
+ if (unlikely(!nc->page))
+ goto end;
+recycle:
+ atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
+ nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
nc->offset = 0;
}
- if (likely(nc->page)) {
- if (nc->offset + fragsz > PAGE_SIZE) {
- put_page(nc->page);
- goto refill;
- }
- data = page_address(nc->page) + nc->offset;
- nc->offset += fragsz;
- get_page(nc->page);
+
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ /* avoid unnecessary locked operations if possible */
+ if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
+ atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+ goto recycle;
+ goto refill;
}
+
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ nc->pagecnt_bias--;
+end:
local_irq_restore(flags);
return data;
}
+
+/**
+ * netdev_alloc_frag - allocate a page fragment
+ * @fragsz: fragment size
+ *
+ * Allocates a frag from a page for receive buffer.
+ * Uses GFP_ATOMIC allocations.
+ */
+void *netdev_alloc_frag(unsigned int fragsz)
+{
+ return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
+}
EXPORT_SYMBOL(netdev_alloc_frag);
/**
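
The pagecnt_bias trick removes one atomic operation per fragment: the producer pre-charges page->_count with NETDEV_PAGECNT_BIAS references and pays for each fragment out of a plain per-cpu counter instead of calling get_page(). In sketch form:

    /* refill/recycle:  page->_count = BIAS; nc->pagecnt_bias = BIAS;
     * hand out frag :  nc->pagecnt_bias--        (no locked op)
     * consumer done :  put_page()                (atomic, unchanged)
     *
     * at exhaustion, _count == pagecnt_bias means every fragment was
     * already released, so the page is recycled in place; otherwise
     * the producer's unused share is subtracted, and if that drops
     * _count to zero the page is likewise reusable.
     */
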
@@ -354,7 +417,12 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
- void *data = netdev_alloc_frag(fragsz);
+ void *data;
+
+ if (sk_memalloc_socks())
+ gfp_mask |= __GFP_MEMALLOC;
+
+ data = __netdev_alloc_frag(fragsz, gfp_mask);
if (likely(data)) {
skb = build_skb(data, fragsz);
@@ -362,7 +430,8 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
put_page(virt_to_head_page(data));
}
} else {
- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+ SKB_ALLOC_RX, NUMA_NO_NODE);
}
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD);
@@ -644,6 +713,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#if IS_ENABLED(CONFIG_IP_VS)
new->ipvs_property = old->ipvs_property;
#endif
+ new->pfmemalloc = old->pfmemalloc;
new->protocol = old->protocol;
new->mark = old->mark;
new->skb_iif = old->skb_iif;
@@ -713,7 +783,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);
-/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+/**
+ * skb_copy_ubufs - copy userspace skb frags buffers to kernel
* @skb: the skb to modify
* @gfp_mask: allocation priority
*
@@ -738,7 +809,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
u8 *vaddr;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(gfp_mask);
if (!page) {
while (head) {
struct page *next = (struct page *)head->private;
@@ -756,22 +827,22 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
/* skb frags release userspace buffers */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ for (i = 0; i < num_frags; i++)
skb_frag_unref(skb, i);
uarg->callback(uarg);
/* skb frags point to kernel buffers */
- for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
- __skb_fill_page_desc(skb, i-1, head, 0,
- skb_shinfo(skb)->frags[i - 1].size);
+ for (i = num_frags - 1; i >= 0; i--) {
+ __skb_fill_page_desc(skb, i, head, 0,
+ skb_shinfo(skb)->frags[i].size);
head = (struct page *)head->private;
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
@@ -791,10 +862,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- return NULL;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ return NULL;
n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
@@ -803,6 +872,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
n->fclone = SKB_FCLONE_CLONE;
atomic_inc(fclone_ref);
} else {
+ if (skb_pfmemalloc(skb))
+ gfp_mask |= __GFP_MEMALLOC;
+
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n)
return NULL;
@@ -839,6 +911,13 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
+static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
+{
+ if (skb_pfmemalloc(skb))
+ return SKB_ALLOC_RX;
+ return 0;
+}
+
/**
* skb_copy - create private copy of an sk_buff
* @skb: buffer to copy
@@ -860,7 +939,8 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
int headerlen = skb_headroom(skb);
unsigned int size = skb_end_offset(skb) + skb->data_len;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
+ struct sk_buff *n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
return NULL;
@@ -895,7 +975,8 @@ EXPORT_SYMBOL(skb_copy);
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
unsigned int size = skb_headlen(skb) + headroom;
- struct sk_buff *n = alloc_skb(size, gfp_mask);
+ struct sk_buff *n = __alloc_skb(size, gfp_mask,
+ skb_alloc_rx_flag(skb), NUMA_NO_NODE);
if (!n)
goto out;
@@ -914,12 +995,10 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
if (skb_shinfo(skb)->nr_frags) {
int i;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask)) {
- kfree_skb(n);
- n = NULL;
- goto out;
- }
+ if (skb_orphan_frags(skb, gfp_mask)) {
+ kfree_skb(n);
+ n = NULL;
+ goto out;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -970,8 +1049,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
size = SKB_DATA_ALIGN(size);
- data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- gfp_mask);
+ if (skb_pfmemalloc(skb))
+ gfp_mask |= __GFP_MEMALLOC;
+ data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
size = SKB_WITH_OVERHEAD(ksize(data));
@@ -992,10 +1073,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
*/
if (skb_cloned(skb)) {
/* copy this zero copy skb frags */
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- goto nofrags;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ goto nofrags;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
@@ -1085,8 +1164,9 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
/*
* Allocate the copy buffer
*/
- struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
- gfp_mask);
+ struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
+ gfp_mask, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
int oldheadroom = skb_headroom(skb);
int head_copy_len, head_copy_off;
int off;
@@ -2614,7 +2694,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
EXPORT_SYMBOL(skb_find_text);
/**
- * skb_append_datato_frags: - append the user data to a skb
+ * skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
* @skb: skb structure to be appended with user data.
* @skb: skb structure to be appended with user data.
* @getfrag: call back function to be used for getting the user data
@@ -2768,8 +2848,9 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
skb_release_head_state(nskb);
__skb_push(nskb, doffset);
} else {
- nskb = alloc_skb(hsize + doffset + headroom,
- GFP_ATOMIC);
+ nskb = __alloc_skb(hsize + doffset + headroom,
+ GFP_ATOMIC, skb_alloc_rx_flag(skb),
+ NUMA_NO_NODE);
if (unlikely(!nskb))
goto err;
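
Several hunks above replace the open-coded SKBTX_DEV_ZEROCOPY test with skb_orphan_frags(), which presumably wraps the same logic in the header (sketch, not part of this diff):

    static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
    {
            if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
                    return 0;
            return skb_copy_ubufs(skb, gfp_mask);
    }

Likewise, every copy path (skb_copy, __pskb_copy, skb_copy_expand, skb_segment) now forwards skb_alloc_rx_flag(skb), so a copy of a pfmemalloc packet is itself drawn from the reserves rather than competing with normal allocations under pressure.
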
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e5b71fda6e..6b654b3ddfd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
struct proto *proto;
@@ -271,6 +271,61 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
+struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(memalloc_socks);
+
+/**
+ * sk_set_memalloc - sets %SOCK_MEMALLOC
+ * @sk: socket to set it on
+ *
+ * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
+ * It's the responsibility of the admin to adjust min_free_kbytes
+ * to meet the requirements
+ */
+void sk_set_memalloc(struct sock *sk)
+{
+ sock_set_flag(sk, SOCK_MEMALLOC);
+ sk->sk_allocation |= __GFP_MEMALLOC;
+ static_key_slow_inc(&memalloc_socks);
+}
+EXPORT_SYMBOL_GPL(sk_set_memalloc);
+
+void sk_clear_memalloc(struct sock *sk)
+{
+ sock_reset_flag(sk, SOCK_MEMALLOC);
+ sk->sk_allocation &= ~__GFP_MEMALLOC;
+ static_key_slow_dec(&memalloc_socks);
+
+ /*
+ * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
+ * progress of swapping. However, if SOCK_MEMALLOC is cleared while
+ * it has rmem allocations there is a risk that the user of the
+ * socket cannot make forward progress due to exceeding the rmem
+ * limits. By rights, sk_clear_memalloc() should only be called
+ * on sockets being torn down but warn and reset the accounting if
+ * that assumption breaks.
+ */
+ if (WARN_ON(sk->sk_forward_alloc))
+ sk_mem_reclaim(sk);
+}
+EXPORT_SYMBOL_GPL(sk_clear_memalloc);
+
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ int ret;
+ unsigned long pflags = current->flags;
+
+ /* these should have been dropped before queueing */
+ BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
+
+ current->flags |= PF_MEMALLOC;
+ ret = sk->sk_backlog_rcv(sk, skb);
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
+
+ return ret;
+}
+EXPORT_SYMBOL(__sk_backlog_rcv);
+
#if defined(CONFIG_CGROUPS)
#if !defined(CONFIG_NET_CLS_CGROUP)
int net_cls_subsys_id = -1;
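
A protocol that must keep receiving while the machine is swapping over it (e.g. swap over NBD or NFS, the use case driving this series) opts in per socket; a hedged usage sketch:

    sk_set_memalloc(sk);      /* before swap traffic starts */
    /* ... socket may now tap reserves and ignore rmem limits ... */
    sk_clear_memalloc(sk);    /* on teardown */

__sk_backlog_rcv() complements this on the receive side: backlog processing for such sockets runs with PF_MEMALLOC set, so any allocation it performs may also dip into the reserves instead of deadlocking under memory pressure.
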
@@ -353,7 +408,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (err)
return err;
- if (!sk_rmem_schedule(sk, skb->truesize)) {
+ if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
}
@@ -1180,12 +1235,12 @@ void sock_update_classid(struct sock *sk)
}
EXPORT_SYMBOL(sock_update_classid);
-void sock_update_netprioidx(struct sock *sk)
+void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
if (in_interrupt())
return;
- sk->sk_cgrp_prioidx = task_netprioidx(current);
+ sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
@@ -1215,7 +1270,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
atomic_set(&sk->sk_wmem_alloc, 1);
sock_update_classid(sk);
- sock_update_netprioidx(sk);
+ sock_update_netprioidx(sk, current);
}
return sk;
@@ -1465,6 +1520,11 @@ void sock_rfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_rfree);
+void sock_edemux(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_edemux);
int sock_i_uid(struct sock *sk)
{
@@ -2154,6 +2214,10 @@ void release_sock(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_backlog.tail)
__release_sock(sk);
+
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+
sk->sk_lock.owned = 0;
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
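
release_cb gives protocols a hook that runs as the socket lock is handed back, after the backlog has been drained; in this same release TCP presumably wires it up for its deferred-work path, roughly:

    struct proto tcp_prot = {
            .name       = "TCP",
            /* ... */
            .release_cb = tcp_release_cb,  /* flush work queued while
                                              the socket was owned */
    };
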
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 5fd146720f3..9d8755e4a7a 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -4,7 +4,6 @@
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <linux/inet_diag.h>
@@ -35,9 +34,7 @@ EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
- __u32 *mem;
-
- mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
+ u32 mem[SK_MEMINFO_VARS];
mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
@@ -46,11 +43,9 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
- return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
@@ -120,7 +115,7 @@ static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
- struct sock_diag_req *req = NLMSG_DATA(nlh);
+ struct sock_diag_req *req = nlmsg_data(nlh);
const struct sock_diag_handler *hndl;
if (nlmsg_len(nlh) < sizeof(*req))
@@ -171,19 +166,36 @@ static void sock_diag_rcv(struct sk_buff *skb)
mutex_unlock(&sock_diag_mutex);
}
-struct sock *sock_diag_nlsk;
-EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+static int __net_init diag_net_init(struct net *net)
+{
+ struct netlink_kernel_cfg cfg = {
+ .input = sock_diag_rcv,
+ };
+
+ net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG,
+ THIS_MODULE, &cfg);
+ return net->diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __net_exit diag_net_exit(struct net *net)
+{
+ netlink_kernel_release(net->diag_nlsk);
+ net->diag_nlsk = NULL;
+}
+
+static struct pernet_operations diag_net_ops = {
+ .init = diag_net_init,
+ .exit = diag_net_exit,
+};
static int __init sock_diag_init(void)
{
- sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
- sock_diag_rcv, NULL, THIS_MODULE);
- return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+ return register_pernet_subsys(&diag_net_ops);
}
static void __exit sock_diag_exit(void)
{
- netlink_kernel_release(sock_diag_nlsk);
+ unregister_pernet_subsys(&diag_net_ops);
}
module_init(sock_diag_init);
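
Making the NETLINK_SOCK_DIAG socket per network namespace means a container's socket-diag queries are answered about its own sockets; handlers then reply on the requester's namespace socket, along these lines (sketch, assuming the inet_diag call sites were converted accordingly):

    struct net *net = sock_net(in_skb->sk);

    err = netlink_unicast(net->diag_nlsk, rep,
                          NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
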
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 656c7c75b19..81f2bb62dea 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -28,8 +28,7 @@
#include <linux/module.h>
#include <net/sock.h>
-/**
- * Data Center Bridging (DCB) is a collection of Ethernet enhancements
+/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
* intended to allow network traffic with differing requirements
* (highly reliable, no drops vs. best effort vs. low latency) to operate
* and co-exist on Ethernet. Current DCB features are:
@@ -196,92 +195,66 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
-/* standard netlink reply call */
-static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
- u32 seq, u16 flags)
+static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
+ u32 flags, struct nlmsghdr **nlhp)
{
- struct sk_buff *dcbnl_skb;
+ struct sk_buff *skb;
struct dcbmsg *dcb;
struct nlmsghdr *nlh;
- int ret = -EINVAL;
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- return ret;
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return NULL;
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
+ nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
+ BUG_ON(!nlh);
- dcb = NLMSG_DATA(nlh);
+ dcb = nlmsg_data(nlh);
dcb->dcb_family = AF_UNSPEC;
dcb->cmd = cmd;
dcb->dcb_pad = 0;
- ret = nla_put_u8(dcbnl_skb, attr, value);
- if (ret)
- goto err;
+ if (nlhp)
+ *nlhp = nlh;
- /* end the message, assign the nlmsg_len. */
- nlmsg_end(dcbnl_skb, nlh);
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- return -EINVAL;
-
- return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
- return ret;
+ return skb;
}
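
With dcbnl_newmsg() factored out, each handler below merely fills a caller-provided skb and returns an errno; allocation, header setup, and the final unicast presumably move into a common dispatcher driven by a table along these lines (sketch, not shown in this diff):

    struct reply_func {
            int   type;  /* RTM_GETDCB or RTM_SETDCB */
            int (*cb)(struct net_device *, struct nlmsghdr *, u32,
                      struct nlattr **, struct sk_buff *);
    };

    static const struct reply_func reply_funcs[DCB_CMD_MAX + 1] = {
            [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
            [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
            /* ... one entry per DCB_CMD_* ... */
    };

This is why the repeated nlmsg_new()/NLMSG_NEW()/rtnl_unicast() boilerplate disappears from every handler in the hunks that follow.
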
-static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
-
/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
if (!netdev->dcbnl_ops->getstate)
- return ret;
-
- ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
- DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
+ return -EOPNOTSUPP;
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_STATE,
+ netdev->dcbnl_ops->getstate(netdev));
}
-static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
- return ret;
+ if (!tb[DCB_ATTR_PFC_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getpfccfg)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_PFC_GCFG;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
+ nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
if (!nest)
- goto err;
+ return -EMSGSIZE;
if (data[DCB_PFC_UP_ATTR_ALL])
getall = 1;
@@ -292,103 +265,53 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
&value);
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
}
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
}
-static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
u8 perm_addr[MAX_ADDR_LEN];
- int ret = -EINVAL;
if (!netdev->dcbnl_ops->getpermhwaddr)
- return ret;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GPERM_HWADDR;
+ return -EOPNOTSUPP;
netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
- ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
- perm_addr);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
-
- return 0;
-
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
+ return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}
-static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
- return ret;
+ if (!tb[DCB_ATTR_CAP])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getcap)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
dcbnl_cap_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GCAP;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
+ nest = nla_nest_start(skb, DCB_ATTR_CAP);
if (!nest)
- goto err;
+ return -EMSGSIZE;
if (data[DCB_CAP_ATTR_ALL])
getall = 1;
@@ -398,69 +321,41 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
continue;
if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
}
}
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
}
-static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
- return ret;
+ if (!tb[DCB_ATTR_NUMTCS])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getnumtcs)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest);
- if (ret) {
- ret = -EINVAL;
- goto err_out;
- }
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb) {
- ret = -EINVAL;
- goto err_out;
- }
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GNUMTCS;
+ if (ret)
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
- if (!nest) {
- ret = -EINVAL;
- goto err;
- }
+ nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
+ if (!nest)
+ return -EMSGSIZE;
if (data[DCB_NUMTCS_ATTR_ALL])
getall = 1;
@@ -471,53 +366,37 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
if (!ret) {
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- ret = -EINVAL;
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
- } else {
- goto err;
- }
- }
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret) {
- ret = -EINVAL;
- goto err_out;
+ } else
+ return -EINVAL;
}
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return ret;
}
-static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
- int ret = -EINVAL;
+ int ret;
u8 value;
int i;
- if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
- return ret;
+ if (!tb[DCB_ATTR_NUMTCS])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setnumtcs)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest);
-
- if (ret) {
- ret = -EINVAL;
- goto err;
- }
+ if (ret)
+ return ret;
for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
if (data[i] == NULL)
@@ -526,84 +405,68 @@ static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
value = nla_get_u8(data[i]);
ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
-
if (ret)
- goto operr;
+ break;
}
-operr:
- ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
- DCB_ATTR_NUMTCS, pid, seq, flags);
-
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
-static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
-
if (!netdev->dcbnl_ops->getpfcstate)
- return ret;
-
- ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
- DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
- pid, seq, flags);
+ return -EOPNOTSUPP;
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
+ netdev->dcbnl_ops->getpfcstate(netdev));
}
-static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
u8 value;
- if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
- return ret;
+ if (!tb[DCB_ATTR_PFC_STATE])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpfcstate)
+ return -EOPNOTSUPP;
value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
netdev->dcbnl_ops->setpfcstate(netdev, value);
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
- pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}
-static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *app_nest;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
u16 id;
u8 up, idtype;
- int ret = -EINVAL;
+ int ret;
if (!tb[DCB_ATTR_APP])
- goto out;
+ return -EINVAL;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
dcbnl_app_nest);
if (ret)
- goto out;
+ return ret;
- ret = -EINVAL;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]))
- goto out;
+ return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
- goto out;
+ return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
@@ -617,138 +480,106 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
up = dcb_getapp(netdev, &app);
}
- /* send this back */
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GAPP;
-
- app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+ app_nest = nla_nest_start(skb, DCB_ATTR_APP);
if (!app_nest)
- goto out_cancel;
+ return -EMSGSIZE;
- ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
+ ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
if (ret)
goto out_cancel;
- ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
+ ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
if (ret)
goto out_cancel;
- ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
+ ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
if (ret)
goto out_cancel;
- nla_nest_end(dcbnl_skb, app_nest);
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto nlmsg_failure;
+ nla_nest_end(skb, app_nest);
- goto out;
+ return 0;
out_cancel:
- nla_nest_cancel(dcbnl_skb, app_nest);
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-out:
+ nla_nest_cancel(skb, app_nest);
return ret;
}
-static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int err, ret = -EINVAL;
+ int ret;
u16 id;
u8 up, idtype;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
if (!tb[DCB_ATTR_APP])
- goto out;
+ return -EINVAL;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
dcbnl_app_nest);
if (ret)
- goto out;
+ return ret;
- ret = -EINVAL;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]) ||
(!app_tb[DCB_APP_ATTR_PRIORITY]))
- goto out;
+ return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
- goto out;
+ return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
if (netdev->dcbnl_ops->setapp) {
- err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+ ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
} else {
struct dcb_app app;
app.selector = idtype;
app.protocol = id;
app.priority = up;
- err = dcb_setapp(netdev, &app);
+ ret = dcb_setapp(netdev, &app);
}
- ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
- pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
-out:
+
return ret;
}
-static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ struct nlattr **tb, struct sk_buff *skb, int dir)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *pg_nest, *param_nest, *data;
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
u8 prio, pgid, tc_pct, up_map;
- int ret = -EINVAL;
+ int ret;
int getall = 0;
int i;
- if (!tb[DCB_ATTR_PG_CFG] ||
- !netdev->dcbnl_ops->getpgtccfgtx ||
+ if (!tb[DCB_ATTR_PG_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getpgtccfgtx ||
!netdev->dcbnl_ops->getpgtccfgrx ||
!netdev->dcbnl_ops->getpgbwgcfgtx ||
!netdev->dcbnl_ops->getpgbwgcfgrx)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
-
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
+ return ret;
- pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
+ pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
if (!pg_nest)
- goto err;
+ return -EMSGSIZE;
if (pg_tb[DCB_PG_ATTR_TC_ALL])
getall = 1;
@@ -766,7 +597,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
if (ret)
goto err_pg;
- param_nest = nla_nest_start(dcbnl_skb, i);
+ param_nest = nla_nest_start(skb, i);
if (!param_nest)
goto err_pg;
@@ -789,33 +620,33 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_PGID, pgid);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
+ ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
tc_pct);
if (ret)
goto err_param;
}
- nla_nest_end(dcbnl_skb, param_nest);
+ nla_nest_end(skb, param_nest);
}
if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
@@ -838,80 +669,71 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
}
- ret = nla_put_u8(dcbnl_skb, i, tc_pct);
-
+ ret = nla_put_u8(skb, i, tc_pct);
if (ret)
goto err_pg;
}
- nla_nest_end(dcbnl_skb, pg_nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, pg_nest);
return 0;
err_param:
- nla_nest_cancel(dcbnl_skb, param_nest);
+ nla_nest_cancel(skb, param_nest);
err_pg:
- nla_nest_cancel(dcbnl_skb, pg_nest);
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- ret = -EINVAL;
- return ret;
+ nla_nest_cancel(skb, pg_nest);
+
+ return -EMSGSIZE;
}
-static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
+ return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
-static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
+ return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
-static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
u8 value;
- if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
- return ret;
+ if (!tb[DCB_ATTR_STATE])
+ return -EINVAL;
- value = nla_get_u8(tb[DCB_ATTR_STATE]);
+ if (!netdev->dcbnl_ops->setstate)
+ return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
- RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
- pid, seq, flags);
+ value = nla_get_u8(tb[DCB_ATTR_STATE]);
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_STATE,
+ netdev->dcbnl_ops->setstate(netdev, value));
}
-static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
int i;
- int ret = -EINVAL;
+ int ret;
u8 value;
- if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
- return ret;
+ if (!tb[DCB_ATTR_PFC_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpfccfg)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
if (data[i] == NULL)
@@ -921,50 +743,53 @@ static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
}
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
- pid, seq, flags);
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
-static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
+ int ret;
- if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
- return ret;
+ if (!tb[DCB_ATTR_SET_ALL])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setall)
+ return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
- DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
+ netdev->dcbnl_ops->setall(netdev));
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
return ret;
}
-static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb,
+ int dir)
{
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
- int ret = -EINVAL;
+ int ret;
int i;
u8 pgid;
u8 up_map;
u8 prio;
u8 tc_pct;
- if (!tb[DCB_ATTR_PG_CFG] ||
- !netdev->dcbnl_ops->setpgtccfgtx ||
+ if (!tb[DCB_ATTR_PG_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpgtccfgtx ||
!netdev->dcbnl_ops->setpgtccfgrx ||
!netdev->dcbnl_ops->setpgbwgcfgtx ||
!netdev->dcbnl_ops->setpgbwgcfgrx)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
if (!pg_tb[i])
@@ -973,7 +798,7 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
pg_tb[i], dcbnl_tc_param_nest);
if (ret)
- goto err;
+ return ret;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1026,63 +851,47 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
}
}
- ret = dcbnl_reply(0, RTM_SETDCB,
- (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
- DCB_ATTR_PG_CFG, pid, seq, flags);
-
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
-static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
+ return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
-static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
+ return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
-static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *bcn_nest;
struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
u8 value_byte;
u32 value_integer;
- int ret = -EINVAL;
+ int ret;
bool getall = false;
int i;
- if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
+ if (!tb[DCB_ATTR_BCN])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getbcnrp ||
!netdev->dcbnl_ops->getbcncfg)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
-
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_BCN_GCFG;
+ return ret;
- bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
+ bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
if (!bcn_nest)
- goto err;
+ return -EMSGSIZE;
if (bcn_tb[DCB_BCN_ATTR_ALL])
getall = true;
@@ -1093,7 +902,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
&value_byte);
- ret = nla_put_u8(dcbnl_skb, i, value_byte);
+ ret = nla_put_u8(skb, i, value_byte);
if (ret)
goto err_bcn;
}
@@ -1104,49 +913,41 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getbcncfg(netdev, i,
&value_integer);
- ret = nla_put_u32(dcbnl_skb, i, value_integer);
+ ret = nla_put_u32(skb, i, value_integer);
if (ret)
goto err_bcn;
}
- nla_nest_end(dcbnl_skb, bcn_nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, bcn_nest);
return 0;
err_bcn:
- nla_nest_cancel(dcbnl_skb, bcn_nest);
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- ret = -EINVAL;
+ nla_nest_cancel(skb, bcn_nest);
return ret;
}
-static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
int i;
- int ret = -EINVAL;
+ int ret;
u8 value_byte;
u32 value_int;
- if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
+ if (!tb[DCB_ATTR_BCN])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setbcncfg ||
!netdev->dcbnl_ops->setbcnrp)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN],
dcbnl_pfc_up_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
if (data[i] == NULL)
@@ -1164,10 +965,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
i, value_int);
}
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
- pid, seq, flags);
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
@@ -1233,20 +1031,21 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
struct dcb_app_type *itr;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
int dcbx;
- int err = -EMSGSIZE;
+ int err;
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
- goto nla_put_failure;
+ return -EMSGSIZE;
+
ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
if (!ieee)
- goto nla_put_failure;
+ return -EMSGSIZE;
if (ops->ieee_getets) {
struct ieee_ets ets;
err = ops->ieee_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->ieee_getmaxrate) {
@@ -1256,7 +1055,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
sizeof(maxrate), &maxrate);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
}
@@ -1265,12 +1064,12 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
if (!app)
- goto nla_put_failure;
+ return -EMSGSIZE;
spin_lock(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
@@ -1279,7 +1078,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
&itr->app);
if (err) {
spin_unlock(&dcb_lock);
- goto nla_put_failure;
+ return -EMSGSIZE;
}
}
}
@@ -1298,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_peer_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->ieee_peer_getpfc) {
@@ -1306,7 +1105,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1315,20 +1114,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
DCB_ATTR_IEEE_APP_UNSPEC,
DCB_ATTR_IEEE_APP);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
nla_nest_end(skb, ieee);
if (dcbx >= 0) {
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
return 0;
-
-nla_put_failure:
- return err;
}
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
@@ -1340,13 +1136,13 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
struct nlattr *pg = nla_nest_start(skb, i);
if (!pg)
- goto nla_put_failure;
+ return -EMSGSIZE;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
struct nlattr *tc_nest = nla_nest_start(skb, i);
if (!tc_nest)
- goto nla_put_failure;
+ return -EMSGSIZE;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1364,7 +1160,7 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
- goto nla_put_failure;
+ return -EMSGSIZE;
nla_nest_end(skb, tc_nest);
}
@@ -1378,13 +1174,10 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
&tc_pct);
if (nla_put_u8(skb, i, tc_pct))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
nla_nest_end(skb, pg);
return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
}
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
@@ -1531,27 +1324,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
struct net *net = dev_net(dev);
struct sk_buff *skb;
struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
if (!skb)
return -ENOBUFS;
- nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = cmd;
-
if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
err = dcbnl_ieee_fill(skb, dev);
else
@@ -1559,8 +1341,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
if (err < 0) {
/* Report error to broadcast listeners */
- nlmsg_cancel(skb, nlh);
- kfree_skb(skb);
+ nlmsg_free(skb);
rtnl_set_sk_err(net, RTNLGRP_DCB, err);
} else {
/* End nlmsg and notify broadcast listeners */
@@ -1590,15 +1371,15 @@ EXPORT_SYMBOL(dcbnl_cee_notify);
* No attempt is made to reconcile the case where only part of the
* cmd can be completed.
*/
-static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
- int err = -EOPNOTSUPP;
+ int err;
if (!ops)
- return err;
+ return -EOPNOTSUPP;
if (!tb[DCB_ATTR_IEEE])
return -EINVAL;
@@ -1649,58 +1430,28 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
}
err:
- dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
- pid, seq, flags);
+ err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
return err;
}
-static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct net *net = dev_net(netdev);
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_IEEE_GET;
-
- err = dcbnl_ieee_fill(skb, netdev);
-
- if (err < 0) {
- nlmsg_cancel(skb, nlh);
- kfree_skb(skb);
- } else {
- nlmsg_end(skb, nlh);
- err = rtnl_unicast(skb, net, pid);
- }
-
- return err;
+ return dcbnl_ieee_fill(skb, netdev);
}
-static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
- int err = -EOPNOTSUPP;
+ int err;
if (!ops)
return -EOPNOTSUPP;
@@ -1733,32 +1484,26 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
}
err:
- dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
- pid, seq, flags);
+ err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
return err;
}
/* DCBX configuration */
-static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret;
-
if (!netdev->dcbnl_ops->getdcbx)
return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
- DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_DCBX,
+ netdev->dcbnl_ops->getdcbx(netdev));
}
-static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret;
u8 value;
if (!netdev->dcbnl_ops->setdcbx)
@@ -1769,19 +1514,13 @@ static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
value = nla_get_u8(tb[DCB_ATTR_DCBX]);
- ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
- RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
- pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_DCBX,
+ netdev->dcbnl_ops->setdcbx(netdev, value));
}
-static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
u8 value;
int ret, i;
@@ -1796,25 +1535,11 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
dcbnl_featcfg_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb) {
- ret = -ENOBUFS;
- goto err_out;
- }
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GFEATCFG;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
- if (!nest) {
- ret = -EMSGSIZE;
- goto nla_put_failure;
- }
+ nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
+ if (!nest)
+ return -EMSGSIZE;
if (data[DCB_FEATCFG_ATTR_ALL])
getall = 1;
@@ -1825,28 +1550,21 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
if (!ret)
- ret = nla_put_u8(dcbnl_skb, i, value);
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
+ nla_nest_cancel(skb, nest);
goto nla_put_failure;
}
}
- nla_nest_end(dcbnl_skb, nest);
+ nla_nest_end(skb, nest);
- nlmsg_end(dcbnl_skb, nlh);
-
- return rtnl_unicast(dcbnl_skb, &init_net, pid);
nla_put_failure:
- nlmsg_cancel(dcbnl_skb, nlh);
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-err_out:
return ret;
}
-static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
int ret, i;
@@ -1876,60 +1594,73 @@ static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
goto err;
}
err:
- dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
- pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
return ret;
}
/* Handle CEE DCBX GET commands. */
-static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct net *net = dev_net(netdev);
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
+ return dcbnl_cee_fill(skb, netdev);
+}
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_CEE_GET;
+struct reply_func {
+ /* reply netlink message type */
+ int type;
- err = dcbnl_cee_fill(skb, netdev);
+ /* function to fill message contents */
+ int (*cb)(struct net_device *, struct nlmsghdr *, u32,
+ struct nlattr **, struct sk_buff *);
+};
- if (err < 0) {
- nlmsg_cancel(skb, nlh);
- nlmsg_free(skb);
- } else {
- nlmsg_end(skb, nlh);
- err = rtnl_unicast(skb, net, pid);
- }
- return err;
-}
+static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
+ [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
+ [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
+ [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
+ [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
+ [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
+ [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
+ [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
+ [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
+ [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
+ [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
+ [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
+ [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
+ [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
+ [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
+ [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
+ [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
+ [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
+ [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
+ [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
+ [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
+ [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
+ [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
+ [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
+ [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
+ [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
+ [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
+ [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
+};
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct net_device *netdev;
- struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
+ struct dcbmsg *dcb = nlmsg_data(nlh);
struct nlattr *tb[DCB_ATTR_MAX + 1];
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = -EINVAL;
+ struct sk_buff *reply_skb;
+ struct nlmsghdr *reply_nlh = NULL;
+ const struct reply_func *fn;
if (!net_eq(net, &init_net))
return -EINVAL;
@@ -1939,136 +1670,78 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (ret < 0)
return ret;
+ if (dcb->cmd > DCB_CMD_MAX)
+ return -EINVAL;
+
+ /* check if a reply function has been defined for the command */
+ fn = &reply_funcs[dcb->cmd];
+ if (!fn->cb)
+ return -EOPNOTSUPP;
+
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
if (!netdev)
- return -EINVAL;
+ return -ENODEV;
- if (!netdev->dcbnl_ops)
- goto errout;
-
- switch (dcb->cmd) {
- case DCB_CMD_GSTATE:
- ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_GCFG:
- ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GPERM_HWADDR:
- ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGTX_GCFG:
- ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGRX_GCFG:
- ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_BCN_GCFG:
- ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SSTATE:
- ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_SCFG:
- ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ if (!netdev->dcbnl_ops) {
+ ret = -EOPNOTSUPP;
goto out;
+ }
- case DCB_CMD_SET_ALL:
- ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGTX_SCFG:
- ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGRX_SCFG:
- ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GCAP:
- ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GNUMTCS:
- ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SNUMTCS:
- ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_GSTATE:
- ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_SSTATE:
- ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_BCN_SCFG:
- ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GAPP:
- ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SAPP:
- ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_SET:
- ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_GET:
- ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_DEL:
- ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GDCBX:
- ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SDCBX:
- ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GFEATCFG:
- ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SFEATCFG:
- ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags, &reply_nlh);
+ if (!reply_skb) {
+ ret = -ENOBUFS;
goto out;
- case DCB_CMD_CEE_GET:
- ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ }
+
+ ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
+ if (ret < 0) {
+ nlmsg_free(reply_skb);
goto out;
- default:
- goto errout;
}
-errout:
- ret = -EINVAL;
+
+ nlmsg_end(reply_skb, reply_nlh);
+
+ ret = rtnl_unicast(reply_skb, &init_net, pid);
out:
dev_put(netdev);
return ret;
}
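
The big switch over dcb->cmd collapses into the reply_funcs table: dcb_doit() allocates one reply with dcbnl_newmsg() (the helper this patch introduces earlier in the file), calls the per-command fill callback, and unicasts the finished message. A condensed sketch of the same table-driven shape, with demo_* names invented for illustration:

#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>

struct demo_msg {			/* fixed header, like struct dcbmsg */
	u8	family;
	u8	cmd;
};

typedef int (*demo_fill_t)(struct sk_buff *skb);

static int demo_fill_state(struct sk_buff *skb)
{
	return nla_put_u8(skb, 1, 1);	/* attribute type 1, value 1 */
}

static const demo_fill_t demo_funcs[] = {
	[0] = demo_fill_state,
};

static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh, u8 cmd)
{
	struct sk_buff *reply;
	struct nlmsghdr *rnlh;
	struct demo_msg *hdr;
	int err;

	if (cmd >= ARRAY_SIZE(demo_funcs) || !demo_funcs[cmd])
		return -EOPNOTSUPP;

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply)
		return -ENOBUFS;

	rnlh = nlmsg_put(reply, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
			 nlh->nlmsg_type, sizeof(*hdr), 0);
	if (!rnlh) {
		nlmsg_free(reply);
		return -EMSGSIZE;
	}

	hdr = nlmsg_data(rnlh);
	hdr->family = AF_UNSPEC;
	hdr->cmd = cmd;

	err = demo_funcs[cmd](reply);
	if (err < 0) {
		nlmsg_free(reply);
		return err;
	}

	nlmsg_end(reply, rnlh);
	return rtnl_unicast(reply, &init_net, NETLINK_CB(skb).pid);
}

The win over the old switch is that allocation, framing, error unwinding and the final unicast each happen exactly once, in the dispatcher, instead of being duplicated in every handler.
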
+static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
+ int ifindex, int prio)
+{
+ struct dcb_app_type *itr;
+
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == app->selector &&
+ itr->app.protocol == app->protocol &&
+ itr->ifindex == ifindex &&
+ (!prio || itr->app.priority == prio))
+ return itr;
+ }
+
+ return NULL;
+}
+
+static int dcb_app_add(const struct dcb_app *app, int ifindex)
+{
+ struct dcb_app_type *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(&entry->app, app, sizeof(*app));
+ entry->ifindex = ifindex;
+ list_add(&entry->list, &dcb_app_list);
+
+ return 0;
+}
+
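
dcb_getapp(), dcb_setapp(), dcb_ieee_getapp_mask(), dcb_ieee_setapp() and dcb_ieee_delapp() previously each open-coded a walk of dcb_app_list with slightly different match rules; dcb_app_lookup() and dcb_app_add() centralise that, with prio == 0 meaning "match any priority". Callers hold dcb_lock, which is why dcb_app_add() allocates with GFP_ATOMIC. A hypothetical caller, just to show the locking contract:

/* Hypothetical wrapper, not part of the patch: update-or-insert an
 * app entry using the two helpers, under the lock they expect.
 */
static int demo_replace_app(struct net_device *dev, const struct dcb_app *new)
{
	struct dcb_app_type *itr;
	int err = 0;

	spin_lock(&dcb_lock);
	itr = dcb_app_lookup(new, dev->ifindex, 0);
	if (itr)
		itr->app.priority = new->priority;	/* update in place */
	else
		err = dcb_app_add(new, dev->ifindex);	/* GFP_ATOMIC inside */
	spin_unlock(&dcb_lock);

	return err;
}
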
/**
* dcb_getapp - retrieve the DCBX application user priority
*
@@ -2082,14 +1755,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock(&dcb_lock);
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == app->selector &&
- itr->app.protocol == app->protocol &&
- itr->ifindex == dev->ifindex) {
- prio = itr->app.priority;
- break;
- }
- }
+ if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ prio = itr->app.priority;
spin_unlock(&dcb_lock);
return prio;
@@ -2107,6 +1774,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type *itr;
struct dcb_app_type event;
+ int err = 0;
event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
@@ -2115,36 +1783,23 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock(&dcb_lock);
/* Search for existing match and replace */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == new->selector &&
- itr->app.protocol == new->protocol &&
- itr->ifindex == dev->ifindex) {
- if (new->priority)
- itr->app.priority = new->priority;
- else {
- list_del(&itr->list);
- kfree(itr);
- }
- goto out;
+ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ if (new->priority)
+ itr->app.priority = new->priority;
+ else {
+ list_del(&itr->list);
+ kfree(itr);
}
+ goto out;
}
	/* App type does not exist; add new application type */
- if (new->priority) {
- struct dcb_app_type *entry;
- entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
- if (!entry) {
- spin_unlock(&dcb_lock);
- return -ENOMEM;
- }
-
- memcpy(&entry->app, new, sizeof(*new));
- entry->ifindex = dev->ifindex;
- list_add(&entry->list, &dcb_app_list);
- }
+ if (new->priority)
+ err = dcb_app_add(new, dev->ifindex);
out:
spin_unlock(&dcb_lock);
- call_dcbevent_notifiers(DCB_APP_EVENT, &event);
- return 0;
+ if (!err)
+ call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+ return err;
}
EXPORT_SYMBOL(dcb_setapp);
@@ -2161,13 +1816,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock(&dcb_lock);
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == app->selector &&
- itr->app.protocol == app->protocol &&
- itr->ifindex == dev->ifindex) {
- prio |= 1 << itr->app.priority;
- }
- }
+ if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ prio |= 1 << itr->app.priority;
spin_unlock(&dcb_lock);
return prio;
@@ -2183,7 +1833,6 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
*/
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
- struct dcb_app_type *itr, *entry;
struct dcb_app_type event;
int err = 0;
@@ -2194,26 +1843,12 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock(&dcb_lock);
/* Search for existing match and abort if found */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == new->selector &&
- itr->app.protocol == new->protocol &&
- itr->app.priority == new->priority &&
- itr->ifindex == dev->ifindex) {
- err = -EEXIST;
- goto out;
- }
- }
-
- /* App entry does not exist add new entry */
- entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
- if (!entry) {
- err = -ENOMEM;
+ if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
+ err = -EEXIST;
goto out;
}
- memcpy(&entry->app, new, sizeof(*new));
- entry->ifindex = dev->ifindex;
- list_add(&entry->list, &dcb_app_list);
+ err = dcb_app_add(new, dev->ifindex);
out:
spin_unlock(&dcb_lock);
if (!err)
@@ -2240,19 +1875,12 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
spin_lock(&dcb_lock);
/* Search for existing match and remove it. */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == del->selector &&
- itr->app.protocol == del->protocol &&
- itr->app.priority == del->priority &&
- itr->ifindex == dev->ifindex) {
- list_del(&itr->list);
- kfree(itr);
- err = 0;
- goto out;
- }
+ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
+ list_del(&itr->list);
+ kfree(itr);
+ err = 0;
}
-out:
spin_unlock(&dcb_lock);
if (!err)
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index e2ab0627a5f..a269aa7f792 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -50,7 +50,8 @@ static inline u8 dccp_ackvec_state(const u8 *cell)
return *cell & ~DCCPAV_MAX_RUNLEN;
}
-/** struct dccp_ackvec - Ack Vector main data structure
+/**
+ * struct dccp_ackvec - Ack Vector main data structure
*
* This implements a fixed-size circular buffer within an array and is largely
* based on Appendix A of RFC 4340.
@@ -76,7 +77,8 @@ struct dccp_ackvec {
struct list_head av_records;
};
-/** struct dccp_ackvec_record - Records information about sent Ack Vectors
+/**
+ * struct dccp_ackvec_record - Records information about sent Ack Vectors
*
* These list entries define the additional information which the HC-Receiver
* keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
@@ -121,6 +123,7 @@ static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
* @len: length of @vec
* @nonce: whether @vec had an ECN nonce of 0 or 1
* @node: FIFO - arranged in descending order of ack_ackno
+ *
* This structure is used by CCIDs to access Ack Vectors in a received skb.
*/
struct dccp_ackvec_parsed {
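
These dccp hunks (here and in the files below) are mechanical kernel-doc repairs: a comment is only parsed as kernel-doc if it opens with /**, the first line names the symbol with " - ", and a bare " *" line separates the @parameter block from the free-form description, which is exactly the blank line these hunks insert. A conforming example (demo_sum() is made up):

/**
 * demo_sum - add two packet counters
 * @a: first counter
 * @b: second counter
 *
 * Returns the saturated sum of @a and @b; purely illustrative.
 */
static inline u32 demo_sum(u32 a, u32 b)
{
	u32 s = a + b;

	return s < a ? ~0U : s;
}
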
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 48b585a5cba..597557254dd 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -46,6 +46,7 @@ bool ccid_support_check(u8 const *ccid_array, u8 array_len)
* ccid_get_builtin_ccids - Populate a list of built-in CCIDs
* @ccid_array: pointer to copy into
* @array_len: value to return length into
+ *
* This function allocates memory - caller must see that it is freed after use.
*/
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8c67bedf85b..d65e98798ec 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -113,6 +113,7 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
/**
* ccid3_hc_tx_update_x - Update allowed sending rate X
* @stamp: most recent time if available - can be left NULL.
+ *
* This function tracks draft rfc3448bis, check there for latest details.
*
* Note: X and X_recv are both stored in units of 64 * bytes/second, to support
@@ -161,9 +162,11 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
}
}
-/*
- * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
+/**
+ * ccid3_hc_tx_update_s - Track the mean packet size `s'
* @len: DCCP packet payload size in bytes
+ *
+ * cf. RFC 4342, 5.3 and RFC 3448, 4.1
*/
static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
{
@@ -270,6 +273,7 @@ out:
/**
* ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
* @skb: next packet candidate to send on @sk
+ *
* This function uses the convention of ccid_packet_dequeue_eval() and
* returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
*/
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 497723c4d4b..57f9fd78c4d 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -133,6 +133,7 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
* @rh: Receive history containing a fresh loss event
* @calc_first_li: Caller-dependent routine to compute length of first interval
* @sk: Used by @calc_first_li in caller-specific way (subtyping)
+ *
* Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
*/
int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index de8fe294bf0..08df7a3acb3 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -315,6 +315,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
* @ndp: The NDP count belonging to @skb
* @calc_first_li: Caller-dependent computation of first loss interval in @lh
* @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
+ *
* Chooses action according to pending loss, updates LI database when a new
* loss was detected, and does required post-processing. Returns 1 when caller
* should send feedback, 0 otherwise.
@@ -387,7 +388,7 @@ static inline struct tfrc_rx_hist_entry *
}
/**
- * tfrc_rx_hist_rtt_prev_s: previously suitable (wrt rtt_last_s) RTT-sampling entry
+ * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
*/
static inline struct tfrc_rx_hist_entry *
tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index a052a4377e2..88ef98285be 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -611,6 +611,7 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
* @s: packet size in bytes
* @R: RTT scaled by 1000000 (i.e., microseconds)
* @p: loss ratio estimate scaled by 1000000
+ *
* Returns X_calc in bytes per second (not scaled).
*/
u32 tfrc_calc_x(u16 s, u32 R, u32 p)
@@ -659,6 +660,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
/**
* tfrc_calc_x_reverse_lookup - try to find p given f(p)
* @fvalue: function value to match, scaled by 1000000
+ *
* Returns closest match for p, also scaled by 1000000
*/
u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 9040be049d8..708e75bf623 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -352,6 +352,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
* @dccpd_opt_len: total length of all options (5.8) in the packet
* @dccpd_seq: sequence number
* @dccpd_ack_seq: acknowledgment number subheader field value
+ *
* This is used for transmission as well as for reception.
*/
struct dccp_skb_cb {
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 78a2ad70e1b..9733ddbc96c 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -350,6 +350,7 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
* @feat_num: feature to activate, one of %dccp_feature_numbers
* @local: whether local (1) or remote (0) @feat_num is meant
* @fval: the value (SP or NN) to activate, or NULL to use the default value
+ *
* For general use this function is preferable over __dccp_feat_activate().
*/
static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
@@ -446,6 +447,7 @@ static struct dccp_feat_entry *dccp_feat_list_lookup(struct list_head *fn_list,
* @head: list to add to
* @feat: feature number
* @local: whether the local (1) or remote feature with number @feat is meant
+ *
* This is the only constructor and serves to ensure the above invariants.
*/
static struct dccp_feat_entry *
@@ -504,6 +506,7 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
* @feat: one of %dccp_feature_numbers
* @local: whether local (1) or remote (0) @feat_num is being confirmed
* @fval: pointer to NN/SP value to be inserted or NULL
+ *
* Returns 0 on success, a Reset code for further processing otherwise.
*/
static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
@@ -691,6 +694,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
* @feat: an NN feature from %dccp_feature_numbers
* @mandatory: use Mandatory option if 1
* @nn_val: value to register (restricted to 4 bytes)
+ *
* Note that NN features are local by definition (RFC 4340, 6.3.2).
*/
static int __feat_register_nn(struct list_head *fn, u8 feat,
@@ -760,6 +764,7 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
* dccp_feat_nn_get - Query current/pending value of NN feature
* @sk: DCCP socket of an established connection
* @feat: NN feature number from %dccp_feature_numbers
+ *
* For a known NN feature, returns value currently being negotiated, or
* current (confirmed) value if no negotiation is going on.
*/
@@ -790,6 +795,7 @@ EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
* @sk: DCCP socket of an established connection
* @feat: NN feature number from %dccp_feature_numbers
* @nn_val: the new value to use
+ *
* This function is used to communicate NN updates out-of-band.
*/
int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
@@ -930,6 +936,7 @@ static const struct ccid_dependency *dccp_feat_ccid_deps(u8 ccid, bool is_local)
* @fn: feature-negotiation list to update
* @id: CCID number to track
* @is_local: whether TX CCID (1) or RX CCID (0) is meant
+ *
* This function needs to be called after registering all other features.
*/
static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
@@ -953,6 +960,7 @@ static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
/**
* dccp_feat_finalise_settings - Finalise settings before starting negotiation
* @dp: client or listening socket (settings will be inherited)
+ *
* This is called after all registrations (socket initialisation, sysctls, and
* sockopt calls), and before sending the first packet containing Change options
* (ie. client-Request or server-Response), to ensure internal consistency.
@@ -1284,6 +1292,7 @@ confirmation_failed:
* @feat: NN number, one of %dccp_feature_numbers
* @val: NN value
* @len: length of @val in bytes
+ *
* This function combines the functionality of change_recv/confirm_recv, with
* the following differences (reset codes are the same):
* - cleanup after receiving the Confirm;
@@ -1379,6 +1388,7 @@ fast_path_failed:
* @feat: one of %dccp_feature_numbers
* @val: value contents of @opt
* @len: length of @val in bytes
+ *
* Returns 0 on success, a Reset code for ending the connection otherwise.
*/
int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
diff --git a/net/dccp/input.c b/net/dccp/input.c
index bc93a333931..14cdafad7a9 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -710,6 +710,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
/**
* dccp_sample_rtt - Validate and finalise computation of RTT sample
* @delta: number of microseconds between packet and acknowledgment
+ *
* The routine is kept generic to work in different contexts. It should be
* called immediately when the ACK used for the RTT sample arrives.
*/
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 07f5579ca75..176ecdba4a2 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -161,17 +161,10 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
if (sk->sk_state == DCCP_LISTEN)
return;
- /* We don't check in the destentry if pmtu discovery is forbidden
- * on this route. We just assume that no packet_to_big packets
- * are send back when pmtu discovery is not active.
- * There is a small race when the user changes this flag in the
- * route, but I think that's acceptable.
- */
- if ((dst = __sk_dst_check(sk, 0)) == NULL)
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
return;
- dst->ops->update_pmtu(dst, mtu);
-
/* Something is about to be wrong... Remember soft error
* for the case, if this connection will not able to recover.
*/
@@ -195,6 +188,14 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
} /* else let the usual retransmit timer handle it */
}
+static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
@@ -259,6 +260,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
}
switch (type) {
+ case ICMP_REDIRECT:
+ dccp_do_redirect(skb, sk);
+ goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
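
dccp_do_pmtu_discovery() now defers route revalidation and the MTU update to inet_csk_update_pmtu(), and ICMP redirects get an explicit case that forwards to the route's new redirect op. A sketch of the resulting handler shape, condensed from the hunks above; demo_err() is illustrative, and the ICMP_FRAG_NEEDED code check is elided:

#include <linux/icmp.h>
#include <net/inet_connection_sock.h>

static void demo_err(struct sock *sk, struct sk_buff *skb, int type, u32 info)
{
	struct dst_entry *dst;

	switch (type) {
	case ICMP_REDIRECT:
		dst = __sk_dst_check(sk, 0);
		if (dst)
			dst->ops->redirect(dst, sk, skb);
		break;
	case ICMP_DEST_UNREACH:
		/* inet_csk_update_pmtu() revalidates the cached route
		 * and returns NULL when nothing usable remains.
		 */
		dst = inet_csk_update_pmtu(sk, info);
		if (dst && inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
			dccp_sync_mss(sk, dst_mtu(dst));	/* real dccp helper */
		break;
	}
}
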
@@ -477,7 +481,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct rtable *rt;
const struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
- .flowi4_oif = skb_rtable(skb)->rt_iif,
+ .flowi4_oif = inet_iif(skb),
.daddr = iph->saddr,
.saddr = iph->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fa9512d86f3..56840b249f3 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -130,6 +130,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
+ if (type == NDISC_REDIRECT) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
+
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
@@ -138,37 +145,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
goto out;
- /* icmp should have updated the destination cache entry */
- dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst == NULL) {
- struct inet_sock *inet = inet_sk(sk);
- struct flowi6 fl6;
-
- /* BUGGG_FUTURE: Again, it is not clear how
- to handle rthdr case. Ignore this complexity
- for now.
- */
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_DCCP;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.fl6_dport = inet->inet_dport;
- fl6.fl6_sport = inet->inet_sport;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- goto out;
- }
- } else
- dst_hold(dst);
+ dst = inet6_csk_update_pmtu(sk, ntohl(info));
+ if (!dst)
+ goto out;
- if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
+ if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
dccp_sync_mss(sk, dst_mtu(dst));
- } /* else let the usual retransmit timer handle it */
- dst_release(dst);
goto out;
}
@@ -237,7 +219,6 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
- struct ipv6_txoptions *opt = NULL;
struct in6_addr *final_p, final;
struct flowi6 fl6;
int err = -1;
@@ -253,9 +234,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- opt = np->opt;
- final_p = fl6_update_dst(&fl6, opt, &final);
+ final_p = fl6_update_dst(&fl6, np->opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst)) {
@@ -272,13 +252,11 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
fl6.daddr = ireq6->rmt_addr;
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
}
done:
- if (opt != NULL && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
return err;
}
@@ -473,7 +451,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
- struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
@@ -518,7 +495,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
return newsk;
}
- opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
@@ -530,7 +506,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.daddr = ireq6->rmt_addr;
- final_p = fl6_update_dst(&fl6, opt, &final);
+ final_p = fl6_update_dst(&fl6, np->opt, &final);
fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -595,11 +571,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
* Yes, keeping reference count would be much more clever, but we make
 * one more thing there: reattach optmem to newsk.
*/
- if (opt != NULL) {
- newnp->opt = ipv6_dup_options(newsk, opt);
- if (opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- }
+ if (np->opt != NULL)
+ newnp->opt = ipv6_dup_options(newsk, np->opt);
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
@@ -625,8 +598,6 @@ out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
- if (opt != NULL && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
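
The IPv6 side sheds its local ipv6_txoptions copy: the request and accept paths never produce an options block different from np->opt here, so the conditional sock_kfree_s() cleanups were dead code and np->opt can be read directly. Only the accept path still duplicates the options for the child socket, roughly as in this illustrative sketch:

#include <linux/ipv6.h>
#include <net/ipv6.h>

/* Illustrative: only the accept path copies tx options for the child. */
static void demo_inherit_opt(struct sock *parent, struct sock *child)
{
	struct ipv6_pinfo *np = inet6_sk(parent);
	struct ipv6_pinfo *newnp = inet6_sk(child);

	if (np->opt != NULL)
		newnp->opt = ipv6_dup_options(child, np->opt);
}
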
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 68fa6b7a3e0..a58e0b63405 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -527,6 +527,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb)
* @val: NN value or SP array (preferred element first) to copy
* @len: true length of @val in bytes (excluding first element repetition)
* @repeat_first: whether to copy the first element of @val twice
+ *
* The last argument is used to construct Confirm options, where the preferred
* value and the preference list appear separately (RFC 4340, 6.3.1). Preference
* lists are kept such that the preferred entry is always first, so we only need
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 78736730879..d17fc90a74b 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -214,6 +214,7 @@ void dccp_write_space(struct sock *sk)
* dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for
* @delay: timeout in jiffies
+ *
* This is used by CCIDs which need to delay the send time in process context.
*/
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 7eaf9879972..102d6106a94 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -505,6 +505,14 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
return 0;
}
+static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
+{
+ if (rta[RTA_TABLE - 1])
+ table = nla_get_u32((struct nlattr *) rta[RTA_TABLE - 1]);
+
+ return table;
+}
+
static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ac90f658586..3aede1b459f 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct dn_route *rt = (struct dn_route *)dst;
- struct neighbour *neigh = dst_get_neighbour_noref(dst);
+ struct neighbour *neigh = rt->n;
struct net_device *dev = neigh->dev;
char mac_addr[ETH_ALEN];
unsigned int seq;
@@ -240,7 +240,7 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_long_output: Increasing headroom\n");
}
@@ -283,7 +283,7 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_short_output: Increasing headroom\n");
}
@@ -322,7 +322,7 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 564a6ad13ce..8a96047c7c9 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -322,7 +322,7 @@ static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned c
/* Set "cross subchannel" bit in ackcrs */
ackcrs |= 0x2000;
- ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen);
+ ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
*ptr++ = cpu_to_le16(acknum);
*ptr++ = cpu_to_le16(ackcrs);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 586302e557a..85a3604c87c 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -114,10 +114,16 @@ static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
+static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb , u32 mtu);
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);
@@ -138,17 +144,37 @@ static struct dst_ops dn_dst_ops = {
.mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
+ .ifdown = dn_dst_ifdown,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
+ .redirect = dn_dst_redirect,
.neigh_lookup = dn_dst_neigh_lookup,
};
static void dn_dst_destroy(struct dst_entry *dst)
{
+ struct dn_route *rt = (struct dn_route *) dst;
+
+ if (rt->n)
+ neigh_release(rt->n);
dst_destroy_metrics_generic(dst);
}
+static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
+{
+ if (how) {
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
+
+ if (n && n->dev == dev) {
+ n->dev = dev_net(dev)->loopback_dev;
+ dev_hold(n->dev);
+ dev_put(dev);
+ }
+ }
+}
+
static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
@@ -242,9 +268,11 @@ static int dn_dst_gc(struct dst_ops *ops)
* We update both the mtu and the advertised mss (i.e. the segment size we
* advertise to the other end).
*/
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
- struct neighbour *n = dst_get_neighbour_noref(dst);
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
u32 min_mtu = 230;
struct dn_dev *dn;
@@ -269,6 +297,11 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
}
}
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+}
+
/*
* When a route has been marked obsolete. (e.g. routing cache flush)
*/
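
These dn_route changes track a tree-wide dst_ops rework: update_pmtu() and the new redirect() op now receive the socket and the triggering skb for context, neigh_lookup() gains an skb argument, and DECnet keeps its neighbour in rt->n (released in dn_dst_destroy, retargeted in the new dn_dst_ifdown) instead of in the generic dst neighbour slot. The hooks a protocol supplies now look like this; demo_* are stubs, and DECnet's redirect is deliberately empty, as above:

#include <net/dst.h>

static void demo_update_pmtu(struct dst_entry *dst, struct sock *sk,
			     struct sk_buff *skb, u32 mtu)
{
	/* sk/skb give the hook context to validate the PMTU report */
}

static void demo_redirect(struct dst_entry *dst, struct sock *sk,
			  struct sk_buff *skb)
{
	/* a protocol may opt out with an empty handler, as DECnet does */
}
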
@@ -713,7 +746,8 @@ out:
static int dn_to_neigh_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = dst_get_neighbour_noref(dst);
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
return n->output(n, skb);
}
@@ -727,7 +761,7 @@ static int dn_output(struct sk_buff *skb)
int err = -EINVAL;
- if (dst_get_neighbour_noref(dst) == NULL)
+ if (rt->n == NULL)
goto error;
skb->dev = dev;
@@ -828,7 +862,9 @@ static unsigned int dn_dst_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}
@@ -848,11 +884,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
}
rt->rt_type = res->type;
- if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
+ if (dev != NULL && rt->n == NULL) {
n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
if (IS_ERR(n))
return PTR_ERR(n);
- dst_set_neighbour(&rt->dst, n);
+ rt->n = n;
}
if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1140,7 +1176,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
@@ -1159,7 +1195,7 @@ make_route:
rt->rt_dst_map = fld.daddr;
rt->rt_src_map = fld.saddr;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
neigh = NULL;
rt->dst.lastuse = jiffies;
@@ -1388,7 +1424,6 @@ static int dn_route_input_slow(struct sk_buff *skb)
/* Packet was intra-ethernet, so we know its on-link */
if (cb->rt_flags & DN_RT_F_IE) {
gateway = cb->src;
- flags |= RTCF_DIRECTSRC;
goto make_route;
}
@@ -1401,14 +1436,13 @@ static int dn_route_input_slow(struct sk_buff *skb)
/* Close eyes and pray */
gateway = cb->src;
- flags |= RTCF_DIRECTSRC;
goto make_route;
default:
goto e_inval;
}
make_route:
- rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
@@ -1429,7 +1463,7 @@ make_route:
rt->fld.flowidn_iif = in_dev->ifindex;
rt->fld.flowidn_mark = fld.flowidn_mark;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
rt->dst.lastuse = jiffies;
rt->dst.output = dn_rt_bug;
switch (res.type) {
@@ -1515,54 +1549,68 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
struct dn_route *rt = (struct dn_route *)skb_dst(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
- unsigned char *b = skb_tail_pointer(skb);
long expires;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
- r = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
r->rtm_family = AF_DECnet;
r->rtm_dst_len = 16;
r->rtm_src_len = 0;
r->rtm_tos = 0;
r->rtm_table = RT_TABLE_MAIN;
- RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
r->rtm_type = rt->rt_type;
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
+
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
+
+ if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
+ nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
+ goto errout;
+
if (rt->fld.saddr) {
r->rtm_src_len = 16;
- RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
+ if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
+ goto errout;
}
- if (rt->dst.dev)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
+ if (rt->dst.dev &&
+ nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
+ goto errout;
+
/*
* Note to self - change this if input routes reverse direction when
* they deal only with inputs and not with replies like they do
* currently.
*/
- RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
- if (rt->rt_daddr != rt->rt_gateway)
- RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
+ if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
+ goto errout;
+
+ if (rt->rt_daddr != rt->rt_gateway &&
+ nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
+ goto errout;
+
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
- goto rtattr_failure;
+ goto errout;
+
expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
rt->dst.error) < 0)
- goto rtattr_failure;
- if (dn_is_input_route(rt))
- RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
+ goto errout;
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ if (dn_is_input_route(rt) &&
+ nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
+ goto errout;
-nlmsg_failure:
-rtattr_failure:
- nlmsg_trim(skb, b);
- return -1;
+ return nlmsg_end(skb, nlh);
+
+errout:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
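
dn_rt_fill_info() moves from the jump-label RTA_PUT()/NLMSG_NEW() macros to the checked nlmsg_put()/nla_put_*() API: every put is tested, failure unwinds through nlmsg_cancel(), and the final nlmsg_end() replaces the hand-computed nlmsg_len. The skeleton of such a fill routine, with DEMO_ATTR as a stand-in attribute and the remaining rtmsg fields elided:

#include <linux/rtnetlink.h>
#include <net/netlink.h>

#define DEMO_ATTR	1		/* stand-in attribute type */

static int demo_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			  int event, unsigned int flags, __le16 daddr)
{
	struct nlmsghdr *nlh;
	struct rtmsg *r;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_DECnet;
	/* ...set the other rtmsg fields as needed... */

	if (nla_put_le16(skb, DEMO_ATTR, daddr) < 0)
		goto errout;

	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
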
/*
@@ -1572,7 +1620,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
{
struct net *net = sock_net(in_skb->sk);
struct rtattr **rta = arg;
- struct rtmsg *rtm = NLMSG_DATA(nlh);
+ struct rtmsg *rtm = nlmsg_data(nlh);
struct dn_route *rt = NULL;
struct dn_skb_cb *cb;
int err;
@@ -1585,7 +1633,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
memset(&fld, 0, sizeof(fld));
fld.flowidn_proto = DNPROTO_NSP;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
skb_reset_mac_header(skb);
@@ -1663,13 +1711,16 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct dn_route *rt;
int h, s_h;
int idx, s_idx;
+ struct rtmsg *rtm;
if (!net_eq(net, &init_net))
return 0;
- if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
+ if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
return -EINVAL;
- if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
+
+ rtm = nlmsg_data(cb->nlh);
+ if (!(rtm->rtm_flags & RTM_F_CLONED))
return 0;
s_h = cb->args[0];
@@ -1769,12 +1820,11 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
- rt->dst.dev ? rt->dst.dev->name : "*",
- dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
- dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
- atomic_read(&rt->dst.__refcnt),
- rt->dst.__use,
- (int) dst_metric(&rt->dst, RTAX_RTT));
+ rt->dst.dev ? rt->dst.dev->name : "*",
+ dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
+ dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
+ atomic_read(&rt->dst.__refcnt),
+ rt->dst.__use, 0);
return 0;
}
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 650f3380c98..16c986ab122 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -297,61 +297,75 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
- unsigned char *b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
- rtm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_DECnet;
rtm->rtm_dst_len = dst_len;
rtm->rtm_src_len = 0;
rtm->rtm_tos = 0;
rtm->rtm_table = tb_id;
- RTA_PUT_U32(skb, RTA_TABLE, tb_id);
rtm->rtm_flags = fi->fib_flags;
rtm->rtm_scope = scope;
rtm->rtm_type = type;
- if (rtm->rtm_dst_len)
- RTA_PUT(skb, RTA_DST, 2, dst);
rtm->rtm_protocol = fi->fib_protocol;
- if (fi->fib_priority)
- RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+
+ if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
+ goto errout;
+
+ if (rtm->rtm_dst_len &&
+ nla_put(skb, RTA_DST, 2, dst) < 0)
+ goto errout;
+
+ if (fi->fib_priority &&
+ nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
+ goto errout;
+
if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
- goto rtattr_failure;
+ goto errout;
+
if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
- if (fi->fib_nh->nh_oif)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+ if (fi->fib_nh->nh_gw &&
+ nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
+ goto errout;
+
+ if (fi->fib_nh->nh_oif &&
+ nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
+ goto errout;
}
+
if (fi->fib_nhs > 1) {
struct rtnexthop *nhp;
- struct rtattr *mp_head;
- if (skb_tailroom(skb) <= RTA_SPACE(0))
- goto rtattr_failure;
- mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
+ struct nlattr *mp_head;
+
+ if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
+ goto errout;
for_nexthops(fi) {
- if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
- goto rtattr_failure;
- nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
+ goto errout;
+
nhp->rtnh_flags = nh->nh_flags & 0xFF;
nhp->rtnh_hops = nh->nh_weight - 1;
nhp->rtnh_ifindex = nh->nh_oif;
- if (nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
+
+ if (nh->nh_gw &&
+ nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
+ goto errout;
+
nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
} endfor_nexthops(fi);
- mp_head->rta_type = RTA_MULTIPATH;
- mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
- }
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ nla_nest_end(skb, mp_head);
+ }
+ return nlmsg_end(skb, nlh);
-nlmsg_failure:
-rtattr_failure:
- nlmsg_trim(skb, b);
+errout:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
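
The multipath branch shows the nested variant of the same conversion: the manual tailroom checks and back-patched rta_len give way to nla_nest_start()/nla_nest_end(), with nla_reserve_nohdr() carving out raw space for each struct rtnexthop inside the nest. Reduced to its bones; demo_put_hops() and its inputs are illustrative:

#include <linux/rtnetlink.h>
#include <net/netlink.h>

static int demo_put_hops(struct sk_buff *skb, int nh_count)
{
	struct nlattr *mp_head;
	struct rtnexthop *nhp;
	int i;

	mp_head = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_head)
		return -EMSGSIZE;

	for (i = 0; i < nh_count; i++) {
		nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
		if (!nhp)
			goto errout;

		nhp->rtnh_flags = 0;
		nhp->rtnh_hops = 0;
		nhp->rtnh_ifindex = i + 1;	/* stand-in ifindex */
		/* per-hop attributes would go here, then: */
		nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
	}

	nla_nest_end(skb, mp_head);
	return 0;

errout:
	nla_nest_cancel(skb, mp_head);
	return -EMSGSIZE;
}
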
@@ -476,7 +490,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
return 0;
if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
- ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
+ ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
return dn_cache_dump(skb, cb);
s_h = cb->args[0];
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 44b890936fc..11db0ecf342 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -42,23 +42,23 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
size = NLMSG_SPACE(rt_skb->len);
size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- goto nlmsg_failure;
+ if (!skb) {
+ *errp = -ENOMEM;
+ return NULL;
+ }
old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, 0, 0, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ *errp = -ENOMEM;
+ return NULL;
+ }
rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh);
rtm->nfdn_ifindex = rt_skb->dev->ifindex;
ptr = NFDN_RTMSG(rtm);
skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
nlh->nlmsg_len = skb->tail - old_tail;
return skb;
-
-nlmsg_failure:
- if (skb)
- kfree_skb(skb);
- *errp = -ENOMEM;
- net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
- return NULL;
}
static void dnrmg_send_peer(struct sk_buff *skb)
@@ -117,7 +117,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
static struct nf_hook_ops dnrmg_ops __read_mostly = {
.hook = dnrmg_hook,
- .pf = PF_DECnet,
+ .pf = NFPROTO_DECNET,
.hooknum = NF_DN_ROUTE,
.priority = NF_DN_PRI_DNRTMSG,
};
@@ -125,11 +125,13 @@ static struct nf_hook_ops dnrmg_ops __read_mostly = {
static int __init dn_rtmsg_init(void)
{
int rv = 0;
+ struct netlink_kernel_cfg cfg = {
+ .groups = DNRNG_NLGRP_MAX,
+ .input = dnrmg_receive_user_skb,
+ };
dnrmg = netlink_kernel_create(&init_net,
- NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
- dnrmg_receive_user_skb,
- NULL, THIS_MODULE);
+ NETLINK_DNRTMSG, THIS_MODULE, &cfg);
if (dnrmg == NULL) {
printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
return -ENOMEM;
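
The socket creation above shows the tree-wide switch from netlink_kernel_create()'s long positional argument list to a struct netlink_kernel_cfg. A hedged sketch using the four-argument form in effect at this point in the series (NETLINK_USERSOCK stands in for a real protocol unit):

#include <linux/module.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>

static struct sock *example_sk;

static void example_input(struct sk_buff *skb)
{
	/* called for every message sent to this kernel socket */
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups = 1,		/* multicast group count */
		.input	= example_input,
	};

	example_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
					   THIS_MODULE, &cfg);
	return example_sk ? 0 : -ENOMEM;
}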
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 7cef1d8ace2..32317750540 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,5 +3,3 @@
#
obj-y += eth.o
-obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o
-obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 36e58800a9e..4efad533e5f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -232,6 +232,7 @@ EXPORT_SYMBOL(eth_header_parse);
* @neigh: source neighbour
* @hh: destination cache entry
* @type: Ethernet type field
+ *
* Create an Ethernet header template from the neighbour.
*/
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
@@ -274,6 +275,7 @@ EXPORT_SYMBOL(eth_header_cache_update);
* eth_mac_addr - set new Ethernet hardware address
* @dev: network device
* @p: socket address
+ *
* Change hardware address of device.
*
* This doesn't change hardware matching, so needs to be overridden
@@ -283,7 +285,7 @@ int eth_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- if (netif_running(dev))
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -331,6 +333,7 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
/**
* ether_setup - setup Ethernet network device
* @dev: network device
+ *
* Fill in the fields of the device structure with Ethernet-generic values.
*/
void ether_setup(struct net_device *dev)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 32eb4179e8f..6a095225148 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,7 +55,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
@@ -114,7 +113,6 @@ struct lowpan_dev_record {
struct lowpan_fragment {
struct sk_buff *skb; /* skb to be assembled */
- spinlock_t lock; /* concurency lock */
u16 length; /* length to be assembled */
u32 bytes_rcv; /* bytes received */
u16 tag; /* current fragment tag */
@@ -124,7 +122,7 @@ struct lowpan_fragment {
static unsigned short fragment_tag;
static LIST_HEAD(lowpan_fragments);
-spinlock_t flist_lock;
+static DEFINE_SPINLOCK(flist_lock);
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
@@ -240,8 +238,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
}
- pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
- postcount);
+ pr_debug("uncompressing %d + %d => ", prefcount, postcount);
lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
return 0;
@@ -252,13 +249,11 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
{
struct udphdr *uh = udp_hdr(skb);
- pr_debug("(%s): UDP header compression\n", __func__);
-
if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT) &&
((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT)) {
- pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+ pr_debug("UDP header: both ports compression to 4 bits\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
**(hc06_ptr + 1) = /* subtraction is faster */
(u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
@@ -266,20 +261,20 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
*hc06_ptr += 2;
} else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("(%s): remove 8 bits of dest\n", __func__);
+ pr_debug("UDP header: remove 8 bits of dest\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
memcpy(*hc06_ptr + 1, &uh->source, 2);
**(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
*hc06_ptr += 4;
} else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("(%s): remove 8 bits of source\n", __func__);
+ pr_debug("UDP header: remove 8 bits of source\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
memcpy(*hc06_ptr + 1, &uh->dest, 2);
**(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
*hc06_ptr += 4;
} else {
- pr_debug("(%s): can't compress header\n", __func__);
+ pr_debug("UDP header: can't compress\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
memcpy(*hc06_ptr + 1, &uh->source, 2);
memcpy(*hc06_ptr + 3, &uh->dest, 2);
@@ -291,25 +286,26 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
*hc06_ptr += 2;
}
-static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
+static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
{
- u8 ret;
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
- ret = skb->data[0];
+ *val = skb->data[0];
skb_pull(skb, 1);
- return ret;
+ return 0;
}
-static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
{
- u16 ret;
-
- BUG_ON(!pskb_may_pull(skb, 2));
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
- ret = skb->data[0] | (skb->data[1] << 8);
+ *val = (skb->data[0] << 8) | skb->data[1];
skb_pull(skb, 2);
- return ret;
+
+ return 0;
}
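
Replacing the return-by-value helpers (and the BUG_ON() in the u16 variant) with int-returning ones lets every caller propagate a parse error instead of crashing on a truncated frame. A hypothetical u32 variant, not part of the patch, following the same pattern:

/* hypothetical extension for illustration only */
static inline int lowpan_fetch_skb_u32(struct sk_buff *skb, u32 *val)
{
	if (unlikely(!pskb_may_pull(skb, 4)))
		return -EINVAL;

	*val = (skb->data[0] << 24) | (skb->data[1] << 16) |
	       (skb->data[2] << 8) | skb->data[3];
	skb_pull(skb, 4);

	return 0;
}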
static int
@@ -318,10 +314,14 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
u8 tmp;
- tmp = lowpan_fetch_skb_u8(skb);
+ if (!uh)
+ goto err;
+
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto err;
if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
- pr_debug("(%s): UDP header uncompression\n", __func__);
+ pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
memcpy(&uh->source, &skb->data[0], 2);
@@ -347,19 +347,19 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
skb_pull(skb, 1);
break;
default:
- pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+ pr_debug("ERROR: unknown UDP format\n");
goto err;
break;
}
- pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
- __func__, uh->source, uh->dest);
+ pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
+ uh->source, uh->dest);
/* copy checksum */
memcpy(&uh->check, &skb->data[0], 2);
skb_pull(skb, 2);
} else {
- pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+ pr_debug("ERROR: unsupported NH format\n");
goto err;
}
@@ -392,10 +392,9 @@ static int lowpan_header_create(struct sk_buff *skb,
hdr = ipv6_hdr(skb);
hc06_ptr = head + 2;
- pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
- "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
- hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
- hdr->hop_limit);
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
+ "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
+ ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
lowpan_raw_dump_table(__func__, "raw skb network header dump",
skb_network_header(skb), sizeof(struct ipv6hdr));
@@ -490,28 +489,28 @@ static int lowpan_header_create(struct sk_buff *skb,
break;
default:
*hc06_ptr = hdr->hop_limit;
+ hc06_ptr += 1;
break;
}
/* source address compression */
if (is_addr_unspecified(&hdr->saddr)) {
- pr_debug("(%s): source address is unspecified, setting SAC\n",
- __func__);
+ pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
/* TODO: context lookup */
} else if (is_addr_link_local(&hdr->saddr)) {
- pr_debug("(%s): source address is link-local\n", __func__);
+ pr_debug("source address is link-local\n");
iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
} else {
- pr_debug("(%s): send the full source address\n", __func__);
+ pr_debug("send the full source address\n");
memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
hc06_ptr += 16;
}
/* destination address compression */
if (is_addr_mcast(&hdr->daddr)) {
- pr_debug("(%s): destination address is multicast", __func__);
+ pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
pr_debug("compressed to 1 octet\n");
@@ -540,14 +539,13 @@ static int lowpan_header_create(struct sk_buff *skb,
hc06_ptr += 16;
}
} else {
- pr_debug("(%s): destination address is unicast: ", __func__);
/* TODO: context lookup */
if (is_addr_link_local(&hdr->daddr)) {
- pr_debug("destination address is link-local\n");
+ pr_debug("dest address is unicast and link-local\n");
iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
} else {
- pr_debug("using full address\n");
+ pr_debug("dest address is unicast: using full one\n");
memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
hc06_ptr += 16;
}
@@ -639,19 +637,15 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
- pr_debug("%s: timer expired for frame with tag %d\n", __func__,
- entry->tag);
+ pr_debug("timer expired for frame with tag %d\n", entry->tag);
- spin_lock(&flist_lock);
list_del(&entry->list);
- spin_unlock(&flist_lock);
-
dev_kfree_skb(entry->skb);
kfree(entry);
}
static struct lowpan_fragment *
-lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
+lowpan_alloc_new_frame(struct sk_buff *skb, u8 len, u16 tag)
{
struct lowpan_fragment *frame;
@@ -662,12 +656,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
INIT_LIST_HEAD(&frame->list);
- frame->length = (iphc0 & 7) | (len << 3);
+ frame->length = len;
frame->tag = tag;
/* allocate buffer for frame assembling */
- frame->skb = alloc_skb(frame->length +
- sizeof(struct ipv6hdr), GFP_ATOMIC);
+ frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
+ sizeof(struct ipv6hdr));
if (!frame->skb)
goto skb_err;
@@ -710,7 +704,9 @@ lowpan_process_data(struct sk_buff *skb)
/* at least two bytes will be used for the encoding */
if (skb->len < 2)
goto drop;
- iphc0 = lowpan_fetch_skb_u8(skb);
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
/* fragments assembling */
switch (iphc0 & LOWPAN_DISPATCH_MASK) {
@@ -718,18 +714,23 @@ lowpan_process_data(struct sk_buff *skb)
case LOWPAN_DISPATCH_FRAGN:
{
struct lowpan_fragment *frame;
- u8 len, offset;
- u16 tag;
+ /* slen stores the rightmost 8 bits of the 11-bit length */
+ u8 slen, offset;
+ u16 len, tag;
bool found = false;
- len = lowpan_fetch_skb_u8(skb); /* frame length */
- tag = lowpan_fetch_skb_u16(skb);
+ if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
+ lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
+ goto drop;
+
+ /* combine the 3 MSBs with the 8 LSBs to recover the 11-bit length */
+ len = ((iphc0 & 7) << 8) | slen;
/*
* check if frame assembling with the same tag is
* already in progress
*/
- spin_lock(&flist_lock);
+ spin_lock_bh(&flist_lock);
list_for_each_entry(frame, &lowpan_fragments, list)
if (frame->tag == tag) {
@@ -739,7 +740,7 @@ lowpan_process_data(struct sk_buff *skb)
/* alloc new frame structure */
if (!found) {
- frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
+ frame = lowpan_alloc_new_frame(skb, len, tag);
if (!frame)
goto unlock_and_drop;
}
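
The corrected length handling reflects the 6LoWPAN fragmentation header layout: an 11-bit datagram size whose top 3 bits ride in the dispatch byte and whose low 8 bits follow, then a 16-bit tag in network byte order. A self-contained decode of an example FRAG1 header:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example FRAG1 header: dispatch | size MSBs, size LSBs, tag */
	uint8_t frag[] = { 0xc4, 0x00, 0x12, 0x34 };
	uint16_t len = ((frag[0] & 7) << 8) | frag[1];	/* 11-bit size */
	uint16_t tag = (frag[2] << 8) | frag[3];	/* 16-bit tag */

	printf("datagram size = %u, tag = 0x%04x\n", len, tag);
	return 0;	/* prints: datagram size = 1024, tag = 0x1234 */
}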
@@ -747,7 +748,8 @@ lowpan_process_data(struct sk_buff *skb)
if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
goto unlock_and_drop;
- offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+ if (lowpan_fetch_skb_u8(skb, &offset)) /* fetch offset */
+ goto unlock_and_drop;
/* if payload fits buffer, copy it */
if (likely((offset * 8 + skb->len) <= frame->length))
@@ -762,17 +764,20 @@ lowpan_process_data(struct sk_buff *skb)
if ((frame->bytes_rcv == frame->length) &&
frame->timer.expires > jiffies) {
/* if the timer hasn't expired yet, delete it first */
- del_timer(&frame->timer);
+ del_timer_sync(&frame->timer);
list_del(&frame->list);
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
dev_kfree_skb(skb);
skb = frame->skb;
kfree(frame);
- iphc0 = lowpan_fetch_skb_u8(skb);
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
+
break;
}
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
return kfree_skb(skb), 0;
}
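
spin_lock() becomes spin_lock_bh() in these hunks because the fragment list is also reaped from timer (softirq) context, and del_timer() becomes del_timer_sync() so the handler cannot still be running when the frame is freed. The general locking rule, sketched with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_frags);
static DEFINE_SPINLOCK(example_lock);

static void example_process_ctx(void)
{
	/* process context must disable bottom halves, or a softirq
	 * taking the same lock on this CPU would deadlock */
	spin_lock_bh(&example_lock);
	/* ... walk or modify example_frags ... */
	spin_unlock_bh(&example_lock);
}

static void example_softirq_ctx(void)
{
	/* already running in BH context: plain spin_lock() suffices */
	spin_lock(&example_lock);
	/* ... */
	spin_unlock(&example_lock);
}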
@@ -780,20 +785,19 @@ lowpan_process_data(struct sk_buff *skb)
break;
}
- iphc1 = lowpan_fetch_skb_u8(skb);
+ if (lowpan_fetch_skb_u8(skb, &iphc1))
+ goto drop;
_saddr = mac_cb(skb)->sa.hwaddr;
_daddr = mac_cb(skb)->da.hwaddr;
- pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
+ pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
/* one more byte follows if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
- pr_debug("(%s): CID flag is set, increase header with one\n",
- __func__);
- if (!skb->len)
+ pr_debug("CID flag is set, increase header with one\n");
+ if (lowpan_fetch_skb_u8(skb, &num_context))
goto drop;
- num_context = lowpan_fetch_skb_u8(skb);
}
hdr.version = 6;
@@ -805,9 +809,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
*/
case 0: /* 00b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
memcpy(&hdr.flow_lbl, &skb->data[0], 3);
skb_pull(skb, 3);
hdr.priority = ((tmp >> 2) & 0x0f);
@@ -819,9 +823,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + DSCP (1 byte), Flow Label is elided
*/
case 1: /* 10b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
hdr.flow_lbl[1] = 0;
@@ -832,9 +836,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*/
case 2: /* 01b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
@@ -853,27 +857,26 @@ lowpan_process_data(struct sk_buff *skb)
/* Next Header */
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
/* Next header is carried inline */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
goto drop;
- hdr.nexthdr = lowpan_fetch_skb_u8(skb);
- pr_debug("(%s): NH flag is set, next header is carried "
- "inline: %02x\n", __func__, hdr.nexthdr);
+
+ pr_debug("NH flag is set, next header carried inline: %02x\n",
+ hdr.nexthdr);
}
/* Hop Limit */
if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
else {
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
goto drop;
- hdr.hop_limit = lowpan_fetch_skb_u8(skb);
}
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
/* Source address uncompression */
- pr_debug("(%s): source address stateless compression\n", __func__);
+ pr_debug("source address stateless compression\n");
err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
lowpan_unc_llconf[tmp], skb->data);
if (err)
@@ -885,19 +888,15 @@ lowpan_process_data(struct sk_buff *skb)
/* check for Multicast Compression */
if (iphc1 & LOWPAN_IPHC_M) {
if (iphc1 & LOWPAN_IPHC_DAC) {
- pr_debug("(%s): destination address context-based "
- "multicast compression\n", __func__);
+ pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
u8 prefix[] = {0xff, 0x02};
- pr_debug("(%s): destination address non-context-based"
- " multicast compression\n", __func__);
+ pr_debug("dest: non context-based mcast compression\n");
if (0 < tmp && tmp < 3) {
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &prefix[1]))
goto drop;
- else
- prefix[1] = lowpan_fetch_skb_u8(skb);
}
err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
@@ -906,8 +905,7 @@ lowpan_process_data(struct sk_buff *skb)
goto drop;
}
} else {
- pr_debug("(%s): destination address stateless compression\n",
- __func__);
+ pr_debug("dest: stateless compression\n");
err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
lowpan_unc_llconf[tmp], skb->data);
if (err)
@@ -922,11 +920,11 @@ lowpan_process_data(struct sk_buff *skb)
/* Unfragmented packet */
hdr.payload_len = htons(skb->len);
- pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
- skb_headroom(skb), skb->len);
+ pr_debug("skb headroom size = %d, data length = %d\n",
+ skb_headroom(skb), skb->len);
- pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
- "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
+ "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
@@ -934,12 +932,25 @@ lowpan_process_data(struct sk_buff *skb)
return lowpan_skb_deliver(skb, &hdr);
unlock_and_drop:
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
drop:
kfree_skb(skb);
return -EINVAL;
}
+static int lowpan_set_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *sa = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* TODO: validate addr */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
/*
@@ -997,10 +1008,10 @@ lowpan_skb_fragmentation(struct sk_buff *skb)
tag = fragment_tag++;
/* first fragment header */
- head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
- head[1] = (payload_length >> 3) & 0xff;
- head[2] = tag & 0xff;
- head[3] = tag >> 8;
+ head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
+ head[1] = payload_length & 0xff;
+ head[2] = tag >> 8;
+ head[3] = tag & 0xff;
err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
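
The fix swaps the size and tag bytes into their on-wire order: the 3 high bits of the 11-bit payload length share the dispatch byte, and the tag goes out big-endian. The encode side as a standalone check (0xc0 is assumed here as the LOWPAN_DISPATCH_FRAG1 value):

#include <stdint.h>
#include <stdio.h>

static void frag1_header(uint8_t head[4], uint16_t payload_len, uint16_t tag)
{
	head[0] = 0xc0 | ((payload_len >> 8) & 0x7);	/* dispatch | size MSBs */
	head[1] = payload_len & 0xff;			/* size LSBs */
	head[2] = tag >> 8;				/* tag, network order */
	head[3] = tag & 0xff;
}

int main(void)
{
	uint8_t head[4];

	frag1_header(head, 1024, 0x1234);
	printf("%02x %02x %02x %02x\n", head[0], head[1], head[2], head[3]);
	return 0;	/* prints: c4 00 12 34 */
}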
@@ -1028,11 +1039,11 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
int err = -1;
- pr_debug("(%s): package xmit\n", __func__);
+ pr_debug("package xmit\n");
skb->dev = lowpan_dev_info(dev)->real_dev;
if (skb->dev == NULL) {
- pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
+ pr_debug("ERROR: no real wpan device found\n");
goto error;
}
@@ -1041,14 +1052,13 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- pr_debug("(%s): frame is too big, fragmentation is needed\n",
- __func__);
+ pr_debug("frame is too big, fragmentation is needed\n");
err = lowpan_skb_fragmentation(skb);
error:
dev_kfree_skb(skb);
out:
if (err < 0)
- pr_debug("(%s): ERROR: xmit failed\n", __func__);
+ pr_debug("ERROR: xmit failed\n");
return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
@@ -1083,7 +1093,7 @@ static struct header_ops lowpan_header_ops = {
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_start_xmit = lowpan_xmit,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = lowpan_set_address,
};
static struct ieee802154_mlme_ops lowpan_mlme = {
@@ -1094,8 +1104,6 @@ static struct ieee802154_mlme_ops lowpan_mlme = {
static void lowpan_setup(struct net_device *dev)
{
- pr_debug("(%s)\n", __func__);
-
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
dev->type = ARPHRD_IEEE802154;
@@ -1115,8 +1123,6 @@ static void lowpan_setup(struct net_device *dev)
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
- pr_debug("(%s)\n", __func__);
-
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
return -EINVAL;
@@ -1157,7 +1163,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
struct net_device *real_dev;
struct lowpan_dev_record *entry;
- pr_debug("(%s)\n", __func__);
+ pr_debug("adding new link\n");
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -1183,8 +1189,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
- spin_lock_init(&flist_lock);
-
register_netdevice(dev);
return 0;
@@ -1195,19 +1199,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
struct lowpan_dev_record *entry, *tmp;
- struct lowpan_fragment *frame, *tframe;
ASSERT_RTNL();
- spin_lock(&flist_lock);
- list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
- del_timer(&frame->timer);
- list_del(&frame->list);
- dev_kfree_skb(frame->skb);
- kfree(frame);
- }
- spin_unlock(&flist_lock);
-
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
@@ -1252,8 +1246,6 @@ static int __init lowpan_init_module(void)
{
int err = 0;
- pr_debug("(%s)\n", __func__);
-
err = lowpan_netlink_init();
if (err < 0)
goto out;
@@ -1265,11 +1257,24 @@ out:
static void __exit lowpan_cleanup_module(void)
{
- pr_debug("(%s)\n", __func__);
+ struct lowpan_fragment *frame, *tframe;
lowpan_netlink_fini();
dev_remove_pack(&lowpan_packet_type);
+
+ /* Now that the 6lowpan packet_type is removed, no new fragments
+ * are expected on RX; this is the time to clean up incomplete
+ * fragments.
+ */
+ spin_lock_bh(&flist_lock);
+ list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+ del_timer_sync(&frame->timer);
+ list_del(&frame->list);
+ dev_kfree_skb(frame->skb);
+ kfree(frame);
+ }
+ spin_unlock_bh(&flist_lock);
}
module_init(lowpan_init_module);
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index c8097ae2482..97351e1d07a 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -44,7 +44,7 @@ struct genl_family nl802154_family = {
struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
- struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
unsigned long f;
if (!msg)
@@ -80,7 +80,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
int flags, u8 req)
{
void *hdr;
- struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return NULL;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index ca92587720f..1e9917124e7 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -530,7 +530,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
if (!dev)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index eed291626da..d54be34cca9 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -101,7 +101,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
if (!phy)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
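
All three ieee802154 call sites make the same one-line change: nlmsg_new() adds NLMSG_HDRLEN on top of the payload size it is given, so passing NLMSG_GOODSIZE (which already accounts for the header) overshoots the intended allocation. NLMSG_DEFAULT_SIZE is NLMSG_GOODSIZE minus NLMSG_HDRLEN, keeping the total within one "good" size. A minimal sketch:

#include <net/netlink.h>

static struct sk_buff *example_msg(void)
{
	/* payload budget only; nlmsg_new() adds NLMSG_HDRLEN itself */
	return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}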
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 20f1cb5c8ab..5a19aeb8609 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -310,6 +310,17 @@ config SYN_COOKIES
If unsure, say N.
+config NET_IPVTI
+ tristate "Virtual (secure) IP: tunneling"
+ select INET_TUNNEL
+ depends on INET_XFRM_MODE_TUNNEL
+ ---help---
+ Tunneling means encapsulating data of one protocol type within
+ another protocol and sending it over a channel that understands the
+ encapsulating protocol. Combined with the xfrm tunnel mode, this
+ provides the notion of a secure tunnel for IPsec, with a routing
+ protocol running on top.
+
config INET_AH
tristate "IP: AH transformation"
select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6..15ca63ec604 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -7,7 +7,7 @@ obj-y := route.o inetpeer.o protocol.o \
ip_output.o ip_sockglue.o inet_hashtables.o \
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
- tcp_minisocks.o tcp_cong.o \
+ tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
datagram.o raw.o udp.o udplite.o \
arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
+obj-$(CONFIG_NET_IPVTI) += ip_vti.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o
obj-$(CONFIG_INET_ESP) += esp4.o
@@ -48,7 +49,7 @@ obj-$(CONFIG_TCP_CONG_SCALABLE) += tcp_scalable.o
obj-$(CONFIG_TCP_CONG_LP) += tcp_lp.o
obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
-obj-$(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) += tcp_memcontrol.o
+obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c8f7aee587d..fe4582ca969 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -157,6 +157,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+ dst_release(sk->sk_rx_dst);
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -242,20 +243,18 @@ void build_ehash_secret(void)
}
EXPORT_SYMBOL(build_ehash_secret);
-static inline int inet_netns_ok(struct net *net, int protocol)
+static inline int inet_netns_ok(struct net *net, __u8 protocol)
{
- int hash;
const struct net_protocol *ipprot;
if (net_eq(net, &init_net))
return 1;
- hash = protocol & (MAX_INET_PROTOS - 1);
- ipprot = rcu_dereference(inet_protos[hash]);
-
- if (ipprot == NULL)
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot == NULL) {
/* raw IP is OK */
return 1;
+ }
return ipprot->netns_ok;
}
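
Dropping `protocol & (MAX_INET_PROTOS - 1)` here, and in the GSO/GRO paths below, relies on MAX_INET_PROTOS now covering the full 8-bit protocol space (256 entries), so a u8 protocol number indexes inet_protos[] directly and the old masking is the identity function. Illustrated:

#include <stdint.h>
#include <stdio.h>

#define MAX_INET_PROTOS 256	/* one slot per possible u8 protocol */

int main(void)
{
	uint8_t protocol = 6;	/* IPPROTO_TCP */

	/* with 256 slots the mask changes nothing */
	printf("%u == %u\n", protocol, protocol & (MAX_INET_PROTOS - 1));
	return 0;
}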
@@ -553,15 +552,16 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
- return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
+ return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
-static long inet_wait_for_connect(struct sock *sk, long timeo)
+static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ sk->sk_write_pending += writebias;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -577,6 +577,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
+ sk->sk_write_pending -= writebias;
return timeo;
}
@@ -584,8 +585,8 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
-int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
{
struct sock *sk = sock->sk;
int err;
@@ -594,8 +595,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
- lock_sock(sk);
-
if (uaddr->sa_family == AF_UNSPEC) {
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -635,8 +634,12 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
+ tcp_sk(sk)->fastopen_req &&
+ tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+
/* Error code is set above */
- if (!timeo || !inet_wait_for_connect(sk, timeo))
+ if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
goto out;
err = sock_intr_errno(timeo);
@@ -658,7 +661,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
err = 0;
out:
- release_sock(sk);
return err;
sock_error:
@@ -668,6 +670,18 @@ sock_error:
sock->state = SS_DISCONNECTING;
goto out;
}
+EXPORT_SYMBOL(__inet_stream_connect);
+
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ int err;
+
+ lock_sock(sock->sk);
+ err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+ release_sock(sock->sk);
+ return err;
+}
EXPORT_SYMBOL(inet_stream_connect);
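
Splitting inet_stream_connect() follows the usual kernel convention: the double-underscore variant runs with the socket lock already held — presumably so the TCP Fast Open path added in this series can call it under its own locking — and the thin public wrapper just brackets it with lock_sock()/release_sock(). The shape, sketched with hypothetical names:

#include <net/sock.h>

static int __example_op(struct socket *sock)
{
	/* caller holds lock_sock(sock->sk) */
	/* ... the actual state machine ... */
	return 0;
}

static int example_op(struct socket *sock)
{
	int err;

	lock_sock(sock->sk);
	err = __example_op(sock);
	release_sock(sock->sk);
	return err;
}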
/*
@@ -1216,8 +1230,8 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
- const struct iphdr *iph;
const struct net_protocol *ops;
+ const struct iphdr *iph;
int proto;
int ihl;
int err = -EINVAL;
@@ -1236,7 +1250,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
err = -EPROTONOSUPPORT;
rcu_read_lock();
@@ -1253,8 +1267,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
- struct iphdr *iph;
const struct net_protocol *ops;
+ struct iphdr *iph;
int proto;
int ihl;
int id;
@@ -1286,7 +1300,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
id = ntohs(iph->id);
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
@@ -1340,7 +1354,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
goto out;
}
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
@@ -1398,11 +1412,11 @@ out:
static int inet_gro_complete(struct sk_buff *skb)
{
- const struct net_protocol *ops;
+ __be16 newlen = htons(skb->len - skb_network_offset(skb));
struct iphdr *iph = ip_hdr(skb);
- int proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ const struct net_protocol *ops;
+ int proto = iph->protocol;
int err = -ENOSYS;
- __be16 newlen = htons(skb->len - skb_network_offset(skb));
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
@@ -1520,14 +1534,15 @@ static const struct net_protocol igmp_protocol = {
#endif
static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .gso_send_check = tcp_v4_gso_send_check,
- .gso_segment = tcp_tso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- .no_policy = 1,
- .netns_ok = 1,
+ .early_demux = tcp_v4_early_demux,
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .gso_send_check = tcp_v4_gso_send_check,
+ .gso_segment = tcp_tso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ .no_policy = 1,
+ .netns_ok = 1,
};
static const struct net_protocol udp_protocol = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e8f2617ecd4..a0d8392491c 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -398,16 +398,25 @@ static void ah4_err(struct sk_buff *skb, u32 info)
struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
- pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
- ntohl(ah->spi), ntohl(iph->daddr));
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cda37be02f8..77e87aff419 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -475,8 +475,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
return 1;
}
- paddr = skb_rtable(skb)->rt_gateway;
-
+ paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);
if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
paddr, dev))
return 0;
@@ -790,7 +789,8 @@ static int arp_process(struct sk_buff *skb)
* Check for bad requests for 127.x.x.x and requests for multicast
* addresses. If this is one such, delete it.
*/
- if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
+ if (ipv4_is_multicast(tip) ||
+ (!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
goto out;
/*
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 10e15a144e9..44bf82e3aef 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1500,7 +1500,8 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
- if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
+ i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net, 0);
}
@@ -1617,6 +1618,8 @@ static struct devinet_sysctl_table {
"force_igmp_version"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
+ DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
+ "route_localnet"),
},
};
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cb982a61536..b61e9deb7c7 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -484,16 +484,25 @@ static void esp4_err(struct sk_buff *skb, u32 info)
struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
- ntohl(esph->spi), ntohl(iph->daddr));
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
xfrm_state_put(x);
}
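
ah4_err() and esp4_err() now share the same shape: accept only DEST_UNREACH/FRAG_NEEDED (note the intentional fall-through) plus the newly handled ICMP_REDIRECT, look up the SA, then route the event to the new PMTU/redirect helpers. A condensed sketch with the SA lookup elided:

#include <linux/icmp.h>
#include <net/route.h>

static void example_xfrm_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return;
		/* fall through: FRAG_NEEDED is accepted like REDIRECT */
	case ICMP_REDIRECT:
		break;
	default:
		return;
	}

	/* ... xfrm_state_lookup() on the SPI, bail out if no SA ... */

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
}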
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3854411fa37..c43ae3fba79 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -31,6 +31,7 @@
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
+#include <linux/cache.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -85,6 +86,24 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
tb = fib_trie_table(id);
if (!tb)
return NULL;
+
+ switch (id) {
+ case RT_TABLE_LOCAL:
+ net->ipv4.fib_local = tb;
+ break;
+
+ case RT_TABLE_MAIN:
+ net->ipv4.fib_main = tb;
+ break;
+
+ case RT_TABLE_DEFAULT:
+ net->ipv4.fib_default = tb;
+ break;
+
+ default:
+ break;
+ }
+
h = id & (FIB_TABLE_HASHSZ - 1);
hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
return tb;
@@ -150,10 +169,6 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
if (ipv4_is_multicast(addr))
return RTN_MULTICAST;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
local_table = fib_get_table(net, RT_TABLE_LOCAL);
if (local_table) {
ret = RTN_UNICAST;
@@ -180,6 +195,44 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
}
EXPORT_SYMBOL(inet_dev_addr_type);
+__be32 fib_compute_spec_dst(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct in_device *in_dev;
+ struct fib_result res;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct net *net;
+ int scope;
+
+ rt = skb_rtable(skb);
+ if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
+ RTCF_LOCAL)
+ return ip_hdr(skb)->daddr;
+
+ in_dev = __in_dev_get_rcu(dev);
+ BUG_ON(!in_dev);
+
+ net = dev_net(dev);
+
+ scope = RT_SCOPE_UNIVERSE;
+ if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+ fl4.flowi4_oif = 0;
+ fl4.flowi4_iif = net->loopback_dev->ifindex;
+ fl4.daddr = ip_hdr(skb)->saddr;
+ fl4.saddr = 0;
+ fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.flowi4_scope = scope;
+ fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+ if (!fib_lookup(net, &fl4, &res))
+ return FIB_RES_PREFSRC(net, res);
+ } else {
+ scope = RT_SCOPE_LINK;
+ }
+
+ return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
+}
+
/* Given (packet source, input interface) and optional (dst, oif, tos):
* - (main) check that source is valid, i.e. not broadcast or our local
* address.
@@ -188,17 +241,15 @@ EXPORT_SYMBOL(inet_dev_addr_type);
* - check, that packet arrived from expected physical interface.
* called with rcu_read_lock()
*/
-int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
- int oif, struct net_device *dev, __be32 *spec_dst,
- u32 *itag)
+static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ int rpf, struct in_device *idev, u32 *itag)
{
- struct in_device *in_dev;
- struct flowi4 fl4;
+ int ret, no_addr, accept_local;
struct fib_result res;
- int no_addr, rpf, accept_local;
- bool dev_match;
- int ret;
+ struct flowi4 fl4;
struct net *net;
+ bool dev_match;
fl4.flowi4_oif = 0;
fl4.flowi4_iif = oif;
@@ -207,20 +258,10 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
- no_addr = rpf = accept_local = 0;
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev) {
- no_addr = in_dev->ifa_list == NULL;
-
- /* Ignore rp_filter for packets protected by IPsec. */
- rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
-
- accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
- fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
- }
+ no_addr = idev->ifa_list == NULL;
- if (in_dev == NULL)
- goto e_inval;
+ accept_local = IN_DEV_ACCEPT_LOCAL(idev);
+ fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res))
@@ -229,7 +270,6 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
if (res.type != RTN_LOCAL || !accept_local)
goto e_inval;
}
- *spec_dst = FIB_RES_PREFSRC(net, res);
fib_combine_itag(itag, &res);
dev_match = false;
@@ -258,17 +298,14 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
ret = 0;
if (fib_lookup(net, &fl4, &res) == 0) {
- if (res.type == RTN_UNICAST) {
- *spec_dst = FIB_RES_PREFSRC(net, res);
+ if (res.type == RTN_UNICAST)
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
- }
}
return ret;
last_resort:
if (rpf)
goto e_rpf;
- *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
*itag = 0;
return 0;
@@ -278,6 +315,20 @@ e_rpf:
return -EXDEV;
}
+/* Ignore rp_filter for packets protected by IPsec. */
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag)
+{
+ int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
+
+ if (!r && !fib_num_tclassid_users(dev_net(dev))) {
+ *itag = 0;
+ return 0;
+ }
+ return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
+}
+
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
@@ -879,10 +930,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
.flowi4_scope = frn->fl_scope,
};
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
frn->err = -ENOENT;
if (tb) {
local_bh_disable();
@@ -935,8 +982,11 @@ static void nl_fib_input(struct sk_buff *skb)
static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
- sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
- nl_fib_input, NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .input = nl_fib_input,
+ };
+
+ sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
if (sk == NULL)
return -EAFNOSUPPORT;
net->ipv4.fibnl = sk;
@@ -996,6 +1046,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, 2, -1);
+ rt_flush_dev(dev);
return NOTIFY_DONE;
}
@@ -1021,11 +1072,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), 0);
break;
case NETDEV_UNREGISTER_BATCH:
- /* The batch unregister is only called on the first
- * device in the list of devices being unregistered.
- * Therefore we should not pass dev_net(dev) in here.
- */
- rt_cache_flush_batch(NULL);
break;
}
return NOTIFY_DONE;
@@ -1090,6 +1136,9 @@ static int __net_init fib_net_init(struct net *net)
{
int error;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ net->ipv4.fib_num_tclassid_users = 0;
+#endif
error = ip_fib_net_init(net);
if (error < 0)
goto out;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2d043f71ef7..a83d74e498d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -47,14 +47,7 @@ struct fib4_rule {
#endif
};
-#ifdef CONFIG_IP_ROUTE_CLASSID
-u32 fib_rules_tclass(const struct fib_result *res)
-{
- return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
-}
-#endif
-
-int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
{
struct fib_lookup_arg arg = {
.result = res,
@@ -63,11 +56,15 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
int err;
err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
- res->r = arg.rule;
-
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (arg.rule)
+ res->tclassid = ((struct fib4_rule *)arg.rule)->tclassid;
+ else
+ res->tclassid = 0;
+#endif
return err;
}
-EXPORT_SYMBOL_GPL(fib_lookup);
+EXPORT_SYMBOL_GPL(__fib_lookup);
static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
int flags, struct fib_lookup_arg *arg)
@@ -169,8 +166,11 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->dst = nla_get_be32(tb[FRA_DST]);
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (tb[FRA_FLOW])
+ if (tb[FRA_FLOW]) {
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
+ if (rule4->tclassid)
+ net->ipv4.fib_num_tclassid_users++;
+ }
#endif
rule4->src_len = frh->src_len;
@@ -179,11 +179,24 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->dstmask = inet_make_mask(rule4->dst_len);
rule4->tos = frh->tos;
+ net->ipv4.fib_has_custom_rules = true;
err = 0;
errout:
return err;
}
+static void fib4_rule_delete(struct fib_rule *rule)
+{
+ struct net *net = rule->fr_net;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+
+ if (rule4->tclassid)
+ net->ipv4.fib_num_tclassid_users--;
+#endif
+ net->ipv4.fib_has_custom_rules = true;
+}
+
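
fib_num_tclassid_users is a plain per-netns counter: every configured tclassid (from a rule here, or from a route nexthop in fib_semantics.c below) increments it on creation and decrements it on deletion, so fib_validate_source() can skip the full lookup when it is zero and rp_filter is off. A simplified sketch of the fast path this enables (field only exists under CONFIG_IP_ROUTE_CLASSID):

/* simplified from fib_validate_source() later in this diff */
static int example_validate(struct net *net, bool rpf, u32 *itag)
{
	if (!rpf && !net->ipv4.fib_num_tclassid_users) {
		*itag = 0;
		return 0;	/* nothing to compute, skip the lookup */
	}
	return -1;		/* fall back to the slow path */
}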
static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
struct nlattr **tb)
{
@@ -256,6 +269,7 @@ static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
.action = fib4_rule_action,
.match = fib4_rule_match,
.configure = fib4_rule_configure,
+ .delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
.default_pref = fib_default_rule_pref,
@@ -295,6 +309,7 @@ int __net_init fib4_rules_init(struct net *net)
if (err < 0)
goto fail;
net->ipv4.rules_ops = ops;
+ net->ipv4.fib_has_custom_rules = false;
return 0;
fail:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e5b7182fa09..da80dc14cc7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -140,6 +140,62 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
},
};
+static void rt_fibinfo_free(struct rtable __rcu **rtp)
+{
+ struct rtable *rt = rcu_dereference_protected(*rtp, 1);
+
+ if (!rt)
+ return;
+
+ /* RCU_INIT_POINTER(*rtp, NULL) is not even needed here,
+ * because we waited an RCU grace period before calling
+ * free_fib_info_rcu()
+ */
+
+ dst_free(&rt->dst);
+}
+
+static void free_nh_exceptions(struct fib_nh *nh)
+{
+ struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ int i;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ fnhe = rcu_dereference_protected(hash[i].chain, 1);
+ while (fnhe) {
+ struct fib_nh_exception *next;
+
+ next = rcu_dereference_protected(fnhe->fnhe_next, 1);
+
+ rt_fibinfo_free(&fnhe->fnhe_rth);
+
+ kfree(fnhe);
+
+ fnhe = next;
+ }
+ }
+ kfree(hash);
+}
+
+static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
+{
+ int cpu;
+
+ if (!rtp)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rtable *rt;
+
+ rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
+ if (rt)
+ dst_free(&rt->dst);
+ }
+ free_percpu(rtp);
+}
+
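
All three new free helpers use rcu_dereference_protected(..., 1), asserting unconditionally that no RCU protection is needed; that is valid only because free_fib_info() funnels teardown through call_rcu(), so a grace period separates unpublication from these frees. The idiom in isolation, on a hypothetical object:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void example_free_rcu(struct rcu_head *head)
{
	struct example_obj *obj = container_of(head, struct example_obj, rcu);

	/* a grace period has elapsed: no reader can still hold obj,
	 * so plain (cond = 1) access to its internals is legal here */
	kfree(obj);
}

static void example_release(struct example_obj *obj)
{
	call_rcu(&obj->rcu, example_free_rcu);
}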
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
@@ -148,6 +204,10 @@ static void free_fib_info_rcu(struct rcu_head *head)
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
+ if (nexthop_nh->nh_exceptions)
+ free_nh_exceptions(nexthop_nh);
+ rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
+ rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
release_net(fi->fib_net);
@@ -163,6 +223,12 @@ void free_fib_info(struct fib_info *fi)
return;
}
fib_info_cnt--;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ change_nexthops(fi) {
+ if (nexthop_nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users--;
+ } endfor_nexthops(fi);
+#endif
call_rcu(&fi->rcu, free_fib_info_rcu);
}
@@ -421,6 +487,8 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+ if (nexthop_nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
}
@@ -769,6 +837,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
fi->fib_nhs = nhs;
change_nexthops(fi) {
nexthop_nh->nh_parent = fi;
+ nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
} endfor_nexthops(fi)
if (cfg->fc_mx) {
@@ -779,9 +848,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
int type = nla_type(nla);
if (type) {
+ u32 val;
+
if (type > RTAX_MAX)
goto err_inval;
- fi->fib_metrics[type - 1] = nla_get_u32(nla);
+ val = nla_get_u32(nla);
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+ val = 65535 - 40;
+ if (type == RTAX_MTU && val > 65535 - 15)
+ val = 65535 - 15;
+ fi->fib_metrics[type - 1] = val;
}
}
}
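
The new clamp bounds userspace-supplied metrics to what an IPv4 packet can carry: 65535 minus 40 bytes of IP+TCP header for the advertised MSS, and 65535 minus 15 for the MTU (the margins mirror the patch; the 15-byte figure is not explained there). A standalone check:

#include <stdint.h>
#include <stdio.h>

static uint32_t clamp_metric(int is_advmss, uint32_t val)
{
	uint32_t limit = is_advmss ? 65535 - 40 : 65535 - 15;

	return val > limit ? limit : val;
}

int main(void)
{
	printf("%u\n", clamp_metric(1, 100000));	/* 65495 */
	printf("%u\n", clamp_metric(0, 70000));		/* 65520 */
	return 0;
}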
@@ -810,6 +886,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
+ if (nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight = 1;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7b4bd..f0cdb30921c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -159,7 +159,6 @@ struct trie {
#endif
};
-static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
int wasfull);
static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
@@ -473,7 +472,7 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
}
pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
- sizeof(struct rt_trie_node) << bits);
+ sizeof(struct rt_trie_node *) << bits);
return tn;
}
@@ -490,7 +489,7 @@ static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *
return ((struct tnode *) n)->pos == tn->pos + tn->bits;
}
-static inline void put_child(struct trie *t, struct tnode *tn, int i,
+static inline void put_child(struct tnode *tn, int i,
struct rt_trie_node *n)
{
tnode_put_child_reorg(tn, i, n, -1);
@@ -754,8 +753,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
goto nomem;
}
- put_child(t, tn, 2*i, (struct rt_trie_node *) left);
- put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
+ put_child(tn, 2*i, (struct rt_trie_node *) left);
+ put_child(tn, 2*i+1, (struct rt_trie_node *) right);
}
}
@@ -776,9 +775,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
if (tkey_extract_bits(node->key,
oldtnode->pos + oldtnode->bits,
1) == 0)
- put_child(t, tn, 2*i, node);
+ put_child(tn, 2*i, node);
else
- put_child(t, tn, 2*i+1, node);
+ put_child(tn, 2*i+1, node);
continue;
}
@@ -786,8 +785,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
inode = (struct tnode *) node;
if (inode->bits == 1) {
- put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
- put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
+ put_child(tn, 2*i, rtnl_dereference(inode->child[0]));
+ put_child(tn, 2*i+1, rtnl_dereference(inode->child[1]));
tnode_free_safe(inode);
continue;
@@ -817,22 +816,22 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
*/
left = (struct tnode *) tnode_get_child(tn, 2*i);
- put_child(t, tn, 2*i, NULL);
+ put_child(tn, 2*i, NULL);
BUG_ON(!left);
right = (struct tnode *) tnode_get_child(tn, 2*i+1);
- put_child(t, tn, 2*i+1, NULL);
+ put_child(tn, 2*i+1, NULL);
BUG_ON(!right);
size = tnode_child_length(left);
for (j = 0; j < size; j++) {
- put_child(t, left, j, rtnl_dereference(inode->child[j]));
- put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
+ put_child(left, j, rtnl_dereference(inode->child[j]));
+ put_child(right, j, rtnl_dereference(inode->child[j + size]));
}
- put_child(t, tn, 2*i, resize(t, left));
- put_child(t, tn, 2*i+1, resize(t, right));
+ put_child(tn, 2*i, resize(t, left));
+ put_child(tn, 2*i+1, resize(t, right));
tnode_free_safe(inode);
}
@@ -877,7 +876,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (!newn)
goto nomem;
- put_child(t, tn, i/2, (struct rt_trie_node *)newn);
+ put_child(tn, i/2, (struct rt_trie_node *)newn);
}
}
@@ -892,21 +891,21 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
if (left == NULL) {
if (right == NULL) /* Both are empty */
continue;
- put_child(t, tn, i/2, right);
+ put_child(tn, i/2, right);
continue;
}
if (right == NULL) {
- put_child(t, tn, i/2, left);
+ put_child(tn, i/2, left);
continue;
}
/* Two nonempty children */
newBinNode = (struct tnode *) tnode_get_child(tn, i/2);
- put_child(t, tn, i/2, NULL);
- put_child(t, newBinNode, 0, left);
- put_child(t, newBinNode, 1, right);
- put_child(t, tn, i/2, resize(t, newBinNode));
+ put_child(tn, i/2, NULL);
+ put_child(newBinNode, 0, left);
+ put_child(newBinNode, 1, right);
+ put_child(tn, i/2, resize(t, newBinNode));
}
tnode_free_safe(oldtnode);
return tn;
@@ -1007,9 +1006,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
- tn = (struct tnode *) resize(t, (struct tnode *)tn);
+ tn = (struct tnode *)resize(t, tn);
- tnode_put_child_reorg((struct tnode *)tp, cindex,
+ tnode_put_child_reorg(tp, cindex,
(struct rt_trie_node *)tn, wasfull);
tp = node_parent((struct rt_trie_node *) tn);
@@ -1024,7 +1023,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
/* Handle last (top) tnode */
if (IS_TNODE(tn))
- tn = (struct tnode *)resize(t, (struct tnode *)tn);
+ tn = (struct tnode *)resize(t, tn);
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
@@ -1125,7 +1124,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
+ put_child(tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1155,13 +1154,12 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
node_set_parent((struct rt_trie_node *)tn, tp);
missbit = tkey_extract_bits(key, newpos, 1);
- put_child(t, tn, missbit, (struct rt_trie_node *)l);
- put_child(t, tn, 1-missbit, n);
+ put_child(tn, missbit, (struct rt_trie_node *)l);
+ put_child(tn, 1-missbit, n);
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex,
- (struct rt_trie_node *)tn);
+ put_child(tp, cindex, (struct rt_trie_node *)tn);
} else {
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
@@ -1620,7 +1618,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
if (tp) {
t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, NULL);
+ put_child(tp, cindex, NULL);
trie_rebalance(t, tp);
} else
RCU_INIT_POINTER(t->trie, NULL);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c75efbdc71c..f2eccd53174 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -95,6 +95,7 @@
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
+#include <net/ip_fib.h>
/*
* Build xmit assembly blocks
@@ -253,10 +254,10 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
/* Limit if icmp type is enabled in ratemask. */
if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
- if (!rt->peer)
- rt_bind_peer(rt, fl4->daddr, 1);
- rc = inet_peer_xrlim_allow(rt->peer,
+ struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+ rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
+ inet_putpeer(peer);
}
out:
return rc;
@@ -334,7 +335,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
- __be32 daddr;
+ __be32 daddr, saddr;
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
return;
@@ -348,6 +349,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = ip_hdr(skb)->saddr;
+ saddr = fib_compute_spec_dst(skb);
ipc.opt = NULL;
ipc.tx_flags = 0;
if (icmp_param->replyopts.opt.opt.optlen) {
@@ -357,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
}
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
- fl4.saddr = rt->rt_spec_dst;
+ fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -569,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
rcu_read_lock();
if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
- dev = dev_get_by_index_rcu(net, rt->rt_iif);
+ dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -632,6 +634,27 @@ out:;
EXPORT_SYMBOL(icmp_send);
+static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct net_protocol *ipprot;
+ int protocol = iph->protocol;
+
+ /* Ensure the full IP header plus 8 bytes of protocol data are
+ * present, to avoid additional coding at protocol handlers.
+ */
+ if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
+ return;
+
+ raw_icmp_error(skb, protocol, info);
+
+ rcu_read_lock();
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot && ipprot->err_handler)
+ ipprot->err_handler(skb, info);
+ rcu_read_unlock();
+}
+
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
*/
@@ -640,10 +663,8 @@ static void icmp_unreach(struct sk_buff *skb)
{
const struct iphdr *iph;
struct icmphdr *icmph;
- int hash, protocol;
- const struct net_protocol *ipprot;
- u32 info = 0;
struct net *net;
+ u32 info = 0;
net = dev_net(skb_dst(skb)->dev);
@@ -674,9 +695,7 @@ static void icmp_unreach(struct sk_buff *skb)
LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
&iph->daddr);
} else {
- info = ip_rt_frag_needed(net, iph,
- ntohs(icmph->un.frag.mtu),
- skb->dev);
+ info = ntohs(icmph->un.frag.mtu);
if (!info)
goto out;
}
@@ -720,26 +739,7 @@ static void icmp_unreach(struct sk_buff *skb)
goto out;
}
- /* Checkin full IP header plus 8 bytes of protocol to
- * avoid additional coding at protocol handlers.
- */
- if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
- goto out;
-
- iph = (const struct iphdr *)skb->data;
- protocol = iph->protocol;
-
- /*
- * Deliver ICMP message to raw sockets. Pretty useless feature?
- */
- raw_icmp_error(skb, protocol, info);
-
- hash = protocol & (MAX_INET_PROTOS - 1);
- rcu_read_lock();
- ipprot = rcu_dereference(inet_protos[hash]);
- if (ipprot && ipprot->err_handler)
- ipprot->err_handler(skb, info);
- rcu_read_unlock();
+ icmp_socket_deliver(skb, info);
out:
return;
@@ -755,46 +755,15 @@ out_err:
static void icmp_redirect(struct sk_buff *skb)
{
- const struct iphdr *iph;
-
- if (skb->len < sizeof(struct iphdr))
- goto out_err;
-
- /*
- * Get the copied header of the packet that caused the redirect
- */
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto out;
-
- iph = (const struct iphdr *)skb->data;
-
- switch (icmp_hdr(skb)->code & 7) {
- case ICMP_REDIR_NET:
- case ICMP_REDIR_NETTOS:
- /*
- * As per RFC recommendations now handle it as a host redirect.
- */
- case ICMP_REDIR_HOST:
- case ICMP_REDIR_HOSTTOS:
- ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
- icmp_hdr(skb)->un.gateway,
- iph->saddr, skb->dev);
- break;
+ if (skb->len < sizeof(struct iphdr)) {
+ ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+ return;
}
- /* Ping wants to see redirects.
- * Let's pretend they are errors of sorts... */
- if (iph->protocol == IPPROTO_ICMP &&
- iph->ihl >= 5 &&
- pskb_may_pull(skb, (iph->ihl<<2)+8)) {
- ping_err(skb, icmp_hdr(skb)->un.gateway);
- }
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return;
-out:
- return;
-out_err:
- ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
- goto out;
+ icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
}
/*
@@ -868,86 +837,6 @@ out_err:
goto out;
}
-
-/*
- * Handle ICMP_ADDRESS_MASK requests. (RFC950)
- *
- * RFC1122 (3.2.2.9). A host MUST only send replies to
- * ADDRESS_MASK requests if it's been configured as an address mask
- * agent. Receiving a request doesn't constitute implicit permission to
- * act as one. Of course, implementing this correctly requires (SHOULD)
- * a way to turn the functionality on and off. Another one for sysctl(),
- * I guess. -- MS
- *
- * RFC1812 (4.3.3.9). A router MUST implement it.
- * A router SHOULD have switch turning it on/off.
- * This switch MUST be ON by default.
- *
- * Gratuitous replies, zero-source replies are not implemented,
- * that complies with RFC. DO NOT implement them!!! All the idea
- * of broadcast addrmask replies as specified in RFC950 is broken.
- * The problem is that it is not uncommon to have several prefixes
- * on one physical interface. Moreover, addrmask agent can even be
- * not aware of existing another prefixes.
- * If source is zero, addrmask agent cannot choose correct prefix.
- * Gratuitous mask announcements suffer from the same problem.
- * RFC1812 explains it, but still allows to use ADDRMASK,
- * that is pretty silly. --ANK
- *
- * All these rules are so bizarre, that I removed kernel addrmask
- * support at all. It is wrong, it is obsolete, nobody uses it in
- * any case. --ANK
- *
- * Furthermore you can do it with a usermode address agent program
- * anyway...
- */
-
-static void icmp_address(struct sk_buff *skb)
-{
-#if 0
- net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
-#endif
-}
-
-/*
- * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain
- * loudly if an inconsistency is found.
- * called with rcu_read_lock()
- */
-
-static void icmp_address_reply(struct sk_buff *skb)
-{
- struct rtable *rt = skb_rtable(skb);
- struct net_device *dev = skb->dev;
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
-
- if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
- return;
-
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- return;
-
- if (in_dev->ifa_list &&
- IN_DEV_LOG_MARTIANS(in_dev) &&
- IN_DEV_FORWARD(in_dev)) {
- __be32 _mask, *mp;
-
- mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
- BUG_ON(mp == NULL);
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
- if (*mp == ifa->ifa_mask &&
- inet_ifa_match(ip_hdr(skb)->saddr, ifa))
- break;
- }
- if (!ifa)
- net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
- mp,
- dev->name, &ip_hdr(skb)->saddr);
- }
-}
-
static void icmp_discard(struct sk_buff *skb)
{
}
@@ -1111,10 +1000,10 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
- .handler = icmp_address,
+ .handler = icmp_discard,
},
[ICMP_ADDRESSREPLY] = {
- .handler = icmp_address_reply,
+ .handler = icmp_discard,
},
};
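
The icmp.c hunks above drop the cached rt->peer in favour of an on-demand
inet_getpeer_v4()/inet_putpeer() pair around the rate-limit check. The check
itself is a classic token bucket; below is a minimal userspace sketch of that
idea. TOKEN_MAX, the clock handling and all names are illustrative
assumptions, not the kernel's inet_peer_xrlim_allow().

/* Token-bucket sketch: refill credits by elapsed time, spend one per reply. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct peer {
	double tokens;          /* accumulated send credits */
	struct timespec last;   /* last refill time */
};

#define TOKEN_MAX 6.0           /* assumed burst allowance */

static bool xrlim_allow(struct peer *p, double rate_per_sec)
{
	struct timespec now;
	double elapsed;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed = (now.tv_sec - p->last.tv_sec) +
		  (now.tv_nsec - p->last.tv_nsec) / 1e9;
	p->last = now;

	p->tokens += elapsed * rate_per_sec;   /* refill by elapsed time */
	if (p->tokens > TOKEN_MAX)
		p->tokens = TOKEN_MAX;         /* cap the burst */
	if (p->tokens >= 1.0) {
		p->tokens -= 1.0;              /* spend one credit */
		return true;
	}
	return false;
}

int main(void)
{
	struct peer p = { .tokens = TOKEN_MAX };

	clock_gettime(CLOCK_MONOTONIC, &p.last);
	for (int i = 0; i < 10; i++)
		printf("reply %d: %s\n", i,
		       xrlim_allow(&p, 1.0) ? "send" : "rate-limited");
	return 0;
}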
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f9ee7417f6a..db0cf17c00f 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -374,18 +374,19 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options_rcu *opt = inet_rsk(req)->opt;
struct net *net = sock_net(sk);
+ int flags = inet_sk_flowi_flags(sk);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol,
- inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
+ flags,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gateway)
goto route_err;
return &rt->dst;
@@ -418,7 +419,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gateway)
goto route_err;
return &rt->dst;
@@ -799,3 +800,49 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
+
+static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct ip_options_rcu *inet_opt;
+ __be32 daddr = inet->inet_daddr;
+ struct flowi4 *fl4;
+ struct rtable *rt;
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+ fl4 = &fl->u.ip4;
+ rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
+ inet->inet_saddr, inet->inet_dport,
+ inet->inet_sport, sk->sk_protocol,
+ RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ rt = NULL;
+ if (rt)
+ sk_setup_caps(sk, &rt->dst);
+ rcu_read_unlock();
+
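+	/* struct rtable embeds its dst_entry first, so &rt->dst is NULL
+	 * when rt is NULL and callers can test the return directly.
+	 */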
+ return &rt->dst;
+}
+
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (!dst) {
+ dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+ if (!dst)
+ goto out;
+ }
+ dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+ dst = __sk_dst_check(sk, 0);
+ if (!dst)
+ dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+out:
+ return dst;
+}
+EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
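
inet_csk_update_pmtu() follows a check/rebuild/update/re-check shape: validate
the cached dst, rebuild the route if it is gone, apply the PMTU update, then
validate again because the update itself may have obsoleted the entry. A rough
userspace sketch of that shape; the cache and its generation cookie are
invented stand-ins for the socket dst cache and __sk_dst_check().

#include <stdio.h>

struct entry { unsigned gen; unsigned mtu; };

static unsigned global_gen = 1;
static struct entry cache = { .gen = 1, .mtu = 1500 };

static struct entry *check(void)
{
	return cache.gen == global_gen ? &cache : NULL;  /* stale -> NULL */
}

static struct entry *rebuild(void)
{
	cache.gen = global_gen;      /* pretend we re-looked-up the route */
	return &cache;
}

static struct entry *update_pmtu(unsigned mtu)
{
	struct entry *e = check();

	if (!e)
		e = rebuild();
	e->mtu = mtu;
	global_gen++;                /* the update may obsolete the entry */

	e = check();                 /* re-validate after the update */
	if (!e)
		e = rebuild();
	return e;
}

int main(void)
{
	printf("mtu now %u\n", update_pmtu(1400)->mtu);
	return 0;
}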
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 46d1e7199a8..570e61f9611 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -46,9 +46,6 @@ struct inet_diag_entry {
u16 userlocks;
};
-#define INET_DIAG_PUT(skb, attrtype, attrlen) \
- RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
static DEFINE_MUTEX(inet_diag_table_mutex);
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
@@ -78,24 +75,22 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
const struct inet_sock *inet = inet_sk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
+ struct nlattr *attr;
void *info = NULL;
- struct inet_diag_meminfo *minfo = NULL;
- unsigned char *b = skb_tail_pointer(skb);
const struct inet_diag_handler *handler;
int ext = req->idiag_ext;
handler = inet_diag_table[req->sdiag_protocol];
BUG_ON(handler == NULL);
- nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
- nlh->nlmsg_flags = nlmsg_flags;
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
- r = NLMSG_DATA(nlh);
+ r = nlmsg_data(nlh);
BUG_ON(sk->sk_state == TCP_TIME_WAIT);
- if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
- minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
-
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -113,7 +108,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
* hence this needs to be included regardless of socket family.
*/
if (ext & (1 << (INET_DIAG_TOS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+ if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
+ goto errout;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
@@ -121,24 +117,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
*(struct in6_addr *)r->id.idiag_dst = np->daddr;
+
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+ if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+ goto errout;
}
#endif
r->idiag_uid = sock_i_uid(sk);
r->idiag_inode = sock_i_ino(sk);
- if (minfo) {
- minfo->idiag_rmem = sk_rmem_alloc_get(sk);
- minfo->idiag_wmem = sk->sk_wmem_queued;
- minfo->idiag_fmem = sk->sk_forward_alloc;
- minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+ if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
+ struct inet_diag_meminfo minfo = {
+ .idiag_rmem = sk_rmem_alloc_get(sk),
+ .idiag_wmem = sk->sk_wmem_queued,
+ .idiag_fmem = sk->sk_forward_alloc,
+ .idiag_tmem = sk_wmem_alloc_get(sk),
+ };
+
+ if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
+ goto errout;
}
if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
- goto rtattr_failure;
+ goto errout;
if (icsk == NULL) {
handler->idiag_get_info(sk, r, NULL);
@@ -165,16 +168,20 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
}
#undef EXPIRES_IN_MS
- if (ext & (1 << (INET_DIAG_INFO - 1)))
- info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
-
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
- const size_t len = strlen(icsk->icsk_ca_ops->name);
+ if (ext & (1 << (INET_DIAG_INFO - 1))) {
+ attr = nla_reserve(skb, INET_DIAG_INFO,
+ sizeof(struct tcp_info));
+ if (!attr)
+ goto errout;
- strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
- icsk->icsk_ca_ops->name);
+ info = nla_data(attr);
}
+ if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
+ if (nla_put_string(skb, INET_DIAG_CONG,
+ icsk->icsk_ca_ops->name) < 0)
+ goto errout;
+
handler->idiag_get_info(sk, r, info);
if (sk->sk_state < TCP_TIME_WAIT &&
@@ -182,12 +189,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk->icsk_ca_ops->get_info(sk, ext, skb);
out:
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ return nlmsg_end(skb, nlh);
-rtattr_failure:
-nlmsg_failure:
- nlmsg_trim(skb, b);
+errout:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
@@ -208,14 +213,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
{
long tmo;
struct inet_diag_msg *r;
- const unsigned char *previous_tail = skb_tail_pointer(skb);
- struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
- unlh->nlmsg_type, sizeof(*r));
+ struct nlmsghdr *nlh;
- r = NLMSG_DATA(nlh);
- BUG_ON(tw->tw_state != TCP_TIME_WAIT);
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
- nlh->nlmsg_flags = nlmsg_flags;
+ r = nlmsg_data(nlh);
+ BUG_ON(tw->tw_state != TCP_TIME_WAIT);
tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
@@ -245,11 +251,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
}
#endif
- nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
- return skb->len;
-nlmsg_failure:
- nlmsg_trim(skb, previous_tail);
- return -EMSGSIZE;
+
+ return nlmsg_end(skb, nlh);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -269,16 +272,17 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
int err;
struct sock *sk;
struct sk_buff *rep;
+ struct net *net = sock_net(in_skb->sk);
err = -EINVAL;
if (req->sdiag_family == AF_INET) {
- sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
+ sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6) {
- sk = inet6_lookup(&init_net, hashinfo,
+ sk = inet6_lookup(net, hashinfo,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
(struct in6_addr *)req->id.idiag_src,
@@ -298,23 +302,23 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
if (err)
goto out;
- err = -ENOMEM;
- rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
- sizeof(struct inet_diag_meminfo) +
- sizeof(struct tcp_info) + 64)),
- GFP_KERNEL);
- if (!rep)
+ rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+ sizeof(struct inet_diag_meminfo) +
+ sizeof(struct tcp_info) + 64, GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
goto out;
+ }
err = sk_diag_fill(sk, rep, req,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
- kfree_skb(rep);
+ nlmsg_free(rep);
goto out;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -592,15 +596,16 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *inet = inet_sk(sk);
- unsigned char *b = skb_tail_pointer(skb);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
- nlh->nlmsg_flags = NLM_F_MULTI;
- r = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+ r = nlmsg_data(nlh);
r->idiag_family = sk->sk_family;
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
@@ -628,13 +633,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
}
#endif
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-
- return skb->len;
-nlmsg_failure:
- nlmsg_trim(skb, b);
- return -1;
+ return nlmsg_end(skb, nlh);
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
@@ -725,6 +725,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
{
int i, num;
int s_i, s_num;
+ struct net *net = sock_net(skb->sk);
s_i = cb->args[1];
s_num = num = cb->args[2];
@@ -744,6 +745,9 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
sk_nulls_for_each(sk, node, &ilb->head) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
if (num < s_num) {
num++;
continue;
@@ -814,6 +818,8 @@ skip_listen_ht:
sk_nulls_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
@@ -840,6 +846,8 @@ next_normal:
inet_twsk_for_each(tw, node,
&head->twchain) {
+ if (!net_eq(twsk_net(tw), net))
+ continue;
if (num < s_num)
goto next_dying;
@@ -892,7 +900,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_attrlen(cb->nlh, hdrlen))
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
- return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
+ return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}
static inline int inet_diag_type2proto(int type)
@@ -909,7 +917,7 @@ static inline int inet_diag_type2proto(int type)
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
+ struct inet_diag_req *rc = nlmsg_data(cb->nlh);
struct inet_diag_req_v2 req;
struct nlattr *bc = NULL;
int hdrlen = sizeof(struct inet_diag_req);
@@ -929,7 +937,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
- struct inet_diag_req *rc = NLMSG_DATA(nlh);
+ struct inet_diag_req *rc = nlmsg_data(nlh);
struct inet_diag_req_v2 req;
req.sdiag_family = rc->idiag_family;
@@ -944,6 +952,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int hdrlen = sizeof(struct inet_diag_req);
+ struct net *net = sock_net(skb->sk);
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
@@ -964,7 +973,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
struct netlink_dump_control c = {
.dump = inet_diag_dump_compat,
};
- return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
}
}
@@ -974,6 +983,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct inet_diag_req_v2);
+ struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -992,11 +1002,11 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
struct netlink_dump_control c = {
.dump = inet_diag_dump,
};
- return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
}
}
- return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
+ return inet_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler inet_diag_handler = {
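
The inet_diag.c conversion replaces the open-coded NLMSG_PUT/RTA_PUT macros,
which jumped to hidden labels on overflow, with the nlmsg_put()/nla_put()
family, where every append is bounds-checked and a failed message is rolled
back with nlmsg_cancel(). A toy sketch of that append-or-cancel pattern; the
fixed-size buffer below is invented, not a real sk_buff or the libnl API.

#include <stdio.h>
#include <string.h>

struct msgbuf {
	char data[64];
	size_t len;
};

/* append returns 0 on success, -1 if the attribute would not fit */
static int put_attr(struct msgbuf *m, const void *p, size_t n)
{
	if (m->len + n > sizeof(m->data))
		return -1;
	memcpy(m->data + m->len, p, n);
	m->len += n;
	return 0;
}

int main(void)
{
	struct msgbuf m = { .len = 0 };
	size_t mark = m.len;          /* nlmsg_put: remember the start */
	int tos = 0x10;
	char name[48] = "cubic";

	if (put_attr(&m, &tos, sizeof(tos)) < 0 ||
	    put_attr(&m, name, sizeof(name)) < 0 ||
	    put_attr(&m, name, sizeof(name)) < 0) {
		m.len = mark;         /* nlmsg_cancel: roll back the message */
		puts("message cancelled: -EMSGSIZE");
		return 1;
	}
	printf("message complete, %zu bytes\n", m.len);
	return 0;
}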
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5ff2a51b6d0..85190e69297 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -243,12 +243,12 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
if (q == NULL)
return NULL;
+ q->net = nf;
f->constructor(q, arg);
atomic_add(f->qsize, &nf->mem);
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
atomic_set(&q->refcnt, 1);
- q->net = nf;
return q;
}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b250..e1e0a4e8fd3 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -82,23 +82,39 @@ static const struct inet_peer peer_fake_node = {
.avl_height = 0
};
-struct inet_peer_base {
- struct inet_peer __rcu *root;
- seqlock_t lock;
- int total;
-};
+void inet_peer_base_init(struct inet_peer_base *bp)
+{
+ bp->root = peer_avl_empty_rcu;
+ seqlock_init(&bp->lock);
+ bp->flush_seq = ~0U;
+ bp->total = 0;
+}
+EXPORT_SYMBOL_GPL(inet_peer_base_init);
-static struct inet_peer_base v4_peers = {
- .root = peer_avl_empty_rcu,
- .lock = __SEQLOCK_UNLOCKED(v4_peers.lock),
- .total = 0,
-};
+static atomic_t v4_seq = ATOMIC_INIT(0);
+static atomic_t v6_seq = ATOMIC_INIT(0);
-static struct inet_peer_base v6_peers = {
- .root = peer_avl_empty_rcu,
- .lock = __SEQLOCK_UNLOCKED(v6_peers.lock),
- .total = 0,
-};
+static atomic_t *inetpeer_seq_ptr(int family)
+{
+ return (family == AF_INET ? &v4_seq : &v6_seq);
+}
+
+static inline void flush_check(struct inet_peer_base *base, int family)
+{
+ atomic_t *fp = inetpeer_seq_ptr(family);
+
+ if (unlikely(base->flush_seq != atomic_read(fp))) {
+ inetpeer_invalidate_tree(base);
+ base->flush_seq = atomic_read(fp);
+ }
+}
+
+void inetpeer_invalidate_family(int family)
+{
+ atomic_t *fp = inetpeer_seq_ptr(family);
+
+ atomic_inc(fp);
+}
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
@@ -110,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
static void inetpeer_gc_worker(struct work_struct *work)
{
- struct inet_peer *p, *n;
+ struct inet_peer *p, *n, *c;
LIST_HEAD(list);
spin_lock_bh(&gc_lock);
@@ -122,17 +138,19 @@ static void inetpeer_gc_worker(struct work_struct *work)
list_for_each_entry_safe(p, n, &list, gc_list) {
- if(need_resched())
+ if (need_resched())
cond_resched();
- if (p->avl_left != peer_avl_empty) {
- list_add_tail(&p->avl_left->gc_list, &list);
- p->avl_left = peer_avl_empty;
+ c = rcu_dereference_protected(p->avl_left, 1);
+ if (c != peer_avl_empty) {
+ list_add_tail(&c->gc_list, &list);
+ p->avl_left = peer_avl_empty_rcu;
}
- if (p->avl_right != peer_avl_empty) {
- list_add_tail(&p->avl_right->gc_list, &list);
- p->avl_right = peer_avl_empty;
+ c = rcu_dereference_protected(p->avl_right, 1);
+ if (c != peer_avl_empty) {
+ list_add_tail(&c->gc_list, &list);
+ p->avl_right = peer_avl_empty_rcu;
}
n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
@@ -401,11 +419,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
call_rcu(&p->rcu, inetpeer_free_rcu);
}
-static struct inet_peer_base *family_to_base(int family)
-{
- return family == AF_INET ? &v4_peers : &v6_peers;
-}
-
/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
struct inet_peer __rcu **stack[PEER_MAXDEPTH],
@@ -443,14 +456,17 @@ static int inet_peer_gc(struct inet_peer_base *base,
return cnt;
}
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ const struct inetpeer_addr *daddr,
+ int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
- struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
unsigned int sequence;
int invalidated, gccnt = 0;
+ flush_check(base, daddr->family);
+
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
@@ -492,13 +508,9 @@ relookup:
(daddr->family == AF_INET) ?
secure_ip_id(daddr->addr.a4) :
secure_ipv6_id(daddr->addr.a6));
- p->tcp_ts_stamp = 0;
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
p->rate_last = 0;
- p->pmtu_expires = 0;
- p->pmtu_orig = 0;
- memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
@@ -571,26 +583,19 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
schedule_delayed_work(&gc_work, gc_delay);
}
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
- struct inet_peer *old, *new, *prev;
- struct inet_peer_base *base = family_to_base(family);
+ struct inet_peer *root;
write_seqlock_bh(&base->lock);
- old = base->root;
- if (old == peer_avl_empty_rcu)
- goto out;
-
- new = peer_avl_empty_rcu;
-
- prev = cmpxchg(&base->root, old, new);
- if (prev == old) {
+ root = rcu_deref_locked(base->root, base);
+ if (root != peer_avl_empty) {
+ base->root = peer_avl_empty_rcu;
base->total = 0;
- call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
+ call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
}
-out:
write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
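
The inetpeer.c changes make the peer trees per inet_peer_base and add lazy,
generation-based flushing: inetpeer_invalidate_family() only bumps an atomic
sequence number, and each base flushes itself on its next lookup when
flush_check() sees a stale cached sequence. A compilable sketch of that
scheme, with the AVL tree reduced to a counter:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint family_seq;   /* v4_seq/v6_seq analogue */

struct peer_base {
	unsigned flush_seq;      /* last sequence this base saw */
	int total;               /* stand-in for the AVL tree */
};

static void invalidate_family(void)
{
	atomic_fetch_add(&family_seq, 1);      /* cheap, lock-free */
}

static void flush_check(struct peer_base *b)
{
	unsigned cur = atomic_load(&family_seq);

	if (b->flush_seq != cur) {             /* stale: flush lazily */
		b->total = 0;
		b->flush_seq = cur;
	}
}

int main(void)
{
	struct peer_base base = { .flush_seq = 0, .total = 42 };

	invalidate_family();   /* marks every base stale */
	flush_check(&base);    /* next lookup performs the flush */
	printf("total after lazy flush: %d\n", base.total);
	return 0;
}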
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9dbd3dd6022..8d07c973409 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -171,6 +171,10 @@ static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
struct ipq *qp = container_of(q, struct ipq, q);
+ struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
+ frags);
+ struct net *net = container_of(ipv4, struct net, ipv4);
+
struct ip4_create_arg *arg = a;
qp->protocol = arg->iph->protocol;
@@ -180,7 +184,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
qp->daddr = arg->iph->daddr;
qp->user = arg->user;
qp->peer = sysctl_ipfrag_max_dist ?
- inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
+ inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
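
ip4_frag_init() now climbs from the embedded netns_frags back up to the
owning struct net with two container_of() steps. The same pointer arithmetic
in a self-contained userspace sketch, with structures invented for
illustration:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct frags   { int mem; };
struct ns_ipv4 { struct frags frags; };
struct net     { int id; struct ns_ipv4 ipv4; };

int main(void)
{
	struct net n = { .id = 7 };
	struct frags *f = &n.ipv4.frags;       /* all a queue stores */

	struct ns_ipv4 *v4 = container_of(f, struct ns_ipv4, frags);
	struct net *net = container_of(v4, struct net, ipv4);

	printf("recovered net id: %d\n", net->id);
	return 0;
}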
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f49047b7960..b062a98574f 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -516,9 +516,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -531,6 +528,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return;
break;
+
+ case ICMP_REDIRECT:
+ break;
}
rcu_read_lock();
@@ -538,7 +538,20 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
p[1]);
- if (t == NULL || t->parms.iph.daddr == 0 ||
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->parms.link, 0, IPPROTO_GRE, 0);
+ goto out;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+ IPPROTO_GRE, 0);
+ goto out;
+ }
+ if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
goto out;
@@ -753,7 +766,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb->protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
- dst = rt->rt_gateway;
+ dst = rt_nexthop(rt, old_iph->daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -820,7 +833,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
df |= (old_iph->frag_off&htons(IP_DF));
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 8590144ca33..f1395a6fb35 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -198,14 +198,13 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
rcu_read_lock();
{
int protocol = ip_hdr(skb)->protocol;
- int hash, raw;
const struct net_protocol *ipprot;
+ int raw;
resubmit:
raw = raw_local_deliver(skb, protocol);
- hash = protocol & (MAX_INET_PROTOS - 1);
- ipprot = rcu_dereference(inet_protos[hash]);
+ ipprot = rcu_dereference(inet_protos[protocol]);
if (ipprot != NULL) {
int ret;
@@ -314,26 +313,35 @@ drop:
return true;
}
+int sysctl_ip_early_demux __read_mostly = 1;
+EXPORT_SYMBOL(sysctl_ip_early_demux);
+
static int ip_rcv_finish(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
+ if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ const struct net_protocol *ipprot;
+ int protocol = iph->protocol;
+
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot && ipprot->early_demux) {
+ ipprot->early_demux(skb);
+ /* must reload iph, skb->head might have changed */
+ iph = ip_hdr(skb);
+ }
+ }
+
/*
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
- if (skb_dst(skb) == NULL) {
+ if (!skb_dst(skb)) {
int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
iph->tos, skb->dev);
if (unlikely(err)) {
- if (err == -EHOSTUNREACH)
- IP_INC_STATS_BH(dev_net(skb->dev),
- IPSTATS_MIB_INADDRERRORS);
- else if (err == -ENETUNREACH)
- IP_INC_STATS_BH(dev_net(skb->dev),
- IPSTATS_MIB_INNOROUTES);
- else if (err == -EXDEV)
+ if (err == -EXDEV)
NET_INC_STATS_BH(dev_net(skb->dev),
LINUX_MIB_IPRPFILTER);
goto drop;
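
The ip_rcv_finish() hunk adds an optional early-demux hook, consulted through
the inet_protos[] table, which this series also re-indexes directly by
protocol number instead of by a hash. A minimal sketch of that dispatch shape;
the table contents and the hook body are invented:

#include <stdio.h>

struct net_protocol {
	void (*early_demux)(const char *pkt);  /* optional hook */
};

static void tcp_early_demux(const char *pkt)
{
	printf("early demux: %s\n", pkt);      /* e.g. socket lookup */
}

static const struct net_protocol *inet_protos[256] = {
	/* 6 == IPPROTO_TCP; array indexed directly by protocol number */
	[6] = &(const struct net_protocol){ .early_demux = tcp_early_demux },
};

int main(void)
{
	int protocol = 6;
	const struct net_protocol *p = inet_protos[protocol];

	if (p && p->early_demux)               /* hook is optional */
		p->early_demux("tcp segment");
	return 0;
}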
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 708b99494e2..1dc01f9793d 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -27,6 +27,7 @@
#include <net/icmp.h>
#include <net/route.h>
#include <net/cipso_ipv4.h>
+#include <net/ip_fib.h>
/*
* Write options to IP header, record destination address to
@@ -92,7 +93,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
- __be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
@@ -104,8 +104,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
sptr = skb_network_header(skb);
dptr = dopt->__data;
- daddr = skb_rtable(skb)->rt_spec_dst;
-
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
@@ -179,6 +177,8 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
doffset -= 4;
}
if (doffset > 3) {
+ __be32 daddr = fib_compute_spec_dst(skb);
+
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
@@ -241,6 +241,15 @@ void ip_options_fragment(struct sk_buff *skb)
opt->ts_needtime = 0;
}
+/* helper used by ip_options_compile() to call fib_compute_spec_dst()
+ * at most once.
+ */
+static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
+{
+ if (*spec_dst == htonl(INADDR_ANY))
+ *spec_dst = fib_compute_spec_dst(skb);
+}
+
/*
* Verify options and fill pointers in struct options.
* Caller should clear *opt, and set opt->data.
@@ -250,12 +259,12 @@ void ip_options_fragment(struct sk_buff *skb)
int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb)
{
- int l;
- unsigned char *iph;
- unsigned char *optptr;
- int optlen;
+ __be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
struct rtable *rt = NULL;
+ unsigned char *optptr;
+ unsigned char *iph;
+ int optlen, l;
if (skb != NULL) {
rt = skb_rtable(skb);
@@ -331,7 +340,8 @@ int ip_options_compile(struct net *net,
goto error;
}
if (rt) {
- memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ spec_dst_fill(&spec_dst, skb);
+ memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
@@ -373,7 +383,8 @@ int ip_options_compile(struct net *net,
}
opt->ts = optptr - iph;
if (rt) {
- memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ spec_dst_fill(&spec_dst, skb);
+ memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
timeptr = &optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
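
spec_dst_fill() is a small memoization helper: the first option that needs
the specific destination pays for fib_compute_spec_dst(), later ones reuse
the cached value. The same shape in a standalone sketch, with
expensive_lookup() as an invented stand-in:

#include <stdio.h>

#define ANY 0u

static unsigned expensive_lookup(void)
{
	puts("fib_compute_spec_dst() analogue runs");
	return 0xc0a80001u;      /* illustrative address */
}

static void spec_dst_fill(unsigned *spec_dst)
{
	if (*spec_dst == ANY)    /* only the first caller pays */
		*spec_dst = expensive_lookup();
}

int main(void)
{
	unsigned spec_dst = ANY;

	spec_dst_fill(&spec_dst);  /* computes */
	spec_dst_fill(&spec_dst);  /* reuses the cached value */
	printf("spec_dst = %#x\n", spec_dst);
	return 0;
}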
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 451f97c42eb..ba39a52d18c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -113,19 +113,6 @@ int ip_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip_local_out);
-/* dev_loopback_xmit for use with netfilter. */
-static int ip_dev_loopback_xmit(struct sk_buff *newskb)
-{
- skb_reset_mac_header(newskb);
- __skb_pull(newskb, skb_network_offset(newskb));
- newskb->pkt_type = PACKET_LOOPBACK;
- newskb->ip_summed = CHECKSUM_UNNECESSARY;
- WARN_ON(!skb_dst(newskb));
- skb_dst_force(newskb);
- netif_rx_ni(newskb);
- return 0;
-}
-
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
int ttl = inet->uc_ttl;
@@ -183,6 +170,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
+ u32 nexthop;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -200,19 +188,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
}
- rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ rcu_read_lock_bh();
+ nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
+ neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
if (neigh) {
- int res = neigh_output(neigh, skb);
+ int res = dst_neigh_output(dst, neigh, skb);
- rcu_read_unlock();
+ rcu_read_unlock_bh();
return res;
}
- rcu_read_unlock();
+ rcu_read_unlock_bh();
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
@@ -281,7 +272,7 @@ int ip_mc_output(struct sk_buff *skb)
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
- ip_dev_loopback_xmit);
+ dev_loopback_xmit);
}
/* Multicasts with ttl 0 must not go beyond the host */
@@ -296,7 +287,7 @@ int ip_mc_output(struct sk_buff *skb)
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev, ip_dev_loopback_xmit);
+ NULL, newskb->dev, dev_loopback_xmit);
}
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
@@ -380,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
- if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
@@ -709,7 +700,7 @@ slow_path:
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
- kfree_skb(skb);
+ consume_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
@@ -1472,19 +1463,34 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
/*
* Generic function to send a packet as reply to another packet.
- * Used to send TCP resets so far. ICMP should use this function too.
+ * Used to send some TCP resets/acks so far.
*
- * Should run single threaded per socket because it uses the sock
- * structure to pass arguments.
+ * Use a fake percpu inet socket to avoid false sharing and contention.
*/
-void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- const struct ip_reply_arg *arg, unsigned int len)
+static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+ .sk = {
+ .__sk_common = {
+ .skc_refcnt = ATOMIC_INIT(1),
+ },
+ .sk_wmem_alloc = ATOMIC_INIT(1),
+ .sk_allocation = GFP_ATOMIC,
+ .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
+ },
+ .pmtudisc = IP_PMTUDISC_WANT,
+ .uc_ttl = -1,
+};
+
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len)
{
- struct inet_sock *inet = inet_sk(sk);
struct ip_options_data replyopts;
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
+ struct sk_buff *nskb;
+ struct sock *sk;
+ struct inet_sock *inet;
if (ip_options_echo(&replyopts.opt.opt, skb))
return;
@@ -1502,38 +1508,39 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(arg->tos),
- RT_SCOPE_UNIVERSE, sk->sk_protocol,
+ RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
- daddr, rt->rt_spec_dst,
+ daddr, saddr,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
- rt = ip_route_output_key(sock_net(sk), &fl4);
+ rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return;
- /* And let IP do all the hard work.
+ inet = &get_cpu_var(unicast_sock);
- This chunk is not reenterable, hence spinlock.
- Note that it uses the fact, that this function is called
- with locally disabled BH and that sk cannot be already spinlocked.
- */
- bh_lock_sock(sk);
inet->tos = arg->tos;
+ sk = &inet->sk;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
+ sock_net_set(sk, net);
+ __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
+ nskb = skb_peek(&sk->sk_write_queue);
+ if (nskb) {
if (arg->csumoffset >= 0)
- *((__sum16 *)skb_transport_header(skb) +
- arg->csumoffset) = csum_fold(csum_add(skb->csum,
+ *((__sum16 *)skb_transport_header(nskb) +
+ arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
- skb->ip_summed = CHECKSUM_NONE;
+ nskb->ip_summed = CHECKSUM_NONE;
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
- bh_unlock_sock(sk);
+ put_cpu_var(unicast_sock);
ip_rt_put(rt);
}
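
ip_send_unicast_reply() replaces the caller's locked socket with a per-cpu
scratch inet socket, so concurrent reply paths neither lock nor share cache
lines. A userspace approximation using a per-thread object in place of
get_cpu_var(); purely illustrative, not the kernel mechanism:

#include <stdio.h>
#include <pthread.h>

struct scratch_sock { int tos; int priority; };

/* one instance per thread: no lock, no false sharing between threads */
static _Thread_local struct scratch_sock unicast_sock;

static void send_reply(int tos, int prio)
{
	struct scratch_sock *sk = &unicast_sock;

	sk->tos = tos;           /* safe: nobody else can touch this copy */
	sk->priority = prio;
	printf("reply sent with tos=%d prio=%d\n", sk->tos, sk->priority);
}

static void *worker(void *arg)
{
	(void)arg;
	send_reply(0x10, 6);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}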
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 0d11f234d61..5eea4a81104 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -40,6 +40,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
+#include <net/ip_fib.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
@@ -1019,18 +1020,17 @@ e_inval:
* @sk: socket
* @skb: buffer
*
- * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
- * in skb->cb[] before dst drop.
+ * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
+ * destination in skb->cb[] before dst drop.
 * This way, the receiver doesn't take cache-line misses to read the rtable.
*/
void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
- const struct rtable *rt = skb_rtable(skb);
- if (rt) {
- pktinfo->ipi_ifindex = rt->rt_iif;
- pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+ if (skb_rtable(skb)) {
+ pktinfo->ipi_ifindex = inet_iif(skb);
+ pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else {
pktinfo->ipi_ifindex = 0;
pktinfo->ipi_spec_dst.s_addr = 0;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
new file mode 100644
index 00000000000..3511ffba7bd
--- /dev/null
+++ b/net/ipv4/ip_vti.c
@@ -0,0 +1,956 @@
+/*
+ * Linux NET3: IP/IP protocol decoder modified to support
+ * virtual tunnel interface
+ *
+ * Authors:
+ * Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/*
+ This version of net/ipv4/ip_vti.c is a clone of net/ipv4/ipip.c
+
+ For comments look at net/ipv4/ip_gre.c --ANK
+ */
+
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/if_ether.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/ipip.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE 16
+#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
+
+static struct rtnl_link_ops vti_link_ops __read_mostly;
+
+static int vti_net_id __read_mostly;
+struct vti_net {
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
+
+ struct net_device *fb_tunnel_dev;
+};
+
+static int vti_fb_tunnel_init(struct net_device *dev);
+static int vti_tunnel_init(struct net_device *dev);
+static void vti_tunnel_setup(struct net_device *dev);
+static void vti_dev_free(struct net_device *dev);
+static int vti_tunnel_bind_dev(struct net_device *dev);
+
+/* Locking : hash tables are protected by RCU and RTNL */
+
+#define for_each_ip_tunnel_rcu(start) \
+ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+#define VTI_XMIT(stats1, stats2) do { \
+ int err; \
+ int pkt_len = skb->len; \
+ err = dst_output(skb); \
+ if (net_xmit_eval(err) == 0) { \
+ u64_stats_update_begin(&(stats1)->syncp); \
+ (stats1)->tx_bytes += pkt_len; \
+ (stats1)->tx_packets++; \
+ u64_stats_update_end(&(stats1)->syncp); \
+ } else { \
+ (stats2)->tx_errors++; \
+ (stats2)->tx_aborted_errors++; \
+ } \
+} while (0)
+
+
+static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+
+ tot->multicast = dev->stats.multicast;
+ tot->rx_crc_errors = dev->stats.rx_crc_errors;
+ tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ return tot;
+}
+
+static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
+ __be32 remote, __be32 local)
+{
+ unsigned h0 = HASH(remote);
+ unsigned h1 = HASH(local);
+ struct ip_tunnel *t;
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
+ if (local == t->parms.iph.saddr &&
+ remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
+ return t;
+ for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
+ if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
+ return t;
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
+ if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
+ return t;
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_wc[0])
+ if (t && (t->dev->flags&IFF_UP))
+ return t;
+ return NULL;
+}
+
+static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
+ struct ip_tunnel_parm *parms)
+{
+ __be32 remote = parms->iph.daddr;
+ __be32 local = parms->iph.saddr;
+ unsigned h = 0;
+ int prio = 0;
+
+ if (remote) {
+ prio |= 2;
+ h ^= HASH(remote);
+ }
+ if (local) {
+ prio |= 1;
+ h ^= HASH(local);
+ }
+ return &ipn->tunnels[prio][h];
+}
+
+static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
+ struct ip_tunnel *t)
+{
+ return __vti_bucket(ipn, &t->parms);
+}
+
+static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
+{
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = vti_bucket(ipn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+}
+
+static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
+{
+ struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
+
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+ rcu_assign_pointer(*tp, t);
+}
+
+static struct ip_tunnel *vti_tunnel_locate(struct net *net,
+ struct ip_tunnel_parm *parms,
+ int create)
+{
+ __be32 remote = parms->iph.daddr;
+ __be32 local = parms->iph.saddr;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ for (tp = __vti_bucket(ipn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
+ if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
+ return t;
+ }
+ if (!create)
+ return NULL;
+
+ if (parms->name[0])
+ strlcpy(name, parms->name, IFNAMSIZ);
+ else
+ strcpy(name, "vti%d");
+
+ dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
+ if (dev == NULL)
+ return NULL;
+
+ dev_net_set(dev, net);
+
+ nt = netdev_priv(dev);
+ nt->parms = *parms;
+ dev->rtnl_link_ops = &vti_link_ops;
+
+ vti_tunnel_bind_dev(dev);
+
+ if (register_netdevice(dev) < 0)
+ goto failed_free;
+
+ dev_hold(dev);
+ vti_tunnel_link(ipn, nt);
+ return nt;
+
+failed_free:
+ free_netdev(dev);
+ return NULL;
+}
+
+static void vti_tunnel_uninit(struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ vti_tunnel_unlink(ipn, netdev_priv(dev));
+ dev_put(dev);
+}
+
+static int vti_err(struct sk_buff *skb, u32 info)
+{
+
+ /* All the routers (except for Linux) return only
+	 * 8 bytes of packet payload. It means that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ */
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
+ struct ip_tunnel *t;
+ int err;
+
+ switch (type) {
+ default:
+ case ICMP_PARAMETERPROB:
+ return 0;
+
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_SR_FAILED:
+ case ICMP_PORT_UNREACH:
+ /* Impossible event. */
+ return 0;
+ default:
+ /* All others are translated to HOST_UNREACH. */
+ break;
+ }
+ break;
+ case ICMP_TIME_EXCEEDED:
+ if (code != ICMP_EXC_TTL)
+ return 0;
+ break;
+ }
+
+ err = -ENOENT;
+
+ rcu_read_lock();
+ t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->parms.link, 0, IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ err = 0;
+ if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
+ goto out;
+
+ if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
+ t->err_count++;
+ else
+ t->err_count = 1;
+ t->err_time = jiffies;
+out:
+ rcu_read_unlock();
+ return err;
+}
+
+/* We don't digest the packet, therefore let the packet pass */
+static int vti_rcv(struct sk_buff *skb)
+{
+ struct ip_tunnel *tunnel;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ rcu_read_lock();
+ tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
+
+ skb->dev = tunnel->dev;
+ rcu_read_unlock();
+ return 1;
+ }
+ rcu_read_unlock();
+
+ return -1;
+}
+
+/* This function assumes it is being called from dev_queue_xmit()
+ * and that skb is filled properly by that function.
+ */
+
+static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct pcpu_tstats *tstats;
+ struct iphdr *tiph = &tunnel->parms.iph;
+ u8 tos;
+ struct rtable *rt; /* Route to the other host */
+ struct net_device *tdev; /* Device to other host */
+ struct iphdr *old_iph = ip_hdr(skb);
+ __be32 dst = tiph->daddr;
+ struct flowi4 fl4;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto tx_error;
+
+ tos = old_iph->tos;
+
+ memset(&fl4, 0, sizeof(fl4));
+ flowi4_init_output(&fl4, tunnel->parms.link,
+ htonl(tunnel->parms.i_key), RT_TOS(tos),
+ RT_SCOPE_UNIVERSE,
+ IPPROTO_IPIP, 0,
+ dst, tiph->saddr, 0, 0);
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+	/* If there is no transform, or the xfrm is not in tunnel mode,
+	 * this tunnel is not functional.
+	 */
+ if (!rt->dst.xfrm ||
+ rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ tdev = rt->dst.dev;
+
+ if (tdev == dev) {
+ ip_rt_put(rt);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
+
+ if (tunnel->err_count > 0) {
+ if (time_before(jiffies,
+ tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
+ tunnel->err_count--;
+ dst_link_failure(skb);
+ } else
+ tunnel->err_count = 0;
+ }
+
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ nf_reset(skb);
+ skb->dev = skb_dst(skb)->dev;
+
+ tstats = this_cpu_ptr(dev->tstats);
+ VTI_XMIT(tstats, &dev->stats);
+ return NETDEV_TX_OK;
+
+tx_error_icmp:
+ dst_link_failure(skb);
+tx_error:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int vti_tunnel_bind_dev(struct net_device *dev)
+{
+ struct net_device *tdev = NULL;
+ struct ip_tunnel *tunnel;
+ struct iphdr *iph;
+
+ tunnel = netdev_priv(dev);
+ iph = &tunnel->parms.iph;
+
+ if (iph->daddr) {
+ struct rtable *rt;
+ struct flowi4 fl4;
+ memset(&fl4, 0, sizeof(fl4));
+ flowi4_init_output(&fl4, tunnel->parms.link,
+ htonl(tunnel->parms.i_key),
+ RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
+ IPPROTO_IPIP, 0,
+ iph->daddr, iph->saddr, 0, 0);
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (!IS_ERR(rt)) {
+ tdev = rt->dst.dev;
+ ip_rt_put(rt);
+ }
+ dev->flags |= IFF_POINTOPOINT;
+ }
+
+ if (!tdev && tunnel->parms.link)
+ tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+
+ if (tdev) {
+ dev->hard_header_len = tdev->hard_header_len +
+ sizeof(struct iphdr);
+ dev->mtu = tdev->mtu;
+ }
+ dev->iflink = tunnel->parms.link;
+ return dev->mtu;
+}
+
+static int
+vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int err = 0;
+ struct ip_tunnel_parm p;
+ struct ip_tunnel *t;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ switch (cmd) {
+ case SIOCGETTUNNEL:
+ t = NULL;
+ if (dev == ipn->fb_tunnel_dev) {
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
+ sizeof(p))) {
+ err = -EFAULT;
+ break;
+ }
+ t = vti_tunnel_locate(net, &p, 0);
+ }
+ if (t == NULL)
+ t = netdev_priv(dev);
+ memcpy(&p, &t->parms, sizeof(p));
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ err = -EFAULT;
+ break;
+
+ case SIOCADDTUNNEL:
+ case SIOCCHGTUNNEL:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ goto done;
+
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ goto done;
+
+ err = -EINVAL;
+ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
+ p.iph.ihl != 5)
+ goto done;
+
+ t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
+
+ if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else {
+ if (((dev->flags&IFF_POINTOPOINT) &&
+ !p.iph.daddr) ||
+ (!(dev->flags&IFF_POINTOPOINT) &&
+ p.iph.daddr)) {
+ err = -EINVAL;
+ break;
+ }
+ t = netdev_priv(dev);
+ vti_tunnel_unlink(ipn, t);
+ synchronize_net();
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ t->parms.iph.protocol = IPPROTO_IPIP;
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ vti_tunnel_link(ipn, t);
+ netdev_state_change(dev);
+ }
+ }
+
+ if (t) {
+ err = 0;
+ if (cmd == SIOCCHGTUNNEL) {
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ if (t->parms.link != p.link) {
+ t->parms.link = p.link;
+ vti_tunnel_bind_dev(dev);
+ netdev_state_change(dev);
+ }
+ }
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
+ sizeof(p)))
+ err = -EFAULT;
+ } else
+ err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+ break;
+
+ case SIOCDELTUNNEL:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ goto done;
+
+ if (dev == ipn->fb_tunnel_dev) {
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
+ sizeof(p)))
+ goto done;
+ err = -ENOENT;
+
+ t = vti_tunnel_locate(net, &p, 0);
+ if (t == NULL)
+ goto done;
+ err = -EPERM;
+ if (t->dev == ipn->fb_tunnel_dev)
+ goto done;
+ dev = t->dev;
+ }
+ unregister_netdevice(dev);
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+done:
+ return err;
+}
+
+static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > 0xFFF8)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops vti_netdev_ops = {
+ .ndo_init = vti_tunnel_init,
+ .ndo_uninit = vti_tunnel_uninit,
+ .ndo_start_xmit = vti_tunnel_xmit,
+ .ndo_do_ioctl = vti_tunnel_ioctl,
+ .ndo_change_mtu = vti_tunnel_change_mtu,
+ .ndo_get_stats64 = vti_get_stats64,
+};
+
+static void vti_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
+static void vti_tunnel_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &vti_netdev_ops;
+ dev->destructor = vti_dev_free;
+
+ dev->type = ARPHRD_TUNNEL;
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
+ dev->mtu = ETH_DATA_LEN;
+ dev->flags = IFF_NOARP;
+ dev->iflink = 0;
+ dev->addr_len = 4;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+static int vti_tunnel_init(struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ tunnel->dev = dev;
+ strcpy(tunnel->parms.name, dev->name);
+
+ memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct iphdr *iph = &tunnel->parms.iph;
+ struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
+
+ tunnel->dev = dev;
+ strcpy(tunnel->parms.name, dev->name);
+
+ iph->version = 4;
+ iph->protocol = IPPROTO_IPIP;
+ iph->ihl = 5;
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ dev_hold(dev);
+ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
+}
+
+static struct xfrm_tunnel vti_handler __read_mostly = {
+ .handler = vti_rcv,
+ .err_handler = vti_err,
+ .priority = 1,
+};
+
+static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
+{
+ int prio;
+
+ for (prio = 1; prio < 4; prio++) {
+ int h;
+ for (h = 0; h < HASH_SIZE; h++) {
+ struct ip_tunnel *t;
+
+ t = rtnl_dereference(ipn->tunnels[prio][h]);
+ while (t != NULL) {
+ unregister_netdevice_queue(t->dev, head);
+ t = rtnl_dereference(t->next);
+ }
+ }
+ }
+}
+
+static int __net_init vti_init_net(struct net *net)
+{
+ int err;
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ ipn->tunnels[0] = ipn->tunnels_wc;
+ ipn->tunnels[1] = ipn->tunnels_l;
+ ipn->tunnels[2] = ipn->tunnels_r;
+ ipn->tunnels[3] = ipn->tunnels_r_l;
+
+ ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
+ "ip_vti0",
+ vti_tunnel_setup);
+ if (!ipn->fb_tunnel_dev) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
+ dev_net_set(ipn->fb_tunnel_dev, net);
+
+ err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
+ ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+
+ err = register_netdev(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
+ return 0;
+
+err_reg_dev:
+ vti_dev_free(ipn->fb_tunnel_dev);
+err_alloc_dev:
+ /* nothing */
+ return err;
+}
+
+static void __net_exit vti_exit_net(struct net *net)
+{
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ vti_destroy_tunnels(ipn, &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations vti_net_ops = {
+ .init = vti_init_net,
+ .exit = vti_exit_net,
+ .id = &vti_net_id,
+ .size = sizeof(struct vti_net),
+};
+
+static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ return 0;
+}
+
+static void vti_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms)
+{
+ memset(parms, 0, sizeof(*parms));
+
+ parms->iph.protocol = IPPROTO_IPIP;
+
+ if (!data)
+ return;
+
+ if (data[IFLA_VTI_LINK])
+ parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+ if (data[IFLA_VTI_IKEY])
+ parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+ if (data[IFLA_VTI_OKEY])
+ parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+
+ if (data[IFLA_VTI_LOCAL])
+ parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
+
+ if (data[IFLA_VTI_REMOTE])
+ parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
+
+}
+
+static int vti_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ip_tunnel *nt;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ int mtu;
+ int err;
+
+ nt = netdev_priv(dev);
+ vti_netlink_parms(data, &nt->parms);
+
+ if (vti_tunnel_locate(net, &nt->parms, 0))
+ return -EEXIST;
+
+ mtu = vti_tunnel_bind_dev(dev);
+ if (!tb[IFLA_MTU])
+ dev->mtu = mtu;
+
+ err = register_netdevice(dev);
+ if (err)
+ goto out;
+
+ dev_hold(dev);
+ vti_tunnel_link(ipn, nt);
+
+out:
+ return err;
+}
+
+static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct ip_tunnel *t, *nt;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ struct ip_tunnel_parm p;
+ int mtu;
+
+ if (dev == ipn->fb_tunnel_dev)
+ return -EINVAL;
+
+ nt = netdev_priv(dev);
+ vti_netlink_parms(data, &p);
+
+ t = vti_tunnel_locate(net, &p, 0);
+
+ if (t) {
+ if (t->dev != dev)
+ return -EEXIST;
+ } else {
+ t = nt;
+
+ vti_tunnel_unlink(ipn, t);
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ if (dev->type != ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ }
+ vti_tunnel_link(ipn, t);
+ netdev_state_change(dev);
+ }
+
+ if (t->parms.link != p.link) {
+ t->parms.link = p.link;
+ mtu = vti_tunnel_bind_dev(dev);
+ if (!tb[IFLA_MTU])
+ dev->mtu = mtu;
+ netdev_state_change(dev);
+ }
+
+ return 0;
+}
+
+static size_t vti_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_VTI_LINK */
+ nla_total_size(4) +
+ /* IFLA_VTI_IKEY */
+ nla_total_size(4) +
+ /* IFLA_VTI_OKEY */
+ nla_total_size(4) +
+ /* IFLA_VTI_LOCAL */
+ nla_total_size(4) +
+ /* IFLA_VTI_REMOTE */
+ nla_total_size(4) +
+ 0;
+}
+
+static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm *p = &t->parms;
+
+ if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
+     nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
+     nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
+     nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
+     nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
+ [IFLA_VTI_LINK] = { .type = NLA_U32 },
+ [IFLA_VTI_IKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_OKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+};
+
+static struct rtnl_link_ops vti_link_ops __read_mostly = {
+ .kind = "vti",
+ .maxtype = IFLA_VTI_MAX,
+ .policy = vti_policy,
+ .priv_size = sizeof(struct ip_tunnel),
+ .setup = vti_tunnel_setup,
+ .validate = vti_tunnel_validate,
+ .newlink = vti_newlink,
+ .changelink = vti_changelink,
+ .get_size = vti_get_size,
+ .fill_info = vti_fill_info,
+};
+
+static int __init vti_init(void)
+{
+ int err;
+
+ pr_info("IPv4 over IPSec tunneling driver\n");
+
+ err = register_pernet_device(&vti_net_ops);
+ if (err < 0)
+ return err;
+ err = xfrm4_mode_tunnel_input_register(&vti_handler);
+ if (err < 0) {
+ unregister_pernet_device(&vti_net_ops);
+ pr_info("vti init: can't register tunnel\n");
+ return err;
+ }
+
+ err = rtnl_link_register(&vti_link_ops);
+ if (err < 0)
+ goto rtnl_link_failed;
+
+ return err;
+
+rtnl_link_failed:
+ xfrm4_mode_tunnel_input_deregister(&vti_handler);
+ unregister_pernet_device(&vti_net_ops);
+ return err;
+}
+
+static void __exit vti_fini(void)
+{
+ rtnl_link_unregister(&vti_link_ops);
+ if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
+ pr_info("vti close: can't deregister tunnel\n");
+
+ unregister_pernet_device(&vti_net_ops);
+}
+
+module_init(vti_init);
+module_exit(vti_fini);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti");
+MODULE_ALIAS_NETDEV("ip_vti0");
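vti_init_net() wires the four chain heads so that the array index doubles as a
lookup priority: tunnels_r_l (remote and local both keyed) is searched before
tunnels_r, then tunnels_l, with the wildcard chain last. A minimal sketch of the
index computation this layout assumes (hypothetical helper, inferred from the
ipip/gre convention, not part of the patch):

	/* 3: remote+local, 2: remote only, 1: local only, 0: wildcard */
	static int vti_chain_prio(int have_local, int have_remote)
	{
		return (have_remote ? 2 : 0) + (have_local ? 1 : 0);
	}

This is also why vti_destroy_tunnels() walks prio 1..3 only: prio 0 holds the
fallback device, which vti_exit_net() queues for unregistration explicitly.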
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 63b64c45a82..d3ab47e19a8 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -31,17 +31,26 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
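+ /* fall through */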
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n",
- spi, &iph->daddr);
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
xfrm_state_put(x);
}
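The CPI carried in the IPComp header is only 16 bits wide, so the lookup above
widens it into the 32-bit SPI space with htonl(ntohs(ipch->cpi)). A standalone
userspace sketch of that conversion (illustration only, not part of the patch):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t cpi = htons(0x1234);     /* CPI as carried on the wire */
		uint32_t spi = htonl(ntohs(cpi)); /* widened to a 32-bit SPI */

		/* prints spi = 0x00001234 */
		printf("spi = 0x%08x\n", (unsigned int)ntohl(spi));
		return 0;
	}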
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 2d0f99bf61b..99af1f0cc65 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -348,9 +348,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -363,13 +360,32 @@ static int ipip_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return 0;
break;
+ case ICMP_REDIRECT:
+ break;
}
err = -ENOENT;
rcu_read_lock();
t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
- if (t == NULL || t->parms.iph.daddr == 0)
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->dev->ifindex, 0, IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+ IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (t->parms.iph.daddr == 0)
goto out;
err = 0;
@@ -471,7 +487,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
goto tx_error;
}
- dst = rt->rt_gateway;
+ dst = rt_nexthop(rt, old_iph->daddr);
}
rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -503,7 +519,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if ((old_iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(old_iph->tot_len)) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c94bbc6f2ba..8eec8f4a053 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -524,8 +524,8 @@ failure:
}
#endif
-/*
- * Delete a VIF entry
+/**
+ * vif_delete - Delete a VIF entry
* @notify: Set to 1, if the caller is a notifier_call
*/
@@ -1795,9 +1795,12 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowi4_tos = RT_TOS(iph->tos),
- .flowi4_oif = rt->rt_oif,
- .flowi4_iif = rt->rt_iif,
- .flowi4_mark = rt->rt_mark,
+ .flowi4_oif = (rt_is_output_route(rt) ?
+ skb->dev->ifindex : 0),
+ .flowi4_iif = (rt_is_output_route(rt) ?
+ net->loopback_dev->ifindex :
+ skb->dev->ifindex),
+ .flowi4_mark = skb->mark,
};
struct mr_table *mrt;
int err;
@@ -2006,37 +2009,37 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
{
int ct;
struct rtnexthop *nhp;
- u8 *b = skb_tail_pointer(skb);
- struct rtattr *mp_head;
+ struct nlattr *mp_attr;
/* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mfc_parent >= MAXVIFS)
return -ENOENT;
- if (VIF_EXISTS(mrt, c->mfc_parent))
- RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
+ if (VIF_EXISTS(mrt, c->mfc_parent) &&
+ nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
+ return -EMSGSIZE;
- mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
+ if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
+ return -EMSGSIZE;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
- if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
- goto rtattr_failure;
- nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
+ nla_nest_cancel(skb, mp_attr);
+ return -EMSGSIZE;
+ }
+
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
- mp_head->rta_type = RTA_MULTIPATH;
- mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
+
+ nla_nest_end(skb, mp_attr);
+
rtm->rtm_type = RTN_MULTICAST;
return 1;
-
-rtattr_failure:
- nlmsg_trim(skb, b);
- return -EMSGSIZE;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 2f210c79dc8..cbb6a1a6f6f 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -52,7 +52,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct nf_nat_ipv4_range newrange;
const struct nf_nat_ipv4_multi_range_compat *mr;
const struct rtable *rt;
- __be32 newsrc;
+ __be32 newsrc, nh;
NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
@@ -70,7 +70,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
mr = par->targinfo;
rt = skb_rtable(skb);
- newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
+ nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
+ newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE);
if (!newsrc) {
pr_info("%s ate my IP address\n", par->out->name);
return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ba5756d2016..1109f7f6c25 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -196,12 +196,15 @@ static void ipt_ulog_packet(unsigned int hooknum,
pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
- /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
- nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
- sizeof(*pm)+copy_len);
+ nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
+ sizeof(*pm)+copy_len, 0);
+ if (!nlh) {
+ pr_debug("error during nlmsg_put\n");
+ goto out_unlock;
+ }
ub->qlen++;
- pm = NLMSG_DATA(nlh);
+ pm = nlmsg_data(nlh);
/* We might not have a timestamp, get one */
if (skb->tstamp.tv64 == 0)
@@ -261,13 +264,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
nlh->nlmsg_type = NLMSG_DONE;
ulog_send(groupnum);
}
-
+out_unlock:
spin_unlock_bh(&ulog_lock);
return;
-nlmsg_failure:
- pr_debug("error during NLMSG_PUT\n");
alloc_failure:
pr_debug("Error building netlink message\n");
spin_unlock_bh(&ulog_lock);
@@ -380,6 +381,9 @@ static struct nf_logger ipt_ulog_logger __read_mostly = {
static int __init ulog_tg_init(void)
{
int ret, i;
+ struct netlink_kernel_cfg cfg = {
+ .groups = ULOG_MAXNLGROUPS,
+ };
pr_debug("init module\n");
@@ -392,9 +396,8 @@ static int __init ulog_tg_init(void)
for (i = 0; i < ULOG_MAXNLGROUPS; i++)
setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
- nflognl = netlink_kernel_create(&init_net,
- NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
- NULL, THIS_MODULE);
+ nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
+ THIS_MODULE, &cfg);
if (!nflognl)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 91747d4ebc2..e7ff2dcab6c 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -95,11 +95,11 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv4_confirm(unsigned int hooknum,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ipv4_helper(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -110,24 +110,38 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
- goto out;
+ return NF_ACCEPT;
/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
- goto out;
+ return NF_ACCEPT;
ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
"nf_ct_%s: dropping packet", helper->name);
- return ret;
}
+ return ret;
+}
+
+static unsigned int ipv4_confirm(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ goto out;
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
@@ -185,6 +199,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.priority = NF_IP_PRI_CONNTRACK,
},
{
+ .hook = ipv4_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
@@ -192,6 +213,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
+ .hook = ipv4_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
@@ -207,35 +235,30 @@ static int log_invalid_proto_max = 255;
static ctl_table ip_ct_sysctl_table[] = {
{
.procname = "ip_conntrack_max",
- .data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_count",
- .data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_buckets",
- .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_checksum",
- .data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_log_invalid",
- .data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -351,6 +374,25 @@ static struct nf_sockopt_ops so_getorigdst = {
.owner = THIS_MODULE,
};
+static int ipv4_init_net(struct net *net)
+{
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ struct nf_ip_net *in = &net->ct.nf_ct_proto;
+ in->ctl_table = kmemdup(ip_ct_sysctl_table,
+ sizeof(ip_ct_sysctl_table),
+ GFP_KERNEL);
+ if (!in->ctl_table)
+ return -ENOMEM;
+
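+ /* index order must match ip_ct_sysctl_table[] above */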
+ in->ctl_table[0].data = &nf_conntrack_max;
+ in->ctl_table[1].data = &net->ct.count;
+ in->ctl_table[2].data = &net->ct.htable_size;
+ in->ctl_table[3].data = &net->ct.sysctl_checksum;
+ in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
+#endif
+ return 0;
+}
+
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.l3proto = PF_INET,
.name = "ipv4",
@@ -366,8 +408,8 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
.ctl_table_path = "net/ipv4/netfilter",
- .ctl_table = ip_ct_sysctl_table,
#endif
+ .init_net = ipv4_init_net,
.me = THIS_MODULE,
};
@@ -378,6 +420,65 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
+static int ipv4_net_init(struct net *net)
+{
+ int ret = 0;
+
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_tcp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_tcp4 :protocol register failed\n");
+ goto out_tcp;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udp4 :protocol register failed\n");
+ goto out_udp;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_icmp);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_icmp4 :protocol register failed\n");
+ goto out_icmp;
+ }
+ ret = nf_conntrack_l3proto_register(net,
+ &nf_conntrack_l3proto_ipv4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l3proto_ipv4 :protocol register failed\n");
+ goto out_ipv4;
+ }
+ return 0;
+out_ipv4:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmp);
+out_icmp:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp4);
+out_udp:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp4);
+out_tcp:
+ return ret;
+}
+
+static void ipv4_net_exit(struct net *net)
+{
+ nf_conntrack_l3proto_unregister(net,
+ &nf_conntrack_l3proto_ipv4);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmp);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp4);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp4);
+}
+
+static struct pernet_operations ipv4_net_ops = {
+ .init = ipv4_net_init,
+ .exit = ipv4_net_exit,
+};
+
static int __init nf_conntrack_l3proto_ipv4_init(void)
{
int ret = 0;
@@ -391,35 +492,17 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
return ret;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4);
+ ret = register_pernet_subsys(&ipv4_net_ops);
if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register tcp.\n");
+ pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
goto cleanup_sockopt;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register udp.\n");
- goto cleanup_tcp;
- }
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register icmp.\n");
- goto cleanup_udp;
- }
-
- ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register ipv4\n");
- goto cleanup_icmp;
- }
-
ret = nf_register_hooks(ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
if (ret < 0) {
pr_err("nf_conntrack_ipv4: can't register hooks.\n");
- goto cleanup_ipv4;
+ goto cleanup_pernet;
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
ret = nf_conntrack_ipv4_compat_init();
@@ -431,14 +514,8 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
cleanup_hooks:
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
#endif
- cleanup_ipv4:
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
- cleanup_icmp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
- cleanup_udp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
- cleanup_tcp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
+ cleanup_pernet:
+ unregister_pernet_subsys(&ipv4_net_ops);
cleanup_sockopt:
nf_unregister_sockopt(&so_getorigdst);
return ret;
@@ -451,10 +528,7 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
nf_conntrack_ipv4_compat_fini();
#endif
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
+ unregister_pernet_subsys(&ipv4_net_ops);
nf_unregister_sockopt(&so_getorigdst);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 0847e373d33..5241d997ab7 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -23,6 +23,11 @@
static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static inline struct nf_icmp_net *icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -77,7 +82,7 @@ static int icmp_print_tuple(struct seq_file *s,
static unsigned int *icmp_get_timeouts(struct net *net)
{
- return &nf_ct_icmp_timeout;
+ return &icmp_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -274,16 +279,18 @@ static int icmp_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_icmp_net *in = icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
} else {
/* Set default ICMP timeout. */
- *timeout = nf_ct_icmp_timeout;
+ *timeout = in->timeout;
}
return 0;
}
@@ -308,11 +315,9 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *icmp_sysctl_header;
static struct ctl_table icmp_sysctl_table[] = {
{
.procname = "nf_conntrack_icmp_timeout",
- .data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -323,7 +328,6 @@ static struct ctl_table icmp_sysctl_table[] = {
static struct ctl_table icmp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_icmp_timeout",
- .data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -333,6 +337,62 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(icmp_sysctl_table,
+ sizeof(icmp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
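+ /* single-entry table: point nf_conntrack_icmp_timeout at this netns's value */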
+ pn->ctl_table[0].data = &in->timeout;
+#endif
+ return 0;
+}
+
+static int icmp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(icmp_compat_sysctl_table,
+ sizeof(icmp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &in->timeout;
+#endif
+#endif
+ return 0;
+}
+
+static int icmp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_proto_net *pn = &in->pn;
+
+ in->timeout = nf_ct_icmp_timeout;
+
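+ /* compat table is duplicated first; if the main table copy fails below,
+  * only the compat copy needs freeing
+  */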
+ ret = icmp_kmemdup_compat_sysctl_table(pn, in);
+ if (ret < 0)
+ return ret;
+
+ ret = icmp_kmemdup_sysctl_table(pn, in);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+
+ return ret;
+}
+
+static struct nf_proto_net *icmp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
{
.l3proto = PF_INET,
@@ -362,11 +422,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &icmp_sysctl_header,
- .ctl_table = icmp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = icmp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = icmp_init_net,
+ .get_net_proto = icmp_get_net_proto,
};
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9bb1b8a37a2..742815518b0 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -94,14 +94,14 @@ static struct nf_hook_ops ipv4_defrag_ops[] = {
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 7b22382ff0e..3c04d24e297 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -13,10 +13,10 @@
#include <linux/skbuff.h>
#include <linux/udp.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
#include <linux/netfilter/nf_conntrack_amanda.h>
MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index abb52adf5ac..44b082fd48a 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -691,6 +691,10 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
.expectfn = nf_nat_follow_master,
};
+static struct nfq_ct_nat_hook nfq_ct_nat = {
+ .seq_adjust = nf_nat_tcp_seq_adjust,
+};
+
static int __init nf_nat_init(void)
{
size_t i;
@@ -731,6 +735,7 @@ static int __init nf_nat_init(void)
nfnetlink_parse_nat_setup);
BUG_ON(nf_ct_nat_offset != NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
+ RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
return 0;
cleanup_extend:
@@ -747,6 +752,7 @@ static void __exit nf_nat_cleanup(void)
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index cad29c12131..c6784a18c1c 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -95,7 +95,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data,
TransportAddress *taddr, int count)
{
- const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ const struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
__be16 port;
@@ -178,7 +178,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
u_int16_t nated_port;
@@ -330,7 +330,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
@@ -419,7 +419,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, TransportAddress *taddr, int idx,
__be16 port, struct nf_conntrack_expect *exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
union nf_inet_addr addr;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index af65958f630..2e59ad0b90c 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,6 +153,19 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
}
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
+void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ u32 ctinfo, int off)
+{
+ const struct tcphdr *th;
+
+ if (nf_ct_protonum(ct) != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
+ nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+}
+EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
+
static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
int datalen, __sum16 *check, int oldlen)
{
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index c273d58980a..388140881eb 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -49,7 +49,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
const struct nf_nat_pptp *nat_pptp_info;
struct nf_nat_ipv4_range range;
- ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(master);
nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
/* And here goes the grand finale of corrosion... */
@@ -123,7 +123,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
__be16 new_callid;
unsigned int cid_off;
- ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(ct);
nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
new_callid = ct_pptp_info->pns_call_id;
@@ -192,7 +192,7 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
struct nf_ct_pptp_master *ct_pptp_info;
struct nf_nat_pptp *nat_pptp_info;
- ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(ct);
nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
/* save original PAC call ID in nat_info */
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 746edec8b86..bac712293fd 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -405,7 +405,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
ptr = *octets;
while (ctx->pointer < eoc) {
- if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
+ if (!asn1_octet_decode(ctx, ptr++)) {
kfree(*octets);
*octets = NULL;
return 0;
@@ -759,7 +759,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
}
break;
case SNMP_OBJECTID:
- if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
+ if (!asn1_oid_decode(ctx, end, &lp, &len)) {
kfree(id);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index a2901bf829c..9dbb8d284f9 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -8,10 +8,10 @@
#include <linux/module.h>
#include <linux/udp.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
#include <linux/netfilter/nf_conntrack_tftp.h>
MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2c00e8bf684..6232d476f37 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -371,6 +371,7 @@ void ping_err(struct sk_buff *skb, u32 info)
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+ ipv4_sk_update_pmtu(skb, sk, info);
if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
@@ -386,6 +387,7 @@ void ping_err(struct sk_buff *skb, u32 info)
break;
case ICMP_REDIRECT:
/* See ICMP_SOURCE_QUENCH */
+ ipv4_sk_redirect(skb, sk);
err = EREMOTEIO;
break;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8af0d44e4e2..957acd12250 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -232,7 +232,6 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
@@ -258,6 +257,12 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
+ SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE),
+ SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP),
+ SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE),
+ SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
+ SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
+ SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 9ae5c01cd0b..8918eff1426 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -36,9 +36,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash = protocol & (MAX_INET_PROTOS - 1);
-
- return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -49,9 +47,9 @@ EXPORT_SYMBOL(inet_add_protocol);
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret;
- ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],
prot, NULL) == prot) ? 0 : -1;
synchronize_net();
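inet_add_protocol() and inet_del_protocol() can drop the hash mask because
MAX_INET_PROTOS now spans the full 8-bit protocol space, so the protocol number
indexes the table directly; cmpxchg() keeps registration lock-free and
first-caller-wins. A standalone C11 sketch of the same register-once pattern
(userspace atomics standing in for the kernel's cmpxchg; illustration only):

	#include <stdatomic.h>
	#include <stddef.h>

	static _Atomic(const void *) protos[256]; /* one slot per protocol number */

	static int add_protocol(const void *prot, unsigned char protocol)
	{
		const void *expected = NULL;

		/* succeeds only if the slot was still empty; later callers get -1 */
		return atomic_compare_exchange_strong(&protos[protocol],
						      &expected, prot) ? 0 : -1;
	}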
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4032b818f3e..ff0f071969e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -216,6 +216,11 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
int err = 0;
int harderr = 0;
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+ ipv4_sk_update_pmtu(skb, sk, info);
+ else if (type == ICMP_REDIRECT)
+ ipv4_sk_redirect(skb, sk);
+
/* Report error on raw socket, if:
1. User requested ip_recverr.
2. Socket is connected (otherwise the error indication
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 98b30d08efe..c035251beb0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -133,10 +133,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
-static int rt_chain_length_max __read_mostly = 20;
-
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
/*
* Interface to generic destination cache.
@@ -145,11 +141,13 @@ static unsigned long expires_ljiffies;
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
-static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
-static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
-static int rt_garbage_collect(struct dst_ops *ops);
+static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+static void ipv4_dst_destroy(struct dst_entry *dst);
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
@@ -158,45 +156,17 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
- u32 *p = NULL;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
-
- peer = rt->peer;
- if (peer) {
- u32 *old_p = __DST_METRICS_PTR(old);
- unsigned long prev, new;
-
- p = peer->metrics;
- if (inet_metrics_new(peer))
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
- new = (unsigned long) p;
- prev = cmpxchg(&dst->_metrics, old, new);
-
- if (prev != old) {
- p = __DST_METRICS_PTR(prev);
- if (prev & DST_METRICS_READ_ONLY)
- p = NULL;
- } else {
- if (rt->fi) {
- fib_info_put(rt->fi);
- rt->fi = NULL;
- }
- }
- }
- return p;
+ WARN_ON(1);
+ return NULL;
}
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
- .gc = rt_garbage_collect,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
@@ -206,6 +176,7 @@ static struct dst_ops ipv4_dst_ops = {
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
+ .redirect = ip_do_redirect,
.local_out = __ip_local_out,
.neigh_lookup = ipv4_neigh_lookup,
};
@@ -232,184 +203,30 @@ const __u8 ip_tos2prio[16] = {
};
EXPORT_SYMBOL(ip_tos2prio);
-/*
- * Route cache.
- */
-
-/* The locking scheme is rather straight forward:
- *
- * 1) Read-Copy Update protects the buckets of the central route hash.
- * 2) Only writers remove entries, and they hold the lock
- * as they look at rtable reference counts.
- * 3) Only readers acquire references to rtable entries,
- * they do so with atomic increments and with the
- * lock held.
- */
-
-struct rt_hash_bucket {
- struct rtable __rcu *chain;
-};
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
- defined(CONFIG_PROVE_LOCKING)
-/*
- * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
- * The size of this table is a power of two and depends on the number of CPUS.
- * (on lockdep we have a quite big spinlock_t, so keep the size down there)
- */
-#ifdef CONFIG_LOCKDEP
-# define RT_HASH_LOCK_SZ 256
-#else
-# if NR_CPUS >= 32
-# define RT_HASH_LOCK_SZ 4096
-# elif NR_CPUS >= 16
-# define RT_HASH_LOCK_SZ 2048
-# elif NR_CPUS >= 8
-# define RT_HASH_LOCK_SZ 1024
-# elif NR_CPUS >= 4
-# define RT_HASH_LOCK_SZ 512
-# else
-# define RT_HASH_LOCK_SZ 256
-# endif
-#endif
-
-static spinlock_t *rt_hash_locks;
-# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-
-static __init void rt_hash_lock_init(void)
-{
- int i;
-
- rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
- GFP_KERNEL);
- if (!rt_hash_locks)
- panic("IP: failed to allocate rt_hash_locks\n");
-
- for (i = 0; i < RT_HASH_LOCK_SZ; i++)
- spin_lock_init(&rt_hash_locks[i]);
-}
-#else
-# define rt_hash_lock_addr(slot) NULL
-
-static inline void rt_hash_lock_init(void)
-{
-}
-#endif
-
-static struct rt_hash_bucket *rt_hash_table __read_mostly;
-static unsigned int rt_hash_mask __read_mostly;
-static unsigned int rt_hash_log __read_mostly;
-
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
-static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
- int genid)
-{
- return jhash_3words((__force u32)daddr, (__force u32)saddr,
- idx, genid)
- & rt_hash_mask;
-}
-
static inline int rt_genid(struct net *net)
{
return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
-struct rt_cache_iter_state {
- struct seq_net_private p;
- int bucket;
- int genid;
-};
-
-static struct rtable *rt_cache_get_first(struct seq_file *seq)
-{
- struct rt_cache_iter_state *st = seq->private;
- struct rtable *r = NULL;
-
- for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
- continue;
- rcu_read_lock_bh();
- r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
- while (r) {
- if (dev_net(r->dst.dev) == seq_file_net(seq) &&
- r->rt_genid == st->genid)
- return r;
- r = rcu_dereference_bh(r->dst.rt_next);
- }
- rcu_read_unlock_bh();
- }
- return r;
-}
-
-static struct rtable *__rt_cache_get_next(struct seq_file *seq,
- struct rtable *r)
-{
- struct rt_cache_iter_state *st = seq->private;
-
- r = rcu_dereference_bh(r->dst.rt_next);
- while (!r) {
- rcu_read_unlock_bh();
- do {
- if (--st->bucket < 0)
- return NULL;
- } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
- rcu_read_lock_bh();
- r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
- }
- return r;
-}
-
-static struct rtable *rt_cache_get_next(struct seq_file *seq,
- struct rtable *r)
-{
- struct rt_cache_iter_state *st = seq->private;
- while ((r = __rt_cache_get_next(seq, r)) != NULL) {
- if (dev_net(r->dst.dev) != seq_file_net(seq))
- continue;
- if (r->rt_genid == st->genid)
- break;
- }
- return r;
-}
-
-static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct rtable *r = rt_cache_get_first(seq);
-
- if (r)
- while (pos && (r = rt_cache_get_next(seq, r)))
- --pos;
- return pos ? NULL : r;
-}
-
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct rt_cache_iter_state *st = seq->private;
if (*pos)
- return rt_cache_get_idx(seq, *pos - 1);
- st->genid = rt_genid(seq_file_net(seq));
+ return NULL;
return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct rtable *r;
-
- if (v == SEQ_START_TOKEN)
- r = rt_cache_get_first(seq);
- else
- r = rt_cache_get_next(seq, v);
++*pos;
- return r;
+ return NULL;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
- if (v && v != SEQ_START_TOKEN)
- rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
@@ -419,34 +236,6 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
"Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
"Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
"HHUptod\tSpecDst");
- else {
- struct rtable *r = v;
- struct neighbour *n;
- int len, HHUptod;
-
- rcu_read_lock();
- n = dst_get_neighbour_noref(&r->dst);
- HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
- rcu_read_unlock();
-
- seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
- "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
- r->dst.dev ? r->dst.dev->name : "*",
- (__force u32)r->rt_dst,
- (__force u32)r->rt_gateway,
- r->rt_flags, atomic_read(&r->dst.__refcnt),
- r->dst.__use, 0, (__force u32)r->rt_src,
- dst_metric_advmss(&r->dst) + 40,
- dst_metric(&r->dst, RTAX_WINDOW),
- (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
- dst_metric(&r->dst, RTAX_RTTVAR)),
- r->rt_key_tos,
- -1,
- HHUptod,
- r->rt_spec_dst, &len);
-
- seq_printf(seq, "%*s\n", 127 - len, "");
- }
return 0;
}
@@ -459,8 +248,7 @@ static const struct seq_operations rt_cache_seq_ops = {
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &rt_cache_seq_ops,
- sizeof(struct rt_cache_iter_state));
+ return seq_open(file, &rt_cache_seq_ops);
}
static const struct file_operations rt_cache_seq_fops = {
@@ -468,7 +256,7 @@ static const struct file_operations rt_cache_seq_fops = {
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_net,
+ .release = seq_release,
};
@@ -658,275 +446,12 @@ static inline int ip_rt_proc_init(void)
}
#endif /* CONFIG_PROC_FS */
-static inline void rt_free(struct rtable *rt)
-{
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
-static inline void rt_drop(struct rtable *rt)
-{
- ip_rt_put(rt);
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
-static inline int rt_fast_clean(struct rtable *rth)
-{
- /* Kill broadcast/multicast entries very aggresively, if they
- collide in hash table with more useful entries */
- return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rt_is_input_route(rth) && rth->dst.rt_next;
-}
-
-static inline int rt_valuable(struct rtable *rth)
-{
- return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- (rth->peer && rth->peer->pmtu_expires);
-}
-
-static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
-{
- unsigned long age;
- int ret = 0;
-
- if (atomic_read(&rth->dst.__refcnt))
- goto out;
-
- age = jiffies - rth->dst.lastuse;
- if ((age <= tmo1 && !rt_fast_clean(rth)) ||
- (age <= tmo2 && rt_valuable(rth)))
- goto out;
- ret = 1;
-out: return ret;
-}
-
-/* Bits of score are:
- * 31: very valuable
- * 30: not quite useless
- * 29..0: usage counter
- */
-static inline u32 rt_score(struct rtable *rt)
-{
- u32 score = jiffies - rt->dst.lastuse;
-
- score = ~score & ~(3<<30);
-
- if (rt_valuable(rt))
- score |= (1<<31);
-
- if (rt_is_output_route(rt) ||
- !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
- score |= (1<<30);
-
- return score;
-}
-
-static inline bool rt_caching(const struct net *net)
-{
- return net->ipv4.current_rt_cache_rebuild_count <=
- net->ipv4.sysctl_rt_cache_rebuild_count;
-}
-
-static inline bool compare_hash_inputs(const struct rtable *rt1,
- const struct rtable *rt2)
-{
- return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
- ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
- (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
-}
-
-static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
-{
- return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
- ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
- (rt1->rt_mark ^ rt2->rt_mark) |
- (rt1->rt_key_tos ^ rt2->rt_key_tos) |
- (rt1->rt_route_iif ^ rt2->rt_route_iif) |
- (rt1->rt_oif ^ rt2->rt_oif)) == 0;
-}
-
-static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
-{
- return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
-}
-
-static inline int rt_is_expired(struct rtable *rth)
+static inline bool rt_is_expired(const struct rtable *rth)
{
return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
/*
- * Perform a full scan of hash table and free all entries.
- * Can be called by a softirq or a process.
- * In the later case, we want to be reschedule if necessary
- */
-static void rt_do_flush(struct net *net, int process_context)
-{
- unsigned int i;
- struct rtable *rth, *next;
-
- for (i = 0; i <= rt_hash_mask; i++) {
- struct rtable __rcu **pprev;
- struct rtable *list;
-
- if (process_context && need_resched())
- cond_resched();
- rth = rcu_access_pointer(rt_hash_table[i].chain);
- if (!rth)
- continue;
-
- spin_lock_bh(rt_hash_lock_addr(i));
-
- list = NULL;
- pprev = &rt_hash_table[i].chain;
- rth = rcu_dereference_protected(*pprev,
- lockdep_is_held(rt_hash_lock_addr(i)));
-
- while (rth) {
- next = rcu_dereference_protected(rth->dst.rt_next,
- lockdep_is_held(rt_hash_lock_addr(i)));
-
- if (!net ||
- net_eq(dev_net(rth->dst.dev), net)) {
- rcu_assign_pointer(*pprev, next);
- rcu_assign_pointer(rth->dst.rt_next, list);
- list = rth;
- } else {
- pprev = &rth->dst.rt_next;
- }
- rth = next;
- }
-
- spin_unlock_bh(rt_hash_lock_addr(i));
-
- for (; list; list = next) {
- next = rcu_dereference_protected(list->dst.rt_next, 1);
- rt_free(list);
- }
- }
-}
-
-/*
- * While freeing expired entries, we compute average chain length
- * and standard deviation, using fixed-point arithmetic.
- * This to have an estimation of rt_chain_length_max
- * rt_chain_length_max = max(elasticity, AVG + 4*SD)
- * We use 3 bits for frational part, and 29 (or 61) for magnitude.
- */
-
-#define FRACT_BITS 3
-#define ONE (1UL << FRACT_BITS)
-
-/*
- * Given a hash chain and an item in this hash chain,
- * find if a previous entry has the same hash_inputs
- * (but differs on tos, mark or oif)
- * Returns 0 if an alias is found.
- * Returns ONE if rth has no alias before itself.
- */
-static int has_noalias(const struct rtable *head, const struct rtable *rth)
-{
- const struct rtable *aux = head;
-
- while (aux != rth) {
- if (compare_hash_inputs(aux, rth))
- return 0;
- aux = rcu_dereference_protected(aux->dst.rt_next, 1);
- }
- return ONE;
-}
-
-static void rt_check_expire(void)
-{
- static unsigned int rover;
- unsigned int i = rover, goal;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long samples = 0;
- unsigned long sum = 0, sum2 = 0;
- unsigned long delta;
- u64 mult;
-
- delta = jiffies - expires_ljiffies;
- expires_ljiffies = jiffies;
- mult = ((u64)delta) << rt_hash_log;
- if (ip_rt_gc_timeout > 1)
- do_div(mult, ip_rt_gc_timeout);
- goal = (unsigned int)mult;
- if (goal > rt_hash_mask)
- goal = rt_hash_mask + 1;
- for (; goal > 0; goal--) {
- unsigned long tmo = ip_rt_gc_timeout;
- unsigned long length;
-
- i = (i + 1) & rt_hash_mask;
- rthp = &rt_hash_table[i].chain;
-
- if (need_resched())
- cond_resched();
-
- samples++;
-
- if (rcu_dereference_raw(*rthp) == NULL)
- continue;
- length = 0;
- spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
- prefetch(rth->dst.rt_next);
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (rth->dst.expires) {
- /* Entry is expired even if it is in use */
- if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- /*
- * We only count entries on
- * a chain with equal hash inputs once
- * so that entries for different QOS
- * levels, and other non-hash input
- * attributes don't unfairly skew
- * the length computation
- */
- length += has_noalias(rt_hash_table[i].chain, rth);
- continue;
- }
- } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
- goto nofree;
-
- /* Cleanup aged off entries. */
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- }
- spin_unlock_bh(rt_hash_lock_addr(i));
- sum += length;
- sum2 += length*length;
- }
- if (samples) {
- unsigned long avg = sum / samples;
- unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
- rt_chain_length_max = max_t(unsigned long,
- ip_rt_gc_elasticity,
- (avg + 4*sd) >> FRACT_BITS);
- }
- rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
- rt_check_expire();
- schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
-/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
* many times (2^24) without giving recent rt_genid.
@@ -938,7 +463,6 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- inetpeer_invalidate_tree(AF_INET);
}
/*
@@ -948,183 +472,22 @@ static void rt_cache_invalidate(struct net *net)
void rt_cache_flush(struct net *net, int delay)
{
rt_cache_invalidate(net);
- if (delay >= 0)
- rt_do_flush(net, !in_softirq());
-}
-
-/* Flush previous cache invalidated entries from the cache */
-void rt_cache_flush_batch(struct net *net)
-{
- rt_do_flush(net, !in_softirq());
-}
-
-static void rt_emergency_hash_rebuild(struct net *net)
-{
- net_warn_ratelimited("Route hash chain too long!\n");
- rt_cache_invalidate(net);
}
-/*
- Short description of GC goals.
-
- We want to build algorithm, which will keep routing cache
- at some equilibrium point, when number of aged off entries
- is kept approximately equal to newly generated ones.
-
- Current expiration strength is variable "expire".
- We try to adjust it dynamically, so that if networking
- is idle expires is large enough to keep enough of warm entries,
- and when load increases it reduces to limit cache size.
- */
-
-static int rt_garbage_collect(struct dst_ops *ops)
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
- static unsigned long expire = RT_GC_TIMEOUT;
- static unsigned long last_gc;
- static int rover;
- static int equilibrium;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long now = jiffies;
- int goal;
- int entries = dst_entries_get_fast(&ipv4_dst_ops);
-
- /*
- * Garbage collection is pretty expensive,
- * do not make it too frequently.
- */
-
- RT_CACHE_STAT_INC(gc_total);
-
- if (now - last_gc < ip_rt_gc_min_interval &&
- entries < ip_rt_max_size) {
- RT_CACHE_STAT_INC(gc_ignored);
- goto out;
- }
-
- entries = dst_entries_get_slow(&ipv4_dst_ops);
- /* Calculate number of entries, which we want to expire now. */
- goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
- if (goal <= 0) {
- if (equilibrium < ipv4_dst_ops.gc_thresh)
- equilibrium = ipv4_dst_ops.gc_thresh;
- goal = entries - equilibrium;
- if (goal > 0) {
- equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
- goal = entries - equilibrium;
- }
- } else {
- /* We are in dangerous area. Try to reduce cache really
- * aggressively.
- */
- goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
- equilibrium = entries - goal;
- }
-
- if (now - last_gc >= ip_rt_gc_min_interval)
- last_gc = now;
-
- if (goal <= 0) {
- equilibrium += goal;
- goto work_done;
- }
-
- do {
- int i, k;
-
- for (i = rt_hash_mask, k = rover; i >= 0; i--) {
- unsigned long tmo = expire;
-
- k = (k + 1) & rt_hash_mask;
- rthp = &rt_hash_table[k].chain;
- spin_lock_bh(rt_hash_lock_addr(k));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
- if (!rt_is_expired(rth) &&
- !rt_may_expire(rth, tmo, expire)) {
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- continue;
- }
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- goal--;
- }
- spin_unlock_bh(rt_hash_lock_addr(k));
- if (goal <= 0)
- break;
- }
- rover = k;
-
- if (goal <= 0)
- goto work_done;
-
- /* Goal is not achieved. We stop process if:
-
- - if expire reduced to zero. Otherwise, expire is halfed.
- - if table is not full.
- - if we are called from interrupt.
- - jiffies check is just fallback/debug loop breaker.
- We will not spin here for long time in any case.
- */
-
- RT_CACHE_STAT_INC(gc_goal_miss);
-
- if (expire == 0)
- break;
-
- expire >>= 1;
-
- if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- } while (!in_softirq() && time_before_eq(jiffies, now));
-
- if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- net_warn_ratelimited("dst cache overflow\n");
- RT_CACHE_STAT_INC(gc_dst_overflow);
- return 1;
-
-work_done:
- expire += ip_rt_gc_min_interval;
- if (expire > ip_rt_gc_timeout ||
- dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
- dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
- expire = ip_rt_gc_timeout;
-out: return 0;
-}
-
-/*
- * Returns number of entries in a hash chain that have different hash_inputs
- */
-static int slow_chain_length(const struct rtable *head)
-{
- int length = 0;
- const struct rtable *rth = head;
-
- while (rth) {
- length += has_noalias(head, rth);
- rth = rcu_dereference_protected(rth->dst.rt_next, 1);
- }
- return length >> FRACT_BITS;
-}
-
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
-{
- static const __be32 inaddr_any = 0;
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
struct neighbour *n;
rt = (const struct rtable *) dst;
-
- if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
- pkey = &inaddr_any;
- else if (rt->rt_gateway)
+ if (rt->rt_gateway)
pkey = (const __be32 *) &rt->rt_gateway;
+ else if (skb)
+ pkey = &ip_hdr(skb)->daddr;
n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
@@ -1132,311 +495,221 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
return neigh_create(&arp_tbl, pkey, dev);
}
-static int rt_bind_neighbour(struct rtable *rt)
+/*
+ * Peer allocation may fail only in serious out-of-memory conditions; however,
+ * we can still generate some output.
+ * Random ID selection looks a bit dangerous because we have no chance of
+ * selecting an ID that is unique within a reasonable period of time.
+ * But a broken packet identifier may be better than no packet at all.
+ */
+static void ip_select_fb_ident(struct iphdr *iph)
{
- struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
- dst_set_neighbour(&rt->dst, n);
+ static DEFINE_SPINLOCK(ip_fb_id_lock);
+ static u32 ip_fallback_id;
+ u32 salt;
- return 0;
+ spin_lock_bh(&ip_fb_id_lock);
+ salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
+ iph->id = htons(salt & 0xFFFF);
+ ip_fallback_id = salt;
+ spin_unlock_bh(&ip_fb_id_lock);
}
-static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
- struct sk_buff *skb, int ifindex)
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
- struct rtable *rth, *cand;
- struct rtable __rcu **rthp, **candp;
- unsigned long now;
- u32 min_score;
- int chain_length;
- int attempts = !in_softirq();
-
-restart:
- chain_length = 0;
- min_score = ~(u32)0;
- cand = NULL;
- candp = NULL;
- now = jiffies;
-
- if (!rt_caching(dev_net(rt->dst.dev))) {
- /*
- * If we're not caching, just tell the caller we
- * were successful and don't touch the route. The
- * caller holds the sole reference to the cache entry, and
- * it will be released when the caller is done with it.
- * If we drop it here, the callers have no way to resolve routes
- * when we're not caching. Instead, just point *rp at rt, so
- * the caller gets a single use out of the route.
- * Note that we do rt_free on this new route entry, so that
- * once its refcount hits zero, we are still able to reap it
- * (Thanks Alexey)
- * Note: To avoid expensive rcu stuff for this uncached dst,
- * we set DST_NOCACHE so that dst_release() can free dst without
- * waiting a grace period.
- */
-
- rt->dst.flags |= DST_NOCACHE;
- if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
- int err = rt_bind_neighbour(rt);
- if (err) {
- net_warn_ratelimited("Neighbour table failure & not caching routes\n");
- ip_rt_put(rt);
- return ERR_PTR(err);
- }
- }
+ struct net *net = dev_net(dst->dev);
+ struct inet_peer *peer;
- goto skip_hashing;
+ peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+ if (peer) {
+ iph->id = htons(inet_getid(peer, more));
+ inet_putpeer(peer);
+ return;
}
- rthp = &rt_hash_table[hash].chain;
-
- spin_lock_bh(rt_hash_lock_addr(hash));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
- /* Put it first */
- *rthp = rth->dst.rt_next;
- /*
- * Since lookup is lockfree, the deletion
- * must be visible to another weakly ordered CPU before
- * the insertion at the start of the hash chain.
- */
- rcu_assign_pointer(rth->dst.rt_next,
- rt_hash_table[hash].chain);
- /*
- * Since lookup is lockfree, the update writes
- * must be ordered for consistency on SMP.
- */
- rcu_assign_pointer(rt_hash_table[hash].chain, rth);
-
- dst_use(&rth->dst, now);
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
- rt_drop(rt);
- if (skb)
- skb_dst_set(skb, &rth->dst);
- return rth;
- }
-
- if (!atomic_read(&rth->dst.__refcnt)) {
- u32 score = rt_score(rth);
-
- if (score <= min_score) {
- cand = rth;
- candp = rthp;
- min_score = score;
- }
- }
-
- chain_length++;
-
- rthp = &rth->dst.rt_next;
- }
+ ip_select_fb_ident(iph);
+}
+EXPORT_SYMBOL(__ip_select_ident);
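The rewrite drops the peer pointer formerly cached in the rtable: __ip_select_ident() now looks the per-destination peer up on demand and salts a global counter only when peer allocation fails. A userspace model of the two paths; secure_hash() is a stand-in mixer, not the kernel's secure_ip_id(), and the struct is illustrative:

#include <stdint.h>

struct peer {                           /* per-destination inet_peer analogue */
        uint16_t id;
};

static uint32_t fallback_id;            /* global state behind ip_fb_id_lock */

static uint32_t secure_hash(uint32_t x)
{
        x ^= x >> 16;                   /* illustrative mixing, not secure_ip_id() */
        x *= 0x45d9f3bU;
        x ^= x >> 16;
        return x;
}

static uint16_t select_ident(struct peer *peer, uint32_t daddr)
{
        if (peer)                       /* normal path: per-destination counter */
                return peer->id++;
        /* fallback: salt the rolling id with the destination */
        fallback_id = secure_hash(fallback_id ^ daddr);
        return (uint16_t)fallback_id;
}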
- if (cand) {
- /* ip_rt_gc_elasticity used to be the average chain length;
- * when exceeded, gc becomes really aggressive.
- *
- * The second limit is less certain. At the moment it allows
- * only 2 entries per bucket. We will see.
- */
- if (chain_length > ip_rt_gc_elasticity) {
- *candp = cand->dst.rt_next;
- rt_free(cand);
- }
- } else {
- if (chain_length > rt_chain_length_max &&
- slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
- struct net *net = dev_net(rt->dst.dev);
- int num = ++net->ipv4.current_rt_cache_rebuild_count;
- if (!rt_caching(net)) {
- pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
- rt->dst.dev->name, num);
- }
- rt_emergency_hash_rebuild(net);
- spin_unlock_bh(rt_hash_lock_addr(hash));
+static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+ const struct iphdr *iph,
+ int oif, u8 tos,
+ u8 prot, u32 mark, int flow_flags)
+{
+ if (sk) {
+ const struct inet_sock *inet = inet_sk(sk);
- hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
- ifindex, rt_genid(net));
- goto restart;
- }
+ oif = sk->sk_bound_dev_if;
+ mark = sk->sk_mark;
+ tos = RT_CONN_FLAGS(sk);
+ prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
+ flowi4_init_output(fl4, oif, mark, tos,
+ RT_SCOPE_UNIVERSE, prot,
+ flow_flags,
+ iph->daddr, iph->saddr, 0, 0);
+}
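__build_flow_key() deliberately lets socket state override the packet header, so PMTU and redirect learning stay keyed the same way the original output route was. A self-contained sketch of that precedence, with illustrative types:

#include <stdint.h>

struct sock_view {                      /* illustrative socket fields */
        int bound_dev_if;
        uint32_t mark;
        uint8_t tos, protocol;
};

struct pkt_view {                       /* illustrative packet-header fields */
        int ifindex;
        uint32_t mark;
        uint8_t tos, protocol;
};

struct key {
        int oif;
        uint32_t mark;
        uint8_t tos, proto;
};

static void build_key(struct key *k, const struct sock_view *sk,
                      const struct pkt_view *pkt)
{
        k->oif   = pkt->ifindex;        /* defaults taken from the packet */
        k->mark  = pkt->mark;
        k->tos   = pkt->tos;
        k->proto = pkt->protocol;
        if (sk) {                       /* socket state wins over the packet */
                k->oif   = sk->bound_dev_if;
                k->mark  = sk->mark;
                k->tos   = sk->tos;
                k->proto = sk->protocol;
        }
}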
- /* Try to bind route to arp only if it is output
- route or unicast forwarding path.
- */
- if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
- int err = rt_bind_neighbour(rt);
- if (err) {
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
- if (err != -ENOBUFS) {
- rt_drop(rt);
- return ERR_PTR(err);
- }
-
- /* Neighbour tables are full and nothing
- can be released. Try to shrink the route cache;
- most likely it holds some neighbour records.
- */
- if (attempts-- > 0) {
- int saved_elasticity = ip_rt_gc_elasticity;
- int saved_int = ip_rt_gc_min_interval;
- ip_rt_gc_elasticity = 1;
- ip_rt_gc_min_interval = 0;
- rt_garbage_collect(&ipv4_dst_ops);
- ip_rt_gc_min_interval = saved_int;
- ip_rt_gc_elasticity = saved_elasticity;
- goto restart;
- }
-
- net_warn_ratelimited("Neighbour table overflow\n");
- rt_drop(rt);
- return ERR_PTR(-ENOBUFS);
- }
- }
+static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
+ const struct sock *sk)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ int oif = skb->dev->ifindex;
+ u8 tos = RT_TOS(iph->tos);
+ u8 prot = iph->protocol;
+ u32 mark = skb->mark;
- rt->dst.rt_next = rt_hash_table[hash].chain;
+ __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
+}
- /*
- * Since lookup is lockfree, we must make sure
- * previous writes to rt are committed to memory
- * before making rt visible to other CPUS.
- */
- rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct ip_options_rcu *inet_opt;
+ __be32 daddr = inet->inet_daddr;
- spin_unlock_bh(rt_hash_lock_addr(hash));
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+ flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+ inet_sk_flowi_flags(sk),
+ daddr, inet->inet_saddr, 0, 0);
+ rcu_read_unlock();
+}
-skip_hashing:
+static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+ const struct sk_buff *skb)
+{
if (skb)
- skb_dst_set(skb, &rt->dst);
- return rt;
+ build_skb_flow_key(fl4, skb, sk);
+ else
+ build_sk_flow_key(fl4, sk);
}
-static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt_peer_genid(void)
+static inline void rt_free(struct rtable *rt)
{
- return atomic_read(&__rt_peer_genid);
+ call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
- struct inet_peer *peer;
+static DEFINE_SPINLOCK(fnhe_lock);
- peer = inet_getpeer_v4(daddr, create);
+static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
+{
+ struct fib_nh_exception *fnhe, *oldest;
+ struct rtable *orig;
- if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
- inet_putpeer(peer);
- else
- rt->rt_peer_genid = rt_peer_genid();
+ oldest = rcu_dereference(hash->chain);
+ for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
+ oldest = fnhe;
+ }
+ orig = rcu_dereference(oldest->fnhe_rth);
+ if (orig) {
+ RCU_INIT_POINTER(oldest->fnhe_rth, NULL);
+ rt_free(orig);
+ }
+ return oldest;
}
-/*
- * Peer allocation may fail only in serious out-of-memory conditions. However
- * we still can generate some output.
- * Random ID selection looks a bit dangerous because we have no chances to
- * select ID being unique in a reasonable period of time.
- * But broken packet identifier may be better than no packet at all.
- */
-static void ip_select_fb_ident(struct iphdr *iph)
+static inline u32 fnhe_hashfun(__be32 daddr)
{
- static DEFINE_SPINLOCK(ip_fb_id_lock);
- static u32 ip_fallback_id;
- u32 salt;
+ u32 hval;
- spin_lock_bh(&ip_fb_id_lock);
- salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
- iph->id = htons(salt & 0xFFFF);
- ip_fallback_id = salt;
- spin_unlock_bh(&ip_fb_id_lock);
+ hval = (__force u32) daddr;
+ hval ^= (hval >> 11) ^ (hval >> 22);
+
+ return hval & (FNHE_HASH_SIZE - 1);
}
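The exception hash can be exercised directly in userspace; this sketch assumes FNHE_HASH_SIZE is a power of two (2048 in this patch series, but that constant lives outside this hunk, so it is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define FNHE_HASH_SIZE 2048             /* assumed power-of-two table size */

static uint32_t fnhe_hashfun(uint32_t daddr)
{
        uint32_t hval = daddr;

        /* fold the high bits down so all 32 bits influence the index */
        hval ^= (hval >> 11) ^ (hval >> 22);
        return hval & (FNHE_HASH_SIZE - 1);
}

int main(void)
{
        /* 192.0.2.1 in network byte order on a little-endian host */
        uint32_t daddr = 0x010200c0;

        printf("bucket = %u\n", fnhe_hashfun(daddr));
        return 0;
}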
-void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
+static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ u32 pmtu, unsigned long expires)
{
- struct rtable *rt = (struct rtable *) dst;
+ struct fnhe_hash_bucket *hash;
+ struct fib_nh_exception *fnhe;
+ int depth;
+ u32 hval = fnhe_hashfun(daddr);
- if (rt && !(rt->dst.flags & DST_NOPEER)) {
- if (rt->peer == NULL)
- rt_bind_peer(rt, rt->rt_dst, 1);
+ spin_lock_bh(&fnhe_lock);
- /* If a peer is attached to the destination, it is never detached,
- so we need not grab a lock to dereference it.
- */
- if (rt->peer) {
- iph->id = htons(inet_getid(rt->peer, more));
- return;
- }
- } else if (!rt)
- pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
+ hash = nh->nh_exceptions;
+ if (!hash) {
+ hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
+ if (!hash)
+ goto out_unlock;
+ nh->nh_exceptions = hash;
+ }
- ip_select_fb_ident(iph);
-}
-EXPORT_SYMBOL(__ip_select_ident);
+ hash += hval;
-static void rt_del(unsigned int hash, struct rtable *rt)
-{
- struct rtable __rcu **rthp;
- struct rtable *aux;
+ depth = 0;
+ for (fnhe = rcu_dereference(hash->chain); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (fnhe->fnhe_daddr == daddr)
+ break;
+ depth++;
+ }
- rthp = &rt_hash_table[hash].chain;
- spin_lock_bh(rt_hash_lock_addr(hash));
- ip_rt_put(rt);
- while ((aux = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
- if (aux == rt || rt_is_expired(aux)) {
- *rthp = aux->dst.rt_next;
- rt_free(aux);
- continue;
+ if (fnhe) {
+ if (gw)
+ fnhe->fnhe_gw = gw;
+ if (pmtu) {
+ fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_expires = expires;
+ }
+ } else {
+ if (depth > FNHE_RECLAIM_DEPTH)
+ fnhe = fnhe_oldest(hash);
+ else {
+ fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
+ if (!fnhe)
+ goto out_unlock;
+
+ fnhe->fnhe_next = hash->chain;
+ rcu_assign_pointer(hash->chain, fnhe);
}
- rthp = &aux->dst.rt_next;
+ fnhe->fnhe_daddr = daddr;
+ fnhe->fnhe_gw = gw;
+ fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_expires = expires;
}
- spin_unlock_bh(rt_hash_lock_addr(hash));
+
+ fnhe->fnhe_stamp = jiffies;
+
+out_unlock:
+ spin_unlock_bh(&fnhe_lock);
+ return;
}
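update_or_create_fnhe() is a find-or-evict pattern: walk the bucket for a matching daddr, and on a miss either recycle the stalest entry (once the chain is deeper than FNHE_RECLAIM_DEPTH) or push a fresh one at the head. A simplified userspace sketch, with illustrative names and a plain timestamp compare standing in for the kernel's wrap-safe time_before():

#include <stdint.h>
#include <stdlib.h>

struct exc {
        uint32_t daddr;
        unsigned long stamp;            /* last-touched time */
        struct exc *next;
};

static struct exc *find_or_make(struct exc **chain, uint32_t daddr,
                                unsigned long now, int reclaim_depth)
{
        struct exc *e, *oldest = *chain;
        int depth = 0;

        for (e = *chain; e; e = e->next, depth++) {
                if (e->daddr == daddr)
                        goto found;
                if (e->stamp < oldest->stamp)
                        oldest = e;
        }
        if (depth > reclaim_depth && oldest) {
                e = oldest;             /* recycle the stalest entry */
        } else {
                e = calloc(1, sizeof(*e));
                if (!e)
                        return NULL;
                e->next = *chain;       /* push at the head of the chain */
                *chain = e;
        }
        e->daddr = daddr;
found:
        e->stamp = now;                 /* refreshed on every touch */
        return e;
}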
-static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
+ bool kill_route)
{
- struct rtable *rt = (struct rtable *) dst;
- __be32 orig_gw = rt->rt_gateway;
- struct neighbour *n, *old_n;
-
- dst_confirm(&rt->dst);
+ __be32 new_gw = icmp_hdr(skb)->un.gateway;
+ __be32 old_gw = ip_hdr(skb)->saddr;
+ struct net_device *dev = skb->dev;
+ struct in_device *in_dev;
+ struct fib_result res;
+ struct neighbour *n;
+ struct net *net;
- rt->rt_gateway = peer->redirect_learned.a4;
+ switch (icmp_hdr(skb)->code & 7) {
+ case ICMP_REDIR_NET:
+ case ICMP_REDIR_NETTOS:
+ case ICMP_REDIR_HOST:
+ case ICMP_REDIR_HOSTTOS:
+ break;
- n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n)) {
- rt->rt_gateway = orig_gw;
+ default:
return;
}
- old_n = xchg(&rt->dst._neighbour, n);
- if (old_n)
- neigh_release(old_n);
- if (!(n->nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- } else {
- rt->rt_flags |= RTCF_REDIRECTED;
- call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
- }
-}
-/* called in rcu_read_lock() section */
-void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
- __be32 saddr, struct net_device *dev)
-{
- int s, i;
- struct in_device *in_dev = __in_dev_get_rcu(dev);
- __be32 skeys[2] = { saddr, 0 };
- int ikeys[2] = { dev->ifindex, 0 };
- struct inet_peer *peer;
- struct net *net;
+ if (rt->rt_gateway != old_gw)
+ return;
+ in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
@@ -1456,72 +729,50 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- for (s = 0; s < 2; s++) {
- for (i = 0; i < 2; i++) {
- unsigned int hash;
- struct rtable __rcu **rthp;
- struct rtable *rt;
-
- hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
-
- rthp = &rt_hash_table[hash].chain;
-
- while ((rt = rcu_dereference(*rthp)) != NULL) {
- rthp = &rt->dst.rt_next;
-
- if (rt->rt_key_dst != daddr ||
- rt->rt_key_src != skeys[s] ||
- rt->rt_oif != ikeys[i] ||
- rt_is_input_route(rt) ||
- rt_is_expired(rt) ||
- !net_eq(dev_net(rt->dst.dev), net) ||
- rt->dst.error ||
- rt->dst.dev != dev ||
- rt->rt_gateway != old_gw)
- continue;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
-
- peer = rt->peer;
- if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
- peer->redirect_learned.a4 = new_gw;
- atomic_inc(&__rt_peer_genid);
- }
- check_peer_redir(&rt->dst, peer);
- }
+ n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+ if (n) {
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ } else {
+ if (fib_lookup(net, fl4, &res) == 0) {
+ struct fib_nh *nh = &FIB_RES_NH(res);
+
+ update_or_create_fnhe(nh, fl4->daddr, new_gw,
+ 0, 0);
}
+ if (kill_route)
+ rt->dst.obsolete = DST_OBSOLETE_KILL;
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
+ neigh_release(n);
}
return;
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev))
+ if (IN_DEV_LOG_MARTIANS(in_dev)) {
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ __be32 daddr = iph->daddr;
+ __be32 saddr = iph->saddr;
+
net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
" Advised path = %pI4 -> %pI4\n",
&old_gw, dev->name, &new_gw,
&saddr, &daddr);
+ }
#endif
;
}
-static bool peer_pmtu_expired(struct inet_peer *peer)
+static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+ struct rtable *rt;
+ struct flowi4 fl4;
- return orig &&
- time_after_eq(jiffies, orig) &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
+ rt = (struct rtable *) dst;
-static bool peer_pmtu_cleaned(struct inet_peer *peer)
-{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
- return orig &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+ ip_rt_build_flow_key(&fl4, sk, skb);
+ __ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1533,14 +784,10 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
- } else if (rt->rt_flags & RTCF_REDIRECTED) {
- unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
- rt->rt_oif,
- rt_genid(dev_net(dst->dev)));
- rt_del(hash, rt);
+ } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires) {
+ ip_rt_put(rt);
ret = NULL;
- } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
- dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
}
}
return ret;
@@ -1567,6 +814,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct inet_peer *peer;
+ struct net *net;
int log_martians;
rcu_read_lock();
@@ -1578,9 +826,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
+ net = dev_net(rt->dst.dev);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
return;
@@ -1597,7 +844,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
*/
if (peer->rate_tokens >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
- return;
+ goto out_put_peer;
}
/* Check for load limit; set rate_last to the latest sent
@@ -1614,20 +861,38 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
- &ip_hdr(skb)->saddr, rt->rt_iif,
- &rt->rt_dst, &rt->rt_gateway);
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &rt->rt_gateway);
#endif
}
+out_put_peer:
+ inet_putpeer(peer);
}
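The middle of ip_rt_send_redirect() is elided by the hunk above; its throttle is an exponential backoff in which every redirect sent lengthens the quiet period required before the next one, up to a hard token cap. A userspace model of that behaviour (names and the exact backoff rule are illustrative):

#include <stdbool.h>

struct ratelimit {
        unsigned int tokens;            /* redirects sent in this burst */
        unsigned long last;             /* time of the last one */
};

static bool may_send_redirect(struct ratelimit *rl, unsigned long now,
                              unsigned long load, unsigned int max_tokens)
{
        if (rl->tokens >= max_tokens) { /* hard cap reached: go silent */
                rl->last = now;
                return false;
        }
        if (rl->tokens == 0 || now > rl->last + (load << rl->tokens)) {
                rl->last = now;
                rl->tokens++;           /* the next redirect waits twice as long */
                return true;
        }
        return false;
}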
static int ip_error(struct sk_buff *skb)
{
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
struct rtable *rt = skb_rtable(skb);
struct inet_peer *peer;
unsigned long now;
+ struct net *net;
bool send;
int code;
+ net = dev_net(rt->dst.dev);
+ if (!IN_DEV_FORWARD(in_dev)) {
+ switch (rt->dst.error) {
+ case EHOSTUNREACH:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+ break;
+
+ case ENETUNREACH:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+ break;
+ }
+ goto out;
+ }
+
switch (rt->dst.error) {
case EINVAL:
default:
@@ -1637,17 +902,14 @@ static int ip_error(struct sk_buff *skb)
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
- IP_INC_STATS_BH(dev_net(rt->dst.dev),
- IPSTATS_MIB_INNOROUTES);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
break;
}
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
send = true;
if (peer) {
@@ -1660,6 +922,7 @@ static int ip_error(struct sk_buff *skb)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
+ inet_putpeer(peer);
}
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1668,163 +931,120 @@ out: kfree_skb(skb);
return 0;
}
-/*
- * The last two values are not from the RFC but
- * are needed for AMPRnet AX.25 paths.
- */
+static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+{
+ struct fib_result res;
-static const unsigned short mtu_plateau[] =
-{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
+ if (mtu < ip_rt_min_pmtu)
+ mtu = ip_rt_min_pmtu;
-static inline unsigned short guess_mtu(unsigned short old_mtu)
-{
- int i;
+ if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
+ struct fib_nh *nh = &FIB_RES_NH(res);
- for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
- if (old_mtu > mtu_plateau[i])
- return mtu_plateau[i];
- return 68;
+ update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+ jiffies + ip_rt_mtu_expires);
+ }
+ return mtu;
}
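The removed plateau guessing can still be run in userspace, which makes the contrast clear: the replacement above simply clamps the ICMP-supplied MTU to ip_rt_min_pmtu and records it in a per-destination nexthop exception instead of guessing. A runnable copy of the legacy heuristic:

#include <stdio.h>

static const unsigned short mtu_plateau[] =
        { 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static unsigned short guess_mtu(unsigned short old_mtu)
{
        unsigned int i;

        for (i = 0; i < sizeof(mtu_plateau) / sizeof(mtu_plateau[0]); i++)
                if (old_mtu > mtu_plateau[i])
                        return mtu_plateau[i];
        return 68;                      /* historical IPv4 minimum */
}

int main(void)
{
        /* a 1500-byte packet bounced with a zero MTU hint steps down
         * to the next plateau, 1492 (e.g. Ethernet behind PPPoE) */
        printf("%u\n", guess_mtu(1500));
        return 0;
}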
-unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
- unsigned short new_mtu,
- struct net_device *dev)
+static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
- unsigned short old_mtu = ntohs(iph->tot_len);
- unsigned short est_mtu = 0;
- struct inet_peer *peer;
-
- peer = inet_getpeer_v4(iph->daddr, 1);
- if (peer) {
- unsigned short mtu = new_mtu;
-
- if (new_mtu < 68 || new_mtu >= old_mtu) {
- /* BSD 4.2 derived systems incorrectly adjust
- * tot_len by the IP header length, and report
- * a zero MTU in the ICMP message.
- */
- if (mtu == 0 &&
- old_mtu >= 68 + (iph->ihl << 2))
- old_mtu -= iph->ihl << 2;
- mtu = guess_mtu(old_mtu);
- }
-
- if (mtu < ip_rt_min_pmtu)
- mtu = ip_rt_min_pmtu;
- if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
- unsigned long pmtu_expires;
-
- pmtu_expires = jiffies + ip_rt_mtu_expires;
- if (!pmtu_expires)
- pmtu_expires = 1UL;
+ struct rtable *rt = (struct rtable *) dst;
+ struct flowi4 fl4;
- est_mtu = mtu;
- peer->pmtu_learned = mtu;
- peer->pmtu_expires = pmtu_expires;
- atomic_inc(&__rt_peer_genid);
- }
+ ip_rt_build_flow_key(&fl4, sk, skb);
+ mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
- inet_putpeer(peer);
+ if (!rt->rt_pmtu) {
+ dst->obsolete = DST_OBSOLETE_KILL;
+ } else {
+ rt->rt_pmtu = mtu;
+ dst_set_expires(&rt->dst, ip_rt_mtu_expires);
}
- return est_mtu ? : new_mtu;
}
-static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
+ int oif, u32 mark, u8 protocol, int flow_flags)
{
- unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- if (!expires)
- return;
- if (time_before(jiffies, expires)) {
- u32 orig_dst_mtu = dst_mtu(dst);
- if (peer->pmtu_learned < orig_dst_mtu) {
- if (!peer->pmtu_orig)
- peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
- }
- } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+ __build_flow_key(&fl4, NULL, iph, oif,
+ RT_TOS(iph->tos), protocol, mark, flow_flags);
+ rt = __ip_route_output_key(net, &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_rt_update_pmtu(rt, &fl4, mtu);
+ ip_rt_put(rt);
+ }
}
+EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
-static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
-
- dst_confirm(dst);
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
- if (peer) {
- unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
-
- if (mtu < ip_rt_min_pmtu)
- mtu = ip_rt_min_pmtu;
- if (!pmtu_expires || mtu < peer->pmtu_learned) {
-
- pmtu_expires = jiffies + ip_rt_mtu_expires;
- if (!pmtu_expires)
- pmtu_expires = 1UL;
-
- peer->pmtu_learned = mtu;
- peer->pmtu_expires = pmtu_expires;
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- atomic_inc(&__rt_peer_genid);
- rt->rt_peer_genid = rt_peer_genid();
- }
- check_peer_pmtu(dst, peer);
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ rt = __ip_route_output_key(sock_net(sk), &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_rt_update_pmtu(rt, &fl4, mtu);
+ ip_rt_put(rt);
}
}
+EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
-
-static void ipv4_validate_peer(struct rtable *rt)
+void ipv4_redirect(struct sk_buff *skb, struct net *net,
+ int oif, u32 mark, u8 protocol, int flow_flags)
{
- if (rt->rt_peer_genid != rt_peer_genid()) {
- struct inet_peer *peer;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 0);
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- peer = rt->peer;
- if (peer) {
- check_peer_pmtu(&rt->dst, peer);
+ __build_flow_key(&fl4, NULL, iph, oif,
+ RT_TOS(iph->tos), protocol, mark, flow_flags);
+ rt = __ip_route_output_key(net, &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_do_redirect(rt, skb, &fl4, false);
+ ip_rt_put(rt);
+ }
+}
+EXPORT_SYMBOL_GPL(ipv4_redirect);
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway)
- check_peer_redir(&rt->dst, peer);
- }
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- rt->rt_peer_genid = rt_peer_genid();
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ rt = __ip_route_output_key(sock_net(sk), &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_do_redirect(rt, skb, &fl4, false);
+ ip_rt_put(rt);
}
}
+EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rtable *rt = (struct rtable *) dst;
- if (rt_is_expired(rt))
+ /* All IPv4 dsts are created with ->obsolete set to
+ * DST_OBSOLETE_FORCE_CHK, which forces validation calls down
+ * into this function on every use.
+ *
+ * When a PMTU/redirect information update invalidates a
+ * route, this is indicated by setting obsolete to
+ * DST_OBSOLETE_KILL.
+ */
+ if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
return NULL;
- ipv4_validate_peer(rt);
return dst;
}
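A small sketch of the obsolete-field protocol the new comment describes; the numeric values are assumptions, and only their distinctness matters here:

#include <stdbool.h>
#include <stddef.h>

enum {                                  /* assumed values, for illustration */
        DST_OBSOLETE_NONE      =  0,    /* never revalidated */
        DST_OBSOLETE_FORCE_CHK = -1,    /* ->check() runs on every use */
        DST_OBSOLETE_KILL      = -2,    /* invalidated by PMTU/redirect */
};

struct dst_view {
        int obsolete;
        bool expired;
};

/* NULL tells the caller to drop the dst and re-look the route up. */
static const struct dst_view *dst_check(const struct dst_view *dst)
{
        if (dst->obsolete == DST_OBSOLETE_KILL || dst->expired)
                return NULL;
        return dst;
}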
-static void ipv4_dst_destroy(struct dst_entry *dst)
-{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer = rt->peer;
-
- if (rt->fi) {
- fib_info_put(rt->fi);
- rt->fi = NULL;
- }
- if (peer) {
- rt->peer = NULL;
- inet_putpeer(peer);
- }
-}
-
-
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
@@ -1832,8 +1052,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
- dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+ if (rt)
+ dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1880,8 +1100,9 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
else
- src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
- RT_SCOPE_UNIVERSE);
+ src = inet_select_addr(rt->dst.dev,
+ rt_nexthop(rt, iph->daddr),
+ RT_SCOPE_UNIVERSE);
rcu_read_unlock();
}
memcpy(addr, &src, 4);
@@ -1913,7 +1134,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *) dst;
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+ unsigned int mtu = rt->rt_pmtu;
+
+ if (mtu && time_after_eq(jiffies, rt->dst.expires))
+ mtu = 0;
+
+ if (!mtu)
+ mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt))
return mtu;
@@ -1921,8 +1148,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-
- if (rt->rt_gateway != rt->rt_dst && mtu > 576)
+ if (rt->rt_gateway && mtu > 576)
mtu = 576;
}
@@ -1932,76 +1158,184 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
return mtu;
}
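Putting the pieces of ipv4_mtu() together, the precedence is: an unexpired learned rt_pmtu, then the RTAX_MTU metric, then the device MTU, with the historical 576-byte clamp for locked-MTU routes that have a gateway. A self-contained sketch under those assumptions (field names are illustrative):

#include <stdbool.h>

struct rt_mtu_view {                    /* illustrative flattening of the inputs */
        unsigned int rt_pmtu;           /* learned path MTU, 0 if none */
        unsigned long expires;          /* when rt_pmtu lapses (jiffies) */
        unsigned int metric_mtu;        /* RTAX_MTU metric, 0 if unset */
        unsigned int dev_mtu;           /* output device MTU */
        bool is_output;
        bool mtu_locked;                /* RTAX_MTU metric is locked */
        bool has_gateway;
};

static bool after_eq(unsigned long a, unsigned long b)
{
        return (long)(a - b) >= 0;      /* jiffies-style wrapping compare */
}

static unsigned int route_mtu(const struct rt_mtu_view *rt, unsigned long now)
{
        unsigned int mtu = rt->rt_pmtu;

        if (mtu && after_eq(now, rt->expires))
                mtu = 0;                /* the learned PMTU has expired */
        if (!mtu)
                mtu = rt->metric_mtu;
        if (mtu && rt->is_output)
                return mtu;
        mtu = rt->dev_mtu;              /* fall back to the device MTU */
        if (rt->mtu_locked && rt->has_gateway && mtu > 576)
                mtu = 576;
        return mtu;
}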
-static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
- struct fib_info *fi)
+static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
- struct inet_peer *peer;
- int create = 0;
+ struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ struct fib_nh_exception *fnhe;
+ u32 hval;
- /* If a peer entry exists for this destination, we must hook
- * it up in order to get at cached metrics.
- */
- if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
- create = 1;
+ if (!hash)
+ return NULL;
- rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
- if (peer) {
- rt->rt_peer_genid = rt_peer_genid();
- if (inet_metrics_new(peer))
- memcpy(peer->metrics, fi->fib_metrics,
- sizeof(u32) * RTAX_MAX);
- dst_init_metrics(&rt->dst, peer->metrics, false);
+ hval = fnhe_hashfun(daddr);
- check_peer_pmtu(&rt->dst, peer);
+ for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (fnhe->fnhe_daddr == daddr)
+ return fnhe;
+ }
+ return NULL;
+}
+
+static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+ __be32 daddr)
+{
+ bool ret = false;
+
+ spin_lock_bh(&fnhe_lock);
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- rt->rt_gateway = peer->redirect_learned.a4;
+ if (daddr == fnhe->fnhe_daddr) {
+ struct rtable *orig;
+
+ if (fnhe->fnhe_pmtu) {
+ unsigned long expires = fnhe->fnhe_expires;
+ unsigned long diff = expires - jiffies;
+
+ if (time_before(jiffies, expires)) {
+ rt->rt_pmtu = fnhe->fnhe_pmtu;
+ dst_set_expires(&rt->dst, diff);
+ }
+ }
+ if (fnhe->fnhe_gw) {
rt->rt_flags |= RTCF_REDIRECTED;
+ rt->rt_gateway = fnhe->fnhe_gw;
}
+
+ orig = rcu_dereference(fnhe->fnhe_rth);
+ rcu_assign_pointer(fnhe->fnhe_rth, rt);
+ if (orig)
+ rt_free(orig);
+
+ fnhe->fnhe_stamp = jiffies;
+ ret = true;
+ } else {
+ /* Routes we intend to cache in the nexthop exception have
+ * the DST_NOCACHE bit clear. However, if we are
+ * unsuccessful at storing this route into the cache,
+ * we really need to set it.
+ */
+ rt->dst.flags |= DST_NOCACHE;
+ }
+ spin_unlock_bh(&fnhe_lock);
+
+ return ret;
+}
+
+static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
+{
+ struct rtable *orig, *prev, **p;
+ bool ret = true;
+
+ if (rt_is_input_route(rt)) {
+ p = (struct rtable **)&nh->nh_rth_input;
+ } else {
+ if (!nh->nh_pcpu_rth_output)
+ goto nocache;
+ p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+ }
+ orig = *p;
+
+ prev = cmpxchg(p, orig, rt);
+ if (prev == orig) {
+ if (orig)
+ rt_free(orig);
} else {
- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
- rt->fi = fi;
- atomic_inc(&fi->fib_clntref);
+ /* Routes we intend to cache in the FIB nexthop have
+ * the DST_NOCACHE bit clear. However, if we are
+ * unsuccessful at storing this route into the cache,
+ * we really need to set it.
+ */
+nocache:
+ rt->dst.flags |= DST_NOCACHE;
+ ret = false;
+ }
+
+ return ret;
+}
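rt_cache_route() publishes into a single per-nexthop slot with compare-and-swap, so the loser of a race keeps its route uncached rather than corrupting the slot. A minimal C11 userspace model of that publish; free() stands in for the kernel's RCU-deferred rt_free(), and all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct route {
        int id;
};

static _Atomic(struct route *) cache_slot;

/* Returns 1 if rt was published, 0 if it lost a race and stays uncached. */
static int cache_route(struct route *rt)
{
        struct route *orig = atomic_load(&cache_slot);

        if (atomic_compare_exchange_strong(&cache_slot, &orig, rt)) {
                free(orig);             /* we displaced orig; release it */
                return 1;
        }
        return 0;                       /* DST_NOCACHE analogue: keep rt private */
}

int main(void)
{
        struct route *a = malloc(sizeof(*a));
        struct route *b = malloc(sizeof(*b));

        a->id = 1;
        b->id = 2;
        printf("cached a: %d\n", cache_route(a));
        printf("cached b: %d\n", cache_route(b)); /* replaces and frees a */
        free(atomic_load(&cache_slot));
        return 0;
}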
+
+static DEFINE_SPINLOCK(rt_uncached_lock);
+static LIST_HEAD(rt_uncached_list);
+
+static void rt_add_uncached_list(struct rtable *rt)
+{
+ spin_lock_bh(&rt_uncached_lock);
+ list_add_tail(&rt->rt_uncached, &rt_uncached_list);
+ spin_unlock_bh(&rt_uncached_lock);
+}
+
+static void ipv4_dst_destroy(struct dst_entry *dst)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (dst->flags & DST_NOCACHE) {
+ spin_lock_bh(&rt_uncached_lock);
+ list_del(&rt->rt_uncached);
+ spin_unlock_bh(&rt_uncached_lock);
+ }
+}
+
+void rt_flush_dev(struct net_device *dev)
+{
+ if (!list_empty(&rt_uncached_list)) {
+ struct net *net = dev_net(dev);
+ struct rtable *rt;
+
+ spin_lock_bh(&rt_uncached_lock);
+ list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
+ if (rt->dst.dev != dev)
+ continue;
+ rt->dst.dev = net->loopback_dev;
+ dev_hold(rt->dst.dev);
+ dev_put(dev);
}
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ spin_unlock_bh(&rt_uncached_lock);
}
}
-static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
+static bool rt_cache_valid(const struct rtable *rt)
+{
+ return rt &&
+ rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+ !rt_is_expired(rt);
+}
+
+static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
const struct fib_result *res,
+ struct fib_nh_exception *fnhe,
struct fib_info *fi, u16 type, u32 itag)
{
- struct dst_entry *dst = &rt->dst;
+ bool cached = false;
if (fi) {
- if (FIB_RES_GW(*res) &&
- FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- rt->rt_gateway = FIB_RES_GW(*res);
- rt_init_metrics(rt, fl4, fi);
+ struct fib_nh *nh = &FIB_RES_NH(*res);
+
+ if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
+ rt->rt_gateway = nh->nh_gw;
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
- dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
+ rt->dst.tclassid = nh->nh_tclassid;
#endif
+ if (unlikely(fnhe))
+ cached = rt_bind_exception(rt, fnhe, daddr);
+ else if (!(rt->dst.flags & DST_NOCACHE))
+ cached = rt_cache_route(nh, rt);
}
-
- if (dst_mtu(dst) > IP_MAX_MTU)
- dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
- if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
- dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
+ if (unlikely(!cached))
+ rt_add_uncached_list(rt);
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
- set_class_tag(rt, fib_rules_tclass(res));
+ set_class_tag(rt, res->tclassid);
#endif
set_class_tag(rt, itag);
#endif
}
static struct rtable *rt_dst_alloc(struct net_device *dev,
- bool nopolicy, bool noxfrm)
+ bool nopolicy, bool noxfrm, bool will_cache)
{
- return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
- DST_HOST |
+ return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+ (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
}
@@ -2010,9 +1344,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
- unsigned int hash;
struct rtable *rth;
- __be32 spec_dst;
struct in_device *in_dev = __in_dev_get_rcu(dev);
u32 itag = 0;
int err;
@@ -2023,21 +1355,24 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
- ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
+ skb->protocol != htons(ETH_P_IP))
goto e_inval;
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
+ if (ipv4_is_loopback(saddr))
+ goto e_inval;
+
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr))
goto e_inval;
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
- &itag);
+ err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+ in_dev, &itag);
if (err < 0)
goto e_err;
}
rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
if (!rth)
goto e_nobufs;
@@ -2046,23 +1381,14 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
rth->dst.output = ip_rt_bug;
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
- rth->rt_route_iif = dev->ifindex;
- rth->rt_iif = dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input= 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
if (our) {
rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
@@ -2074,9 +1400,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
RT_CACHE_STAT_INC(in_slow_mc);
- hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
- rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
- return IS_ERR(rth) ? PTR_ERR(rth) : 0;
+ skb_dst_set(skb, &rth->dst);
+ return 0;
e_nobufs:
return -ENOBUFS;
@@ -2116,14 +1441,13 @@ static void ip_handle_martian_source(struct net_device *dev,
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos,
- struct rtable **result)
+ __be32 daddr, __be32 saddr, u32 tos)
{
struct rtable *rth;
int err;
struct in_device *out_dev;
unsigned int flags = 0;
- __be32 spec_dst;
+ bool do_cache;
u32 itag;
/* get a working reference to the output device */
@@ -2135,7 +1459,7 @@ static int __mkroute_input(struct sk_buff *skb,
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
- in_dev->dev, &spec_dst, &itag);
+ in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
@@ -2143,9 +1467,6 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- if (err)
- flags |= RTCF_DIRECTSRC;
-
if (out_dev == in_dev && err &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
@@ -2166,38 +1487,41 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
+ do_cache = false;
+ if (res->fi) {
+ if (!itag) {
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ if (rt_cache_valid(rth)) {
+ skb_dst_set_noref(skb, &rth->dst);
+ goto out;
+ }
+ do_cache = true;
+ }
+ }
+
rth = rt_dst_alloc(out_dev->dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(out_dev, NOXFRM));
+ IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
- rth->rt_route_iif = in_dev->dev->ifindex;
- rth->rt_iif = in_dev->dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
- rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
-
- *result = rth;
+ rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
+ skb_dst_set(skb, &rth->dst);
+out:
err = 0;
cleanup:
return err;
@@ -2209,27 +1533,13 @@ static int ip_mkroute_input(struct sk_buff *skb,
struct in_device *in_dev,
__be32 daddr, __be32 saddr, u32 tos)
{
- struct rtable *rth = NULL;
- int err;
- unsigned int hash;
-
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && res->fi->fib_nhs > 1)
fib_select_multipath(res);
#endif
/* create a routing cache entry */
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
- if (err)
- return err;
-
- /* put it into the cache */
- hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
- rt_genid(dev_net(rth->dst.dev)));
- rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
- if (IS_ERR(rth))
- return PTR_ERR(rth);
- return 0;
+ return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
@@ -2252,10 +1562,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
unsigned int flags = 0;
u32 itag = 0;
struct rtable *rth;
- unsigned int hash;
- __be32 spec_dst;
int err = -EINVAL;
struct net *net = dev_net(dev);
+ bool do_cache;
/* IP on this device is disabled. */
@@ -2266,10 +1575,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
by fib_lookup.
*/
- if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
- ipv4_is_loopback(saddr))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
+ res.fi = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
@@ -2279,9 +1588,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (ipv4_is_zeronet(saddr))
goto martian_source;
- if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
+ if (ipv4_is_zeronet(daddr))
goto martian_destination;
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
+ if (ipv4_is_loopback(daddr))
+ goto martian_destination;
+
+ if (ipv4_is_loopback(saddr))
+ goto martian_source;
+ }
+
/*
 * Now we are ready to route the packet.
*/
@@ -2293,11 +1610,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res);
- if (err != 0) {
- if (!IN_DEV_FORWARD(in_dev))
- goto e_hostunreach;
+ if (err != 0)
goto no_route;
- }
RT_CACHE_STAT_INC(in_slow_tot);
@@ -2307,17 +1621,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res.type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
net->loopback_dev->ifindex,
- dev, &spec_dst, &itag);
+ dev, in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
- if (err)
- flags |= RTCF_DIRECTSRC;
- spec_dst = daddr;
goto local_input;
}
if (!IN_DEV_FORWARD(in_dev))
- goto e_hostunreach;
+ goto no_route;
if (res.type != RTN_UNICAST)
goto martian_destination;
@@ -2328,23 +1639,32 @@ brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ipv4_is_zeronet(saddr))
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
- else {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
- &itag);
+ if (!ipv4_is_zeronet(saddr)) {
+ err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+ in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
- if (err)
- flags |= RTCF_DIRECTSRC;
}
flags |= RTCF_BROADCAST;
res.type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
+ do_cache = false;
+ if (res.fi) {
+ if (!itag) {
+ rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
+ if (rt_cache_valid(rth)) {
+ skb_dst_set_noref(skb, &rth->dst);
+ err = 0;
+ goto out;
+ }
+ do_cache = true;
+ }
+ }
+
rth = rt_dst_alloc(net->loopback_dev,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
@@ -2354,41 +1674,27 @@ local_input:
rth->dst.tclassid = itag;
#endif
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- rth->dst.tclassid = itag;
-#endif
- rth->rt_route_iif = dev->ifindex;
- rth->rt_iif = dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
- hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
- rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
+ if (do_cache)
+ rt_cache_route(&FIB_RES_NH(res), rth);
+ skb_dst_set(skb, &rth->dst);
err = 0;
- if (IS_ERR(rth))
- err = PTR_ERR(rth);
goto out;
no_route:
RT_CACHE_STAT_INC(in_no_route);
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
if (err == -ESRCH)
err = -ENETUNREACH;
@@ -2405,10 +1711,6 @@ martian_destination:
&daddr, &saddr, dev->name);
#endif
-e_hostunreach:
- err = -EHOSTUNREACH;
- goto out;
-
e_inval:
err = -EINVAL;
goto out;
@@ -2424,50 +1726,13 @@ martian_source_keep_err:
goto out;
}
-int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, bool noref)
+int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev)
{
- struct rtable *rth;
- unsigned int hash;
- int iif = dev->ifindex;
- struct net *net;
int res;
- net = dev_net(dev);
-
rcu_read_lock();
- if (!rt_caching(net))
- goto skip_cache;
-
- tos &= IPTOS_RT_MASK;
- hash = rt_hash(daddr, saddr, iif, rt_genid(net));
-
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->dst.rt_next)) {
- if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
- ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
- (rth->rt_route_iif ^ iif) |
- (rth->rt_key_tos ^ tos)) == 0 &&
- rth->rt_mark == skb->mark &&
- net_eq(dev_net(rth->dst.dev), net) &&
- !rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
- if (noref) {
- dst_use_noref(&rth->dst, jiffies);
- skb_dst_set_noref(skb, &rth->dst);
- } else {
- dst_use(&rth->dst, jiffies);
- skb_dst_set(skb, &rth->dst);
- }
- RT_CACHE_STAT_INC(in_hit);
- rcu_read_unlock();
- return 0;
- }
- RT_CACHE_STAT_INC(in_hlist_search);
- }
-
-skip_cache:
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
 hardware multicast filters :-( As a result, the host on multicasting
@@ -2505,24 +1770,28 @@ skip_cache:
rcu_read_unlock();
return res;
}
-EXPORT_SYMBOL(ip_route_input_common);
+EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
- const struct flowi4 *fl4,
- __be32 orig_daddr, __be32 orig_saddr,
- int orig_oif, __u8 orig_rtos,
+ const struct flowi4 *fl4, int orig_oif,
struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
+ struct fib_nh_exception *fnhe;
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
- if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ in_dev = __in_dev_get_rcu(dev_out);
+ if (!in_dev)
return ERR_PTR(-EINVAL);
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
+ if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ return ERR_PTR(-EINVAL);
+
if (ipv4_is_lbcast(fl4->daddr))
type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl4->daddr))
@@ -2533,10 +1802,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- in_dev = __in_dev_get_rcu(dev_out);
- if (!in_dev)
- return ERR_PTR(-EINVAL);
-
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
@@ -2553,40 +1818,44 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
fi = NULL;
}
+ fnhe = NULL;
+ if (fi) {
+ struct rtable __rcu **prth;
+
+ fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
+ if (fnhe)
+ prth = &fnhe->fnhe_rth;
+ else
+ prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
+ rth = rcu_dereference(*prth);
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ return rth;
+ }
+ }
rth = rt_dst_alloc(dev_out,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(in_dev, NOXFRM));
+ IN_DEV_CONF_GET(in_dev, NOXFRM),
+ fi);
if (!rth)
return ERR_PTR(-ENOBUFS);
rth->dst.output = ip_output;
- rth->rt_key_dst = orig_daddr;
- rth->rt_key_src = orig_saddr;
rth->rt_genid = rt_genid(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
- rth->rt_key_tos = orig_rtos;
- rth->rt_dst = fl4->daddr;
- rth->rt_src = fl4->saddr;
- rth->rt_route_iif = 0;
- rth->rt_iif = orig_oif ? : dev_out->ifindex;
- rth->rt_oif = orig_oif;
- rth->rt_mark = fl4->flowi4_mark;
- rth->rt_gateway = fl4->daddr;
- rth->rt_spec_dst= fl4->saddr;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 0;
+ rth->rt_iif = orig_oif ? : 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
+ INIT_LIST_HEAD(&rth->rt_uncached);
RT_CACHE_STAT_INC(out_slow_tot);
- if (flags & RTCF_LOCAL) {
+ if (flags & RTCF_LOCAL)
rth->dst.input = ip_local_deliver;
- rth->rt_spec_dst = fl4->daddr;
- }
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
- rth->rt_spec_dst = fl4->saddr;
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->dst.output = ip_mc_output;
@@ -2603,34 +1872,28 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
#endif
}
- rt_set_nexthop(rth, fl4, res, fi, type, 0);
+ rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
return rth;
}
/*
* Major route resolver routine.
- * called with rcu_read_lock();
*/
-static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
+struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
__u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- __be32 orig_daddr;
- __be32 orig_saddr;
int orig_oif;
+ res.tclassid = 0;
res.fi = NULL;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
+ res.table = NULL;
- orig_daddr = fl4->daddr;
- orig_saddr = fl4->saddr;
orig_oif = fl4->flowi4_oif;
fl4->flowi4_iif = net->loopback_dev->ifindex;
@@ -2730,6 +1993,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
if (fib_lookup(net, fl4, &res)) {
res.fi = NULL;
+ res.table = NULL;
if (fl4->flowi4_oif) {
 /* Apparently, routing tables are wrong. Assume
 that the destination is on link.
@@ -2791,60 +2055,12 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
make_route:
- rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
- tos, dev_out, flags);
- if (!IS_ERR(rth)) {
- unsigned int hash;
-
- hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
- rt_genid(dev_net(dev_out)));
- rth = rt_intern_hash(hash, rth, NULL, orig_oif);
- }
+ rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
out:
rcu_read_unlock();
return rth;
}
-
-struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
-{
- struct rtable *rth;
- unsigned int hash;
-
- if (!rt_caching(net))
- goto slow_output;
-
- hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
-
- rcu_read_lock_bh();
- for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference_bh(rth->dst.rt_next)) {
- if (rth->rt_key_dst == flp4->daddr &&
- rth->rt_key_src == flp4->saddr &&
- rt_is_output_route(rth) &&
- rth->rt_oif == flp4->flowi4_oif &&
- rth->rt_mark == flp4->flowi4_mark &&
- !((rth->rt_key_tos ^ flp4->flowi4_tos) &
- (IPTOS_RT_MASK | RTO_ONLINK)) &&
- net_eq(dev_net(rth->dst.dev), net) &&
- !rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
- dst_use(&rth->dst, jiffies);
- RT_CACHE_STAT_INC(out_hit);
- rcu_read_unlock_bh();
- if (!flp4->saddr)
- flp4->saddr = rth->rt_src;
- if (!flp4->daddr)
- flp4->daddr = rth->rt_dst;
- return rth;
- }
- RT_CACHE_STAT_INC(out_hlist_search);
- }
- rcu_read_unlock_bh();
-
-slow_output:
- return ip_route_output_slow(net, flp4);
-}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
@@ -2859,7 +2075,13 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -2872,53 +2094,42 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
- .destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
.mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
+ .redirect = ipv4_rt_blackhole_redirect,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
.neigh_lookup = ipv4_neigh_lookup,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
struct rtable *ort = (struct rtable *) dst_orig;
+ struct rtable *rt;
+ rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- dst_copy_metrics(new, &ort->dst);
new->dev = ort->dst.dev;
if (new->dev)
dev_hold(new->dev);
- rt->rt_key_dst = ort->rt_key_dst;
- rt->rt_key_src = ort->rt_key_src;
- rt->rt_key_tos = ort->rt_key_tos;
- rt->rt_route_iif = ort->rt_route_iif;
+ rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
- rt->rt_oif = ort->rt_oif;
- rt->rt_mark = ort->rt_mark;
+ rt->rt_pmtu = ort->rt_pmtu;
rt->rt_genid = rt_genid(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
- rt->rt_dst = ort->rt_dst;
- rt->rt_src = ort->rt_src;
rt->rt_gateway = ort->rt_gateway;
- rt->rt_spec_dst = ort->rt_spec_dst;
- rt->peer = ort->peer;
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
- rt->fi = ort->fi;
- if (rt->fi)
- atomic_inc(&rt->fi->fib_clntref);
+
+ INIT_LIST_HEAD(&rt->rt_uncached);
dst_free(new);
}
@@ -2945,16 +2156,16 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
-static int rt_fill_info(struct net *net,
- struct sk_buff *skb, u32 pid, u32 seq, int event,
- int nowait, unsigned int flags)
+static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
+ u32 seq, int event, int nowait, unsigned int flags)
{
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
- const struct inet_peer *peer = rt->peer;
- u32 id = 0, ts = 0, tsage = 0, error;
+ u32 error;
+ u32 metrics[RTAX_MAX];
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
@@ -2964,7 +2175,7 @@ static int rt_fill_info(struct net *net,
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
- r->rtm_tos = rt->rt_key_tos;
+ r->rtm_tos = fl4->flowi4_tos;
r->rtm_table = RT_TABLE_MAIN;
if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
goto nla_put_failure;
@@ -2975,11 +2186,11 @@ static int rt_fill_info(struct net *net,
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+ if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
- if (rt->rt_key_src) {
+ if (src) {
r->rtm_src_len = 32;
- if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+ if (nla_put_be32(skb, RTA_SRC, src))
goto nla_put_failure;
}
if (rt->dst.dev &&
@@ -2990,69 +2201,40 @@ static int rt_fill_info(struct net *net,
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
goto nla_put_failure;
#endif
- if (rt_is_input_route(rt)) {
- if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
- goto nla_put_failure;
- } else if (rt->rt_src != rt->rt_key_src) {
- if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+ if (!rt_is_input_route(rt) &&
+ fl4->saddr != src) {
+ if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
- if (rt->rt_dst != rt->rt_gateway &&
+ if (rt->rt_gateway &&
nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
goto nla_put_failure;
- if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
+ memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+ if (rt->rt_pmtu)
+ metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+ if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
- if (rt->rt_mark &&
- nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+ if (fl4->flowi4_mark &&
+ nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
error = rt->dst.error;
- if (peer) {
- inet_peer_refcheck(rt->peer);
- id = atomic_read(&peer->ip_id_count) & 0xffff;
- if (peer->tcp_ts_stamp) {
- ts = peer->tcp_ts;
- tsage = get_seconds() - peer->tcp_ts_stamp;
- }
- expires = ACCESS_ONCE(peer->pmtu_expires);
- if (expires) {
- if (time_before(jiffies, expires))
- expires -= jiffies;
- else
- expires = 0;
- }
+ expires = rt->dst.expires;
+ if (expires) {
+ if (time_before(jiffies, expires))
+ expires -= jiffies;
+ else
+ expires = 0;
}
if (rt_is_input_route(rt)) {
-#ifdef CONFIG_IP_MROUTE
- __be32 dst = rt->rt_dst;
-
- if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
- int err = ipmr_get_route(net, skb,
- rt->rt_src, rt->rt_dst,
- r, nowait);
- if (err <= 0) {
- if (!nowait) {
- if (err == 0)
- return 0;
- goto nla_put_failure;
- } else {
- if (err == -EMSGSIZE)
- goto nla_put_failure;
- error = err;
- }
- }
- } else
-#endif
- if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
- goto nla_put_failure;
+ if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+ goto nla_put_failure;
}
- if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
- expires, error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -3068,6 +2250,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
+ struct flowi4 fl4;
__be32 dst = 0;
__be32 src = 0;
u32 iif;
@@ -3102,6 +2285,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = dst;
+ fl4.saddr = src;
+ fl4.flowi4_tos = rtm->rtm_tos;
+ fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
+ fl4.flowi4_mark = mark;
+
if (iif) {
struct net_device *dev;
@@ -3122,13 +2312,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
- struct flowi4 fl4 = {
- .daddr = dst,
- .saddr = src,
- .flowi4_tos = rtm->rtm_tos,
- .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
- .flowi4_mark = mark,
- };
rt = ip_route_output_key(net, &fl4);
err = 0;
@@ -3143,7 +2326,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
- err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+ err = rt_fill_info(net, dst, src, &fl4, skb,
+ NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
RTM_NEWROUTE, 0, 0);
if (err <= 0)
goto errout_free;
@@ -3159,43 +2343,6 @@ errout_free:
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rtable *rt;
- int h, s_h;
- int idx, s_idx;
- struct net *net;
-
- net = sock_net(skb->sk);
-
- s_h = cb->args[0];
- if (s_h < 0)
- s_h = 0;
- s_idx = idx = cb->args[1];
- for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
- if (!rt_hash_table[h].chain)
- continue;
- rcu_read_lock_bh();
- for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
- if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
- continue;
- if (rt_is_expired(rt))
- continue;
- skb_dst_set_noref(skb, &rt->dst);
- if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
- 1, NLM_F_MULTI) <= 0) {
- skb_dst_drop(skb);
- rcu_read_unlock_bh();
- goto done;
- }
- skb_dst_drop(skb);
- }
- rcu_read_unlock_bh();
- }
-
-done:
- cb->args[0] = h;
- cb->args[1] = idx;
return skb->len;
}
@@ -3400,26 +2547,34 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
.init = rt_genid_init,
};
+static int __net_init ipv4_inetpeer_init(struct net *net)
+{
+ struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
-#ifdef CONFIG_IP_ROUTE_CLASSID
-struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_IP_ROUTE_CLASSID */
+ if (!bp)
+ return -ENOMEM;
+ inet_peer_base_init(bp);
+ net->ipv4.peers = bp;
+ return 0;
+}
-static __initdata unsigned long rhash_entries;
-static int __init set_rhash_entries(char *str)
+static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
- ssize_t ret;
+ struct inet_peer_base *bp = net->ipv4.peers;
- if (!str)
- return 0;
+ net->ipv4.peers = NULL;
+ inetpeer_invalidate_tree(bp);
+ kfree(bp);
+}
- ret = kstrtoul(str, 0, &rhash_entries);
- if (ret)
- return 0;
+static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
+ .init = ipv4_inetpeer_init,
+ .exit = ipv4_inetpeer_exit,
+};
- return 1;
-}
-__setup("rhash_entries=", set_rhash_entries);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
+#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
@@ -3443,31 +2598,12 @@ int __init ip_rt_init(void)
if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
- rt_hash_table = (struct rt_hash_bucket *)
- alloc_large_system_hash("IP route cache",
- sizeof(struct rt_hash_bucket),
- rhash_entries,
- (totalram_pages >= 128 * 1024) ?
- 15 : 17,
- 0,
- &rt_hash_log,
- &rt_hash_mask,
- 0,
- rhash_entries ? 0 : 512 * 1024);
- memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
- rt_hash_lock_init();
-
- ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
- ip_rt_max_size = (rt_hash_mask + 1) * 16;
+ ipv4_dst_ops.gc_thresh = ~0;
+ ip_rt_max_size = INT_MAX;
devinet_init();
ip_fib_init();
- INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
- expires_ljiffies = jiffies;
- schedule_delayed_work(&expires_work,
- net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
if (ip_rt_proc_init())
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
@@ -3480,6 +2616,7 @@ int __init ip_rt_init(void)
register_pernet_subsys(&sysctl_route_ops);
#endif
register_pernet_subsys(&rt_genid_ops);
+ register_pernet_subsys(&ipv4_inetpeer_ops);
return rc;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index eab2a7fb15d..650e1528e1e 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -293,7 +293,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ef32956ed65..1b5ce96707a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -184,7 +184,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
int ret;
unsigned long vec[3];
struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
struct mem_cgroup *memcg;
#endif
@@ -203,7 +203,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
if (ret)
return ret;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
@@ -301,6 +301,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "ip_early_demux",
+ .data = &sysctl_ip_early_demux,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "ip_dynaddr",
.data = &sysctl_ip_dynaddr,
.maxlen = sizeof(int),
@@ -360,6 +367,13 @@ static struct ctl_table ipv4_table[] = {
},
#endif
{
+ .procname = "tcp_fastopen",
+ .data = &sysctl_tcp_fastopen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_tw_recycle",
.data = &tcp_death_row.sysctl_tw_recycle,
.maxlen = sizeof(int),
@@ -591,6 +605,20 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "tcp_limit_output_bytes",
+ .data = &sysctl_tcp_limit_output_bytes,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_challenge_ack_limit",
+ .data = &sysctl_tcp_challenge_ack_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
#ifdef CONFIG_NET_DMA
{
.procname = "tcp_dma_copybreak",
@@ -756,13 +784,6 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "rt_cache_rebuild_count",
- .data = &init_net.ipv4.sysctl_rt_cache_rebuild_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "ping_group_range",
.data = &init_net.ipv4.sysctl_ping_group_range,
.maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
@@ -801,8 +822,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
table[5].data =
&net->ipv4.sysctl_icmp_ratemask;
table[6].data =
- &net->ipv4.sysctl_rt_cache_rebuild_count;
- table[7].data =
&net->ipv4.sysctl_ping_group_range;
}
@@ -814,8 +833,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
net->ipv4.sysctl_ping_group_range[0] = 1;
net->ipv4.sysctl_ping_group_range[1] = 0;
- net->ipv4.sysctl_rt_cache_rebuild_count = 4;
-
tcp_init_mem(net);
net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ba605f60e4..e7e6eeae49c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -270,6 +270,7 @@
#include <linux/slab.h>
#include <net/icmp.h>
+#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
@@ -376,6 +377,7 @@ void tcp_init_sock(struct sock *sk)
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
+ INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
@@ -796,6 +798,10 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
inet_csk(sk)->icsk_ext_hdr_len -
tp->tcp_header_len);
+ /* TSQ: try to have two TSO segments in flight */
+ xmit_size_goal = min_t(u32, xmit_size_goal,
+ sysctl_tcp_limit_output_bytes >> 1);
+
xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
/* We try hard to avoid divides here */
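Aside: the cap added above ties the per-write size goal to the TSQ budget — half of
tcp_limit_output_bytes, so two maximally sized sends can be in flight under the limit.
A minimal standalone sketch of the arithmetic, assuming the 131072-byte default for
tcp_limit_output_bytes (the default is set elsewhere in this series, not in this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed default of sysctl_tcp_limit_output_bytes (131072). */
	uint32_t limit_output_bytes = 131072;
	/* Hypothetical pre-cap goal computed from mss and window. */
	uint32_t xmit_size_goal = 262144;

	if (xmit_size_goal > (limit_output_bytes >> 1))
		xmit_size_goal = limit_output_bytes >> 1;

	/* Prints 65536: two such sends fit inside the TSQ budget. */
	printf("capped xmit_size_goal: %u\n", xmit_size_goal);
	return 0;
}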
@@ -977,26 +983,67 @@ static inline int select_size(const struct sock *sk, bool sg)
return tmp;
}
+void tcp_free_fastopen_req(struct tcp_sock *tp)
+{
+ if (tp->fastopen_req != NULL) {
+ kfree(tp->fastopen_req);
+ tp->fastopen_req = NULL;
+ }
+}
+
+static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int err, flags;
+
+ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+ return -EOPNOTSUPP;
+ if (tp->fastopen_req != NULL)
+ return -EALREADY; /* Another Fast Open is in progress */
+
+ tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
+ sk->sk_allocation);
+ if (unlikely(tp->fastopen_req == NULL))
+ return -ENOBUFS;
+ tp->fastopen_req->data = msg;
+
+ flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
+ err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ msg->msg_namelen, flags);
+ *size = tp->fastopen_req->copied;
+ tcp_free_fastopen_req(tp);
+ return err;
+}
+
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int iovlen, flags, err, copied;
- int mss_now = 0, size_goal;
+ int iovlen, flags, err, copied = 0;
+ int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
bool sg;
long timeo;
lock_sock(sk);
flags = msg->msg_flags;
+ if (flags & MSG_FASTOPEN) {
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
+ if (err == -EINPROGRESS && copied_syn > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ offset = copied_syn;
+ }
+
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
- goto out_err;
+ goto do_error;
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -1032,6 +1079,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
unsigned char __user *from = iov->iov_base;
iov++;
+ if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
+ if (offset >= seglen) {
+ offset -= seglen;
+ continue;
+ }
+ seglen -= offset;
+ from += offset;
+ offset = 0;
+ }
while (seglen > 0) {
int copy = 0;
@@ -1194,7 +1250,7 @@ out:
if (copied && likely(!tp->repair))
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
- return copied;
+ return copied + copied_syn;
do_fault:
if (!skb->len) {
@@ -1207,7 +1263,7 @@ do_fault:
}
do_error:
- if (copied)
+ if (copied + copied_syn)
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
@@ -2625,7 +2681,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
/* Cap the max timeout in ms TCP will retry/retrans
* before giving up and aborting (ETIMEDOUT) a connection.
*/
- icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ if (val < 0)
+ err = -EINVAL;
+ else
+ icsk->icsk_user_timeout = msecs_to_jiffies(val);
break;
default:
err = -ENOPROTOOPT;
@@ -3310,8 +3369,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
-/**
- * Each Responder maintains up to two secret values concurrently for
+/* Each Responder maintains up to two secret values concurrently for
* efficient secret rollover. Each secret value has 4 states:
*
* Generating. (tcp_secret_generating != tcp_secret_primary)
@@ -3563,6 +3621,8 @@ void __init tcp_init(void)
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
+ tcp_metrics_init();
+
tcp_register_congestion_control(&tcp_reno);
memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
@@ -3573,4 +3633,5 @@ void __init tcp_init(void)
tcp_secret_primary = &tcp_secret_one;
tcp_secret_retiring = &tcp_secret_two;
tcp_secret_secondary = &tcp_secret_two;
+ tcp_tasklet_init();
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 04dbd7ae7c6..4d4db16e336 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
void tcp_slow_start(struct tcp_sock *tp)
{
int cnt; /* increase in packets */
+ unsigned int delta = 0;
/* RFC3465: ABC Slow start
* Increase only after a full MSS of bytes is acked
@@ -333,9 +334,9 @@ void tcp_slow_start(struct tcp_sock *tp)
tp->snd_cwnd_cnt += cnt;
while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
tp->snd_cwnd_cnt -= tp->snd_cwnd;
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
+ delta++;
}
+ tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
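Aside: the rewrite above defers the clamp — the loop only counts how many window
increments are due, and a single min() against snd_cwnd_clamp is applied afterwards,
so repeated iterations can no longer step snd_cwnd past the clamp. A small sketch of
the same pattern with made-up values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical values; cnt has already been added to snd_cwnd_cnt. */
	unsigned int snd_cwnd = 10, snd_cwnd_clamp = 11;
	unsigned int snd_cwnd_cnt = 25, delta = 0;

	while (snd_cwnd_cnt >= snd_cwnd) {
		snd_cwnd_cnt -= snd_cwnd;
		delta++;			/* two increments are due here */
	}
	/* One clamp at the end instead of a test per iteration. */
	if (snd_cwnd + delta < snd_cwnd_clamp)
		snd_cwnd += delta;
	else
		snd_cwnd = snd_cwnd_clamp;

	printf("snd_cwnd: %u\n", snd_cwnd);	/* 11, not 12 */
	return 0;
}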
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
new file mode 100644
index 00000000000..a7f729c409d
--- /dev/null
+++ b/net/ipv4/tcp_fastopen.c
@@ -0,0 +1,11 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+int sysctl_tcp_fastopen;
+
+static int __init tcp_fastopen_init(void)
+{
+ return 0;
+}
+
+late_initcall(tcp_fastopen_init);
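Aside: with the MSG_FASTOPEN branch added to tcp_sendmsg() earlier in this series, a
client can carry data in the SYN via sendto() on an unconnected socket, provided the
tcp_fastopen sysctl has the client bit (TFO_CLIENT_ENABLE, assumed to be bit 0) set.
A hedged userspace sketch; the MSG_FASTOPEN value is assumed here for headers that
predate the flag:

#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000	/* assumed value; not in older headers */
#endif

/* Hypothetical helper: open a TCP Fast Open connection to dst,
 * sending buf in the SYN when a cached cookie is available. */
static int tfo_connect_send(const struct sockaddr_in *dst,
			    const void *buf, size_t len)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* sendto() on an unconnected socket drives the new
	 * tcp_sendmsg_fastopen() path: connect plus data. */
	if (sendto(fd, buf, len, MSG_FASTOPEN,
		   (const struct sockaddr *)dst, sizeof(*dst)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}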
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8..2fd2bc9e3c6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+/* rfc5961 challenge ack rate limiting */
+int sysctl_tcp_challenge_ack_limit = 100;
+
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
int sysctl_tcp_thin_dupack __read_mostly;
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
tcp_bound_rto(sk);
}
-/* Save metrics learned by this TCP session.
- This function is called only, when TCP finishes successfully
- i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
-
- if (sysctl_tcp_nometrics_save)
- return;
-
- dst_confirm(dst);
-
- if (dst && (dst->flags & DST_HOST)) {
- const struct inet_connection_sock *icsk = inet_csk(sk);
- int m;
- unsigned long rtt;
-
- if (icsk->icsk_backoff || !tp->srtt) {
- /* This session failed to estimate rtt. Why?
- * Probably, no packets returned in time.
- * Reset our results.
- */
- if (!(dst_metric_locked(dst, RTAX_RTT)))
- dst_metric_set(dst, RTAX_RTT, 0);
- return;
- }
-
- rtt = dst_metric_rtt(dst, RTAX_RTT);
- m = rtt - tp->srtt;
-
- /* If newly calculated rtt larger than stored one,
- * store new one. Otherwise, use EWMA. Remember,
- * rtt overestimation is always better than underestimation.
- */
- if (!(dst_metric_locked(dst, RTAX_RTT))) {
- if (m <= 0)
- set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
- else
- set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
- }
-
- if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
- unsigned long var;
- if (m < 0)
- m = -m;
-
- /* Scale deviation to rttvar fixed point */
- m >>= 1;
- if (m < tp->mdev)
- m = tp->mdev;
-
- var = dst_metric_rtt(dst, RTAX_RTTVAR);
- if (m >= var)
- var = m;
- else
- var -= (var - m) >> 2;
-
- set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
- }
-
- if (tcp_in_initial_slowstart(tp)) {
- /* Slow start still did not finish. */
- if (dst_metric(dst, RTAX_SSTHRESH) &&
- !dst_metric_locked(dst, RTAX_SSTHRESH) &&
- (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
- if (!dst_metric_locked(dst, RTAX_CWND) &&
- tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
- } else if (tp->snd_cwnd > tp->snd_ssthresh &&
- icsk->icsk_ca_state == TCP_CA_Open) {
- /* Cong. avoidance phase, cwnd is reliable. */
- if (!dst_metric_locked(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH,
- max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
- if (!dst_metric_locked(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND,
- (dst_metric(dst, RTAX_CWND) +
- tp->snd_cwnd) >> 1);
- } else {
- /* Else slow start did not finish, cwnd is non-sense,
- ssthresh may be also invalid.
- */
- if (!dst_metric_locked(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND,
- (dst_metric(dst, RTAX_CWND) +
- tp->snd_ssthresh) >> 1);
- if (dst_metric(dst, RTAX_SSTHRESH) &&
- !dst_metric_locked(dst, RTAX_SSTHRESH) &&
- tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
- }
-
- if (!dst_metric_locked(dst, RTAX_REORDERING)) {
- if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
- tp->reordering != sysctl_tcp_reordering)
- dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
- }
- }
-}
-
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
* Packet counting of FACK is based on in-order assumptions, therefore TCP
* disables it when reordering is detected
*/
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
{
/* RFC3517 uses different metric in lost marker => reset on change */
if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
-
- if (dst == NULL)
- goto reset;
-
- dst_confirm(dst);
-
- if (dst_metric_locked(dst, RTAX_CWND))
- tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
- if (dst_metric(dst, RTAX_SSTHRESH)) {
- tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
- if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
- tp->snd_ssthresh = tp->snd_cwnd_clamp;
- } else {
- /* ssthresh may have been reduced unnecessarily during.
- * 3WHS. Restore it back to its initial default.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- }
- if (dst_metric(dst, RTAX_REORDERING) &&
- tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
- tcp_disable_fack(tp);
- tcp_disable_early_retrans(tp);
- tp->reordering = dst_metric(dst, RTAX_REORDERING);
- }
-
- if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
- goto reset;
-
- /* Initial rtt is determined from SYN,SYN-ACK.
- * The segment is small and rtt may appear much
- * less than real one. Use per-dst memory
- * to make it more realistic.
- *
- * A bit of theory. RTT is time passed after "normal" sized packet
- * is sent until it is ACKed. In normal circumstances sending small
- * packets force peer to delay ACKs and calculation is correct too.
- * The algorithm is adaptive and, provided we follow specs, it
- * NEVER underestimate RTT. BUT! If peer tries to make some clever
- * tricks sort of "quick acks" for time long enough to decrease RTT
- * to low value, and then abruptly stops to do it and starts to delay
- * ACKs, wait for troubles.
- */
- if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
- tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
- tp->rtt_seq = tp->snd_nxt;
- }
- if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
- tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
- tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
- }
- tcp_set_rto(sk);
-reset:
- if (tp->srtt == 0) {
- /* RFC6298: 5.7 We've failed to get a valid RTT sample from
- * 3WHS. This is most likely due to retransmission,
- * including spurious one. Reset the RTO back to 3secs
- * from the more aggressive 1sec to avoid more spurious
- * retransmission.
- */
- tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
- inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
- }
- /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
- * retransmitted. In light of RFC6298 more aggressive 1sec
- * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
- * retransmission has occurred.
- */
- if (tp->total_retrans > 1)
- tp->snd_cwnd = 1;
- else
- tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
@@ -2702,7 +2521,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
-static inline int tcp_packet_delayed(const struct tcp_sock *tp)
+static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2763,7 +2582,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static inline int tcp_may_undo(const struct tcp_sock *tp)
+static inline bool tcp_may_undo(const struct tcp_sock *tp)
{
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
@@ -3552,13 +3371,13 @@ static void tcp_ack_probe(struct sock *sk)
}
}
-static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
+static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
-static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
const struct tcp_sock *tp = tcp_sk(sk);
return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
@@ -3568,7 +3387,7 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
/* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next.
*/
-static inline int tcp_may_update_window(const struct tcp_sock *tp,
+static inline bool tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_cong_avoid(sk, ack, prior_in_flight);
}
- if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
- dst_confirm(__sk_dst_get(sk));
-
+ if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+ struct dst_entry *dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
+ }
return 1;
no_queue:
@@ -3911,7 +3732,8 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
- const u8 **hvpp, int estab)
+ const u8 **hvpp, int estab,
+ struct tcp_fastopen_cookie *foc)
{
const unsigned char *ptr;
const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
break;
}
break;
- }
+ case TCPOPT_EXP:
+ /* Fast Open option shares code 254 using a
+ * 16-bit magic number. It's valid only in
+ * SYN or SYN-ACK with an even size.
+ */
+ if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
+ get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
+ foc == NULL || !th->syn || (opsize & 1))
+ break;
+ foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
+ if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
+ foc->len <= TCP_FASTOPEN_COOKIE_MAX)
+ memcpy(foc->val, ptr + 2, foc->len);
+ else if (foc->len != 0)
+ foc->len = -1;
+ break;
+
+ }
ptr += opsize-2;
length -= opsize;
}
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
if (tcp_parse_aligned_timestamp(tp, th))
return true;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
return true;
}
@@ -4167,7 +4006,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}
-static inline int tcp_paws_discard(const struct sock *sk,
+static inline bool tcp_paws_discard(const struct sock *sk,
const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -4189,7 +4028,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
* (borrowed from freebsd)
*/
-static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
return !before(end_seq, tp->rcv_wup) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4512,19 +4351,20 @@ static void tcp_ofo_queue(struct sock *sk)
static bool tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);
-static int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
+static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
+ unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
- !sk_rmem_schedule(sk, size)) {
+ !sk_rmem_schedule(sk, skb, size)) {
if (tcp_prune_queue(sk) < 0)
return -1;
- if (!sk_rmem_schedule(sk, size)) {
+ if (!sk_rmem_schedule(sk, skb, size)) {
if (!tcp_prune_ofo_queue(sk))
return -1;
- if (!sk_rmem_schedule(sk, size))
+ if (!sk_rmem_schedule(sk, skb, size))
return -1;
}
}
@@ -4579,8 +4419,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
TCP_ECN_check_ce(tp, skb);
- if (tcp_try_rmem_schedule(sk, skb->truesize)) {
- /* TODO: should increment a counter */
+ if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
__kfree_skb(skb);
return;
}
@@ -4589,6 +4429,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tp->pred_flags = 0;
inet_csk_schedule_ack(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -4642,6 +4483,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4522,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
__skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb1);
}
@@ -4710,17 +4553,17 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
struct tcphdr *th;
bool fragstolen;
- if (tcp_try_rmem_schedule(sk, size + sizeof(*th)))
- goto err;
-
skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
if (!skb)
goto err;
+ if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+ goto err_free;
+
th = (struct tcphdr *)skb_put(skb, sizeof(*th));
skb_reset_transport_header(skb);
memset(th, 0, sizeof(*th));
@@ -4791,7 +4634,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
- tcp_try_rmem_schedule(sk, skb->truesize))
+ tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto drop;
eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
@@ -5372,7 +5215,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk,
return result;
}
-static inline int tcp_checksum_complete_user(struct sock *sk,
+static inline bool tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
@@ -5426,11 +5269,28 @@ out:
}
#endif /* CONFIG_NET_DMA */
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+ /* unprotected vars; we don't care about overwrites */
+ static u32 challenge_timestamp;
+ static unsigned int challenge_count;
+ u32 now = jiffies / HZ;
+
+ if (now != challenge_timestamp) {
+ challenge_timestamp = now;
+ challenge_count = 0;
+ }
+ if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ tcp_send_ack(sk);
+ }
+}
+
/* Does PAWS and seqno based validation of an incoming segment, flags will
* play significant role here.
*/
-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, int syn_inerr)
+static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, int syn_inerr)
{
const u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
@@ -5455,14 +5315,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* an acknowledgment should be sent in reply (unless the RST
* bit is set, if so drop the segment and return)".
*/
- if (!th->rst)
+ if (!th->rst) {
+ if (th->syn)
+ goto syn_challenge;
tcp_send_dupack(sk, skb);
+ }
goto discard;
}
/* Step 2: check RST bit */
if (th->rst) {
- tcp_reset(sk);
+ /* RFC 5961 3.2:
+ * If sequence number exactly matches RCV.NXT, then
+ * RESET the connection
+ * else
+ * Send a challenge ACK
+ */
+ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+ tcp_reset(sk);
+ else
+ tcp_send_challenge_ack(sk);
goto discard;
}
@@ -5473,20 +5345,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
/* step 3: check security and precedence [ignored] */
- /* step 4: Check for a SYN in window. */
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+ /* step 4: Check for a SYN
+ * RFC 5961 4.2: Send a challenge ack
+ */
+ if (th->syn) {
+syn_challenge:
if (syn_inerr)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
- tcp_reset(sk);
- return -1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+ tcp_send_challenge_ack(sk);
+ goto discard;
}
- return 1;
+ return true;
discard:
__kfree_skb(skb);
- return 0;
+ return false;
}
/*
@@ -5516,7 +5391,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
- int res;
/*
* Header prediction.
@@ -5602,7 +5476,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ if (tp->ucopy.task == current &&
+ sock_owned_by_user(sk) &&
+ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
eaten = 1;
}
@@ -5693,9 +5569,8 @@ slow_path:
* Standard slow path.
*/
- res = tcp_validate_incoming(sk, skb, th, 1);
- if (res <= 0)
- return -res;
+ if (!tcp_validate_incoming(sk, skb, th, 1))
+ return 0;
step5:
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5604,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
- if (skb != NULL)
+ if (skb != NULL) {
+ inet_sk_rx_dst_set(sk, skb);
security_inet_conn_established(sk, skb);
+ }
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
@@ -5760,6 +5637,45 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
}
}
+static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ struct tcp_fastopen_cookie *cookie)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
+ u16 mss = tp->rx_opt.mss_clamp;
+ bool syn_drop;
+
+ if (mss == tp->rx_opt.user_mss) {
+ struct tcp_options_received opt;
+ const u8 *hash_location;
+
+ /* Get original SYNACK MSS value if user MSS sets mss_clamp */
+ tcp_clear_options(&opt);
+ opt.user_mss = opt.mss_clamp = 0;
+ tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
+ mss = opt.mss_clamp;
+ }
+
+ if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */
+ cookie->len = -1;
+
+ /* The SYN-ACK neither has a cookie nor acknowledges the data. Presumably
+ * the remote receives only the retransmitted (regular) SYNs: either
+ * the original SYN-data or the corresponding SYN-ACK is lost.
+ */
+ syn_drop = (cookie->len <= 0 && data &&
+ inet_csk(sk)->icsk_retransmits);
+
+ tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+
+ if (data) { /* Retransmit unacked data in SYN */
+ tcp_retransmit_skb(sk, data);
+ tcp_rearm_rto(sk);
+ return true;
+ }
+ return false;
+}
+
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
@@ -5767,9 +5683,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
+ struct tcp_fastopen_cookie foc = { .len = -1 };
int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
if (th->ack) {
/* rfc793:
@@ -5779,11 +5696,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
* a reset (unless the RST bit is set, if so drop
* the segment and return)"
- *
- * We do not send data with SYN, so that RFC-correct
- * test reduces to:
*/
- if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
+ if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
+ after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
goto reset_and_undo;
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5810,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_finish_connect(sk, skb);
+ if ((tp->syn_fastopen || tp->syn_data) &&
+ tcp_rcv_fastopen_synack(sk, skb, &foc))
+ return -1;
+
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
@@ -6013,7 +5932,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int queued = 0;
- int res;
tp->rx_opt.saw_tstamp = 0;
@@ -6068,9 +5986,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0;
}
- res = tcp_validate_incoming(sk, skb, th, 0);
- if (res <= 0)
- return -res;
+ if (!tcp_validate_incoming(sk, skb, th, 0))
+ return 0;
/* step 5: check the ACK field */
if (th->ack) {
@@ -6126,9 +6043,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
+ struct dst_entry *dst;
+
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
- dst_confirm(__sk_dst_get(sk));
+
+ dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
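Aside: tcp_send_challenge_ack() above implements the RFC 5961 rate limit with a
one-second bucket — a shared timestamp resets a shared counter each second, and ACKs
are sent only while the counter stays at or below sysctl_tcp_challenge_ack_limit
(default 100, per the tcp_input.c hunk). The same pattern in a standalone sketch:

#include <stdbool.h>
#include <time.h>

/* Userspace sketch of the per-second challenge-ACK budget; 100
 * mirrors the default of sysctl_tcp_challenge_ack_limit. */
static bool challenge_ack_allowed(void)
{
	static time_t challenge_timestamp;
	static unsigned int challenge_count;
	time_t now = time(NULL);

	if (now != challenge_timestamp) {	/* new one-second bucket */
		challenge_timestamp = now;
		challenge_count = 0;
	}
	return ++challenge_count <= 100;
}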
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c8d28c433b2..42b2a6a7309 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -209,22 +209,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
if (tcp_death_row.sysctl_tw_recycle &&
- !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
- struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
- /*
- * VJ's idea. We save last timestamp seen from
- * the destination in peer table, when entering state
- * TIME-WAIT * and initialize rx_opt.ts_recent from it,
- * when trying new connection.
- */
- if (peer) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
- }
- }
- }
+ !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
+ tcp_fetch_timewait_stamp(sk, &rt->dst);
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
@@ -289,12 +275,15 @@ failure:
EXPORT_SYMBOL(tcp_v4_connect);
/*
- * This routine does path mtu discovery as defined in RFC1191.
+ * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
+ * It can be called through tcp_release_cb() if socket was owned by user
+ * at the time tcp_v4_err() was called to handle ICMP message.
*/
-static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
+static void tcp_v4_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
+ u32 mtu = tcp_sk(sk)->mtu_info;
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
* send out by Linux are always <576bytes so they should go through
@@ -303,17 +292,10 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
if (sk->sk_state == TCP_LISTEN)
return;
- /* We don't check in the destentry if pmtu discovery is forbidden
- * on this route. We just assume that no packet_to_big packets
- * are send back when pmtu discovery is not active.
- * There is a small race when the user changes this flag in the
- * route, but I think that's acceptable.
- */
- if ((dst = __sk_dst_check(sk, 0)) == NULL)
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
return;
- dst->ops->update_pmtu(dst, mtu);
-
/* Something is about to be wrong... Remember soft error
* for the case, if this connection will not able to recover.
*/
@@ -335,6 +317,14 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
} /* else let the usual retransmit timer handle it */
}
+static void do_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -386,8 +376,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
+ * We do take care of the PMTU discovery (RFC1191) special case:
+ * we can receive locally generated ICMP messages while the socket is held.
*/
- if (sock_owned_by_user(sk))
+ if (sock_owned_by_user(sk) &&
+ type != ICMP_DEST_UNREACH &&
+ code != ICMP_FRAG_NEEDED)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
@@ -408,6 +402,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
}
switch (type) {
+ case ICMP_REDIRECT:
+ do_redirect(icmp_skb, sk);
+ goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
@@ -419,8 +416,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+ tp->mtu_info = info;
if (!sock_owned_by_user(sk))
- do_pmtu_discovery(sk, iph, info);
+ tcp_v4_mtu_reduced(sk);
+ else
+ set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out;
}
@@ -698,8 +698,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
- ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
- &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -781,8 +781,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
- &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct request_values *rvp,
- u16 queue_mapping)
+ u16 queue_mapping,
+ bool nocache)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
@@ -848,7 +849,6 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
err = net_xmit_eval(err);
}
- dst_release(dst);
return err;
}
@@ -856,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
- return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+ return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
@@ -1317,7 +1317,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1375,7 +1375,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
req->cookie_ts = tmp_opt.tstamp_ok;
} else if (!isn) {
- struct inet_peer *peer = NULL;
struct flowi4 fl4;
/* VJ's idea. We save last timestamp seen
@@ -1390,12 +1389,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
- fl4.daddr == saddr &&
- (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
- (s32)(peer->tcp_ts - req->ts_recent) >
- TCP_PAWS_WINDOW) {
+ fl4.daddr == saddr) {
+ if (!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
@@ -1404,8 +1399,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
- (!peer || !peer->tcp_ts_stamp) &&
- (!dst || !dst_metric(dst, RTAX_RTT))) {
+ !tcp_peer_is_proven(req, dst, false)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
@@ -1425,7 +1419,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tcp_v4_send_synack(sk, dst, req,
(struct request_values *)&tmp_ext,
- skb_get_queue_mapping(skb)) ||
+ skb_get_queue_mapping(skb),
+ want_cookie) ||
want_cookie)
goto drop_and_free;
@@ -1622,7 +1617,19 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
+ struct dst_entry *dst = sk->sk_rx_dst;
+
sock_rps_save_rxhash(sk, skb);
+ if (dst) {
+ if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
+ dst->ops->check(dst, 0) == NULL) {
+ dst_release(dst);
+ sk->sk_rx_dst = NULL;
+ }
+ }
+ if (unlikely(sk->sk_rx_dst == NULL))
+ inet_sk_rx_dst_set(sk, skb);
+
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1672,6 +1679,44 @@ csum_err:
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
+void tcp_v4_early_demux(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ const struct iphdr *iph;
+ const struct tcphdr *th;
+ struct sock *sk;
+
+ if (skb->pkt_type != PACKET_HOST)
+ return;
+
+ if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
+ return;
+
+ iph = ip_hdr(skb);
+ th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
+
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return;
+
+ sk = __inet_lookup_established(net, &tcp_hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ skb->skb_iif);
+ if (sk) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+ struct dst_entry *dst = sk->sk_rx_dst;
+
+ if (dst)
+ dst = dst_check(dst, 0);
+ if (dst &&
+ inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
+ skb_dst_set_noref(skb, dst);
+ }
+ }
+}
+
/*
* From tcp_input.c
*/
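Aside: tcp_v4_early_demux() above is gated by the ip_early_demux sysctl registered
earlier in this series, so the established-socket lookup at RX can be switched off if
the extra lookup cost outweighs the saved route lookup. A small sketch of toggling it
from C, assuming the usual /proc/sys mapping of the net/ipv4 table:

#include <stdio.h>

/* Hypothetical helper: enable/disable early demux via procfs. */
static int set_ip_early_demux(int on)
{
	FILE *f = fopen("/proc/sys/net/ipv4/ip_early_demux", "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", on ? 1 : 0);
	return fclose(f);
}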
@@ -1821,40 +1866,10 @@ do_time_wait:
goto discard_it;
}
-struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
-{
- struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
- struct inet_sock *inet = inet_sk(sk);
- struct inet_peer *peer;
-
- if (!rt ||
- inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
- peer = inet_getpeer_v4(inet->inet_daddr, 1);
- *release_it = true;
- } else {
- if (!rt->peer)
- rt_bind_peer(rt, inet->inet_daddr, 1);
- peer = rt->peer;
- *release_it = false;
- }
-
- return peer;
-}
-EXPORT_SYMBOL(tcp_v4_get_peer);
-
-void *tcp_v4_tw_get_peer(struct sock *sk)
-{
- const struct inet_timewait_sock *tw = inet_twsk(sk);
-
- return inet_getpeer_v4(tw->tw_daddr, 1);
-}
-EXPORT_SYMBOL(tcp_v4_tw_get_peer);
-
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
- .twsk_getpeer = tcp_v4_tw_get_peer,
};
const struct inet_connection_sock_af_ops ipv4_specific = {
@@ -1863,7 +1878,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
- .get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
@@ -1953,6 +1967,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
tp->cookie_values = NULL;
}
+ /* If socket is aborted during connect operation */
+ tcp_free_fastopen_req(tp);
+
sk_sockets_allocated_dec(sk);
sock_release_memcg(sk);
}
@@ -2593,6 +2610,8 @@ struct proto tcp_prot = {
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv,
+ .release_cb = tcp_release_cb,
+ .mtu_reduced = tcp_v4_mtu_reduced,
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
@@ -2614,7 +2633,7 @@ struct proto tcp_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
.init_cgroup = tcp_init_cgroup,
.destroy_cgroup = tcp_destroy_cgroup,
.proto_cgroup = tcp_proto_cgroup,
@@ -2624,13 +2643,11 @@ EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
- return inet_ctl_sock_create(&net->ipv4.tcp_sock,
- PF_INET, SOCK_RAW, IPPROTO_TCP, net);
+ return 0;
}
static void __net_exit tcp_sk_exit(struct net *net)
{
- inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
new file mode 100644
index 00000000000..2288a6399e1
--- /dev/null
+++ b/net/ipv4/tcp_metrics.c
@@ -0,0 +1,745 @@
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/tcp.h>
+#include <linux/hash.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/net_namespace.h>
+#include <net/request_sock.h>
+#include <net/inetpeer.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/dst.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_nometrics_save __read_mostly;
+
+enum tcp_metric_index {
+ TCP_METRIC_RTT,
+ TCP_METRIC_RTTVAR,
+ TCP_METRIC_SSTHRESH,
+ TCP_METRIC_CWND,
+ TCP_METRIC_REORDERING,
+
+ /* Always last. */
+ TCP_METRIC_MAX,
+};
+
+struct tcp_fastopen_metrics {
+ u16 mss;
+ u16 syn_loss:10; /* Recurring Fast Open SYN losses */
+ unsigned long last_syn_loss; /* Last Fast Open SYN loss */
+ struct tcp_fastopen_cookie cookie;
+};
+
+struct tcp_metrics_block {
+ struct tcp_metrics_block __rcu *tcpm_next;
+ struct inetpeer_addr tcpm_addr;
+ unsigned long tcpm_stamp;
+ u32 tcpm_ts;
+ u32 tcpm_ts_stamp;
+ u32 tcpm_lock;
+ u32 tcpm_vals[TCP_METRIC_MAX];
+ struct tcp_fastopen_metrics tcpm_fastopen;
+};
+
+static bool tcp_metric_locked(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return tm->tcpm_lock & (1 << idx);
+}
+
+static u32 tcp_metric_get(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return tm->tcpm_vals[idx];
+}
+
+static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return msecs_to_jiffies(tm->tcpm_vals[idx]);
+}
+
+static void tcp_metric_set(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx,
+ u32 val)
+{
+ tm->tcpm_vals[idx] = val;
+}
+
+static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx,
+ u32 val)
+{
+ tm->tcpm_vals[idx] = jiffies_to_msecs(val);
+}
+
+static bool addr_same(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ const struct in6_addr *a6, *b6;
+
+ if (a->family != b->family)
+ return false;
+ if (a->family == AF_INET)
+ return a->addr.a4 == b->addr.a4;
+
+ a6 = (const struct in6_addr *) &a->addr.a6[0];
+ b6 = (const struct in6_addr *) &b->addr.a6[0];
+
+ return ipv6_addr_equal(a6, b6);
+}
+
+struct tcpm_hash_bucket {
+ struct tcp_metrics_block __rcu *chain;
+};
+
+static DEFINE_SPINLOCK(tcp_metrics_lock);
+
+static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ u32 val;
+
+ tm->tcpm_stamp = jiffies;
+
+ val = 0;
+ if (dst_metric_locked(dst, RTAX_RTT))
+ val |= 1 << TCP_METRIC_RTT;
+ if (dst_metric_locked(dst, RTAX_RTTVAR))
+ val |= 1 << TCP_METRIC_RTTVAR;
+ if (dst_metric_locked(dst, RTAX_SSTHRESH))
+ val |= 1 << TCP_METRIC_SSTHRESH;
+ if (dst_metric_locked(dst, RTAX_CWND))
+ val |= 1 << TCP_METRIC_CWND;
+ if (dst_metric_locked(dst, RTAX_REORDERING))
+ val |= 1 << TCP_METRIC_REORDERING;
+ tm->tcpm_lock = val;
+
+ tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
+ tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
+ tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
+ tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
+ tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
+ tm->tcpm_ts = 0;
+ tm->tcpm_ts_stamp = 0;
+ tm->tcpm_fastopen.mss = 0;
+ tm->tcpm_fastopen.syn_loss = 0;
+ tm->tcpm_fastopen.cookie.len = 0;
+}
+
+static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ struct inetpeer_addr *addr,
+ unsigned int hash,
+ bool reclaim)
+{
+ struct tcp_metrics_block *tm;
+ struct net *net;
+
+ spin_lock_bh(&tcp_metrics_lock);
+ net = dev_net(dst->dev);
+ if (unlikely(reclaim)) {
+ struct tcp_metrics_block *oldest;
+
+ oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
+ for (tm = rcu_dereference(oldest->tcpm_next); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
+ oldest = tm;
+ }
+ tm = oldest;
+ } else {
+ tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
+ if (!tm)
+ goto out_unlock;
+ }
+ tm->tcpm_addr = *addr;
+
+ tcpm_suck_dst(tm, dst);
+
+ if (likely(!reclaim)) {
+ tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
+ rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
+ }
+
+out_unlock:
+ spin_unlock_bh(&tcp_metrics_lock);
+ return tm;
+}
+
+#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+ tcpm_suck_dst(tm, dst);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH 5
+#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+
+static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
+{
+ if (tm)
+ return tm;
+ if (depth > TCP_METRICS_RECLAIM_DEPTH)
+ return TCP_METRICS_RECLAIM_PTR;
+ return NULL;
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+ struct net *net, unsigned int hash)
+{
+ struct tcp_metrics_block *tm;
+ int depth = 0;
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, addr))
+ break;
+ depth++;
+ }
+ return tcp_get_encode(tm, depth);
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
+ struct dst_entry *dst)
+{
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+
+ addr.family = req->rsk_ops->family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = inet_rsk(req)->rmt_addr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
+ hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = dev_net(dst->dev);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, &addr))
+ break;
+ }
+ tcpm_check_stamp(tm, dst);
+ return tm;
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
+{
+ struct inet6_timewait_sock *tw6;
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+
+ addr.family = tw->tw_family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = tw->tw_daddr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ tw6 = inet6_twsk((struct sock *)tw);
+ *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
+ hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = twsk_net(tw);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, &addr))
+ break;
+ }
+ return tm;
+}
+
+static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
+ struct dst_entry *dst,
+ bool create)
+{
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+ bool reclaim;
+
+ addr.family = sk->sk_family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = inet_sk(sk)->inet_daddr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
+ hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = dev_net(dst->dev);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ tm = __tcp_get_metrics(&addr, net, hash);
+ reclaim = false;
+ if (tm == TCP_METRICS_RECLAIM_PTR) {
+ reclaim = true;
+ tm = NULL;
+ }
+ if (!tm && create)
+ tm = tcpm_new(dst, &addr, hash, reclaim);
+ else
+ tcpm_check_stamp(tm, dst);
+
+ return tm;
+}
+
+/* Save metrics learned by this TCP session. This function is called
+ * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
+ * or goes from LAST-ACK to CLOSE.
+ */
+void tcp_update_metrics(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_metrics_block *tm;
+ unsigned long rtt;
+ u32 val;
+ int m;
+
+ if (sysctl_tcp_nometrics_save || !dst)
+ return;
+
+ if (dst->flags & DST_HOST)
+ dst_confirm(dst);
+
+ rcu_read_lock();
+ if (icsk->icsk_backoff || !tp->srtt) {
+ /* This session failed to estimate rtt. Why?
+ * Probably, no packets returned in time. Reset our
+ * results.
+ */
+ tm = tcp_get_metrics(sk, dst, false);
+ if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
+ tcp_metric_set(tm, TCP_METRIC_RTT, 0);
+ goto out_unlock;
+ } else
+ tm = tcp_get_metrics(sk, dst, true);
+
+ if (!tm)
+ goto out_unlock;
+
+ rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+ m = rtt - tp->srtt;
+
+ /* If the newly calculated rtt is larger than the stored one, store the
+ * new one. Otherwise, use EWMA. Remember, rtt overestimation is
+ * always better than underestimation.
+ */
+ if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
+ if (m <= 0)
+ rtt = tp->srtt;
+ else
+ rtt -= (m >> 3);
+ tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
+ }
+
+ if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
+ unsigned long var;
+
+ if (m < 0)
+ m = -m;
+
+ /* Scale deviation to rttvar fixed point */
+ m >>= 1;
+ if (m < tp->mdev)
+ m = tp->mdev;
+
+ var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+ if (m >= var)
+ var = m;
+ else
+ var -= (var - m) >> 2;
+
+ tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
+ }
+
+ if (tcp_in_initial_slowstart(tp)) {
+ /* Slow start still did not finish. */
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && (tp->snd_cwnd >> 1) > val)
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ tp->snd_cwnd >> 1);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ if (tp->snd_cwnd > val)
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ tp->snd_cwnd);
+ }
+ } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+ icsk->icsk_ca_state == TCP_CA_Open) {
+ /* Cong. avoidance phase, cwnd is reliable. */
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
+ }
+ } else {
+ /* Else slow start did not finish, cwnd is nonsense and
+ * ssthresh may also be invalid.
+ */
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ (val + tp->snd_ssthresh) >> 1);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && tp->snd_ssthresh > val)
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ tp->snd_ssthresh);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val < tp->reordering &&
+ tp->reordering != sysctl_tcp_reordering)
+ tcp_metric_set(tm, TCP_METRIC_REORDERING,
+ tp->reordering);
+ }
+ }
+ tm->tcpm_stamp = jiffies;
+out_unlock:
+ rcu_read_unlock();
+}
+
+/* Initialize metrics on socket. */
+
+void tcp_init_metrics(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_metrics_block *tm;
+ u32 val;
+
+ if (dst == NULL)
+ goto reset;
+
+ dst_confirm(dst);
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (!tm) {
+ rcu_read_unlock();
+ goto reset;
+ }
+
+ if (tcp_metric_locked(tm, TCP_METRIC_CWND))
+ tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
+
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val) {
+ tp->snd_ssthresh = val;
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+ } else {
+ /* ssthresh may have been reduced unnecessarily during the
+ * 3WHS. Restore it back to its initial default.
+ */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ }
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val && tp->reordering != val) {
+ tcp_disable_fack(tp);
+ tcp_disable_early_retrans(tp);
+ tp->reordering = val;
+ }
+
+ val = tcp_metric_get(tm, TCP_METRIC_RTT);
+ if (val == 0 || tp->srtt == 0) {
+ rcu_read_unlock();
+ goto reset;
+ }
+ /* Initial rtt is determined from SYN,SYN-ACK.
+ * The segment is small and rtt may appear much
+ * less than real one. Use per-dst memory
+ * to make it more realistic.
+ *
+ * A bit of theory. RTT is the time passed after a "normal" sized packet
+ * is sent until it is ACKed. In normal circumstances sending small
+ * packets forces the peer to delay ACKs and the calculation is correct too.
+ * The algorithm is adaptive and, provided we follow specs, it
+ * NEVER underestimates RTT. BUT! If the peer tries clever tricks,
+ * sending "quick acks" for long enough to drive the RTT down to a low
+ * value and then abruptly starting to delay ACKs instead, expect trouble.
+ */
+ val = msecs_to_jiffies(val);
+ if (val > tp->srtt) {
+ tp->srtt = val;
+ tp->rtt_seq = tp->snd_nxt;
+ }
+ val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+ if (val > tp->mdev) {
+ tp->mdev = val;
+ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+ }
+ rcu_read_unlock();
+
+ tcp_set_rto(sk);
+reset:
+ if (tp->srtt == 0) {
+ /* RFC6298: 5.7 We've failed to get a valid RTT sample from
+ * the 3WHS. This is most likely due to retransmission,
+ * including a spurious one. Reset the RTO back to 3 secs
+ * from the more aggressive 1 sec to avoid more spurious
+ * retransmissions.
+ */
+ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+ inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
+ }
+ /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+ * retransmitted. In light of RFC6298 more aggressive 1sec
+ * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+ * retransmission has occurred.
+ */
+ if (tp->total_retrans > 1)
+ tp->snd_cwnd = 1;
+ else
+ tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+}
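
tcp_init_metrics() only lets the cached RTT raise the fresh 3WHS estimate, never lower it, and with no usable sample at all it falls back to the conservative 3 s RTO. A rough userspace illustration of that seeding policy (a hedged sketch: the 3000 ms constant stands in for TCP_TIMEOUT_FALLBACK and the srtt + 4*rttvar formula for what tcp_set_rto() computes):

#include <stdio.h>

#define TIMEOUT_FALLBACK_MS 3000        /* stands in for TCP_TIMEOUT_FALLBACK */

/* Follows the shape of tcp_init_metrics(): only raise srtt from the
 * cache; if the handshake produced no sample, use the 3 s fallback.
 */
static unsigned int seed_rto(unsigned int cached_rtt_ms,
                             unsigned int srtt_ms, unsigned int rttvar_ms)
{
    if (cached_rtt_ms && srtt_ms && cached_rtt_ms > srtt_ms)
        srtt_ms = cached_rtt_ms;        /* trust the larger estimate */
    if (srtt_ms == 0)
        return TIMEOUT_FALLBACK_MS;     /* RFC 6298 5.7 style reset */
    return srtt_ms + 4 * rttvar_ms;
}

int main(void)
{
    printf("rto=%u ms\n", seed_rto(120, 15, 10)); /* cached value wins */
    printf("rto=%u ms\n", seed_rto(0, 0, 0));     /* no sample: 3000   */
    return 0;
}
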
+
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
+{
+ struct tcp_metrics_block *tm;
+ bool ret;
+
+ if (!dst)
+ return false;
+
+ rcu_read_lock();
+ tm = __tcp_get_metrics_req(req, dst);
+ if (paws_check) {
+ if (tm &&
+ (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
+ (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
+ ret = false;
+ else
+ ret = true;
+ } else {
+ if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
+ ret = true;
+ else
+ ret = false;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
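
In the paws_check branch a peer is rejected only when the cached timestamp is both fresh (younger than TCP_PAWS_MSL) and ahead of the timestamp the peer just offered by more than TCP_PAWS_WINDOW, using serial-number arithmetic on the 32-bit values. The predicate in isolation (a sketch; the 60 s and 1 tick constants mirror the usual kernel values):

#include <stdint.h>
#include <stdio.h>

#define PAWS_MSL    60      /* seconds a cached stamp stays trusted */
#define PAWS_WINDOW 1       /* tolerated backward step in ticks */

/* Return 0 (reject) when the cached peer timestamp is recent and
 * larger than the offered one beyond the tolerated window; signed
 * subtraction handles 32-bit wraparound.
 */
static int peer_proven(uint32_t now, uint32_t ts_stamp,
                       uint32_t cached_ts, uint32_t offered_ts)
{
    if (now - ts_stamp < PAWS_MSL &&
        (int32_t)(cached_ts - offered_ts) > PAWS_WINDOW)
        return 0;
    return 1;
}

int main(void)
{
    printf("%d\n", peer_proven(1000, 990, 500000, 400000)); /* 0: stale peer ts */
    printf("%d\n", peer_proven(1000, 900, 500000, 400000)); /* 1: cache too old */
    return 0;
}
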
+
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
+ tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
+ tp->rx_opt.ts_recent = tm->tcpm_ts;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
+
+/* VJ's idea. Save last timestamp seen from this destination and hold
+ * it at least for normal timewait interval to use for duplicate
+ * segment detection in subsequent connections, before they enter
+ * synchronized state.
+ */
+bool tcp_remember_stamp(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ bool ret = false;
+
+ if (dst) {
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
+ ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
+ tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+ tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
+ tm->tcpm_ts = tp->rx_opt.ts_recent;
+ }
+ ret = true;
+ }
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+{
+ struct tcp_metrics_block *tm;
+ bool ret = false;
+
+ rcu_read_lock();
+ tm = __tcp_get_metrics_tw(tw);
+ if (tm) {
+ const struct tcp_timewait_sock *tcptw;
+ struct sock *sk = (struct sock *) tw;
+
+ tcptw = tcp_twsk(sk);
+ if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
+ ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
+ tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+ tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
+ tm->tcpm_ts = tcptw->tw_ts_recent;
+ }
+ ret = true;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static DEFINE_SEQLOCK(fastopen_seqlock);
+
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie,
+ int *syn_loss, unsigned long *last_syn_loss)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&fastopen_seqlock);
+ if (tfom->mss)
+ *mss = tfom->mss;
+ *cookie = tfom->cookie;
+ *syn_loss = tfom->syn_loss;
+ *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
+ } while (read_seqretry(&fastopen_seqlock, seq));
+ }
+ rcu_read_unlock();
+}
+
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+
+ write_seqlock_bh(&fastopen_seqlock);
+ tfom->mss = mss;
+ if (cookie->len > 0)
+ tfom->cookie = *cookie;
+ if (syn_lost) {
+ ++tfom->syn_loss;
+ tfom->last_syn_loss = jiffies;
+ } else
+ tfom->syn_loss = 0;
+ write_sequnlock_bh(&fastopen_seqlock);
+ }
+ rcu_read_unlock();
+}
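
Both Fast Open cache helpers serialize on one global seqlock: the writer bumps the sequence around its update, and readers loop until they observe a stable, even sequence. The same idea in a minimal C11 form (a hand-rolled counter for illustration, not the kernel's seqlock API; single writer assumed):

#include <stdatomic.h>
#include <stdio.h>

/* Minimal seqlock-style counter: the writer makes it odd while
 * updating, readers retry when they see an odd or changed value.
 */
static _Atomic unsigned seq;
static unsigned cached_mss;

static void writer_update(unsigned mss)
{
    atomic_fetch_add(&seq, 1);  /* odd: update in progress */
    cached_mss = mss;
    atomic_fetch_add(&seq, 1);  /* even again: stable */
}

static unsigned reader_get(void)
{
    unsigned s, val;

    do {
        s = atomic_load(&seq);
        val = cached_mss;
    } while ((s & 1) || atomic_load(&seq) != s);
    return val;
}

int main(void)
{
    writer_update(1460);
    printf("mss=%u\n", reader_get());
    return 0;
}

A torn read here could pair an old mss with a new cookie; the retry loop is exactly what prevents that without making readers block the writer.
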
+
+static unsigned int tcpmhash_entries;
+static int __init set_tcpmhash_entries(char *str)
+{
+ int ret;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 0, &tcpmhash_entries);
+ if (ret)
+ return 0;
+
+ return 1;
+}
+__setup("tcpmhash_entries=", set_tcpmhash_entries);
+
+static int __net_init tcp_net_metrics_init(struct net *net)
+{
+ size_t size;
+ unsigned int slots;
+
+ slots = tcpmhash_entries;
+ if (!slots) {
+ if (totalram_pages >= 128 * 1024)
+ slots = 16 * 1024;
+ else
+ slots = 8 * 1024;
+ }
+
+ net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
+ size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
+
+ net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
+ if (!net->ipv4.tcp_metrics_hash)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __net_exit tcp_net_metrics_exit(struct net *net)
+{
+ kfree(net->ipv4.tcp_metrics_hash);
+}
+
+static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
+ .init = tcp_net_metrics_init,
+ .exit = tcp_net_metrics_exit,
+};
+
+void __init tcp_metrics_init(void)
+{
+ register_pernet_subsys(&tcp_net_metrics_ops);
+}
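
The per-netns hash is sized by rounding the slot count up to a power of two and keeping only the exponent, so bucket selection reduces to a shift and a mask. A quick standalone check of that arithmetic (order_base_2() reimplemented here for illustration, valid for n >= 1):

#include <stdio.h>

/* ceil(log2(n)) for n >= 1, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int n)
{
    unsigned int order = 0;

    while ((1U << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned int slots = 12 * 1024;    /* e.g. tcpmhash_entries=12288 */
    unsigned int log = order_base_2(slots);

    printf("log=%u buckets=%u mask=%#x\n",
           log, 1U << log, (1U << log) - 1);
    return 0;
}
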
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b85d9fe7d66..232a90c3ec8 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,56 +49,6 @@ struct inet_timewait_death_row tcp_death_row = {
};
EXPORT_SYMBOL_GPL(tcp_death_row);
-/* VJ's idea. Save last timestamp seen from this destination
- * and hold it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter synchronized
- * state.
- */
-
-static bool tcp_remember_stamp(struct sock *sk)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_peer *peer;
- bool release_it;
-
- peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
- if (peer) {
- if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
- peer->tcp_ts = tp->rx_opt.ts_recent;
- }
- if (release_it)
- inet_putpeer(peer);
- return true;
- }
-
- return false;
-}
-
-static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
-{
- struct sock *sk = (struct sock *) tw;
- struct inet_peer *peer;
-
- peer = twsk_getpeer(sk);
- if (peer) {
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
-
- if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
- peer->tcp_ts = tcptw->tw_ts_recent;
- }
- inet_putpeer(peer);
- return true;
- }
- return false;
-}
-
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
@@ -147,7 +97,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -327,8 +277,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
+ struct inet_sock *inet = inet_sk(sk);
- tw->tw_transparent = inet_sk(sk)->transparent;
+ tw->tw_transparent = inet->transparent;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -403,6 +354,7 @@ void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+
if (twsk->tw_md5_key) {
tcp_free_md5sig_pool();
kfree_rcu(twsk->tw_md5_key, rcu);
@@ -435,6 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct tcp_sock *oldtp = tcp_sk(sk);
struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+ inet_sk_rx_dst_set(newsk, skb);
+
/* TCP Cookie Transactions require space for the cookie pair,
* as it differs for each connection. There is no need to
* copy any s_data_payload stored at the original socket.
@@ -470,6 +424,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
treq->snt_isn + 1 + tcp_s_data_size(oldtp);
tcp_prequeue_init(newtp);
+ INIT_LIST_HEAD(&newtp->tsq_node);
tcp_init_wl(newtp, treq->rcv_isn);
@@ -579,7 +534,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 803cbfe82fb..3f1bcff0b10 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -50,6 +50,9 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1;
*/
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
+/* Default TSQ limit of two TSO segments */
+int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
+
/* This limits the percentage of the congestion window which we
* will allow a single TSO frame to consume. Building TSO frames
* which are too large can cause TCP streams to be bursty.
@@ -65,6 +68,8 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp);
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
@@ -380,15 +385,17 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
#define OPTION_MD5 (1 << 2)
#define OPTION_WSCALE (1 << 3)
#define OPTION_COOKIE_EXTENSION (1 << 4)
+#define OPTION_FAST_OPEN_COOKIE (1 << 8)
struct tcp_out_options {
- u8 options; /* bit field of OPTION_* */
+ u16 options; /* bit field of OPTION_* */
+ u16 mss; /* 0 to disable */
u8 ws; /* window scale, 0 to disable */
u8 num_sack_blocks; /* number of SACK blocks to include */
u8 hash_size; /* bytes in hash_location */
- u16 mss; /* 0 to disable */
- __u32 tsval, tsecr; /* need to include OPTION_TS */
__u8 *hash_location; /* temporary pointer, overloaded */
+ __u32 tsval, tsecr; /* need to include OPTION_TS */
+ struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
};
/* The sysctl int routines are generic, so check consistency here.
@@ -437,7 +444,7 @@ static u8 tcp_cookie_size_check(u8 desired)
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
struct tcp_out_options *opts)
{
- u8 options = opts->options; /* mungable copy */
+ u16 options = opts->options; /* mungable copy */
/* Having both authentication and cookies for security is redundant,
* and there's certainly not enough room. Instead, the cookie-less
@@ -559,6 +566,21 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
tp->rx_opt.dsack = 0;
}
+
+ if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
+ struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
+
+ *ptr++ = htonl((TCPOPT_EXP << 24) |
+ ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
+ TCPOPT_FASTOPEN_MAGIC);
+
+ memcpy(ptr, foc->val, foc->len);
+ if ((foc->len & 3) == 2) {
+ u8 *align = ((u8 *)ptr) + foc->len;
+ align[0] = align[1] = TCPOPT_NOP;
+ }
+ ptr += (foc->len + 3) >> 2;
+ }
}
/* Compute TCP options for SYN packets. This is not the final
@@ -574,6 +596,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
0;
+ struct tcp_fastopen_request *fastopen = tp->fastopen_req;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -614,6 +637,16 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
+ if (fastopen && fastopen->cookie.len >= 0) {
+ u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
+ need = (need + 3) & ~3U; /* Align to 32 bits */
+ if (remaining >= need) {
+ opts->options |= OPTION_FAST_OPEN_COOKIE;
+ opts->fastopen_cookie = &fastopen->cookie;
+ remaining -= need;
+ tp->syn_fastopen = 1;
+ }
+ }
/* Note that timestamps are required by the specification.
*
* Odd numbers of bytes are prohibited by the specification, ensuring
@@ -783,6 +816,156 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
return size;
}
+
+/* TCP SMALL QUEUES (TSQ)
+ *
+ * TSQ goal is to keep a small number of skbs per tcp flow in tx queues
+ * (qdisc+dev) to reduce RTT and bufferbloat.
+ * We do this using a special skb destructor (tcp_wfree).
+ *
+ * It's important that tcp_wfree() can be replaced by sock_wfree() in the
+ * event the skb needs to be reallocated in a driver.
+ * The invariant is skb->truesize subtracted from sk->sk_wmem_alloc.
+ *
+ * Since transmit from the skb destructor is forbidden, we use a tasklet
+ * to process all sockets that eventually need to send more skbs.
+ * We use one tasklet per cpu, with its own queue of sockets.
+ */
+struct tsq_tasklet {
+ struct tasklet_struct tasklet;
+ struct list_head head; /* queue of tcp sockets */
+};
+static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
+
+static void tcp_tsq_handler(struct sock *sk)
+{
+ if ((1 << sk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
+ tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+}
+/*
+ * One tasklet per cpu tries to send more skbs.
+ * We run in tasklet context but need to disable irqs when
+ * transferring tsq->head because tcp_wfree() might
+ * interrupt us (non-NAPI drivers).
+ */
+static void tcp_tasklet_func(unsigned long data)
+{
+ struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
+ LIST_HEAD(list);
+ unsigned long flags;
+ struct list_head *q, *n;
+ struct tcp_sock *tp;
+ struct sock *sk;
+
+ local_irq_save(flags);
+ list_splice_init(&tsq->head, &list);
+ local_irq_restore(flags);
+
+ list_for_each_safe(q, n, &list) {
+ tp = list_entry(q, struct tcp_sock, tsq_node);
+ list_del(&tp->tsq_node);
+
+ sk = (struct sock *)tp;
+ bh_lock_sock(sk);
+
+ if (!sock_owned_by_user(sk)) {
+ tcp_tsq_handler(sk);
+ } else {
+ /* defer the work to tcp_release_cb() */
+ set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+ }
+ bh_unlock_sock(sk);
+
+ clear_bit(TSQ_QUEUED, &tp->tsq_flags);
+ sk_free(sk);
+ }
+}
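
The tasklet drains its queue with the classic splice pattern: detach the whole pending list in one short IRQ-off window, then walk the private copy with interrupts enabled so producers can keep appending concurrently. The same shape in miniature (a userspace sketch where a mutex stands in for local_irq_save() and a singly-linked list for list_splice_init()):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int val; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;   /* shared producer list */

/* Detach the whole list in one short critical section, then
 * process the private copy without holding the lock.
 */
static void drain(void)
{
    struct node *list, *n;

    pthread_mutex_lock(&lock);
    list = head;
    head = NULL;
    pthread_mutex_unlock(&lock);

    while (list) {
        n = list;
        list = list->next;
        printf("processing %d\n", n->val);
    }
}

int main(void)
{
    struct node a = { NULL, 1 }, b = { &a, 2 };

    head = &b;
    drain();
    return 0;
}
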
+
+#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
+ (1UL << TCP_WRITE_TIMER_DEFERRED) | \
+ (1UL << TCP_DELACK_TIMER_DEFERRED) | \
+ (1UL << TCP_MTU_REDUCED_DEFERRED))
+/**
+ * tcp_release_cb - tcp release_sock() callback
+ * @sk: socket
+ *
+ * called from release_sock() to perform protocol-dependent
+ * actions before socket release.
+ */
+void tcp_release_cb(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long flags, nflags;
+
+ /* perform an atomic operation only if at least one flag is set */
+ do {
+ flags = tp->tsq_flags;
+ if (!(flags & TCP_DEFERRED_ALL))
+ return;
+ nflags = flags & ~TCP_DEFERRED_ALL;
+ } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+
+ if (flags & (1UL << TCP_TSQ_DEFERRED))
+ tcp_tsq_handler(sk);
+
+ if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+ tcp_write_timer_handler(sk);
+
+ if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+ tcp_delack_timer_handler(sk);
+
+ if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+ sk->sk_prot->mtu_reduced(sk);
+}
+EXPORT_SYMBOL(tcp_release_cb);
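
The flag handoff is lock-free: tcp_release_cb() snapshots tsq_flags and cmpxchg()es in a copy with the deferred bits cleared, retrying if a timer raced in, so each deferred handler runs exactly once. An equivalent loop with C11 atomics (a sketch, not the kernel primitive; the bit mask is arbitrary):

#include <stdatomic.h>
#include <stdio.h>

#define DEFERRED_ALL 0x0fUL

static _Atomic unsigned long tsq_flags;

/* Atomically fetch-and-clear the deferred bits; returns the bits that
 * were set so the caller can run each deferred handler exactly once.
 */
static unsigned long claim_deferred(void)
{
    unsigned long flags = atomic_load(&tsq_flags);

    do {
        if (!(flags & DEFERRED_ALL))
            return 0;   /* nothing pending: no atomic write needed */
    } while (!atomic_compare_exchange_weak(&tsq_flags, &flags,
                                           flags & ~DEFERRED_ALL));
    return flags & DEFERRED_ALL;
}

int main(void)
{
    atomic_store(&tsq_flags, 0x5);
    printf("claimed %#lx, left %#lx\n", claim_deferred(),
           atomic_load(&tsq_flags));
    return 0;
}
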
+
+void __init tcp_tasklet_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
+
+ INIT_LIST_HEAD(&tsq->head);
+ tasklet_init(&tsq->tasklet,
+ tcp_tasklet_func,
+ (unsigned long)tsq);
+ }
+}
+
+/*
+ * Write buffer destructor automatically called from kfree_skb.
+ * We can't xmit new skbs from this context, as we might already
+ * hold the qdisc lock.
+ */
+void tcp_wfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
+ !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
+ unsigned long flags;
+ struct tsq_tasklet *tsq;
+
+ /* Keep a ref on socket.
+ * This last ref will be released in tcp_tasklet_func()
+ */
+ atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
+
+ /* queue this socket to tasklet queue */
+ local_irq_save(flags);
+ tsq = &__get_cpu_var(tsq_tasklet);
+ list_add(&tp->tsq_node, &tsq->head);
+ tasklet_schedule(&tsq->tasklet);
+ local_irq_restore(flags);
+ } else {
+ sock_wfree(skb);
+ }
+}
+
/* This routine actually transmits TCP packets queued in by
* tcp_do_sendmsg(). This is used by both the initial
* transmission and possible later retransmissions.
@@ -844,7 +1027,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
- skb_set_owner_w(skb, sk);
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
+ tcp_wfree : sock_wfree;
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
/* Build TCP header and checksum it. */
th = tcp_hdr(skb);
@@ -1780,6 +1968,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
@@ -1800,6 +1989,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
break;
}
+ /* TSQ: sk_wmem_alloc accounts for skb truesize,
+ * including skb overhead. But that's OK.
+ */
+ if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+ break;
+ }
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
@@ -1849,7 +2045,8 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
if (unlikely(sk->sk_state == TCP_CLOSE))
return;
- if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
+ if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
+ sk_gfp_atomic(sk, GFP_ATOMIC)))
tcp_check_probe_timer(sk);
}
@@ -2442,7 +2639,16 @@ int tcp_send_synack(struct sock *sk)
return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
-/* Prepare a SYN-ACK. */
+/**
+ * tcp_make_synack - Prepare a SYN-ACK.
+ * @sk: listener socket
+ * @dst: dst entry attached to the SYNACK
+ * @req: request_sock pointer
+ * @rvp: request_values pointer
+ *
+ * Allocate one skb and build a SYNACK packet.
+ * @dst is consumed: the caller should not use it again.
+ */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct request_values *rvp)
@@ -2461,14 +2667,16 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
s_data_desired = cvp->s_data_desired;
- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
- if (skb == NULL)
+ skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
+ if (unlikely(!skb)) {
+ dst_release(dst);
return NULL;
-
+ }
/* Reserve space for headers. */
skb_reserve(skb, MAX_TCP_HEADER);
- skb_dst_set(skb, dst_clone(dst));
+ skb_dst_set(skb, dst);
mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
@@ -2645,6 +2853,109 @@ void tcp_connect_init(struct sock *sk)
tcp_clear_retrans(tp);
}
+static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+ tcb->end_seq += skb->len;
+ skb_header_release(skb);
+ __tcp_add_write_queue_tail(sk, skb);
+ sk->sk_wmem_queued += skb->truesize;
+ sk_mem_charge(sk, skb->truesize);
+ tp->write_seq = tcb->end_seq;
+ tp->packets_out += tcp_skb_pcount(skb);
+}
+
+/* Build and send a SYN with data and a (cached) Fast Open cookie. However,
+ * queue a data-only packet after the regular SYN, such that regular SYNs
+ * are retransmitted on timeouts. Also, if the remote SYN-ACK acknowledges
+ * only the SYN sequence, the data are retransmitted in the first ACK.
+ * If the cookie is not cached or another error occurs, fall back to sending
+ * a regular SYN with the Fast Open cookie request option.
+ */
+static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_fastopen_request *fo = tp->fastopen_req;
+ int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
+ struct sk_buff *syn_data = NULL, *data;
+ unsigned long last_syn_loss = 0;
+
+ tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
+ tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
+ &syn_loss, &last_syn_loss);
+ /* Recurring FO SYN losses: revert to regular handshake temporarily */
+ if (syn_loss > 1 &&
+ time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
+ fo->cookie.len = -1;
+ goto fallback;
+ }
+
+ if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
+ fo->cookie.len = -1;
+ else if (fo->cookie.len <= 0)
+ goto fallback;
+
+ /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
+ * user-MSS. Reserve maximum option space for middleboxes that add
+ * private TCP options. The cost is reduced data space in SYN :(
+ */
+ if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
+ tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+ space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ MAX_TCP_OPTION_SPACE;
+
+ syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+ sk->sk_allocation);
+ if (syn_data == NULL)
+ goto fallback;
+
+ for (i = 0; i < iovlen && syn_data->len < space; ++i) {
+ struct iovec *iov = &fo->data->msg_iov[i];
+ unsigned char __user *from = iov->iov_base;
+ int len = iov->iov_len;
+
+ if (syn_data->len + len > space)
+ len = space - syn_data->len;
+ else if (i + 1 == iovlen)
+ /* No more data pending in inet_wait_for_connect() */
+ fo->data = NULL;
+
+ if (skb_add_data(syn_data, from, len))
+ goto fallback;
+ }
+
+ /* Queue a data-only packet after the regular SYN for retransmission */
+ data = pskb_copy(syn_data, sk->sk_allocation);
+ if (data == NULL)
+ goto fallback;
+ TCP_SKB_CB(data)->seq++;
+ TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
+ tcp_connect_queue_skb(sk, data);
+ fo->copied = data->len;
+
+ if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
+ tp->syn_data = (fo->copied > 0);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+ goto done;
+ }
+ syn_data = NULL;
+
+fallback:
+ /* Send a regular SYN with Fast Open cookie request option */
+ if (fo->cookie.len > 0)
+ fo->cookie.len = 0;
+ err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
+ if (err)
+ tp->syn_fastopen = 0;
+ kfree_skb(syn_data);
+done:
+ fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
+ return err;
+}
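
Note the exponential client backoff at the top of the function: after a second consecutive SYN-data loss, Fast Open stays disabled for 60*HZ << syn_loss jiffies, i.e. 4 minutes, then 8, and so on. The check in isolation (a sketch with time kept in seconds rather than jiffies):

#include <stdio.h>

/* Should we still suppress SYN-data, given syn_loss consecutive
 * losses and the time of the last one?  Mirrors the
 * "60*HZ << syn_loss" window used above, in seconds.
 */
static int fastopen_backed_off(int syn_loss, unsigned long last_loss,
                               unsigned long now)
{
    return syn_loss > 1 && now < last_loss + (60UL << syn_loss);
}

int main(void)
{
    /* second loss at t=0: blocked until t=240 (60 << 2) */
    printf("t=100: %d\n", fastopen_backed_off(2, 0, 100));
    printf("t=300: %d\n", fastopen_backed_off(2, 0, 300));
    return 0;
}
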
+
/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
@@ -2662,17 +2973,13 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
+ tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ tcp_connect_queue_skb(sk, buff);
TCP_ECN_send_syn(sk, buff);
- /* Send it off. */
- TCP_SKB_CB(buff)->when = tcp_time_stamp;
- tp->retrans_stamp = TCP_SKB_CB(buff)->when;
- skb_header_release(buff);
- __tcp_add_write_queue_tail(sk, buff);
- sk->sk_wmem_queued += buff->truesize;
- sk_mem_charge(sk, buff->truesize);
- tp->packets_out += tcp_skb_pcount(buff);
- err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+ /* Send off SYN; include data in Fast Open. */
+ err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
+ tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
if (err == -ECONNREFUSED)
return err;
@@ -2759,7 +3066,7 @@ void tcp_send_ack(struct sock *sk)
* tcp_transmit_skb() will set the ownership to this
* sock.
*/
- buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+ buff = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
if (buff == NULL) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
@@ -2774,7 +3081,7 @@ void tcp_send_ack(struct sock *sk)
/* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->when = tcp_time_stamp;
- tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
+ tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
}
/* This routine sends a packet with an out of date sequence
@@ -2794,7 +3101,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
struct sk_buff *skb;
/* We don't queue it, tcp_transmit_skb() sets ownership. */
- skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
+ skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC));
if (skb == NULL)
return -1;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index e911e6c523e..6df36ad55a3 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
-static void tcp_write_timer(unsigned long);
-static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
-
-void tcp_init_xmit_timers(struct sock *sk)
-{
- inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
- &tcp_keepalive_timer);
-}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
-
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
return 0;
}
-static void tcp_delack_timer(unsigned long data)
+void tcp_delack_timer_handler(struct sock *sk)
{
- struct sock *sk = (struct sock *)data;
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
- bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- /* Try again later. */
- icsk->icsk_ack.blocked = 1;
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
- sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
- goto out_unlock;
- }
-
sk_mem_reclaim_partial(sk);
if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
@@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
out:
if (sk_under_memory_pressure(sk))
sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_delack_timer(unsigned long data)
+{
+ struct sock *sk = (struct sock *)data;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ tcp_delack_timer_handler(sk);
+ } else {
+ inet_csk(sk)->icsk_ack.blocked = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+ /* delegate our work to tcp_release_cb() */
+ set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ }
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -450,19 +443,11 @@ out_reset_timer:
out:;
}
-static void tcp_write_timer(unsigned long data)
+void tcp_write_timer_handler(struct sock *sk)
{
- struct sock *sk = (struct sock *)data;
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- /* Try again later */
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
- goto out_unlock;
- }
-
if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
goto out;
@@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
out:
sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_write_timer(unsigned long data)
+{
+ struct sock *sk = (struct sock *)data;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ tcp_write_timer_handler(sk);
+ } else {
+ /* delegate our work to tcp_release_cb() */
+ set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ }
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -602,3 +599,10 @@ out:
bh_unlock_sock(sk);
sock_put(sk);
}
+
+void tcp_init_xmit_timers(struct sock *sk)
+{
+ inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+ &tcp_keepalive_timer);
+}
+EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eaca73644e7..b4c3582a991 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -108,6 +108,7 @@
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
+#include <trace/events/skb.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
@@ -615,6 +616,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+ ipv4_sk_update_pmtu(skb, sk, info);
if (inet->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
@@ -628,6 +630,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
err = icmp_err_convert[code].errno;
}
break;
+ case ICMP_REDIRECT:
+ ipv4_sk_redirect(skb, sk);
+ break;
}
/*
@@ -1219,8 +1224,10 @@ try_again:
goto csum_copy_err;
}
- if (err)
+ if (unlikely(err)) {
+ trace_kfree_skb(skb, udp_recvmsg);
goto out_free;
+ }
if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index a7f86a3cd50..16d0960062b 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -34,15 +34,16 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
int err = -EINVAL;
struct sock *sk;
struct sk_buff *rep;
+ struct net *net = sock_net(in_skb->sk);
if (req->sdiag_family == AF_INET)
- sk = __udp4_lib_lookup(&init_net,
+ sk = __udp4_lib_lookup(net,
req->id.idiag_src[0], req->id.idiag_sport,
req->id.idiag_dst[0], req->id.idiag_dport,
req->id.idiag_if, tbl);
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6)
- sk = __udp6_lib_lookup(&init_net,
+ sk = __udp6_lib_lookup(net,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
(struct in6_addr *)req->id.idiag_dst,
@@ -75,7 +76,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -90,6 +91,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;
+ struct net *net = sock_net(skb->sk);
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -106,6 +108,8 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
sk_nulls_for_each(sk, node, &hslot->head) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next;
if (!(r->idiag_states & (1 << sk->sk_state)))
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index ed4bf11ef9f..ddee0a099a2 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -15,6 +15,65 @@
#include <net/ip.h>
#include <net/xfrm.h>
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
+
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+{
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+ int ret = -EEXIST;
+ int priority = handler->priority;
+
+ mutex_lock(&xfrm4_mode_tunnel_input_mutex);
+
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
+ break;
+ if (t->priority == priority)
+ goto err;
+ }
+
+ handler->next = *pprev;
+ rcu_assign_pointer(*pprev, handler);
+
+ ret = 0;
+
+err:
+ mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
+
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+{
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+ int ret = -ENOENT;
+
+ mutex_lock(&xfrm4_mode_tunnel_input_mutex);
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
+ *pprev = handler->next;
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_deregister);
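
Registration keeps the notify list sorted by ascending priority and rejects duplicate priorities, so the RCU readers in the input path can walk a stable, ordered chain. The same sorted insert without the RCU machinery (userspace sketch):

#include <stdio.h>

struct handler { struct handler *next; int priority; };

static struct handler *handlers;

/* Insert keeping ascending priority order; -1 on duplicate priority. */
static int handler_register(struct handler *h)
{
    struct handler **pprev, *t;

    for (pprev = &handlers; (t = *pprev) != NULL; pprev = &t->next) {
        if (t->priority > h->priority)
            break;
        if (t->priority == h->priority)
            return -1;
    }
    h->next = *pprev;
    *pprev = h;
    return 0;
}

int main(void)
{
    struct handler a = { NULL, 2 }, b = { NULL, 1 }, c = { NULL, 2 };
    int ra = handler_register(&a);
    int rb = handler_register(&b);
    int rc = handler_register(&c);
    struct handler *t;

    printf("%d %d %d\n", ra, rb, rc);   /* 0 0 -1 */
    for (t = handlers; t; t = t->next)
        printf("prio %d\n", t->priority);
    return 0;
}
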
+
static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
{
struct iphdr *inner_iph = ipip_hdr(skb);
@@ -64,8 +123,14 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
+#define for_each_input_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next))
+
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
+ struct xfrm_tunnel *handler;
int err = -EINVAL;
if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -74,6 +139,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
+ for_each_input_rcu(rcv_notify_handlers, handler)
+ handler->handler(skb);
+
if (skb_cloned(skb) &&
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
goto out;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0d3426cb5c4..681ea2f413e 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,30 +79,20 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
- xdst->u.rt.rt_key_dst = fl4->daddr;
- xdst->u.rt.rt_key_src = fl4->saddr;
- xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
- xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
xdst->u.rt.rt_iif = fl4->flowi4_iif;
- xdst->u.rt.rt_oif = fl4->flowi4_oif;
- xdst->u.rt.rt_mark = fl4->flowi4_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
- xdst->u.rt.peer = rt->peer;
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
-
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
+ xdst->u.rt.rt_is_input = rt->rt_is_input;
xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
RTCF_LOCAL);
xdst->u.rt.rt_type = rt->rt_type;
- xdst->u.rt.rt_src = rt->rt_src;
- xdst->u.rt.rt_dst = rt->rt_dst;
xdst->u.rt.rt_gateway = rt->rt_gateway;
- xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;
+ xdst->u.rt.rt_pmtu = rt->rt_pmtu;
+ INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);
return 0;
}
@@ -198,12 +188,22 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops)
return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
-static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct dst_entry *path = xdst->route;
+
+ path->ops->update_pmtu(path, sk, skb, mtu);
+}
+
+static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, mtu);
+ path->ops->redirect(path, sk, skb);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
@@ -212,9 +212,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
dst_destroy_metrics_generic(dst);
- if (likely(xdst->u.rt.peer))
- inet_putpeer(xdst->u.rt.peer);
-
xfrm_dst_destroy(xdst);
}
@@ -232,6 +229,7 @@ static struct dst_ops xfrm4_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
+ .redirect = xfrm4_redirect,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8f6411c9718..79181819a24 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -63,6 +63,7 @@
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/string.h>
+#include <linux/hash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -579,15 +580,9 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
list_add_tail(&ifp->if_list, p);
}
-static u32 ipv6_addr_hash(const struct in6_addr *addr)
+static u32 inet6_addr_hash(const struct in6_addr *addr)
{
- /*
- * We perform the hash function over the last 64 bits of the address
- * This will include the IEEE address token on links that support it.
- */
- return jhash_2words((__force u32)addr->s6_addr32[2],
- (__force u32)addr->s6_addr32[3], 0)
- & (IN6_ADDR_HSIZE - 1);
+ return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
}
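
Instead of masking the low 32 bits of two address words, the new helper mixes the whole address via ipv6_addr_hash() and then takes the top bits of a multiplicative hash, which spreads structured addresses across buckets much better. A standalone illustration of hash_32()-style bucket selection (the golden-ratio constant matches the kernel's 32-bit one; an IN6_ADDR_HSIZE_SHIFT of 4 is assumed):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001U  /* kernel's 32-bit constant */

/* Multiplicative hash: multiply and keep the top 'bits' bits,
 * like the kernel's hash_32().
 */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
    return (val * GOLDEN_RATIO_PRIME_32) >> (32 - bits);
}

int main(void)
{
    uint32_t h = 0x20010db8;    /* pretend ipv6_addr_hash() result */

    printf("bucket=%u of %u\n", hash_32(h, 4), 1U << 4);
    return 0;
}
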
/* On success it returns ifp with increased reference count */
@@ -662,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
in6_ifa_hold(ifa);
/* Add to big hash table */
- hash = ipv6_addr_hash(addr);
+ hash = inet6_addr_hash(addr);
hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
spin_unlock(&addrconf_hash_lock);
@@ -1270,7 +1265,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
{
struct inet6_ifaddr *ifp;
struct hlist_node *node;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
@@ -1293,7 +1288,7 @@ EXPORT_SYMBOL(ipv6_chk_addr);
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev)
{
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
struct inet6_ifaddr *ifp;
struct hlist_node *node;
@@ -1336,7 +1331,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
struct net_device *dev, int strict)
{
struct inet6_ifaddr *ifp, *result = NULL;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
struct hlist_node *node;
rcu_read_lock_bh();
@@ -3223,7 +3218,7 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
int ret = 0;
struct inet6_ifaddr *ifp = NULL;
struct hlist_node *n;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f1a4a2c28ed..7e6139508ee 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,6 +35,7 @@
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -612,16 +613,18 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct xfrm_state *x;
if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG)
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
- ntohl(ah->spi), &iph->daddr);
-
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index db1521fcda5..6dc7fd353ef 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -433,15 +434,19 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct xfrm_state *x;
if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG)
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
- pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
- ntohl(esph->spi), &iph->daddr);
+
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6447dc49429..fa3d9c32809 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -791,14 +791,14 @@ static int ipv6_renew_option(void *ohdr,
if (ohdr) {
memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
*hdr = (struct ipv6_opt_hdr *)*p;
- *p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
+ *p += CMSG_ALIGN(ipv6_optlen(*hdr));
}
} else {
if (newopt) {
if (copy_from_user(*p, newopt, newoptlen))
return -EFAULT;
*hdr = (struct ipv6_opt_hdr *)*p;
- if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
+ if (ipv6_optlen(*hdr) > newoptlen)
return -EINVAL;
*p += CMSG_ALIGN(newoptlen);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 091a2971c7b..24d69dbca4d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -188,14 +188,16 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
+ struct inet_peer *peer;
/* Give more bandwidth to wider prefixes. */
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ res = inet_peer_xrlim_allow(peer, tmo);
+ if (peer)
+ inet_putpeer(peer);
}
dst_release(dst);
return res;
@@ -596,13 +598,12 @@ out:
icmpv6_xmit_unlock(sk);
}
-static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
const struct inet6_protocol *ipprot;
int inner_offset;
- int hash;
- u8 nexthdr;
__be16 frag_off;
+ u8 nexthdr;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return;
@@ -629,10 +630,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
--ANK (980726)
*/
- hash = nexthdr & (MAX_INET_PROTOS - 1);
-
rcu_read_lock();
- ipprot = rcu_dereference(inet6_protos[hash]);
+ ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
rcu_read_unlock();
@@ -649,7 +648,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
struct net_device *dev = skb->dev;
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
- const struct ipv6hdr *orig_hdr;
struct icmp6hdr *hdr;
u8 type;
@@ -661,7 +659,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
XFRM_STATE_ICMP))
goto drop_no_count;
- if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
+ if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
goto drop_no_count;
nh = skb_network_offset(skb);
@@ -722,9 +720,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto discard_it;
hdr = icmp6_hdr(skb);
- orig_hdr = (struct ipv6hdr *) (hdr + 1);
- rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
- ntohl(hdr->icmp6_mtu));
/*
* Drop through to notify
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e6cee5292a0..0251a6005be 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -55,26 +55,26 @@ int inet6_csk_bind_conflict(const struct sock *sk,
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ struct flowi6 *fl6,
const struct request_sock *req)
{
struct inet6_request_sock *treq = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *final_p, final;
struct dst_entry *dst;
- struct flowi6 fl6;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = treq->rmt_addr;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
- fl6.saddr = treq->loc_addr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = IPPROTO_TCP;
+ fl6->daddr = treq->rmt_addr;
+ final_p = fl6_update_dst(fl6, np->opt, &final);
+ fl6->saddr = treq->loc_addr;
+ fl6->flowi6_oif = treq->iif;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_dport = inet_rsk(req)->rmt_port;
+ fl6->fl6_sport = inet_rsk(req)->loc_port;
+ security_req_classify_flow(req, flowi6_to_flowi(fl6));
+
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
if (IS_ERR(dst))
return NULL;
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
__ip6_dst_store(sk, dst, daddr, saddr);
@@ -203,43 +204,52 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
return dst;
}
-int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ struct flowi6 *fl6)
{
- struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct flowi6 fl6;
- struct dst_entry *dst;
struct in6_addr *final_p, final;
- int res;
+ struct dst_entry *dst;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = sk->sk_protocol;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowlabel = np->flow_label;
- IP6_ECN_flow_xmit(sk, fl6.flowlabel);
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_sport = inet->inet_sport;
- fl6.fl6_dport = inet->inet_dport;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = sk->sk_protocol;
+ fl6->daddr = np->daddr;
+ fl6->saddr = np->saddr;
+ fl6->flowlabel = np->flow_label;
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+ fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_sport = inet->inet_sport;
+ fl6->fl6_dport = inet->inet_dport;
+ security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ final_p = fl6_update_dst(fl6, np->opt, &final);
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ if (!dst) {
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
- if (dst == NULL) {
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ if (!IS_ERR(dst))
+ __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ }
+ return dst;
+}
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- sk->sk_route_caps = 0;
- kfree_skb(skb);
- return PTR_ERR(dst);
- }
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+{
+ struct sock *sk = skb->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ int res;
- __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ dst = inet6_csk_route_socket(sk, &fl6);
+ if (IS_ERR(dst)) {
+ sk->sk_err_soft = -PTR_ERR(dst);
+ sk->sk_route_caps = 0;
+ kfree_skb(skb);
+ return PTR_ERR(dst);
}
rcu_read_lock();
@@ -253,3 +263,16 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
+
+ if (IS_ERR(dst))
+ return NULL;
+ dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+ return inet6_csk_route_socket(sk, &fl6);
+}
+EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 60832766196..13690d650c3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -197,6 +197,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
table->tb6_id = id;
table->tb6_root.leaf = net->ipv6.ip6_null_entry;
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&table->tb6_peers);
}
return table;
@@ -1633,6 +1634,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -1643,6 +1645,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
#endif
fib6_tables_init(net);
@@ -1666,8 +1669,10 @@ static void fib6_net_exit(struct net *net)
del_timer_sync(&net->ipv6.ip6_fib_timer);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
kfree(net->ipv6.fib6_local_tbl);
#endif
+ inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
kfree(net->ipv6.fib6_main_tbl);
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 21a15dfe4a9..a52d864d562 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -47,9 +47,16 @@
-inline int ip6_rcv_finish( struct sk_buff *skb)
+int ip6_rcv_finish(struct sk_buff *skb)
{
- if (skb_dst(skb) == NULL)
+ if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ const struct inet6_protocol *ipprot;
+
+ ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
+ if (ipprot && ipprot->early_demux)
+ ipprot->early_demux(skb);
+ }
+ if (!skb_dst(skb))
ip6_route_input(skb);
return dst_input(skb);
@@ -168,13 +175,12 @@ drop:
static int ip6_input_finish(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb_dst(skb)->dev);
const struct inet6_protocol *ipprot;
+ struct inet6_dev *idev;
unsigned int nhoff;
int nexthdr;
bool raw;
- u8 hash;
- struct inet6_dev *idev;
- struct net *net = dev_net(skb_dst(skb)->dev);
/*
* Parse extension headers
@@ -189,9 +195,7 @@ resubmit:
nexthdr = skb_network_header(skb)[nhoff];
raw = raw6_local_deliver(skb, nexthdr);
-
- hash = nexthdr & (MAX_INET_PROTOS - 1);
- if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
+ if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d19c5..5b2d63ed793 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -83,24 +83,12 @@ int ip6_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip6_local_out);
-/* dev_loopback_xmit for use with netfilter. */
-static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
-{
- skb_reset_mac_header(newskb);
- __skb_pull(newskb, skb_network_offset(newskb));
- newskb->pkt_type = PACKET_LOOPBACK;
- newskb->ip_summed = CHECKSUM_UNNECESSARY;
- WARN_ON(!skb_dst(newskb));
-
- netif_rx_ni(newskb);
- return 0;
-}
-
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
+ struct rt6_info *rt;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -121,7 +109,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
- ip6_dev_loopback_xmit);
+ dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(dev_net(dev), idev,
@@ -136,9 +124,10 @@ static int ip6_finish_output2(struct sk_buff *skb)
}
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ rt = (struct rt6_info *) dst;
+ neigh = rt->n;
if (neigh) {
- int res = neigh_output(neigh, skb);
+ int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock();
return res;
@@ -463,6 +452,7 @@ int ip6_forward(struct sk_buff *skb)
*/
if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
+ struct inet_peer *peer;
struct rt6_info *rt;
/*
@@ -476,14 +466,15 @@ int ip6_forward(struct sk_buff *skb)
else
target = &hdr->daddr;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
- if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
+ if (peer)
+ inet_putpeer(peer);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -604,12 +595,13 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
+ struct net *net;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- peer = rt->rt6i_peer;
+ net = dev_net(rt->dst.dev);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
if (peer) {
fhdr->identification = htonl(inet_getid(peer, 0));
+ inet_putpeer(peer);
return;
}
}
@@ -960,6 +952,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
struct neighbour *n;
+ struct rt6_info *rt;
#endif
int err;
@@ -988,7 +981,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
* dst entry of the nexthop router
*/
rcu_read_lock();
- n = dst_get_neighbour_noref(*dst);
+ rt = (struct rt6_info *) *dst;
+ n = rt->n;
if (n && !(n->nud_state & NUD_VALID)) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9015fad8d6..9a1d5fe6aef 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -40,6 +40,7 @@
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
+#include <linux/hash.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
@@ -70,11 +71,15 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
-#define HASH_SIZE 32
+#define HASH_SIZE_SHIFT 5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
-#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
- (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
- (HASH_SIZE - 1))
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+ u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+ return hash_32(hash, HASH_SIZE_SHIFT);
+}
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
@@ -166,12 +171,11 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
- unsigned int h0 = HASH(remote);
- unsigned int h1 = HASH(local);
+ unsigned int hash = HASH(remote, local);
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr) &&
(t->dev->flags & IFF_UP))
@@ -205,7 +209,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
prio = 1;
- h = HASH(remote) ^ HASH(local);
+ h = HASH(remote, local);
}
return &ip6n->tnls[prio][h];
}
@@ -252,7 +256,7 @@ static void ip6_dev_free(struct net_device *dev)
}
/**
- * ip6_tnl_create() - create a new tunnel
+ * ip6_tnl_create - create a new tunnel
* @p: tunnel parameters
* @pt: pointer to new tunnel
*
@@ -550,6 +554,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_FRAG_NEEDED;
break;
+	case NDISC_REDIRECT:
+		rel_type = ICMP_REDIRECT;
+		rel_code = ICMP_REDIR_HOST;
+		break;
default:
return 0;
}
@@ -606,8 +613,10 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
- skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
+ skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
}
+ if (rel_type == ICMP_REDIRECT)
+ skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -684,24 +693,50 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
IP6_ECN_set_ce(ipv6_hdr(skb));
}
+static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr)
+{
+ struct ip6_tnl_parm *p = &t->parms;
+ int ltype = ipv6_addr_type(laddr);
+ int rtype = ipv6_addr_type(raddr);
+ __u32 flags = 0;
+
+ if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
+ flags = IP6_TNL_F_CAP_PER_PACKET;
+ } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+ rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+ !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
+ (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
+ if (ltype&IPV6_ADDR_UNICAST)
+ flags |= IP6_TNL_F_CAP_XMIT;
+ if (rtype&IPV6_ADDR_UNICAST)
+ flags |= IP6_TNL_F_CAP_RCV;
+ }
+ return flags;
+}
+
/* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
+static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr)
{
struct ip6_tnl_parm *p = &t->parms;
int ret = 0;
struct net *net = dev_net(t->dev);
- if (p->flags & IP6_TNL_F_CAP_RCV) {
+ if ((p->flags & IP6_TNL_F_CAP_RCV) ||
+ ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+ (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
struct net_device *ldev = NULL;
if (p->link)
ldev = dev_get_by_index_rcu(net, p->link);
- if ((ipv6_addr_is_multicast(&p->laddr) ||
- likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
- likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+ if ((ipv6_addr_is_multicast(laddr) ||
+ likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
+ likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
ret = 1;
-
}
return ret;
}
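ip6_tnl_get_cap() turns the old set-once capability computation into a pure function so it can also be evaluated per packet when an endpoint is wildcarded (IP6_TNL_F_CAP_PER_PACKET). A userspace model of the decision logic, with the address-type and capability flags reduced to illustrative bitmasks:

#include <stdio.h>

#define ADDR_ANY       0x01	/* models IPV6_ADDR_ANY (the :: wildcard) */
#define ADDR_UNICAST   0x02
#define ADDR_MULTICAST 0x04
#define ADDR_LOOPBACK  0x08
#define ADDR_LINKLOCAL 0x10

#define CAP_XMIT       0x1
#define CAP_RCV        0x2
#define CAP_PER_PACKET 0x4

static unsigned int get_cap(int ltype, int rtype, int have_link)
{
	unsigned int flags = 0;

	if (ltype == ADDR_ANY || rtype == ADDR_ANY)
		return CAP_PER_PACKET;	/* wildcard endpoint: re-decide per packet */

	if ((ltype & (ADDR_UNICAST | ADDR_MULTICAST)) &&
	    (rtype & (ADDR_UNICAST | ADDR_MULTICAST)) &&
	    !((ltype | rtype) & ADDR_LOOPBACK) &&
	    (!((ltype | rtype) & ADDR_LINKLOCAL) || have_link)) {
		if (ltype & ADDR_UNICAST)
			flags |= CAP_XMIT;
		if (rtype & ADDR_UNICAST)
			flags |= CAP_RCV;
	}
	return flags;
}

int main(void)
{
	printf("wildcard local: cap=%x\n", get_cap(ADDR_ANY, ADDR_UNICAST, 0));
	printf("unicast/unicast: cap=%x\n", get_cap(ADDR_UNICAST, ADDR_UNICAST, 0));
	return 0;
}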
@@ -740,7 +775,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
goto discard;
}
- if (!ip6_tnl_rcv_ctl(t)) {
+ if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
t->dev->stats.rx_dropped++;
rcu_read_unlock();
goto discard;
@@ -921,7 +956,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu) {
*pmtu = mtu;
err = -EMSGSIZE;
@@ -1114,25 +1149,6 @@ tx_err:
return NETDEV_TX_OK;
}
-static void ip6_tnl_set_cap(struct ip6_tnl *t)
-{
- struct ip6_tnl_parm *p = &t->parms;
- int ltype = ipv6_addr_type(&p->laddr);
- int rtype = ipv6_addr_type(&p->raddr);
-
- p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
-
- if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
- rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
- !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
- (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
- if (ltype&IPV6_ADDR_UNICAST)
- p->flags |= IP6_TNL_F_CAP_XMIT;
- if (rtype&IPV6_ADDR_UNICAST)
- p->flags |= IP6_TNL_F_CAP_RCV;
- }
-}
-
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
struct net_device *dev = t->dev;
@@ -1153,7 +1169,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
- ip6_tnl_set_cap(t);
+ p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+ p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
dev->flags |= IFF_POINTOPOINT;
@@ -1438,6 +1455,9 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
+
+ ip6_tnl_link_config(t);
+
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 461e47c8e95..4532973f0dd 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2104,8 +2104,9 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
if (c->mf6c_parent >= MAXMIFS)
return -ENOENT;
- if (MIF_EXISTS(mrt, c->mf6c_parent))
- RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
+ if (MIF_EXISTS(mrt, c->mf6c_parent) &&
+ nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
+ return -EMSGSIZE;
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5cb75bfe45b..7af5aee75d9 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -46,6 +46,7 @@
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -63,7 +64,9 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
(struct ip_comp_hdr *)(skb->data + offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
+ if (type != ICMPV6_DEST_UNREACH &&
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
spi = htonl(ntohs(ipcomph->cpi));
@@ -72,8 +75,10 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
- spi, &iph->daddr);
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
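The ipcomp6 conversion above is the template every IPv6 protocol error handler follows after this series: accept NDISC_REDIRECT alongside the classic error types, then hand the skb to ip6_redirect() or ip6_update_pmtu() instead of touching routes directly. A hedged sketch of that shape (the handler name and the elided validation step are illustrative):

static void example_proto_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			      u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);

	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;

	/* ...protocol-specific validation of the embedded header goes here... */

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, 0, 0);		/* invokes dst->ops->redirect */
	else
		ip6_update_pmtu(skb, net, info, 0, 0);	/* invokes dst->ops->update_pmtu */
}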
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6d0f5dc8e3a..92f8e48e4ba 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -211,6 +211,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct ipv6_mc_socklist __rcu **lnk;
struct net *net = sock_net(sk);
+ if (!ipv6_addr_is_multicast(addr))
+ return -EINVAL;
+
spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
(mc_lst = rcu_dereference_protected(*lnk,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 54f62d3b8dd..ff36194a71a 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -143,40 +143,6 @@ struct neigh_table nd_tbl = {
.gc_thresh3 = 1024,
};
-/* ND options */
-struct ndisc_options {
- struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
-#ifdef CONFIG_IPV6_ROUTE_INFO
- struct nd_opt_hdr *nd_opts_ri;
- struct nd_opt_hdr *nd_opts_ri_end;
-#endif
- struct nd_opt_hdr *nd_useropts;
- struct nd_opt_hdr *nd_useropts_end;
-};
-
-#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
-#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
-#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
-#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
-#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
-#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
-
-#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
-
-/*
- * Return the padding between the option length and the start of the
- * link addr. Currently only IP-over-InfiniBand needs this, although
- * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
- * also need a pad of 2.
- */
-static int ndisc_addr_option_pad(unsigned short type)
-{
- switch (type) {
- case ARPHRD_INFINIBAND: return 2;
- default: return 0;
- }
-}
-
static inline int ndisc_opt_addr_space(struct net_device *dev)
{
return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type));
@@ -233,8 +199,8 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
}
-static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
- struct ndisc_options *ndopts)
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+ struct ndisc_options *ndopts)
{
struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt;
@@ -297,17 +263,6 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
return ndopts;
}
-static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
- struct net_device *dev)
-{
- u8 *lladdr = (u8 *)(p + 1);
- int lladdrlen = p->nd_opt_len << 3;
- int prepad = ndisc_addr_option_pad(dev->type);
- if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
- return NULL;
- return lladdr + prepad;
-}
-
int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
{
switch (dev->type) {
@@ -1379,16 +1334,6 @@ out:
static void ndisc_redirect_rcv(struct sk_buff *skb)
{
- struct inet6_dev *in6_dev;
- struct icmp6hdr *icmph;
- const struct in6_addr *dest;
- const struct in6_addr *target; /* new first hop to destination */
- struct neighbour *neigh;
- int on_link = 0;
- struct ndisc_options ndopts;
- int optlen;
- u8 *lladdr = NULL;
-
#ifdef CONFIG_IPV6_NDISC_NODETYPE
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
@@ -1405,65 +1350,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
}
- optlen = skb->tail - skb->transport_header;
- optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
-
- if (optlen < 0) {
- ND_PRINTK(2, warn, "Redirect: packet too short\n");
- return;
- }
-
- icmph = icmp6_hdr(skb);
- target = (const struct in6_addr *) (icmph + 1);
- dest = target + 1;
-
- if (ipv6_addr_is_multicast(dest)) {
- ND_PRINTK(2, warn,
- "Redirect: destination address is multicast\n");
- return;
- }
-
- if (ipv6_addr_equal(dest, target)) {
- on_link = 1;
- } else if (ipv6_addr_type(target) !=
- (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK(2, warn,
- "Redirect: target address is not link-local unicast\n");
- return;
- }
-
- in6_dev = __in6_dev_get(skb->dev);
- if (!in6_dev)
- return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
- return;
-
- /* RFC2461 8.1:
- * The IP source address of the Redirect MUST be the same as the current
- * first-hop router for the specified ICMP Destination Address.
- */
-
- if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
- ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
- return;
- }
- if (ndopts.nd_opts_tgt_lladdr) {
- lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
- skb->dev);
- if (!lladdr) {
- ND_PRINTK(2, warn,
- "Redirect: invalid link-layer address length\n");
- return;
- }
- }
-
- neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
- if (neigh) {
- rt6_redirect(dest, &ipv6_hdr(skb)->daddr,
- &ipv6_hdr(skb)->saddr, neigh, lladdr,
- on_link);
- neigh_release(neigh);
- }
+ icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
}
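All the validation that used to live here moves into rt6_do_redirect(); the receive path now just feeds the redirect into the generic ICMPv6 notification machinery, so every registered protocol sees it through its err_handler. Roughly what that dispatch amounts to, condensed from memory of net/ipv6/icmp.c and offered as a sketch rather than a quote:

static void redirect_notify_model(struct sk_buff *skb, u8 nexthdr, u8 type,
				  u8 code, int inner_offset, __be32 info)
{
	const struct inet6_protocol *ipprot;

	rcu_read_lock();
	/* inet6_protos[] is now indexed by the raw protocol number; see the
	 * net/ipv6/protocol.c hunk further down. */
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
	rcu_read_unlock();
}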
void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1472,6 +1359,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
struct net *net = dev_net(dev);
struct sock *sk = net->ipv6.ndisc_sk;
int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
+ struct inet_peer *peer;
struct sk_buff *buff;
struct icmp6hdr *icmph;
struct in6_addr saddr_buf;
@@ -1485,6 +1373,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
int rd_len;
int err;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
+ bool ret;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
@@ -1518,9 +1407,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
"Redirect: destination is not a neighbour\n");
goto release;
}
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ ret = inet_peer_xrlim_allow(peer, 1*HZ);
+ if (peer)
+ inet_putpeer(peer);
+ if (!ret)
goto release;
if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3224ef90a21..4794f96cf2e 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -143,11 +143,11 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv6_confirm(unsigned int hooknum,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ipv6_helper(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
const struct nf_conn_help *help;
@@ -161,15 +161,15 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
- goto out;
+ return NF_ACCEPT;
/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
- goto out;
+ return NF_ACCEPT;
protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
skb->len - extoff);
@@ -179,12 +179,19 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
}
ret = helper->help(skb, protoff, ct, ctinfo);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
"nf_ct_%s: dropping packet", helper->name);
- return ret;
}
-out:
+ return ret;
+}
+
+static unsigned int ipv6_confirm(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
@@ -254,6 +261,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
.priority = NF_IP6_PRI_CONNTRACK,
},
{
+ .hook = ipv6_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv6_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
@@ -261,6 +275,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
.priority = NF_IP6_PRI_LAST,
},
{
+ .hook = ipv6_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv6_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
@@ -333,37 +354,75 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
-static int __init nf_conntrack_l3proto_ipv6_init(void)
+static int ipv6_net_init(struct net *net)
{
int ret = 0;
- need_conntrack();
- nf_defrag_ipv6_enable();
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_tcp6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register tcp.\n");
- return ret;
+ printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n");
+ goto out;
}
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udp6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register udp.\n");
- goto cleanup_tcp;
+ printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n");
+ goto cleanup_tcp6;
}
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_icmpv6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register icmpv6.\n");
- goto cleanup_udp;
+ printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n");
+ goto cleanup_udp6;
}
-
- ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
+ ret = nf_conntrack_l3proto_register(net,
+ &nf_conntrack_l3proto_ipv6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register ipv6\n");
+ printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n");
goto cleanup_icmpv6;
}
+ return 0;
+ cleanup_icmpv6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmpv6);
+ cleanup_udp6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp6);
+ cleanup_tcp6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp6);
+ out:
+ return ret;
+}
+static void ipv6_net_exit(struct net *net)
+{
+ nf_conntrack_l3proto_unregister(net,
+ &nf_conntrack_l3proto_ipv6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmpv6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp6);
+}
+
+static struct pernet_operations ipv6_net_ops = {
+ .init = ipv6_net_init,
+ .exit = ipv6_net_exit,
+};
+
+static int __init nf_conntrack_l3proto_ipv6_init(void)
+{
+ int ret = 0;
+
+ need_conntrack();
+ nf_defrag_ipv6_enable();
+
+ ret = register_pernet_subsys(&ipv6_net_ops);
+ if (ret < 0)
+ goto cleanup_pernet;
ret = nf_register_hooks(ipv6_conntrack_ops,
ARRAY_SIZE(ipv6_conntrack_ops));
if (ret < 0) {
@@ -374,13 +433,8 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
return ret;
cleanup_ipv6:
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
- cleanup_icmpv6:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
- cleanup_udp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
- cleanup_tcp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
+ unregister_pernet_subsys(&ipv6_net_ops);
+ cleanup_pernet:
return ret;
}
@@ -388,10 +442,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void)
{
synchronize_net();
nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
+ unregister_pernet_subsys(&ipv6_net_ops);
}
module_init(nf_conntrack_l3proto_ipv6_init);
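The module now registers its conntrack protocols once per network namespace through pernet_operations instead of globally at load time. The skeleton of that pattern, with illustrative names (only register_pernet_subsys() and the __net_init/__net_exit annotations are taken from the hunk):

static int __net_init example_net_init(struct net *net)
{
	/* register / allocate this namespace's state; undone by exit */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down exactly what example_net_init() set up */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,	/* runs for every existing and future netns */
	.exit = example_net_exit,	/* runs when a netns is destroyed */
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}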
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3e81904fbbc..2d54b2061d6 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -29,6 +29,11 @@
static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -90,7 +95,7 @@ static int icmpv6_print_tuple(struct seq_file *s,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
- return &nf_ct_icmpv6_timeout;
+ return &icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -281,16 +286,18 @@ static int icmpv6_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_icmp_net *in = icmpv6_pernet(net);
if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
} else {
/* Set default ICMPv6 timeout. */
- *timeout = nf_ct_icmpv6_timeout;
+ *timeout = in->timeout;
}
return 0;
}
@@ -315,11 +322,9 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *icmpv6_sysctl_header;
static struct ctl_table icmpv6_sysctl_table[] = {
{
.procname = "nf_conntrack_icmpv6_timeout",
- .data = &nf_ct_icmpv6_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -328,6 +333,36 @@ static struct ctl_table icmpv6_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(icmpv6_sysctl_table,
+ sizeof(icmpv6_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &in->timeout;
+#endif
+ return 0;
+}
+
+static int icmpv6_init_net(struct net *net, u_int16_t proto)
+{
+ struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_proto_net *pn = &in->pn;
+
+ in->timeout = nf_ct_icmpv6_timeout;
+
+ return icmpv6_kmemdup_sysctl_table(pn, in);
+}
+
+static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
{
.l3proto = PF_INET6,
@@ -355,8 +390,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &icmpv6_sysctl_header,
- .ctl_table = icmpv6_sysctl_table,
-#endif
+ .init_net = icmpv6_init_net,
+ .get_net_proto = icmpv6_get_net_proto,
};
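The sysctl table stops pointing at a single global timeout: each namespace duplicates the template and binds .data to its own storage. The idiom in isolation, with illustrative names around the kmemdup() call the hunk introduces:

static struct ctl_table example_sysctl_template[] = {
	{
		.procname	= "example_timeout",
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		/* .data left NULL in the template on purpose */
	},
	{ }
};

static int example_init_net_sysctl(struct nf_proto_net *pn,
				   unsigned int *per_net_timeout)
{
	pn->ctl_table = kmemdup(example_sysctl_template,
				sizeof(example_sysctl_template), GFP_KERNEL);
	if (!pn->ctl_table)
		return -ENOMEM;

	/* bind the duplicated entry to this namespace's timeout */
	pn->ctl_table[0].data = per_net_timeout;
	return 0;
}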
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9a7978fdc02..053082dfc93 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -29,9 +29,7 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int hash = protocol & (MAX_INET_PROTOS - 1);
-
- return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
@@ -42,9 +40,9 @@ EXPORT_SYMBOL(inet6_add_protocol);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret;
- ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
prot, NULL) == prot) ? 0 : -1;
synchronize_net();
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 93d69836fde..ef0579d5bca 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
- hash = nexthdr & (MAX_INET_PROTOS - 1);
+ hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
@@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
- raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
+ raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
@@ -328,9 +328,12 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
return;
harderr = icmpv6_err_convert(type, code, &err);
- if (type == ICMPV6_PKT_TOOBIG)
+ if (type == ICMPV6_PKT_TOOBIG) {
+ ip6_sk_update_pmtu(skb, sk, info);
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
-
+ }
+ if (type == NDISC_REDIRECT)
+ ip6_sk_redirect(skb, sk);
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index becb048d18d..8e80fd27910 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -78,7 +78,10 @@ static int ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
-static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
@@ -99,10 +102,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
if (!(rt->dst.flags & DST_HOST))
return NULL;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
-
- peer = rt->rt6i_peer;
+ peer = rt6_get_peer_create(rt);
if (peer) {
u32 *old_p = __DST_METRICS_PTR(old);
unsigned long prev, new;
@@ -123,21 +123,27 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
return p;
}
-static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
+static inline const void *choose_neigh_daddr(struct rt6_info *rt,
+ struct sk_buff *skb,
+ const void *daddr)
{
struct in6_addr *p = &rt->rt6i_gateway;
if (!ipv6_addr_any(p))
return (const void *) p;
+ else if (skb)
+ return &ipv6_hdr(skb)->daddr;
return daddr;
}
-static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
struct rt6_info *rt = (struct rt6_info *) dst;
struct neighbour *n;
- daddr = choose_neigh_daddr(rt, daddr);
+ daddr = choose_neigh_daddr(rt, skb, daddr);
n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
if (n)
return n;
@@ -152,7 +158,7 @@ static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
if (IS_ERR(n))
return PTR_ERR(n);
}
- dst_set_neighbour(&rt->dst, n);
+ rt->n = n;
return 0;
}
@@ -171,6 +177,7 @@ static struct dst_ops ip6_dst_ops_template = {
.negative_advice = ip6_negative_advice,
.link_failure = ip6_link_failure,
.update_pmtu = ip6_rt_update_pmtu,
+ .redirect = rt6_do_redirect,
.local_out = __ip6_local_out,
.neigh_lookup = ip6_neigh_lookup,
};
@@ -182,7 +189,13 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -200,6 +213,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
+ .redirect = ip6_rt_blackhole_redirect,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
.neigh_lookup = ip6_neigh_lookup,
};
@@ -261,16 +275,20 @@ static struct rt6_info ip6_blk_hole_entry_template = {
#endif
/* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
+static inline struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags)
+ int flags,
+ struct fib6_table *table)
{
- struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
+ struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
+ 0, DST_OBSOLETE_NONE, flags);
- if (rt)
- memset(&rt->rt6i_table, 0,
- sizeof(*rt) - sizeof(struct dst_entry));
+ if (rt) {
+ struct dst_entry *dst = &rt->dst;
+ memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+ rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+ }
return rt;
}
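ip6_dst_alloc() now zeroes everything after the embedded dst_entry with memset(dst + 1, ...) rather than naming the first trailing field, so the memset can't silently go stale when fields are reordered. The idiom demonstrated standalone (the structs are stand-ins, not rt6_info):

#include <stdio.h>
#include <string.h>

struct base { long refcnt; };

struct wrapper {
	struct base b;	/* must be the first member */
	int flags;
	int metric;
};

int main(void)
{
	struct wrapper w = { { 1 }, 0x55, 0x77 };
	struct base *dst = &w.b;

	/* dst + 1 points just past the embedded base; the length covers
	 * every trailing byte of the wrapper, padding included. */
	memset(dst + 1, 0, sizeof(w) - sizeof(*dst));

	printf("refcnt=%ld flags=%d metric=%d\n", w.b.refcnt, w.flags, w.metric);
	return 0;
}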
@@ -278,7 +296,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;
- struct inet_peer *peer = rt->rt6i_peer;
+
+ if (rt->n)
+ neigh_release(rt->n);
if (!(rt->dst.flags & DST_HOST))
dst_destroy_metrics_generic(dst);
@@ -291,8 +311,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
dst_release(dst->from);
- if (peer) {
- rt->rt6i_peer = NULL;
+ if (rt6_has_peer(rt)) {
+ struct inet_peer *peer = rt6_peer_ptr(rt);
inet_putpeer(peer);
}
}
@@ -306,13 +326,20 @@ static u32 rt6_peer_genid(void)
void rt6_bind_peer(struct rt6_info *rt, int create)
{
+ struct inet_peer_base *base;
struct inet_peer *peer;
- peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
- if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
- inet_putpeer(peer);
- else
- rt->rt6i_peer_genid = rt6_peer_genid();
+ base = inetpeer_base_ptr(rt->_rt6i_peer);
+ if (!base)
+ return;
+
+ peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
+ if (peer) {
+ if (!rt6_set_peer(rt, peer))
+ inet_putpeer(peer);
+ else
+ rt->rt6i_peer_genid = rt6_peer_genid();
+ }
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -323,12 +350,19 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
- if (dev != loopback_dev && idev && idev->dev == dev) {
- struct inet6_dev *loopback_idev =
- in6_dev_get(loopback_dev);
- if (loopback_idev) {
- rt->rt6i_idev = loopback_idev;
- in6_dev_put(idev);
+ if (dev != loopback_dev) {
+ if (idev && idev->dev == dev) {
+ struct inet6_dev *loopback_idev =
+ in6_dev_get(loopback_dev);
+ if (loopback_idev) {
+ rt->rt6i_idev = loopback_idev;
+ in6_dev_put(idev);
+ }
+ }
+ if (rt->n && rt->n->dev == dev) {
+ rt->n->dev = loopback_dev;
+ dev_hold(loopback_dev);
+ dev_put(dev);
}
}
}
@@ -418,7 +452,7 @@ static void rt6_probe(struct rt6_info *rt)
* to no more than one per minute.
*/
rcu_read_lock();
- neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
+ neigh = rt ? rt->n : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
goto out;
read_lock_bh(&neigh->lock);
@@ -465,7 +499,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
int m;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(&rt->dst);
+ neigh = rt->n;
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
@@ -812,7 +846,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
if (rt) {
rt->rt6i_flags |= RTF_CACHE;
- dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
+ rt->n = neigh_clone(ort->n);
}
return rt;
}
@@ -846,7 +880,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -931,6 +965,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
{
int flags = 0;
+ fl6->flowi6_iif = net->loopback_dev->ifindex;
+
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;
@@ -949,12 +985,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
struct dst_entry *new = NULL;
- rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
+ rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
- memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
-
new = &rt->dst;
+ memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+ rt6_init_peer(rt, net->ipv6.peers);
+
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -996,7 +1033,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
if (rt->rt6i_peer_genid != rt6_peer_genid()) {
- if (!rt->rt6i_peer)
+ if (!rt6_has_peer(rt))
rt6_bind_peer(rt, 0);
rt->rt6i_peer_genid = rt6_peer_genid();
}
@@ -1038,11 +1075,15 @@ static void ip6_link_failure(struct sk_buff *skb)
}
}
-static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
struct rt6_info *rt6 = (struct rt6_info*)dst;
+ dst_confirm(dst);
if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
+ struct net *net = dev_net(dst->dev);
+
rt6->rt6i_flags |= RTF_MODIFIED;
if (mtu < IPV6_MIN_MTU) {
u32 features = dst_metric(dst, RTAX_FEATURES);
@@ -1051,9 +1092,66 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
dst_metric_set(dst, RTAX_FEATURES, features);
}
dst_metric_set(dst, RTAX_MTU, mtu);
+ rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
}
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
+ int oif, u32 mark)
+{
+ const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
+ dst_release(dst);
+}
+EXPORT_SYMBOL_GPL(ip6_update_pmtu);
+
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
+{
+ ip6_update_pmtu(skb, sock_net(sk), mtu,
+ sk->sk_bound_dev_if, sk->sk_mark);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
+
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
+{
+ const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+EXPORT_SYMBOL_GPL(ip6_redirect);
+
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_redirect);
+
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
@@ -1110,7 +1208,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
+ rt = ip6_dst_alloc(net, dev, 0, NULL);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
@@ -1120,7 +1218,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (neigh)
neigh_hold(neigh);
else {
- neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
+ neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr);
if (IS_ERR(neigh)) {
in6_dev_put(idev);
dst_free(&rt->dst);
@@ -1130,7 +1228,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
atomic_set(&rt->dst.__refcnt, 1);
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
@@ -1292,7 +1390,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
+ rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
if (!rt) {
err = -ENOMEM;
@@ -1546,107 +1644,94 @@ static int ip6_route_del(struct fib6_config *cfg)
return err;
}
-/*
- * Handle redirects
- */
-struct ip6rd_flowi {
- struct flowi6 fl6;
- struct in6_addr gateway;
-};
-
-static struct rt6_info *__ip6_route_redirect(struct net *net,
- struct fib6_table *table,
- struct flowi6 *fl6,
- int flags)
+static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
- struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
- struct rt6_info *rt;
- struct fib6_node *fn;
+ struct net *net = dev_net(skb->dev);
+ struct netevent_redirect netevent;
+ struct rt6_info *rt, *nrt = NULL;
+ const struct in6_addr *target;
+ struct ndisc_options ndopts;
+ const struct in6_addr *dest;
+ struct neighbour *old_neigh;
+ struct inet6_dev *in6_dev;
+ struct neighbour *neigh;
+ struct icmp6hdr *icmph;
+ int optlen, on_link;
+ u8 *lladdr;
- /*
- * Get the "current" route for this destination and
- * check if the redirect has come from approriate router.
- *
- * RFC 2461 specifies that redirects should only be
- * accepted if they come from the nexthop to the target.
- * Due to the way the routes are chosen, this notion
- * is a bit fuzzy and one might need to check all possible
- * routes.
- */
+ optlen = skb->tail - skb->transport_header;
+ optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
- read_lock_bh(&table->tb6_lock);
- fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
-restart:
- for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
- /*
- * Current route is on-link; redirect is always invalid.
- *
- * Seems, previous statement is not true. It could
- * be node, which looks for us as on-link (f.e. proxy ndisc)
- * But then router serving it might decide, that we should
- * know truth 8)8) --ANK (980726).
- */
- if (rt6_check_expired(rt))
- continue;
- if (!(rt->rt6i_flags & RTF_GATEWAY))
- continue;
- if (fl6->flowi6_oif != rt->dst.dev->ifindex)
- continue;
- if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
- continue;
- break;
+ if (optlen < 0) {
+ net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
+ return;
}
- if (!rt)
- rt = net->ipv6.ip6_null_entry;
- BACKTRACK(net, &fl6->saddr);
-out:
- dst_hold(&rt->dst);
-
- read_unlock_bh(&table->tb6_lock);
-
- return rt;
-};
+ icmph = icmp6_hdr(skb);
+ target = (const struct in6_addr *) (icmph + 1);
+ dest = target + 1;
-static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
- const struct in6_addr *src,
- const struct in6_addr *gateway,
- struct net_device *dev)
-{
- int flags = RT6_LOOKUP_F_HAS_SADDR;
- struct net *net = dev_net(dev);
- struct ip6rd_flowi rdfl = {
- .fl6 = {
- .flowi6_oif = dev->ifindex,
- .daddr = *dest,
- .saddr = *src,
- },
- };
+ if (ipv6_addr_is_multicast(dest)) {
+ net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
+ return;
+ }
- rdfl.gateway = *gateway;
+ on_link = 0;
+ if (ipv6_addr_equal(dest, target)) {
+ on_link = 1;
+ } else if (ipv6_addr_type(target) !=
+ (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
+ net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
+ return;
+ }
- if (rt6_need_strict(dest))
- flags |= RT6_LOOKUP_F_IFACE;
+ in6_dev = __in6_dev_get(skb->dev);
+ if (!in6_dev)
+ return;
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ return;
- return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
- flags, __ip6_route_redirect);
-}
+ /* RFC2461 8.1:
+ * The IP source address of the Redirect MUST be the same as the current
+ * first-hop router for the specified ICMP Destination Address.
+ */
-void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
- const struct in6_addr *saddr,
- struct neighbour *neigh, u8 *lladdr, int on_link)
-{
- struct rt6_info *rt, *nrt = NULL;
- struct netevent_redirect netevent;
- struct net *net = dev_net(neigh->dev);
+ if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
+ net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
+ return;
+ }
- rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
+ lladdr = NULL;
+ if (ndopts.nd_opts_tgt_lladdr) {
+ lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
+ skb->dev);
+ if (!lladdr) {
+ net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
+ return;
+ }
+ }
+ rt = (struct rt6_info *) dst;
if (rt == net->ipv6.ip6_null_entry) {
net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
- goto out;
+ return;
}
+ /* Redirect received -> path was valid.
+ * Look, redirects are sent only in response to data packets,
+ * so that this nexthop apparently is reachable. --ANK
+ */
+ dst_confirm(&rt->dst);
+
+ neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
+ if (!neigh)
+ return;
+
+ /* Duplicate redirect: silently ignore. */
+ old_neigh = rt->n;
+ if (neigh == old_neigh)
+ goto out;
+
/*
* We have finally decided to accept it.
*/
@@ -1658,17 +1743,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
NEIGH_UPDATE_F_ISROUTER))
);
- /*
- * Redirect received -> path was valid.
- * Look, redirects are sent only in response to data packets,
- * so that this nexthop apparently is reachable. --ANK
- */
- dst_confirm(&rt->dst);
-
- /* Duplicate redirect: silently ignore. */
- if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
- goto out;
-
nrt = ip6_rt_copy(rt, dest);
if (!nrt)
goto out;
@@ -1678,132 +1752,25 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
nrt->rt6i_flags &= ~RTF_GATEWAY;
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
- dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
+ nrt->n = neigh_clone(neigh);
if (ip6_ins_rt(nrt))
goto out;
netevent.old = &rt->dst;
+ netevent.old_neigh = old_neigh;
netevent.new = &nrt->dst;
+ netevent.new_neigh = neigh;
+ netevent.daddr = dest;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
if (rt->rt6i_flags & RTF_CACHE) {
+ rt = (struct rt6_info *) dst_clone(&rt->dst);
ip6_del_rt(rt);
- return;
}
out:
- dst_release(&rt->dst);
-}
-
-/*
- * Handle ICMP "packet too big" messages
- * i.e. Path MTU discovery
- */
-
-static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct net *net, u32 pmtu, int ifindex)
-{
- struct rt6_info *rt, *nrt;
- int allfrag = 0;
-again:
- rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
- if (!rt)
- return;
-
- if (rt6_check_expired(rt)) {
- ip6_del_rt(rt);
- goto again;
- }
-
- if (pmtu >= dst_mtu(&rt->dst))
- goto out;
-
- if (pmtu < IPV6_MIN_MTU) {
- /*
- * According to RFC2460, PMTU is set to the IPv6 Minimum Link
- * MTU (1280) and a fragment header should always be included
- * after a node receiving Too Big message reporting PMTU is
- * less than the IPv6 Minimum Link MTU.
- */
- pmtu = IPV6_MIN_MTU;
- allfrag = 1;
- }
-
- /* New mtu received -> path was valid.
- They are sent only in response to data packets,
- so that this nexthop apparently is reachable. --ANK
- */
- dst_confirm(&rt->dst);
-
- /* Host route. If it is static, it would be better
- not to override it, but add new one, so that
- when cache entry will expire old pmtu
- would return automatically.
- */
- if (rt->rt6i_flags & RTF_CACHE) {
- dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
- if (allfrag) {
- u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(&rt->dst, RTAX_FEATURES, features);
- }
- rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
- rt->rt6i_flags |= RTF_MODIFIED;
- goto out;
- }
-
- /* Network route.
- Two cases are possible:
- 1. It is connected route. Action: COW
- 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
- */
- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
- nrt = rt6_alloc_cow(rt, daddr, saddr);
- else
- nrt = rt6_alloc_clone(rt, daddr);
-
- if (nrt) {
- dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
- if (allfrag) {
- u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
- }
-
- /* According to RFC 1981, detecting PMTU increase shouldn't be
- * happened within 5 mins, the recommended timer is 10 mins.
- * Here this route expiration time is set to ip6_rt_mtu_expires
- * which is 10 mins. After 10 mins the decreased pmtu is expired
- * and detecting PMTU increase will be automatically happened.
- */
- rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
- nrt->rt6i_flags |= RTF_DYNAMIC;
- ip6_ins_rt(nrt);
- }
-out:
- dst_release(&rt->dst);
-}
-
-void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct net_device *dev, u32 pmtu)
-{
- struct net *net = dev_net(dev);
-
- /*
- * RFC 1981 states that a node "MUST reduce the size of the packets it
- * is sending along the path" that caused the Packet Too Big message.
- * Since it's not possible in the general case to determine which
- * interface was used to send the original packet, we update the MTU
- * on the interface that will be used to send future packets. We also
- * update the MTU on the interface that received the Packet Too Big in
- * case the original packet was forced out that interface with
- * SO_BINDTODEVICE or similar. This is the next best thing to the
- * correct behaviour, which would be to update the MTU on all
- * interfaces.
- */
- rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
- rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+ neigh_release(neigh);
}
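rt6_do_redirect() re-parses the redirect straight from the skb: an icmp6hdr, the target address (the new first hop), the destination address, then ND options. The same layout arithmetic in a standalone model (type 137 is NDISC_REDIRECT; the struct names are stand-ins):

#include <stdint.h>
#include <stdio.h>

struct icmp6hdr_model { uint8_t type, code; uint16_t cksum; uint32_t reserved; };
struct in6_model { uint8_t s6_addr[16]; };

int main(void)
{
	uint8_t pkt[8 + 16 + 16 + 8] = { 137, 0 };	/* header + 2 addrs + one option */
	const struct icmp6hdr_model *icmph = (const void *)pkt;
	const struct in6_model *target = (const void *)(icmph + 1);
	const struct in6_model *dest = target + 1;
	int optlen = (int)sizeof(pkt) -
		     (int)(sizeof(*icmph) + 2 * sizeof(struct in6_model));

	/* dest == target would mean an on-link redirect */
	printf("target@+%ld dest@+%ld optlen=%d\n",
	       (long)((const uint8_t *)target - pkt),
	       (long)((const uint8_t *)dest - pkt), optlen);
	return 0;
}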
/*
@@ -1814,8 +1781,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest)
{
struct net *net = dev_net(ort->dst.dev);
- struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- ort->dst.dev, 0);
+ struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
+ ort->rt6i_table);
if (rt) {
rt->dst.input = ort->dst.input;
@@ -2099,8 +2066,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
bool anycast)
{
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- net->loopback_dev, 0);
+ struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
int err;
if (!rt) {
@@ -2396,13 +2362,11 @@ static int rt6_fill_node(struct net *net,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
- const struct inet_peer *peer;
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
u32 table;
struct neighbour *n;
- u32 ts, tsage;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2440,10 +2404,12 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags & RTF_DYNAMIC)
rtm->rtm_protocol = RTPROT_REDIRECT;
- else if (rt->rt6i_flags & RTF_ADDRCONF)
- rtm->rtm_protocol = RTPROT_KERNEL;
- else if (rt->rt6i_flags & RTF_DEFAULT)
- rtm->rtm_protocol = RTPROT_RA;
+ else if (rt->rt6i_flags & RTF_ADDRCONF) {
+ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
+ rtm->rtm_protocol = RTPROT_RA;
+ else
+ rtm->rtm_protocol = RTPROT_KERNEL;
+ }
if (rt->rt6i_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
@@ -2500,7 +2466,7 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
rcu_read_lock();
- n = dst_get_neighbour_noref(&rt->dst);
+ n = rt->n;
if (n) {
if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
rcu_read_unlock();
@@ -2514,22 +2480,10 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
goto nla_put_failure;
- if (!(rt->rt6i_flags & RTF_EXPIRES))
- expires = 0;
- else if (rt->dst.expires - jiffies < INT_MAX)
- expires = rt->dst.expires - jiffies;
- else
- expires = INT_MAX;
- peer = rt->rt6i_peer;
- ts = tsage = 0;
- if (peer && peer->tcp_ts_stamp) {
- ts = peer->tcp_ts;
- tsage = get_seconds() - peer->tcp_ts_stamp;
- }
+ expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
- expires, rt->dst.error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -2722,7 +2676,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
rcu_read_lock();
- n = dst_get_neighbour_noref(&rt->dst);
+ n = rt->n;
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
@@ -3007,6 +2961,31 @@ static struct pernet_operations ip6_route_net_ops = {
.exit = ip6_route_net_exit,
};
+static int __net_init ipv6_inetpeer_init(struct net *net)
+{
+ struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
+
+ if (!bp)
+ return -ENOMEM;
+ inet_peer_base_init(bp);
+ net->ipv6.peers = bp;
+ return 0;
+}
+
+static void __net_exit ipv6_inetpeer_exit(struct net *net)
+{
+ struct inet_peer_base *bp = net->ipv6.peers;
+
+ net->ipv6.peers = NULL;
+ inetpeer_invalidate_tree(bp);
+ kfree(bp);
+}
+
+static struct pernet_operations ipv6_inetpeer_ops = {
+ .init = ipv6_inetpeer_init,
+ .exit = ipv6_inetpeer_exit,
+};
+
static struct pernet_operations ip6_route_net_late_ops = {
.init = ip6_route_net_init_late,
.exit = ip6_route_net_exit_late,
@@ -3032,10 +3011,14 @@ int __init ip6_route_init(void)
if (ret)
goto out_kmem_cache;
- ret = register_pernet_subsys(&ip6_route_net_ops);
+ ret = register_pernet_subsys(&ipv6_inetpeer_ops);
if (ret)
goto out_dst_entries;
+ ret = register_pernet_subsys(&ip6_route_net_ops);
+ if (ret)
+ goto out_register_inetpeer;
+
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
/* Registering of the loopback is done before this portion of code,
@@ -3088,6 +3071,8 @@ out_fib6_init:
fib6_gc_cleanup();
out_register_subsys:
unregister_pernet_subsys(&ip6_route_net_ops);
+out_register_inetpeer:
+ unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
@@ -3102,6 +3087,7 @@ void ip6_route_cleanup(void)
fib6_rules_cleanup();
xfrm6_fini();
fib6_gc_cleanup();
+ unregister_pernet_subsys(&ipv6_inetpeer_ops);
unregister_pernet_subsys(&ip6_route_net_ops);
dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 60415711563..3bd1bfc01f8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -527,9 +527,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -542,6 +539,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return 0;
break;
+ case ICMP_REDIRECT:
+ break;
}
err = -ENOENT;
@@ -551,7 +550,23 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
skb->dev,
iph->daddr,
iph->saddr);
- if (t == NULL || t->parms.iph.daddr == 0)
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->dev->ifindex, 0, IPPROTO_IPV6, 0);
+ err = 0;
+ goto out;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+ IPPROTO_IPV6, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (t->parms.iph.daddr == 0)
goto out;
err = 0;
@@ -792,7 +807,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (tunnel->parms.iph.daddr && skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8e951d8d3b8..bb46061c813 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -21,9 +21,6 @@
#include <net/ipv6.h>
#include <net/tcp.h>
-extern int sysctl_tcp_syncookies;
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -180,7 +177,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9df64a50b07..c66b90f71c9 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -277,22 +277,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
rt = (struct rt6_info *) dst;
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
- ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
- struct inet_peer *peer = rt6_get_peer(rt);
- /*
- * VJ's idea. We save last timestamp seen from
- * the destination in peer table, when entering state
- * TIME-WAIT * and initialize rx_opt.ts_recent from it,
- * when trying new connection.
- */
- if (peer) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
- }
- }
- }
+ ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+ tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
if (np->opt)
@@ -329,6 +315,23 @@ failure:
return err;
}
+static void tcp_v6_mtu_reduced(struct sock *sk)
+{
+ struct dst_entry *dst;
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+ dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
+ if (!dst)
+ return;
+
+ if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
+ tcp_sync_mss(sk, dst_mtu(dst));
+ tcp_simple_retransmit(sk);
+ }
+}
+
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
@@ -356,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
bh_lock_sock(sk);
- if (sock_owned_by_user(sk))
+ if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
@@ -377,49 +380,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
- if (type == ICMPV6_PKT_TOOBIG) {
- struct dst_entry *dst;
-
- if (sock_owned_by_user(sk))
- goto out;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
- goto out;
-
- /* icmp should have updated the destination cache entry */
- dst = __sk_dst_check(sk, np->dst_cookie);
-
- if (dst == NULL) {
- struct inet_sock *inet = inet_sk(sk);
- struct flowi6 fl6;
+ if (type == NDISC_REDIRECT) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- /* BUGGG_FUTURE: Again, it is not clear how
- to handle rthdr case. Ignore this complexity
- for now.
- */
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet->inet_dport;
- fl6.fl6_sport = inet->inet_sport;
- security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- goto out;
- }
-
- } else
- dst_hold(dst);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
- if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
- tcp_sync_mss(sk, dst_mtu(dst));
- tcp_simple_retransmit(sk);
- } /* else let the usual retransmit timer handle it */
- dst_release(dst);
+ if (type == ICMPV6_PKT_TOOBIG) {
+ tp->mtu_info = ntohl(info);
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else
+ set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out;
}
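The PKT_TOOBIG branch no longer bails out when the socket is owned by user context: the MTU is stashed in tp->mtu_info and a deferred bit is set for the lock owner to replay. The handoff in miniature, as a userspace model (single-threaded purely to show the control flow; in the kernel the consumer side runs when the socket lock is released):

#include <stdatomic.h>
#include <stdio.h>

#define MTU_REDUCED_DEFERRED (1u << 0)

static atomic_uint tsq_flags;
static unsigned int mtu_info;

static void mtu_reduced(void)
{
	printf("sync mss to mtu %u and retransmit\n", mtu_info);
}

/* ICMP side: may fire while another context "owns" the socket */
static void on_pkt_toobig(unsigned int mtu, int sock_owned)
{
	mtu_info = mtu;
	if (!sock_owned)
		mtu_reduced();
	else
		atomic_fetch_or(&tsq_flags, MTU_REDUCED_DEFERRED);
}

/* lock owner: replay whatever was deferred while it held the lock */
static void release_sock_model(void)
{
	unsigned int flags = atomic_exchange(&tsq_flags, 0);

	if (flags & MTU_REDUCED_DEFERRED)
		mtu_reduced();
}

int main(void)
{
	on_pkt_toobig(1280, 1);	/* owned: handling is deferred... */
	release_sock_model();	/* ...and replayed here */
	return 0;
}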
@@ -475,62 +448,43 @@ out:
}
-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ struct flowi6 *fl6,
+ struct request_sock *req,
struct request_values *rvp,
u16 queue_mapping)
{
struct inet6_request_sock *treq = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff * skb;
- struct ipv6_txoptions *opt = NULL;
- struct in6_addr * final_p, final;
- struct flowi6 fl6;
- struct dst_entry *dst;
- int err;
+ int err = -ENOMEM;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = treq->rmt_addr;
- fl6.saddr = treq->loc_addr;
- fl6.flowlabel = 0;
- fl6.flowi6_oif = treq->iif;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
-
- opt = np->opt;
- final_p = fl6_update_dst(&fl6, opt, &final);
-
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
- dst = NULL;
+ /* First, grab a route. */
+ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
goto done;
- }
+
skb = tcp_make_synack(sk, dst, req, rvp);
- err = -ENOMEM;
+
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
- fl6.daddr = treq->rmt_addr;
+ fl6->daddr = treq->rmt_addr;
skb_set_queue_mapping(skb, queue_mapping);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
}
done:
- if (opt && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- dst_release(dst);
return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
+ struct flowi6 fl6;
+
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
- return tcp_v6_send_synack(sk, req, rvp, 0);
+ return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1057,6 +1011,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
bool want_cookie = false;
if (skb->protocol == htons(ETH_P_IP))
@@ -1085,7 +1040,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1150,8 +1105,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq->iif = inet6_iif(skb);
if (!isn) {
- struct inet_peer *peer = NULL;
-
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1176,14 +1129,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
- (dst = inet6_csk_route_req(sk, req)) != NULL &&
- (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
- ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
- &treq->rmt_addr)) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
- (s32)(peer->tcp_ts - req->ts_recent) >
- TCP_PAWS_WINDOW) {
+ (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
+ if (!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
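
tcp_peer_is_proven() folds the open-coded inet_peer timestamp tests (PAWS rejection above, liveness further below) into one predicate keyed off the route. A simplified userspace sketch of such a consolidated predicate, with hypothetical names and deliberately approximate logic:

#include <stdio.h>

struct peer { long ts_stamp; long ts; };	/* cached per-destination state */

#define PAWS_MSL    60
#define PAWS_WINDOW 1

/* one predicate instead of open-coded timestamp checks at each call site;
 * paws_check == 1 asks the strict PAWS question, 0 just "seen recently?" */
static int peer_is_proven(const struct peer *p, long now, long req_ts,
			  int paws_check)
{
	if (!p)
		return 0;
	if (paws_check)
		/* proven unless a fresh cached stamp contradicts req_ts */
		return !(now - p->ts_stamp < PAWS_MSL &&
			 p->ts - req_ts > PAWS_WINDOW);
	/* non-PAWS mode: any recorded timestamp counts as proof of life */
	return p->ts_stamp != 0;
}

int main(void)
{
	struct peer p = { .ts_stamp = 100, .ts = 5 };

	printf("paws ok: %d\n", peer_is_proven(&p, 120, 4, 1));
	printf("alive:   %d\n", peer_is_proven(&p, 120, 4, 0));
	return 0;
}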
@@ -1192,8 +1139,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
- (!peer || !peer->tcp_ts_stamp) &&
- (!dst || !dst_metric(dst, RTAX_RTT))) {
+ !tcp_peer_is_proven(req, dst, false)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
@@ -1215,7 +1161,7 @@ have_isn:
if (security_inet_conn_request(sk, skb, req))
goto drop_and_release;
- if (tcp_v6_send_synack(sk, req,
+ if (tcp_v6_send_synack(sk, dst, &fl6, req,
(struct request_values *)&tmp_ext,
skb_get_queue_mapping(skb)) ||
want_cookie)
@@ -1242,10 +1188,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
- struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
+ struct flowi6 fl6;
if (skb->protocol == htons(ETH_P_IP)) {
/*
@@ -1302,13 +1248,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
treq = inet6_rsk(req);
- opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
- dst = inet6_csk_route_req(sk, req);
+ dst = inet6_csk_route_req(sk, &fl6, req);
if (!dst)
goto out;
}
@@ -1354,7 +1299,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
/* Clone pktoptions received with SYN */
newnp->pktoptions = NULL;
if (treq->pktopts != NULL) {
- newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
+ newnp->pktoptions = skb_clone(treq->pktopts,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
consume_skb(treq->pktopts);
treq->pktopts = NULL;
if (newnp->pktoptions)
@@ -1371,11 +1317,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
but we do one more thing there: reattach optmem
to newsk.
*/
- if (opt) {
- newnp->opt = ipv6_dup_options(newsk, opt);
- if (opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- }
+ if (np->opt)
+ newnp->opt = ipv6_dup_options(newsk, np->opt);
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt)
@@ -1407,7 +1350,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
- AF_INET6, key->key, key->keylen, GFP_ATOMIC);
+ AF_INET6, key->key, key->keylen,
+ sk_gfp_atomic(sk, GFP_ATOMIC));
}
#endif
@@ -1422,8 +1366,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
- if (opt && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
@@ -1502,7 +1444,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
--ANK (980728)
*/
if (np->rxopt.all)
- opt_skb = skb_clone(skb, GFP_ATOMIC);
+ opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb);
@@ -1734,42 +1676,47 @@ do_time_wait:
goto discard_it;
}
-static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
+static void tcp_v6_early_demux(struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet_peer *peer;
+ const struct ipv6hdr *hdr;
+ const struct tcphdr *th;
+ struct sock *sk;
- if (!rt ||
- !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
- peer = inet_getpeer_v6(&np->daddr, 1);
- *release_it = true;
- } else {
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- peer = rt->rt6i_peer;
- *release_it = false;
- }
+ if (skb->pkt_type != PACKET_HOST)
+ return;
- return peer;
-}
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
+ return;
-static void *tcp_v6_tw_get_peer(struct sock *sk)
-{
- const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
- const struct inet_timewait_sock *tw = inet_twsk(sk);
+ hdr = ipv6_hdr(skb);
+ th = tcp_hdr(skb);
- if (tw->tw_family == AF_INET)
- return tcp_v4_tw_get_peer(sk);
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return;
- return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
+ sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
+ &hdr->saddr, th->source,
+ &hdr->daddr, ntohs(th->dest),
+ inet6_iif(skb));
+ if (sk) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+ struct dst_entry *dst = sk->sk_rx_dst;
+ struct inet_sock *icsk = inet_sk(sk);
+ if (dst)
+ dst = dst_check(dst, 0);
+ if (dst &&
+ icsk->rx_dst_ifindex == inet6_iif(skb))
+ skb_dst_set_noref(skb, dst);
+ }
+ }
}
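
tcp_v6_early_demux() resolves the 4-tuple to an established socket once, before the protocol handler runs, and reuses that socket's cached rx dst only when the ingress interface still matches. A toy sketch of the same idea (hypothetical structures):

#include <stdio.h>
#include <stdint.h>

/* toy connection table: early demux resolves the 4-tuple once, caches it */
struct conn { uint32_t saddr, daddr; uint16_t sport, dport; int ifindex; };
struct pkt  { uint32_t saddr, daddr; uint16_t sport, dport; int iif;
	      struct conn *sk; /* cache slot, like skb->sk */ };

static struct conn established = { 1, 2, 1234, 80, 3 };

static struct conn *lookup_established(const struct pkt *p)
{
	if (p->saddr == established.saddr && p->daddr == established.daddr &&
	    p->sport == established.sport && p->dport == established.dport)
		return &established;
	return NULL;
}

/* runs before the protocol handler; later stages reuse p->sk if set */
static void early_demux(struct pkt *p)
{
	struct conn *sk = lookup_established(p);

	if (!sk)
		return;
	p->sk = sk;
	/* only trust a cached route when the ingress device still matches */
	if (sk->ifindex == p->iif)
		printf("reusing cached dst for ifindex %d\n", p->iif);
}

int main(void)
{
	struct pkt p = { 1, 2, 1234, 80, 3, NULL };

	early_demux(&p);
	printf("handler sees %s socket\n", p.sk ? "a pre-resolved" : "no");
	return 0;
}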
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
- .twsk_getpeer = tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -1778,7 +1725,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.rebuild_header = inet6_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
- .get_peer = tcp_v6_get_peer,
.net_header_len = sizeof(struct ipv6hdr),
.net_frag_header_len = sizeof(struct frag_hdr),
.setsockopt = ipv6_setsockopt,
@@ -1810,7 +1756,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
- .get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
@@ -2049,6 +1994,8 @@ struct proto tcpv6_prot = {
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
+ .release_cb = tcp_release_cb,
+ .mtu_reduced = tcp_v6_mtu_reduced,
.hash = tcp_v6_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
@@ -2070,12 +2017,13 @@ struct proto tcpv6_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
.proto_cgroup = tcp_proto_cgroup,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
+ .early_demux = tcp_v6_early_demux,
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.gso_send_check = tcp_v6_gso_send_check,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f05099fc590..99d0077b56b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -48,6 +48,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <trace/events/skb.h>
#include "udp_impl.h"
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -385,15 +386,16 @@ try_again:
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied );
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
- if (err)
+ if (unlikely(err)) {
+ trace_kfree_skb(skb, udpv6_recvmsg);
goto out_free;
-
+ }
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
@@ -479,6 +481,11 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk == NULL)
return;
+ if (type == ICMPV6_PKT_TOOBIG)
+ ip6_sk_update_pmtu(skb, sk, info);
+ if (type == NDISC_REDIRECT)
+ ip6_sk_redirect(skb, sk);
+
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
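
__udp6_lib_err() now routes the two interesting ICMPv6 control messages up front: PKT_TOOBIG updates the socket's path MTU and NDISC_REDIRECT rebinds the route. A minimal sketch of that type dispatch (illustrative only):

#include <stdio.h>

enum { PKT_TOOBIG_MSG = 2, REDIRECT_MSG = 137 };	/* illustrative values */

/* centralised error hook: route control messages by type before the
 * protocol-specific error handling runs */
static void err_handler(int type, unsigned int info)
{
	if (type == PKT_TOOBIG_MSG)
		printf("update cached PMTU to %u\n", info);
	if (type == REDIRECT_MSG)
		printf("rebind route to the new next hop\n");
}

int main(void)
{
	err_handler(PKT_TOOBIG_MSG, 1280);
	err_handler(REDIRECT_MSG, 0);
	return 0;
}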
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8625fba96db..ef39812107b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -99,12 +99,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
- xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
- if (rt->rt6i_peer)
- atomic_inc(&rt->rt6i_peer->refcnt);
+ rt6_transfer_peer(&xdst->u.rt6, rt);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
+ xdst->u.rt6.n = neigh_clone(rt->n);
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
RTF_LOCAL);
xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
@@ -208,12 +207,22 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
}
-static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu);
+}
+
+static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct dst_entry *path = xdst->route;
+
+ path->ops->redirect(path, sk, skb);
}
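
xfrm6_update_pmtu() and the new xfrm6_redirect() keep no route state of their own; both simply delegate through the inner path's dst_ops, so the event lands on whatever route the xfrm bundle is stacked on. A compact sketch of that vtable delegation (toy types):

#include <stdio.h>

/* toy dst_ops vtable: each dst type supplies its own handlers */
struct dst;
struct dst_ops {
	void (*update_pmtu)(struct dst *dst, unsigned int mtu);
	void (*redirect)(struct dst *dst);
};
struct dst {
	const struct dst_ops *ops;
	struct dst *path;	/* inner route a transform dst is stacked on */
};

static void route_update_pmtu(struct dst *dst, unsigned int mtu)
{
	(void)dst;
	printf("route: pmtu now %u\n", mtu);
}

static void route_redirect(struct dst *dst)
{
	(void)dst;
	printf("route: redirected\n");
}

/* the transform layer owns no route state; both handlers delegate inward */
static void xfrm_update_pmtu(struct dst *dst, unsigned int mtu)
{
	dst->path->ops->update_pmtu(dst->path, mtu);
}

static void xfrm_redirect(struct dst *dst)
{
	dst->path->ops->redirect(dst->path);
}

static const struct dst_ops route_ops = { route_update_pmtu, route_redirect };
static const struct dst_ops xfrm_ops  = { xfrm_update_pmtu,  xfrm_redirect  };

int main(void)
{
	struct dst inner = { &route_ops, NULL };
	struct dst outer = { &xfrm_ops, &inner };

	outer.ops->update_pmtu(&outer, 1400);
	outer.ops->redirect(&outer);
	return 0;
}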
static void xfrm6_dst_destroy(struct dst_entry *dst)
@@ -223,8 +232,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
- if (likely(xdst->u.rt6.rt6i_peer))
- inet_putpeer(xdst->u.rt6.rt6i_peer);
+ if (rt6_has_peer(&xdst->u.rt6)) {
+ struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
+ inet_putpeer(peer);
+ }
xfrm_dst_destroy(xdst);
}
@@ -260,6 +271,7 @@ static struct dst_ops xfrm6_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
+ .redirect = xfrm6_redirect,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
diff --git a/net/ipx/Makefile b/net/ipx/Makefile
index 4b95e3ea0f8..440fafa9fd0 100644
--- a/net/ipx/Makefile
+++ b/net/ipx/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_IPX) += ipx.o
-ipx-y := af_ipx.o ipx_route.o ipx_proc.o
+ipx-y := af_ipx.o ipx_route.o ipx_proc.o pe2.o
ipx-$(CONFIG_SYSCTL) += sysctl_net_ipx.o
diff --git a/net/ethernet/pe2.c b/net/ipx/pe2.c
index 85d574addbc..32dcd601ab3 100644
--- a/net/ethernet/pe2.c
+++ b/net/ipx/pe2.c
@@ -28,10 +28,8 @@ struct datalink_proto *make_EII_client(void)
return proto;
}
-EXPORT_SYMBOL(make_EII_client);
void destroy_EII_client(struct datalink_proto *dl)
{
kfree(dl);
}
-EXPORT_SYMBOL(destroy_EII_client);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index bb14c347768..bb738c9f914 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -955,7 +955,7 @@ out:
* The main difference with a "standard" connect is that with IrDA we need
* to resolve the service name into a TSAP selector (in TCP, port number
* doesn't have to be resolved).
- * Because of this service name resoltion, we can offer "auto-connect",
+ * Because of this service name resolution, we can offer "auto-connect",
* where we connect to a service without specifying a destination address.
*
* Note : by consulting "errno", the user space caller may learn the cause
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 32dcaac70b0..4664855222f 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
/* Bigger param length comes from CMD_GET_MEDIA_CHAR */
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
- IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BORADCAST") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
GFP_ATOMIC);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index f06947c4fa8..7152624ed5f 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -523,7 +523,7 @@ void *hashbin_remove_first( hashbin_t *hashbin)
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
@@ -615,7 +615,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
*/
if ( found ) {
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
/*
@@ -685,7 +685,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 32b2155e7ab..393355d37b4 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1128,6 +1128,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
int headroom;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
int udp_len;
+ int ret = NET_XMIT_SUCCESS;
/* Check that there's enough headroom in the skb to insert IP,
* UDP and L2TP headers. If not enough, expand it to
@@ -1137,8 +1138,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
uhlen + hdr_len;
old_headroom = skb_headroom(skb);
if (skb_cow_head(skb, headroom)) {
- dev_kfree_skb(skb);
- goto abort;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
new_headroom = skb_headroom(skb);
@@ -1156,7 +1157,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- dev_kfree_skb(skb);
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
goto out_unlock;
}
@@ -1215,8 +1217,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
out_unlock:
bh_unlock_sock(sk);
-abort:
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 47b259fccd2..f9ee74deeac 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -44,6 +44,7 @@ struct l2tp_eth {
struct list_head list;
atomic_long_t tx_bytes;
atomic_long_t tx_packets;
+ atomic_long_t tx_dropped;
atomic_long_t rx_bytes;
atomic_long_t rx_packets;
atomic_long_t rx_errors;
@@ -92,12 +93,15 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_session *session = priv->session;
+ unsigned int len = skb->len;
+ int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
- atomic_long_add(skb->len, &priv->tx_bytes);
- atomic_long_inc(&priv->tx_packets);
-
- l2tp_xmit_skb(session, skb, session->hdr_len);
-
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ atomic_long_add(len, &priv->tx_bytes);
+ atomic_long_inc(&priv->tx_packets);
+ } else {
+ atomic_long_inc(&priv->tx_dropped);
+ }
return NETDEV_TX_OK;
}
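
The device xmit now samples skb->len before handing the buffer to l2tp_xmit_skb() (which may free it) and books the packet under tx_bytes/tx_packets or the new tx_dropped counter depending on the NET_XMIT return code. A small sketch of this account-by-return-code pattern (simplified, hypothetical names):

#include <stdio.h>

enum { XMIT_SUCCESS = 0, XMIT_DROP = 1 };	/* like NET_XMIT_* */

static long tx_bytes, tx_packets, tx_dropped;

/* pretend transmit: drops anything that will not fit */
static int do_xmit(unsigned int len)
{
	return len > 1500 ? XMIT_DROP : XMIT_SUCCESS;
}

static void dev_xmit(unsigned int len)
{
	/* read len before handing the buffer off; afterwards it is gone */
	int ret = do_xmit(len);

	if (ret == XMIT_SUCCESS) {
		tx_bytes += len;
		tx_packets++;
	} else {
		tx_dropped++;
	}
}

int main(void)
{
	dev_xmit(1000);
	dev_xmit(9000);
	printf("tx: %ld pkts / %ld bytes, %ld dropped\n",
	       tx_packets, tx_bytes, tx_dropped);
	return 0;
}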
@@ -108,6 +112,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
stats->tx_packets = atomic_long_read(&priv->tx_packets);
+ stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
stats->rx_packets = atomic_long_read(&priv->rx_packets);
stats->rx_errors = atomic_long_read(&priv->rx_errors);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ddc553e7667..d71cd9229a4 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -72,7 +72,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
void *hdr;
int ret = -ENOBUFS;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
@@ -353,7 +353,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
@@ -699,7 +699,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8ef6b9416cb..286366ef893 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1522,8 +1522,8 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
* handler, according to whether the PPPoX socket is a for a regular session
* or the special tunnel type.
*/
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
+static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct l2tp_session *session;
@@ -1535,7 +1535,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
if (level != SOL_PPPOL2TP)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
- if (get_user(len, (int __user *) optlen))
+ if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
@@ -1568,7 +1568,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
+ if (put_user(len, optlen))
goto end_put_sess;
if (copy_to_user((void __user *) optval, &val, len))
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index fe5453c3e71..f6fe4d40050 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
* @sock: Socket to set options on.
* @level: Socket level user is requesting operations on.
* @optname: Operation name.
- * @optval User provided operation data.
+ * @optval: User provided operation data.
* @optlen: Length of optval.
*
* Set various connection specific parameters.
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index cf4aea3ba30..39a8d8924b9 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -30,12 +30,12 @@
*
* SAP and connection resource manager, one per adapter.
*
- * @state - state of station
- * @xid_r_count - XID response PDU counter
- * @mac_sa - MAC source address
- * @sap_list - list of related SAPs
- * @ev_q - events entering state mach.
- * @mac_pdu_q - PDUs ready to send to MAC
+ * @state: state of station
+ * @xid_r_count: XID response PDU counter
+ * @mac_sa: MAC source address
+ * @sap_list: list of related SAPs
+ * @ev_q: events entering state mach.
+ * @mac_pdu_q: PDUs ready to send to MAC
*/
struct llc_station {
u8 state;
@@ -646,7 +646,7 @@ static void llc_station_service_events(void)
}
/**
- * llc_station_state_process: queue event and try to process queue.
+ * llc_station_state_process - queue event and try to process queue.
* @skb: Address of the event
*
* Queues an event (on the station event queue) for handling by the
@@ -672,7 +672,7 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
}
}
-/*
+/**
* llc_station_rcv - send received pdu to the station state machine
* @skb: received frame.
*
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8d249d70598..63af25458fd 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -107,6 +107,19 @@ config MAC80211_DEBUGFS
Say N unless you know you need this.
+config MAC80211_MESSAGE_TRACING
+ bool "Trace all mac80211 debug messages"
+ depends on MAC80211
+ ---help---
+ Select this option to have mac80211 register the
+ mac80211_msg trace subsystem with tracepoints to
+ collect all debugging messages, independent of
+ printing them into the kernel log.
+
+ The overhead in this option is that all the messages
+ need to be present in the binary and formatted at
+ runtime for tracing.
+
menuconfig MAC80211_DEBUG_MENU
bool "Select mac80211 debugging features"
depends on MAC80211
@@ -140,26 +153,35 @@ config MAC80211_VERBOSE_DEBUG
Do not select this option.
-config MAC80211_HT_DEBUG
- bool "Verbose HT debugging"
+config MAC80211_MLME_DEBUG
+ bool "Verbose managed MLME output"
depends on MAC80211_DEBUG_MENU
---help---
- This option enables 802.11n High Throughput features
- debug tracing output.
-
- It should not be selected on production systems as some
+ Selecting this option causes mac80211 to print out
+ debugging messages for the managed-mode MLME. It
+ should not be selected on production systems as some
of the messages are remotely triggerable.
Do not select this option.
-config MAC80211_TKIP_DEBUG
- bool "Verbose TKIP debugging"
+config MAC80211_STA_DEBUG
+ bool "Verbose station debugging"
depends on MAC80211_DEBUG_MENU
---help---
Selecting this option causes mac80211 to print out
- very verbose TKIP debugging messages. It should not
- be selected on production systems as those messages
- are remotely triggerable.
+ debugging messages for station addition/removal.
+
+ Do not select this option.
+
+config MAC80211_HT_DEBUG
+ bool "Verbose HT debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ This option enables 802.11n High Throughput features
+ debug tracing output.
+
+ It should not be selected on production systems as some
+ of the messages are remotely triggerable.
Do not select this option.
@@ -174,7 +196,7 @@ config MAC80211_IBSS_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_PS_DEBUG
+config MAC80211_PS_DEBUG
bool "Verbose powersave mode debugging"
depends on MAC80211_DEBUG_MENU
---help---
@@ -186,7 +208,7 @@ config MAC80211_VERBOSE_PS_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MPL_DEBUG
+config MAC80211_MPL_DEBUG
bool "Verbose mesh peer link debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -199,7 +221,7 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MPATH_DEBUG
+config MAC80211_MPATH_DEBUG
bool "Verbose mesh path debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -212,7 +234,7 @@ config MAC80211_VERBOSE_MPATH_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MHWMP_DEBUG
+config MAC80211_MHWMP_DEBUG
bool "Verbose mesh HWMP routing debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -225,7 +247,7 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MESH_SYNC_DEBUG
+config MAC80211_MESH_SYNC_DEBUG
bool "Verbose mesh mesh synchronization debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -236,7 +258,7 @@ config MAC80211_VERBOSE_MESH_SYNC_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_TDLS_DEBUG
+config MAC80211_TDLS_DEBUG
bool "Verbose TDLS debugging"
depends on MAC80211_DEBUG_MENU
---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3e9d931bba3..a7dd110faaf 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,6 @@ mac80211-y := \
scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
ibss.o \
- work.o \
iface.o \
rate.o \
michael.o \
@@ -25,7 +24,7 @@ mac80211-y := \
wme.o \
event.o \
chan.o \
- driver-trace.o mlme.o
+ trace.o mlme.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -43,7 +42,7 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mac80211-$(CONFIG_PM) += pm.o
-CFLAGS_driver-trace.o := -I$(src)
+CFLAGS_trace.o := -I$(src)
# objects for PID algorithm
rc80211_pid-y := rc80211_pid_algo.o
@@ -59,4 +58,4 @@ mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
-ccflags-y += -D__CHECK_ENDIAN__
+ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index c649188314c..186d9919b04 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -74,18 +74,17 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG
+ ht_dbg(sta->sdata,
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
sta->sta.addr, tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "initiator",
(int)reason);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
&sta->sta, tid, NULL, 0))
- printk(KERN_DEBUG "HW problem - can not stop rx "
- "aggregation for tid %d\n", tid);
+ sdata_info(sta->sdata,
+ "HW problem - can not stop rx aggregation for tid %d\n",
+ tid);
/* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -160,9 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
}
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+ ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
+
set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
@@ -249,10 +247,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Suspend in progress. "
- "Denying ADDBA request\n");
-#endif
+ ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
goto end_no_lock;
}
@@ -264,10 +259,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
(!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
(buf_size > IEEE80211_MAX_AMPDU_BUF)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
- mgmt->sa, tid, ba_policy, buf_size);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sta->sdata,
+ "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+ mgmt->sa, tid, ba_policy, buf_size);
goto end_no_lock;
}
/* determine default buffer size */
@@ -282,10 +276,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
mutex_lock(&sta->ampdu_mlme.mtx);
if (sta->ampdu_mlme.tid_rx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
- mgmt->sa, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sta->sdata,
+ "unexpected AddBA Req from %pM on tid %u\n",
+ mgmt->sa, tid);
/* delete existing Rx BA session on the same tid */
___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
@@ -324,10 +317,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
&sta->sta, tid, &start_seq_num, 0);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
+ ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
if (ret) {
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
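
The conversion above replaces #ifdef CONFIG_MAC80211_HT_DEBUG / printk pairs with ht_dbg()/ht_dbg_ratelimited(), so the call sites always compile and only the output is gated. A minimal sketch of such a debug macro (hypothetical DEBUG_HT switch):

#include <stdio.h>

/* compile the call sites unconditionally; gate only the output, so the
 * format strings are always type-checked even in non-debug builds */
#ifdef DEBUG_HT
#define ht_dbg(fmt, ...) fprintf(stderr, "ht: " fmt, ##__VA_ARGS__)
#else
#define ht_dbg(fmt, ...) \
	do { if (0) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	int tid = 3;

	ht_dbg("rx session timer expired on tid %d\n", tid);
	return 0;
}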
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 7cf07158805..d0deb3edae2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -135,7 +135,8 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);
@@ -184,10 +185,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
spin_unlock_bh(&sta->lock);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
+ ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
del_timer_sync(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->session_timer);
@@ -253,17 +252,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
if (!tid_tx ||
test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "(or no longer) expecting addBA response there\n",
- tid);
-#endif
+ ht_dbg(sta->sdata,
+ "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
+ tid);
return;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
rcu_read_unlock();
@@ -323,8 +318,9 @@ ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_queue_agg(sdata, tid);
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
+ if (WARN(!tid_tx,
+ "TID %d gone but expected when splicing aggregates from the pending queue\n",
+ tid))
return;
if (!skb_queue_empty(&tid_tx->pending)) {
@@ -372,10 +368,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
&sta->sta, tid, &start_seq_num, 0);
if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - HW unavailable for"
- " tid %d\n", tid);
-#endif
+ ht_dbg(sdata,
+ "BA request denied - HW unavailable for tid %d\n", tid);
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -388,9 +382,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
+ ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
spin_lock_bh(&sta->lock);
sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -437,9 +429,7 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+ ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
@@ -463,10 +453,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
+ ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
pubsta->addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -476,10 +464,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA sessions blocked. "
- "Denying BA session request\n");
-#endif
+ ht_dbg(sdata,
+ "BA sessions blocked - Denying BA session request\n");
return -EINVAL;
}
@@ -497,10 +483,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
*/
if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
!sta->sta.ht_cap.ht_supported) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
- "does not advertise HT support\n", pubsta->addr);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata,
+ "BA request denied - IBSS STA %pM does not advertise HT support\n",
+ pubsta->addr);
return -EINVAL;
}
@@ -520,12 +505,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
HT_AGG_RETRIES_PERIOD)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - "
- "waiting a grace period after %d failed requests "
- "on tid %u\n",
+ ht_dbg(sdata,
+ "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
sta->ampdu_mlme.addba_req_num[tid], tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
ret = -EBUSY;
goto err_unlock_sta;
}
@@ -533,10 +515,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/* check if the TID is not in aggregation flow already */
if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - session is not "
- "idle on tid %u\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata,
+ "BA request denied - session is not idle on tid %u\n",
+ tid);
ret = -EAGAIN;
goto err_unlock_sta;
}
@@ -591,9 +572,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -627,10 +606,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
trace_api_start_tx_ba_cb(sdata, ra, tid);
if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
- tid, STA_TID_NUM);
-#endif
+ ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+ tid, STA_TID_NUM);
return;
}
@@ -638,9 +615,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
mutex_unlock(&local->sta_mtx);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+ ht_dbg(sdata, "Could not find station: %pM\n", ra);
return;
}
@@ -648,9 +623,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (WARN_ON(!tid_tx)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA was not requested!\n");
-#endif
+ ht_dbg(sdata, "addBA was not requested!\n");
goto unlock;
}
@@ -750,25 +723,18 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
trace_api_stop_tx_ba_cb(sdata, ra, tid);
if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
- tid, STA_TID_NUM);
-#endif
+ ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+ tid, STA_TID_NUM);
return;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
- ra, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+ ht_dbg(sdata, "Could not find station: %pM\n", ra);
goto unlock;
}
@@ -777,9 +743,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
-#endif
+ ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
goto unlock_sta;
}
@@ -855,17 +819,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
goto out;
}
del_timer_sync(&tid_tx->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
/*
* addba_resp_timer may have fired before we got here, and
@@ -874,11 +834,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
*/
if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG
+ ht_dbg(sta->sdata,
"got addBA resp for tid %d but we already gave up\n",
tid);
-#endif
goto out;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7d5108a867a..d41974aacf5 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -20,31 +20,31 @@
#include "rate.h"
#include "mesh.h"
-static struct net_device *ieee80211_add_iface(struct wiphy *wiphy, char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
- struct net_device *dev;
+ struct wireless_dev *wdev;
struct ieee80211_sub_if_data *sdata;
int err;
- err = ieee80211_if_add(local, name, &dev, type, params);
+ err = ieee80211_if_add(local, name, &wdev, type, params);
if (err)
return ERR_PTR(err);
if (type == NL80211_IFTYPE_MONITOR && flags) {
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
sdata->u.mntr_flags = *flags;
}
- return dev;
+ return wdev;
}
-static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev)
+static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev));
+ ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev));
return 0;
}
@@ -353,6 +353,7 @@ void sta_set_rate_info_tx(struct sta_info *sta,
static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
struct timespec uptime;
sinfo->generation = sdata->local->sta_generation;
@@ -388,7 +389,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
(sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
- sinfo->signal = (s8)sta->last_signal;
+ if (!local->ops->get_rssi ||
+ drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
+ sinfo->signal = (s8)sta->last_signal;
sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
}
@@ -517,7 +520,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
* network device.
*/
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
@@ -546,7 +549,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
data[i] = (u8)sinfo.signal_avg;
i++;
} else {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
/* Make sure this station belongs to the proper dev */
if (sta->sdata->dev != dev)
continue;
@@ -603,7 +606,7 @@ do_survey:
else
data[i++] = -1LL;
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
if (WARN_ON(i != STA_STATS_LEN))
return;
@@ -629,10 +632,11 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int ret = -ENOENT;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get_by_idx(sdata, idx);
if (sta) {
@@ -641,7 +645,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
sta_set_sinfo(sta, sinfo);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return ret;
}
@@ -658,10 +662,11 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int ret = -ENOENT;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mac);
if (sta) {
@@ -669,11 +674,54 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
sta_set_sinfo(sta, sinfo);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return ret;
}
+static int ieee80211_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = NULL;
+
+ if (netdev)
+ sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+
+ switch (ieee80211_get_channel_mode(local, NULL)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel != chan ||
+ (!sdata && local->_oper_channel_type != channel_type))
+ return -EBUSY;
+ if (!sdata && local->_oper_channel_type == channel_type)
+ return 0;
+ break;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
+ if (!ieee80211_set_channel_type(local, sdata, channel_type))
+ return -EBUSY;
+
+ local->oper_channel = chan;
+
+ /* auto-detects changes */
+ ieee80211_hw_config(local, 0);
+
+ return 0;
+}
+
+static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
+}
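
ieee80211_set_channel() moves up here and is now invoked explicitly from the start_ap/join_mesh paths, while monitor-only tuning goes through the new ieee80211_set_monitor_channel(); the channel-mode switch refuses changes that would conflict with a fixed or hopping channel owner. A toy sketch of that guard (hypothetical types):

#include <stdio.h>

enum chan_mode { CHAN_UNDEFINED, CHAN_HOPPING, CHAN_FIXED };

struct hw { enum chan_mode mode; int oper_channel; };

/* refuse a switch that conflicts with users already on a fixed channel */
static int set_channel(struct hw *hw, int chan)
{
	switch (hw->mode) {
	case CHAN_HOPPING:
		return -1;		/* busy: scanning owns the channel */
	case CHAN_FIXED:
		if (hw->oper_channel != chan)
			return -1;	/* busy: someone else pinned it */
		return 0;		/* already there, nothing to do */
	case CHAN_UNDEFINED:
		break;
	}
	hw->oper_channel = chan;
	hw->mode = CHAN_FIXED;
	printf("tuned to channel %d\n", chan);
	return 0;
}

int main(void)
{
	struct hw hw = { CHAN_UNDEFINED, 0 };

	set_channel(&hw, 6);
	if (set_channel(&hw, 11) < 0)
		printf("channel 11 refused: fixed on %d\n", hw.oper_channel);
	return 0;
}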
+
static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
const u8 *resp, size_t resp_len)
{
@@ -788,6 +836,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
+ err = ieee80211_set_channel(wiphy, dev, params->channel,
+ params->channel_type);
+ if (err)
+ return err;
+
/*
* Apply control port protocol, this allows us to
* not encrypt dynamic WEP control frames.
@@ -864,6 +917,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old, rcu_head);
+ sta_info_flush(sdata->local, sdata);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
return 0;
@@ -1482,7 +1536,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask))
conf->dot11MeshTTL = nconf->dot11MeshTTL;
if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
- conf->dot11MeshTTL = nconf->element_ttl;
+ conf->element_ttl = nconf->element_ttl;
if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
conf->auto_open_plinks = nconf->auto_open_plinks;
if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
@@ -1517,17 +1571,16 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
* announcements, so require this ifmsh to also be a root node
* */
if (nconf->dot11MeshGateAnnouncementProtocol &&
- !conf->dot11MeshHWMPRootMode) {
- conf->dot11MeshHWMPRootMode = 1;
+ !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) {
+ conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN;
ieee80211_mesh_root_setup(ifmsh);
}
conf->dot11MeshGateAnnouncementProtocol =
nconf->dot11MeshGateAnnouncementProtocol;
}
- if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask))
conf->dot11MeshHWMPRannInterval =
nconf->dot11MeshHWMPRannInterval;
- }
if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
@@ -1543,6 +1596,15 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask))
+ conf->dot11MeshHWMPactivePathToRootTimeout =
+ nconf->dot11MeshHWMPactivePathToRootTimeout;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask))
+ conf->dot11MeshHWMProotInterval =
+ nconf->dot11MeshHWMProotInterval;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
+ conf->dot11MeshHWMPconfirmationInterval =
+ nconf->dot11MeshHWMPconfirmationInterval;
return 0;
}
@@ -1558,6 +1620,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
err = copy_mesh_setup(ifmsh, setup);
if (err)
return err;
+
+ err = ieee80211_set_channel(wiphy, dev, setup->channel,
+ setup->channel_type);
+ if (err)
+ return err;
+
ieee80211_start_mesh(sdata);
return 0;
@@ -1674,54 +1742,7 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
return -EINVAL;
}
- return 0;
-}
-
-static int ieee80211_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct ieee80211_local *local = wiphy_priv(wiphy);
- struct ieee80211_sub_if_data *sdata = NULL;
- struct ieee80211_channel *old_oper;
- enum nl80211_channel_type old_oper_type;
- enum nl80211_channel_type old_vif_oper_type= NL80211_CHAN_NO_HT;
-
- if (netdev)
- sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
-
- switch (ieee80211_get_channel_mode(local, NULL)) {
- case CHAN_MODE_HOPPING:
- return -EBUSY;
- case CHAN_MODE_FIXED:
- if (local->oper_channel != chan)
- return -EBUSY;
- if (!sdata && local->_oper_channel_type == channel_type)
- return 0;
- break;
- case CHAN_MODE_UNDEFINED:
- break;
- }
-
- if (sdata)
- old_vif_oper_type = sdata->vif.bss_conf.channel_type;
- old_oper_type = local->_oper_channel_type;
-
- if (!ieee80211_set_channel_type(local, sdata, channel_type))
- return -EBUSY;
-
- old_oper = local->oper_channel;
- local->oper_channel = chan;
-
- /* Update driver if changes were actually made. */
- if ((old_oper != local->oper_channel) ||
- (old_oper_type != local->_oper_channel_type))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-
- if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- old_vif_oper_type != sdata->vif.bss_conf.channel_type)
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
return 0;
}
@@ -1743,10 +1764,11 @@ static int ieee80211_resume(struct wiphy *wiphy)
#endif
static int ieee80211_scan(struct wiphy *wiphy,
- struct net_device *dev,
struct cfg80211_scan_request *req)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
switch (ieee80211_vif_type_p2p(&sdata->vif)) {
case NL80211_IFTYPE_STATION:
@@ -2111,143 +2133,291 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
-static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local,
- struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type chantype,
- unsigned int duration, u64 *cookie)
+static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie,
+ struct sk_buff *txskb)
{
+ struct ieee80211_roc_work *roc, *tmp;
+ bool queued = false;
int ret;
- u32 random_cookie;
lockdep_assert_held(&local->mtx);
- if (local->hw_roc_cookie)
- return -EBUSY;
- /* must be nonzero */
- random_cookie = random32() | 1;
-
- *cookie = random_cookie;
- local->hw_roc_dev = dev;
- local->hw_roc_cookie = random_cookie;
- local->hw_roc_channel = chan;
- local->hw_roc_channel_type = chantype;
- local->hw_roc_duration = duration;
- ret = drv_remain_on_channel(local, chan, chantype, duration);
+ roc = kzalloc(sizeof(*roc), GFP_KERNEL);
+ if (!roc)
+ return -ENOMEM;
+
+ roc->chan = channel;
+ roc->chan_type = channel_type;
+ roc->duration = duration;
+ roc->req_duration = duration;
+ roc->frame = txskb;
+ roc->mgmt_tx_cookie = (unsigned long)txskb;
+ roc->sdata = sdata;
+ INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
+ INIT_LIST_HEAD(&roc->dependents);
+
+ /* if there's one pending or we're scanning, queue this one */
+ if (!list_empty(&local->roc_list) || local->scanning)
+ goto out_check_combine;
+
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+ goto out_queue;
+ }
+
+ /* otherwise actually kick it off here (for error handling) */
+
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 10 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ if (!duration)
+ duration = 10;
+
+ ret = drv_remain_on_channel(local, channel, channel_type, duration);
if (ret) {
- local->hw_roc_channel = NULL;
- local->hw_roc_cookie = 0;
+ kfree(roc);
+ return ret;
}
- return ret;
+ roc->started = true;
+ goto out_queue;
+
+ out_check_combine:
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->chan != channel || tmp->chan_type != channel_type)
+ continue;
+
+ /*
+ * Extend this ROC if possible:
+ *
+ * If it hasn't started yet, just increase the duration
+ * and add the new one to the list of dependents.
+ */
+ if (!tmp->started) {
+ list_add_tail(&roc->list, &tmp->dependents);
+ tmp->duration = max(tmp->duration, roc->duration);
+ queued = true;
+ break;
+ }
+
+ /* If it has already started, it's more difficult ... */
+ if (local->ops->remain_on_channel) {
+ unsigned long j = jiffies;
+
+ /*
+ * In the offloaded ROC case, if it hasn't begun, add
+ * this new one to the dependent list to be handled
+ * when the master one begins. If it has begun,
+ * check that there's still a minimum time left and
+ * if so, start this one, transmitting the frame, but
+ * add it to the list directly after this one with a
+ * reduced time so we'll ask the driver to execute
+ * it right after finishing the previous one, in the
+ * hope that it'll also be executed right afterwards,
+ * effectively extending the old one.
+ * If there's no minimum time left, just add it to the
+ * normal list.
+ */
+ if (!tmp->hw_begun) {
+ list_add_tail(&roc->list, &tmp->dependents);
+ queued = true;
+ break;
+ }
+
+ if (time_before(j + IEEE80211_ROC_MIN_LEFT,
+ tmp->hw_start_time +
+ msecs_to_jiffies(tmp->duration))) {
+ int new_dur;
+
+ ieee80211_handle_roc_started(roc);
+
+ new_dur = roc->duration -
+ jiffies_to_msecs(tmp->hw_start_time +
+ msecs_to_jiffies(
+ tmp->duration) -
+ j);
+
+ if (new_dur > 0) {
+ /* add right after tmp */
+ list_add(&roc->list, &tmp->list);
+ } else {
+ list_add_tail(&roc->list,
+ &tmp->dependents);
+ }
+ queued = true;
+ }
+ } else if (del_timer_sync(&tmp->work.timer)) {
+ unsigned long new_end;
+
+ /*
+ * In the software ROC case, cancel the timer, if
+ * that fails then the finish work is already
+ * queued/pending and thus we queue the new ROC
+ * normally, if that succeeds then we can extend
+ * the timer duration and TX the frame (if any.)
+ */
+
+ list_add_tail(&roc->list, &tmp->dependents);
+ queued = true;
+
+ new_end = jiffies + msecs_to_jiffies(roc->duration);
+
+ /* ok, it was started & we canceled timer */
+ if (time_after(new_end, tmp->work.timer.expires))
+ mod_timer(&tmp->work.timer, new_end);
+ else
+ add_timer(&tmp->work.timer);
+
+ ieee80211_handle_roc_started(roc);
+ }
+ break;
+ }
+
+ out_queue:
+ if (!queued)
+ list_add_tail(&roc->list, &local->roc_list);
+
+ /*
+ * cookie is either the roc (for normal roc)
+ * or the SKB (for mgmt TX)
+ */
+ if (txskb)
+ *cookie = (unsigned long)txskb;
+ else
+ *cookie = (unsigned long)roc;
+
+ return 0;
}
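
ieee80211_start_roc_work() merges compatible off-channel requests rather than running them back to back: a not-yet-started entry on the same channel simply absorbs the new request by taking the longer duration. A stripped-down sketch of that merge-into-pending idea (hypothetical queue):

#include <stdio.h>

/* toy ROC queue: merge a request into a pending one on the same channel */
struct roc { int chan; unsigned int duration; int started; };

#define MAX_ROC 8
static struct roc queue[MAX_ROC];
static int queued;

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static void request_roc(int chan, unsigned int duration)
{
	int i;

	for (i = 0; i < queued; i++) {
		/* a pending (not yet started) entry on the same channel
		 * can simply absorb the new request */
		if (queue[i].chan == chan && !queue[i].started) {
			queue[i].duration = max_u(queue[i].duration,
						  duration);
			printf("merged into slot %d (now %ums)\n",
			       i, queue[i].duration);
			return;
		}
	}
	if (queued < MAX_ROC) {
		queue[queued].chan = chan;
		queue[queued].duration = duration;
		queue[queued].started = 0;
		printf("queued slot %d\n", queued++);
	}
}

int main(void)
{
	request_roc(6, 50);
	request_roc(6, 100);	/* same channel: extends, does not re-queue */
	request_roc(11, 30);
	return 0;
}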
static int ieee80211_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
+ int ret;
- if (local->ops->remain_on_channel) {
- int ret;
-
- mutex_lock(&local->mtx);
- ret = ieee80211_remain_on_channel_hw(local, dev,
- chan, channel_type,
- duration, cookie);
- local->hw_roc_for_tx = false;
- mutex_unlock(&local->mtx);
-
- return ret;
- }
+ mutex_lock(&local->mtx);
+ ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+ duration, cookie, NULL);
+ mutex_unlock(&local->mtx);
- return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
- duration, cookie);
+ return ret;
}
-static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local,
- u64 cookie)
+static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ u64 cookie, bool mgmt_tx)
{
+ struct ieee80211_roc_work *roc, *tmp, *found = NULL;
int ret;
- lockdep_assert_held(&local->mtx);
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ struct ieee80211_roc_work *dep, *tmp2;
- if (local->hw_roc_cookie != cookie)
- return -ENOENT;
+ list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
+ if (!mgmt_tx && (unsigned long)dep != cookie)
+ continue;
+ else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
+ continue;
+ /* found dependent item -- just remove it */
+ list_del(&dep->list);
+ mutex_unlock(&local->mtx);
- ret = drv_cancel_remain_on_channel(local);
- if (ret)
- return ret;
+ ieee80211_roc_notify_destroy(dep);
+ return 0;
+ }
- local->hw_roc_cookie = 0;
- local->hw_roc_channel = NULL;
+ if (!mgmt_tx && (unsigned long)roc != cookie)
+ continue;
+ else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
+ continue;
+
+ found = roc;
+ break;
+ }
- ieee80211_recalc_idle(local);
+ if (!found) {
+ mutex_unlock(&local->mtx);
+ return -ENOENT;
+ }
- return 0;
-}
+ /*
+ * We found the item to cancel, so do that. Note that it
+ * may have dependents, which we also cancel (and send
+ * the expired signal for.) Not doing so would be quite
+ * tricky here, but we may need to fix it later.
+ */
-static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
- u64 cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
+ if (local->ops->remain_on_channel) {
+ if (found->started) {
+ ret = drv_cancel_remain_on_channel(local);
+ if (WARN_ON_ONCE(ret)) {
+ mutex_unlock(&local->mtx);
+ return ret;
+ }
+ }
- if (local->ops->cancel_remain_on_channel) {
- int ret;
+ list_del(&found->list);
- mutex_lock(&local->mtx);
- ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
+ if (found->started)
+ ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
- return ret;
+ ieee80211_roc_notify_destroy(found);
+ } else {
+ /* work may be pending so use it all the time */
+ found->abort = true;
+ ieee80211_queue_delayed_work(&local->hw, &found->work, 0);
+
+ mutex_unlock(&local->mtx);
+
+ /* work will clean up etc */
+ flush_delayed_work(&found->work);
}
- return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
+ return 0;
}
-static enum work_done_result
-ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
+static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
{
- /*
- * Use the data embedded in the work struct for reporting
- * here so if the driver mangled the SKB before dropping
- * it (which is the only way we really should get here)
- * then we don't report mangled data.
- *
- * If there was no wait time, then by the time we get here
- * the driver will likely not have reported the status yet,
- * so in that case userspace will have to deal with it.
- */
-
- if (wk->offchan_tx.wait && !wk->offchan_tx.status)
- cfg80211_mgmt_tx_status(wk->sdata->dev,
- (unsigned long) wk->offchan_tx.frame,
- wk->data, wk->data_len, false, GFP_KERNEL);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = sdata->local;
- return WORK_DONE_DESTROY;
+ return ieee80211_cancel_roc(local, cookie, false);
}
-static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta;
- struct ieee80211_work *wk;
const struct ieee80211_mgmt *mgmt = (void *)buf;
+ bool need_offchan = false;
u32 flags;
- bool is_offchan = false;
+ int ret;
if (dont_wait_for_ack)
flags = IEEE80211_TX_CTL_NO_ACK;
@@ -2255,33 +2425,28 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
IEEE80211_TX_CTL_REQ_TX_STATUS;
- /* Check that we are on the requested channel for transmission */
- if (chan != local->tmp_channel &&
- chan != local->oper_channel)
- is_offchan = true;
- if (channel_type_valid &&
- (channel_type != local->tmp_channel_type &&
- channel_type != local->_oper_channel_type))
- is_offchan = true;
-
- if (chan == local->hw_roc_channel) {
- /* TODO: check channel type? */
- is_offchan = false;
- flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
- }
-
if (no_cck)
flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
- if (is_offchan && !offchan)
- return -EBUSY;
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
+ if (!sdata->vif.bss_conf.ibss_joined)
+ need_offchan = true;
+ /* fall through */
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ !sdata->u.mesh.mesh_id_len)
+ need_offchan = true;
+ /* fall through */
+#endif
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_MESH_POINT:
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ !ieee80211_vif_is_mesh(&sdata->vif) &&
+ !rcu_access_pointer(sdata->bss->beacon))
+ need_offchan = true;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
break;
@@ -2293,167 +2458,101 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ if (!sdata->u.mgd.associated)
+ need_offchan = true;
break;
default:
return -EOPNOTSUPP;
}
+ mutex_lock(&local->mtx);
+
+ /* Check if the operating channel is the requested channel */
+ if (!need_offchan) {
+ need_offchan = chan != local->oper_channel;
+ if (channel_type_valid &&
+ channel_type != local->_oper_channel_type)
+ need_offchan = true;
+ }
+
+ if (need_offchan && !offchan) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
skb_reserve(skb, local->hw.extra_tx_headroom);
memcpy(skb_put(skb, len), buf, len);
IEEE80211_SKB_CB(skb)->flags = flags;
- if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- IEEE80211_SKB_CB(skb)->hw_queue =
- local->hw.offchannel_tx_hw_queue;
-
skb->dev = sdata->dev;
- *cookie = (unsigned long) skb;
-
- if (is_offchan && local->ops->remain_on_channel) {
- unsigned int duration;
- int ret;
-
- mutex_lock(&local->mtx);
- /*
- * If the duration is zero, then the driver
- * wouldn't actually do anything. Set it to
- * 100 for now.
- *
- * TODO: cancel the off-channel operation
- * when we get the SKB's TX status and
- * the wait time was zero before.
- */
- duration = 100;
- if (wait)
- duration = wait;
- ret = ieee80211_remain_on_channel_hw(local, dev, chan,
- channel_type,
- duration, cookie);
- if (ret) {
- kfree_skb(skb);
- mutex_unlock(&local->mtx);
- return ret;
- }
-
- local->hw_roc_for_tx = true;
- local->hw_roc_duration = wait;
-
- /*
- * queue up frame for transmission after
- * ieee80211_ready_on_channel call
- */
+ if (!need_offchan) {
+ *cookie = (unsigned long) skb;
+ ieee80211_tx_skb(sdata, skb);
+ ret = 0;
+ goto out_unlock;
+ }
- /* modify cookie to prevent API mismatches */
- *cookie ^= 2;
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
IEEE80211_SKB_CB(skb)->hw_queue =
local->hw.offchannel_tx_hw_queue;
- local->hw_roc_skb = skb;
- local->hw_roc_skb_for_status = skb;
- mutex_unlock(&local->mtx);
-
- return 0;
- }
-
- /*
- * Can transmit right away if the channel was the
- * right one and there's no wait involved... If a
- * wait is involved, we might otherwise not be on
- * the right channel for long enough!
- */
- if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) {
- ieee80211_tx_skb(sdata, skb);
- return 0;
- }
- wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL);
- if (!wk) {
+ /* This will handle all kinds of coalescing and immediate TX */
+ ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+ wait, cookie, skb);
+ if (ret)
kfree_skb(skb);
- return -ENOMEM;
- }
-
- wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
- wk->chan = chan;
- wk->chan_type = channel_type;
- wk->sdata = sdata;
- wk->done = ieee80211_offchan_tx_done;
- wk->offchan_tx.frame = skb;
- wk->offchan_tx.wait = wait;
- wk->data_len = len;
- memcpy(wk->data, buf, len);
-
- ieee80211_add_work(wk);
- return 0;
+ out_unlock:
+ mutex_unlock(&local->mtx);
+ return ret;
}
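/*
 * Usage sketch, hypothetical and not in the patch: cfg80211 hands a
 * frame to the .mgmt_tx op and may later cancel the wait using the
 * cookie it got back.  With this rework the same cookie is valid
 * whether the frame was sent immediately or queued as a ROC item.
 *
 *	u64 cookie;
 *	int err;
 *
 *	err = rdev->ops->mgmt_tx(wiphy, wdev, chan, true,
 *				 NL80211_CHAN_NO_HT, false, 100,
 *				 buf, len, false, false, &cookie);
 *	if (!err)
 *		rdev->ops->mgmt_tx_cancel_wait(wiphy, wdev, cookie);
 */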
static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk;
- int ret = -ENOENT;
-
- mutex_lock(&local->mtx);
-
- if (local->ops->cancel_remain_on_channel) {
- cookie ^= 2;
- ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
-
- if (ret == 0) {
- kfree_skb(local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- local->hw_roc_skb_for_status = NULL;
- }
-
- mutex_unlock(&local->mtx);
-
- return ret;
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
-
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
-
- if (cookie != (unsigned long) wk->offchan_tx.frame)
- continue;
-
- wk->timeout = jiffies;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
- ret = 0;
- break;
- }
- mutex_unlock(&local->mtx);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
- return ret;
+ return ieee80211_cancel_roc(local, cookie, true);
}
static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
- return;
+ switch (frame_type) {
+ case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- if (reg)
- local->probe_req_reg++;
- else
- local->probe_req_reg--;
+ if (reg)
+ ifibss->auth_frame_registrations++;
+ else
+ ifibss->auth_frame_registrations--;
+ }
+ break;
+ case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
+ if (reg)
+ local->probe_req_reg++;
+ else
+ local->probe_req_reg--;
- ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+ ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+ break;
+ default:
+ break;
+ }
}
static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
@@ -2573,8 +2672,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_req.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -2587,8 +2686,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -2648,8 +2747,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
default:
@@ -2679,9 +2778,8 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
!sdata->u.mgd.associated)
return -EINVAL;
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
- printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
-#endif
+ tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
+ action_code, peer);
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
max(sizeof(struct ieee80211_mgmt),
@@ -2790,9 +2888,7 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EINVAL;
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
- printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
-#endif
+ tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
@@ -2889,8 +2985,8 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
}
static struct ieee80211_channel *
-ieee80211_wiphy_get_channel(struct wiphy *wiphy,
- enum nl80211_channel_type *type)
+ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_channel_type *type)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -2936,7 +3032,7 @@ struct cfg80211_ops mac80211_config_ops = {
#endif
.change_bss = ieee80211_change_bss,
.set_txq_params = ieee80211_set_txq_params,
- .set_channel = ieee80211_set_channel,
+ .set_monitor_channel = ieee80211_set_monitor_channel,
.suspend = ieee80211_suspend,
.resume = ieee80211_resume,
.scan = ieee80211_scan,
@@ -2971,7 +3067,6 @@ struct cfg80211_ops mac80211_config_ops = {
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
.probe_client = ieee80211_probe_client,
- .get_channel = ieee80211_wiphy_get_channel,
.set_noack_map = ieee80211_set_noack_map,
#ifdef CONFIG_PM
.set_wakeup = ieee80211_set_wakeup,
@@ -2979,4 +3074,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_et_sset_count = ieee80211_get_et_sset_count,
.get_et_stats = ieee80211_get_et_stats,
.get_et_strings = ieee80211_get_et_strings,
+ .get_channel = ieee80211_cfg_get_channel,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c76cf7230c7..f0f87e5a1d3 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -41,6 +41,10 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
if (!sdata->u.ap.beacon)
continue;
break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!sdata->wdev.mesh_id_len)
+ continue;
+ break;
default:
break;
}
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
new file mode 100644
index 00000000000..8f383a57601
--- /dev/null
+++ b/net/mac80211/debug.h
@@ -0,0 +1,170 @@
+#ifndef __MAC80211_DEBUG_H
+#define __MAC80211_DEBUG_H
+#include <net/cfg80211.h>
+
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+#define MAC80211_IBSS_DEBUG 1
+#else
+#define MAC80211_IBSS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_PS_DEBUG
+#define MAC80211_PS_DEBUG 1
+#else
+#define MAC80211_PS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+#define MAC80211_HT_DEBUG 1
+#else
+#define MAC80211_HT_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPL_DEBUG
+#define MAC80211_MPL_DEBUG 1
+#else
+#define MAC80211_MPL_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPATH_DEBUG
+#define MAC80211_MPATH_DEBUG 1
+#else
+#define MAC80211_MPATH_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MHWMP_DEBUG
+#define MAC80211_MHWMP_DEBUG 1
+#else
+#define MAC80211_MHWMP_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESH_SYNC_DEBUG
+#define MAC80211_MESH_SYNC_DEBUG 1
+#else
+#define MAC80211_MESH_SYNC_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_TDLS_DEBUG
+#define MAC80211_TDLS_DEBUG 1
+#else
+#define MAC80211_TDLS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_STA_DEBUG
+#define MAC80211_STA_DEBUG 1
+#else
+#define MAC80211_STA_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MLME_DEBUG
+#define MAC80211_MLME_DEBUG 1
+#else
+#define MAC80211_MLME_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+void __sdata_info(const char *fmt, ...) __printf(1, 2);
+void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3);
+void __sdata_err(const char *fmt, ...) __printf(1, 2);
+void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
+ __printf(3, 4);
+
+#define _sdata_info(sdata, fmt, ...) \
+ __sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_dbg(print, sdata, fmt, ...) \
+ __sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_err(sdata, fmt, ...) \
+ __sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+ __wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__)
+#else
+#define _sdata_info(sdata, fmt, ...) \
+do { \
+ pr_info("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_dbg(print, sdata, fmt, ...) \
+do { \
+ if (print) \
+ pr_debug("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_err(sdata, fmt, ...) \
+do { \
+ pr_err("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+do { \
+ if (print) \
+ wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define sdata_info(sdata, fmt, ...) \
+ _sdata_info(sdata, fmt, ##__VA_ARGS__)
+#define sdata_err(sdata, fmt, ...) \
+ _sdata_err(sdata, fmt, ##__VA_ARGS__)
+#define sdata_dbg(sdata, fmt, ...) \
+ _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_HT_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ibss_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_IBSS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_PS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_hw(hw, fmt, ...) \
+ _wiphy_dbg(MAC80211_PS_DEBUG, \
+ (hw)->wiphy, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mpl_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MPL_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mpath_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MPATH_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mhwmp_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MHWMP_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define msync_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define tdls_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_TDLS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define sta_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_STA_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MLME_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#endif /* __MAC80211_DEBUG_H */
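The always-defined 0/1 constants above let these macros compile away when
the corresponding Kconfig option is off, while still type-checking their
arguments. A minimal sketch of the expansion, assuming
CONFIG_MAC80211_IBSS_DEBUG is unset and message tracing is disabled:

	ibss_dbg(sdata, "RX Auth SA=%pM (auth_transaction=%d)\n",
		 mgmt->sa, auth_transaction);

	/* becomes */

	do {
		if (0)	/* MAC80211_IBSS_DEBUG */
			pr_debug("%s: " "RX Auth SA=%pM (auth_transaction=%d)\n",
				 sdata->name, mgmt->sa, auth_transaction);
	} while (0);

so the compiler discards the dead branch but the format string is still
checked against its arguments.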
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 778e5916d7c..b8dfb440c8e 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -325,8 +325,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
local->rx_handlers_drop_defrag);
DEBUGFS_STATS_ADD(rx_handlers_drop_short,
local->rx_handlers_drop_short);
- DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
- local->rx_handlers_drop_passive_scan);
DEBUGFS_STATS_ADD(tx_expand_skb_head,
local->tx_expand_skb_head);
DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7932767bb48..090d08ff22c 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -283,6 +283,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&sdata->local->key_mtx);
+ if (sdata->debugfs.default_unicast_key) {
+ debugfs_remove(sdata->debugfs.default_unicast_key);
+ sdata->debugfs.default_unicast_key = NULL;
+ }
+
if (sdata->default_unicast_key) {
key = key_mtx_dereference(sdata->local,
sdata->default_unicast_key);
@@ -290,9 +295,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.default_unicast_key =
debugfs_create_symlink("default_unicast_key",
sdata->debugfs.dir, buf);
- } else {
- debugfs_remove(sdata->debugfs.default_unicast_key);
- sdata->debugfs.default_unicast_key = NULL;
+ }
+
+ if (sdata->debugfs.default_multicast_key) {
+ debugfs_remove(sdata->debugfs.default_multicast_key);
+ sdata->debugfs.default_multicast_key = NULL;
}
if (sdata->default_multicast_key) {
@@ -302,9 +309,6 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.default_multicast_key =
debugfs_create_symlink("default_multicast_key",
sdata->debugfs.dir, buf);
- } else {
- debugfs_remove(sdata->debugfs.default_multicast_key);
- sdata->debugfs.default_multicast_key = NULL;
}
}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 7ed433c66d6..6d5aec9418e 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -468,48 +468,54 @@ IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_congestion,
- u.mesh.mshstats.dropped_frames_congestion, DEC);
+ u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
- u.mesh.mshstats.dropped_frames_no_route, DEC);
+ u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
/* Mesh parameters */
IEEE80211_IF_FILE(dot11MeshMaxRetries,
- u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
+ u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
IEEE80211_IF_FILE(dot11MeshRetryTimeout,
- u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
- u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
- u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
- u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
+ u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
- u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
- u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
- u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
- u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
- u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
IEEE80211_IF_FILE(path_refresh_time,
- u.mesh.mshcfg.path_refresh_time, DEC);
+ u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
- u.mesh.mshcfg.min_discovery_timeout, DEC);
+ u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
- u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
- u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+ u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
- u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout,
+ u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
+ u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
+ u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
@@ -607,9 +613,13 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(min_discovery_timeout);
MESHPARAMS_ADD(dot11MeshHWMPRootMode);
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+ MESHPARAMS_ADD(dot11MeshForwarding);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
MESHPARAMS_ADD(ht_opmode);
+ MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
+ MESHPARAMS_ADD(dot11MeshHWMProotInterval);
+ MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
#undef MESHPARAMS_ADD
}
#endif
@@ -685,6 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
sprintf(buf, "netdev:%s", sdata->name);
if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
- printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
- "dir to %s\n", buf);
+ sdata_err(sdata,
+ "debugfs: failed to rename debugfs dir to %s\n",
+ buf);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 6d33a0c743a..df920319910 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -3,7 +3,7 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "trace.h"
static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
{
@@ -27,14 +27,6 @@ static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
local->ops->tx(&local->hw, skb);
}
-static inline void drv_tx_frags(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct sk_buff_head *skbs)
-{
- local->ops->tx_frags(&local->hw, vif, sta, skbs);
-}
-
static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
u32 sset, u8 *data)
{
@@ -845,4 +837,33 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
more_data);
trace_drv_return_void(local);
}
+
+static inline int drv_get_rssi(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ s8 *rssi_dbm)
+{
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->get_rssi(&local->hw, &sdata->vif, sta, rssi_dbm);
+ trace_drv_get_rssi(local, sta, *rssi_dbm, ret);
+
+ return ret;
+}
+
+static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+
+ check_sdata_in_driver(sdata);
+ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+ trace_drv_mgd_prepare_tx(local, sdata);
+ if (local->ops->mgd_prepare_tx)
+ local->ops->mgd_prepare_tx(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
#endif /* __MAC80211_DRIVER_OPS */
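drv_mgd_prepare_tx() follows the usual driver-ops wrapper pattern: check
the sdata, trace entry, call the optional hook, trace return. A
hypothetical driver implementation, with all names invented for
illustration, might look like:

	static void mydrv_mgd_prepare_tx(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
	{
		struct mydrv_priv *priv = hw->priv;	/* hypothetical driver state */

		/* e.g. leave powersave so auth/assoc frames go out promptly */
		mydrv_exit_powersave(priv);		/* hypothetical helper */
	}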
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c
deleted file mode 100644
index 8ed8711b1a6..00000000000
--- a/net/mac80211/driver-trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
-/* bug in tracepoint.h, it should include this */
-#include <linux/module.h>
-
-/* sparse isn't too happy with all macros... */
-#ifndef __CHECKER__
-#include "driver-ops.h"
-#define CREATE_TRACE_POINTS
-#include "driver-trace.h"
-#endif
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 6f8615c54b2..4b4538d6392 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -305,12 +305,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
- mgmt->sa, initiator ? "initiator" : "recipient",
- tid,
- le16_to_cpu(mgmt->u.action.u.delba.reason_code));
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n",
+ mgmt->sa, initiator ? "initiator" : "recipient",
+ tid,
+ le16_to_cpu(mgmt->u.action.u.delba.reason_code));
if (initiator == WLAN_BACK_INITIATOR)
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33d9d0c3e3d..5746d62faba 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -82,8 +82,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
local->oper_channel = chan;
channel_type = ifibss->channel_type;
- if (channel_type > NL80211_CHAN_HT20 &&
- !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+ if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
channel_type = NL80211_CHAN_HT20;
if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
/* can only fail due to HT40+/- mismatch */
@@ -262,11 +261,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
memcpy(addr, sta->sta.addr, ETH_ALEN);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(sdata->local->hw.wiphy,
- "Adding new IBSS station %pM (dev=%s)\n",
- addr, sdata->name);
-#endif
+ ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr);
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -280,12 +275,10 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
return sta_info_get(sdata, addr);
- if (auth) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
- "(auth_transaction=1)\n", sdata->vif.addr,
- sdata->u.ibss.bssid, addr);
-#endif
+ if (auth && !sdata->u.ibss.auth_frame_registrations) {
+ ibss_dbg(sdata,
+ "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
+ sdata->vif.addr, sdata->u.ibss.bssid, addr);
ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
addr, sdata->u.ibss.bssid, NULL, 0, 0);
}
@@ -308,7 +301,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
sdata->name, addr);
rcu_read_lock();
return NULL;
@@ -355,11 +348,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
- "(auth_transaction=%d)\n",
- sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
-#endif
+ ibss_dbg(sdata,
+ "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
+ mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
sta_info_destroy_addr(sdata, mgmt->sa);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
rcu_read_unlock();
@@ -422,15 +413,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_mandatory_rates(local, band);
if (sta->sta.supp_rates[band] != prev_rates) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG
- "%s: updated supp_rates set "
- "for %pM based on beacon"
- "/probe_resp (0x%x -> 0x%x)\n",
- sdata->name, sta->sta.addr,
- prev_rates,
- sta->sta.supp_rates[band]);
-#endif
+ ibss_dbg(sdata,
+ "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
+ sta->sta.addr, prev_rates,
+ sta->sta.supp_rates[band]);
rates_updated = true;
}
} else {
@@ -545,22 +531,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rx_timestamp = drv_get_tsf(local, sdata);
}
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
- "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
- mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
- (unsigned long long)beacon_timestamp,
- (unsigned long long)(rx_timestamp - beacon_timestamp),
- jiffies);
-#endif
+ ibss_dbg(sdata,
+ "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ mgmt->sa, mgmt->bssid,
+ (unsigned long long)rx_timestamp,
+ (unsigned long long)beacon_timestamp,
+ (unsigned long long)(rx_timestamp - beacon_timestamp),
+ jiffies);
if (beacon_timestamp > rx_timestamp) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: beacon TSF higher than "
- "local TSF - IBSS merge with BSSID %pM\n",
- sdata->name, mgmt->bssid);
-#endif
+ ibss_dbg(sdata,
+ "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
+ mgmt->bssid);
ieee80211_sta_join_ibss(sdata, bss);
supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
@@ -586,7 +568,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
sdata->name, addr);
return;
}
@@ -662,8 +644,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
if (ifibss->fixed_channel)
return;
- printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
- "IBSS networks with same SSID (merge)\n", sdata->name);
+ sdata_info(sdata,
+ "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
ieee80211_request_internal_scan(sdata,
ifibss->ssid, ifibss->ssid_len, NULL);
@@ -691,8 +673,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
bssid[0] |= 0x02;
}
- printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
- sdata->name, bssid);
+ sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid);
capability = WLAN_CAPABILITY_IBSS;
@@ -723,10 +704,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&ifibss->mtx);
active_ibss = ieee80211_sta_active_ibss(sdata);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
- sdata->name, active_ibss);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
if (active_ibss)
return;
@@ -749,29 +727,24 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
struct ieee80211_bss *bss;
bss = (void *)cbss->priv;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
- "%pM\n", cbss->bssid, ifibss->bssid);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-
- printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
- " based on configured SSID\n",
- sdata->name, cbss->bssid);
+ ibss_dbg(sdata,
+ "sta_find_ibss: selected %pM current %pM\n",
+ cbss->bssid, ifibss->bssid);
+ sdata_info(sdata,
+ "Selected IBSS BSSID %pM based on configured SSID\n",
+ cbss->bssid);
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_rx_bss_put(local, bss);
return;
}
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG " did not try to join ibss\n");
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
/* Selected IBSS not found in current scan results - try to scan */
if (time_after(jiffies, ifibss->last_scan_completed +
IEEE80211_SCAN_INTERVAL)) {
- printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
- "join\n", sdata->name);
+ sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
ieee80211_request_internal_scan(sdata,
ifibss->ssid, ifibss->ssid_len,
@@ -785,9 +758,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_create_ibss(sdata);
return;
}
- printk(KERN_DEBUG "%s: IBSS not allowed on"
- " %d MHz\n", sdata->name,
- local->hw.conf.channel->center_freq);
+ sdata_info(sdata, "IBSS not allowed on %d MHz\n",
+ local->hw.conf.channel->center_freq);
/* No IBSS found - decrease scan interval and continue
* scanning. */
@@ -822,12 +794,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
tx_last_beacon = drv_tx_last_beacon(local);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
- " (tx_last_beacon=%d)\n",
- sdata->name, mgmt->sa, mgmt->da,
- mgmt->bssid, tx_last_beacon);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata,
+ "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
@@ -840,11 +809,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
pos = mgmt->u.probe_req.variable;
if (pos[0] != WLAN_EID_SSID ||
pos + 2 + pos[1] > end) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
- "from %pM\n",
- sdata->name, mgmt->sa);
-#endif
+ ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n",
+ mgmt->sa);
return;
}
if (pos[1] != 0 &&
@@ -861,10 +827,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
resp = (struct ieee80211_mgmt *) skb->data;
memcpy(resp->da, mgmt->sa, ETH_ALEN);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
- sdata->name, resp->da);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "Sending ProbeResp to %pM\n", resp->da);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3f3cd50fff1..bb61f7718c4 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -30,6 +30,7 @@
#include <net/mac80211.h>
#include "key.h"
#include "sta_info.h"
+#include "debug.h"
struct ieee80211_local;
@@ -55,11 +56,14 @@ struct ieee80211_local;
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/*
+ * Some APs experience problems when working with U-APSD. Decrease the
+ * probability of that happening by using legacy mode for all ACs but VO.
+ * The AP that caused us trouble was a Cisco 4410N. It ignores our
+ * setting, and always treats non-VO ACs as legacy.
+ */
#define IEEE80211_DEFAULT_UAPSD_QUEUES \
- (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
#define IEEE80211_DEFAULT_MAX_SP_LEN \
IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -81,6 +85,8 @@ struct ieee80211_bss {
size_t ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u32 device_ts;
+
u8 dtim_period;
bool wmm_used;
@@ -203,7 +209,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* enum ieee80211_packet_rx_flags - packet RX flags
* @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
* (incl. multicast frames)
- * @IEEE80211_RX_IN_SCAN: received while scanning
* @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -213,7 +218,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* @rx_flags field of &struct ieee80211_rx_status.
*/
enum ieee80211_packet_rx_flags {
- IEEE80211_RX_IN_SCAN = BIT(0),
IEEE80211_RX_RA_MATCH = BIT(1),
IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
@@ -317,55 +321,30 @@ struct mesh_preq_queue {
u8 flags;
};
-enum ieee80211_work_type {
- IEEE80211_WORK_ABORT,
- IEEE80211_WORK_REMAIN_ON_CHANNEL,
- IEEE80211_WORK_OFFCHANNEL_TX,
-};
-
-/**
- * enum work_done_result - indicates what to do after work was done
- *
- * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
- * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
- * should be requeued.
- */
-enum work_done_result {
- WORK_DONE_DESTROY,
- WORK_DONE_REQUEUE,
-};
+#if HZ/100 == 0
+#define IEEE80211_ROC_MIN_LEFT 1
+#else
+#define IEEE80211_ROC_MIN_LEFT (HZ/100)
+#endif
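/*
 * For example, CONFIG_HZ=100 gives HZ/100 == 1 jiffy (~10 ms); with
 * HZ below 100 the integer division would evaluate to 0, so the #if
 * above clamps the minimum to one jiffy.
 */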
-struct ieee80211_work {
+struct ieee80211_roc_work {
struct list_head list;
+ struct list_head dependents;
- struct rcu_head rcu_head;
+ struct delayed_work work;
struct ieee80211_sub_if_data *sdata;
- enum work_done_result (*done)(struct ieee80211_work *wk,
- struct sk_buff *skb);
-
struct ieee80211_channel *chan;
enum nl80211_channel_type chan_type;
- unsigned long timeout;
- enum ieee80211_work_type type;
+ bool started, abort, hw_begun, notified;
- bool started;
+ unsigned long hw_start_time;
- union {
- struct {
- u32 duration;
- } remain;
- struct {
- struct sk_buff *frame;
- u32 wait;
- bool status;
- } offchan_tx;
- };
-
- size_t data_len;
- u8 data[];
+ u32 duration, req_duration;
+ struct sk_buff *frame;
+ u64 mgmt_tx_cookie;
};
/* flags used in struct ieee80211_if_managed.flags */
@@ -399,7 +378,6 @@ struct ieee80211_mgd_auth_data {
struct ieee80211_mgd_assoc_data {
struct cfg80211_bss *bss;
const u8 *supp_rates;
- const u8 *ht_operation_ie;
unsigned long timeout;
int tries;
@@ -414,6 +392,8 @@ struct ieee80211_mgd_assoc_data {
bool sent_assoc;
bool synced;
+ u8 ap_ht_param;
+
size_t ie_len;
u8 ie[];
};
@@ -532,6 +512,7 @@ struct ieee80211_if_ibss {
bool privacy;
bool control_port;
+ unsigned int auth_frame_registrations;
u8 bssid[ETH_ALEN] __aligned(2);
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -701,6 +682,9 @@ struct ieee80211_sub_if_data {
/* TID bitmap for NoAck policy */
u16 noack_map;
+ /* bit field of ACM bits (BIT(802.1D tag)) */
+ u8 wmm_acm;
+
struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
struct ieee80211_key __rcu *default_multicast_key;
@@ -847,13 +831,6 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
/*
- * work stuff, potentially off-channel (in the future)
- */
- struct list_head work_list;
- struct timer_list work_timer;
- struct work_struct work_work;
-
- /*
* private workqueue to mac80211. mac80211 makes this accessible
* via ieee80211_queue_work()
*/
@@ -912,6 +889,9 @@ struct ieee80211_local {
/* device is started */
bool started;
+ /* device is during a HW reconfig */
+ bool in_reconfig;
+
/* wowlan is enabled -- don't reconfig on resume */
bool wowlan;
@@ -985,14 +965,14 @@ struct ieee80211_local {
int scan_channel_idx;
int scan_ies_len;
- bool sched_scanning;
struct ieee80211_sched_scan_ies sched_scan_ies;
struct work_struct sched_scan_stopped_work;
+ struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
- struct ieee80211_sub_if_data *scan_sdata;
+ struct ieee80211_sub_if_data __rcu *scan_sdata;
enum nl80211_channel_type _oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
@@ -1034,7 +1014,6 @@ struct ieee80211_local {
unsigned int rx_handlers_drop_nullfunc;
unsigned int rx_handlers_drop_defrag;
unsigned int rx_handlers_drop_short;
- unsigned int rx_handlers_drop_passive_scan;
unsigned int tx_expand_skb_head;
unsigned int tx_expand_skb_head_cloned;
unsigned int rx_expand_skb_head;
@@ -1050,7 +1029,6 @@ struct ieee80211_local {
int total_ps_buffered; /* total number of all buffered unicast and
* multicast packets for power saving stations
*/
- unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
bool pspolling;
bool offchannel_ps_enabled;
@@ -1087,14 +1065,12 @@ struct ieee80211_local {
} debugfs;
#endif
- struct ieee80211_channel *hw_roc_channel;
- struct net_device *hw_roc_dev;
- struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
+ /*
+ * Remain-on-channel support
+ */
+ struct list_head roc_list;
struct work_struct hw_roc_start, hw_roc_done;
- enum nl80211_channel_type hw_roc_channel_type;
- unsigned int hw_roc_duration;
- u32 hw_roc_cookie;
- bool hw_roc_for_tx;
+ unsigned long hw_roc_start_time;
struct idr ack_status_frames;
spinlock_t ack_status_lock;
@@ -1114,6 +1090,12 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
return netdev_priv(dev);
}
+static inline struct ieee80211_sub_if_data *
+IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
+{
+ return container_of(wdev, struct ieee80211_sub_if_data, wdev);
+}
+
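/*
 * Sketch, not part of the patch: the container_of() above is valid
 * because every sub-interface embeds its wireless_dev, so a wdev
 * pointer handed back by cfg80211 can be walked to the enclosing
 * ieee80211_sub_if_data:
 *
 *	struct ieee80211_sub_if_data {
 *		...
 *		struct wireless_dev wdev;
 *		...
 *	};
 */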
/* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid {
u8 ra[ETH_ALEN];
@@ -1264,8 +1246,7 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
void ieee80211_run_deferred_scan(struct ieee80211_local *local);
-ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
struct ieee80211_bss *
@@ -1290,19 +1271,23 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool offchannel_ps_disable);
-void ieee80211_hw_roc_setup(struct ieee80211_local *local);
+void ieee80211_roc_setup(struct ieee80211_local *local);
+void ieee80211_start_next_roc(struct ieee80211_local *local);
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
+void ieee80211_sw_roc_work(struct work_struct *work);
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
/* interface handling */
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- struct net_device **new_dev, enum nl80211_iftype type,
+ struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params);
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type);
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
void ieee80211_remove_interfaces(struct ieee80211_local *local);
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset);
@@ -1499,18 +1484,12 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
u16 prot_mode);
-
-/* internal work items */
-void ieee80211_work_init(struct ieee80211_local *local);
-void ieee80211_add_work(struct ieee80211_work *wk);
-void free_work(struct ieee80211_work *wk);
-void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
-int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int duration, u64 *cookie);
-int ieee80211_wk_cancel_remain_on_channel(
- struct ieee80211_sub_if_data *sdata, u64 cookie);
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap);
+int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, bool need_basic);
+int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, bool need_basic);
/* channel management */
enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8664111d056..bfb57dcc153 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -43,6 +43,128 @@
*/
+static u32 ieee80211_idle_off(struct ieee80211_local *local,
+ const char *reason)
+{
+ if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
+ return 0;
+
+ local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
+ return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 ieee80211_idle_on(struct ieee80211_local *local)
+{
+ if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
+ return 0;
+
+ drv_flush(local, false);
+
+ local->hw.conf.flags |= IEEE80211_CONF_IDLE;
+ return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int count = 0;
+ bool working = false, scanning = false;
+ unsigned int led_trig_start = 0, led_trig_stop = 0;
+ struct ieee80211_roc_work *roc;
+
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
+ !lockdep_is_held(&local->iflist_mtx));
+#endif
+ lockdep_assert_held(&local->mtx);
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata)) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+
+ sdata->old_idle = sdata->vif.bss_conf.idle;
+
+ /* do not count disabled managed interfaces */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated &&
+ !sdata->u.mgd.auth_data &&
+ !sdata->u.mgd.assoc_data) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+ /* do not count unused IBSS interfaces */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+ !sdata->u.ibss.ssid_len) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+ /* count everything else */
+ sdata->vif.bss_conf.idle = false;
+ count++;
+ }
+
+ if (!local->ops->remain_on_channel) {
+ list_for_each_entry(roc, &local->roc_list, list) {
+ working = true;
+ roc->sdata->vif.bss_conf.idle = false;
+ }
+ }
+
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
+ if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
+ scanning = true;
+ sdata->vif.bss_conf.idle = false;
+ }
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
+ if (sdata->old_idle == sdata->vif.bss_conf.idle)
+ continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+ }
+
+ if (working || scanning)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+
+ if (count)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+
+ ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
+
+ if (working)
+ return ieee80211_idle_off(local, "working");
+ if (scanning)
+ return ieee80211_idle_off(local, "scanning");
+ if (!count)
+ return ieee80211_idle_on(local);
+ else
+ return ieee80211_idle_off(local, "in use");
+
+ return 0;
+}
+
+void ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+ u32 chg;
+
+ mutex_lock(&local->iflist_mtx);
+ chg = __ieee80211_recalc_idle(local);
+ mutex_unlock(&local->iflist_mtx);
+ if (chg)
+ ieee80211_hw_config(local, chg);
+}
+
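/*
 * Illustration, not in the patch: the decision order implemented by
 * __ieee80211_recalc_idle() above.
 *
 *	pending software ROC item  -> idle off ("working")
 *	non-idle-capable scan      -> idle off ("scanning")
 *	no active interfaces       -> idle on
 *	anything else active       -> idle off ("in use")
 */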
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
int meshhdrlen;
@@ -57,9 +179,6 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
dev->mtu = new_mtu;
return 0;
}
@@ -100,15 +219,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *nsdata;
- struct net_device *dev = sdata->dev;
ASSERT_RTNL();
/* we hold the RTNL here so can safely walk the list */
list_for_each_entry(nsdata, &local->interfaces, list) {
- struct net_device *ndev = nsdata->dev;
-
- if (ndev != dev && ieee80211_sdata_running(nsdata)) {
+ if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
/*
* Allow only a single IBSS interface to be up at any
* time. This is restricted because beacon distribution
@@ -127,7 +243,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* The remaining checks are only performed for interfaces
* with the same MAC address.
*/
- if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
+ if (!ether_addr_equal(sdata->vif.addr,
+ nsdata->vif.addr))
continue;
/*
@@ -217,17 +334,21 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int ret;
+ int ret = 0;
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return 0;
+ mutex_lock(&local->iflist_mtx);
+
if (local->monitor_sdata)
- return 0;
+ goto out_unlock;
sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
- if (!sdata)
- return -ENOMEM;
+ if (!sdata) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* set up data */
sdata->local = local;
@@ -241,18 +362,19 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
if (WARN_ON(ret)) {
/* ok .. stupid driver, it asked for this! */
kfree(sdata);
- return ret;
+ goto out_unlock;
}
ret = ieee80211_check_queues(sdata);
if (ret) {
kfree(sdata);
- return ret;
+ goto out_unlock;
}
rcu_assign_pointer(local->monitor_sdata, sdata);
-
- return 0;
+ out_unlock:
+ mutex_unlock(&local->iflist_mtx);
+ return ret;
}
static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -262,10 +384,12 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return;
- sdata = rtnl_dereference(local->monitor_sdata);
+ mutex_lock(&local->iflist_mtx);
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
if (!sdata)
- return;
+ goto out_unlock;
rcu_assign_pointer(local->monitor_sdata, NULL);
synchronize_net();
@@ -273,6 +397,8 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
drv_remove_interface(local, sdata);
kfree(sdata);
+ out_unlock:
+ mutex_unlock(&local->iflist_mtx);
}
/*
@@ -520,7 +646,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (local->scan_sdata == sdata)
+ if (rcu_access_pointer(local->scan_sdata) == sdata)
ieee80211_scan_cancel(local);
/*
@@ -528,10 +654,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
*/
netif_tx_stop_all_queues(sdata->dev);
- /*
- * Purge work for this interface.
- */
- ieee80211_work_purge(sdata);
+ ieee80211_roc_purge(sdata);
/*
* Remove all stations associated with this interface.
@@ -637,18 +760,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local);
break;
default:
- mutex_lock(&local->mtx);
- if (local->hw_roc_dev == sdata->dev &&
- local->hw_roc_channel) {
- /* ignore return value since this is racy */
- drv_cancel_remain_on_channel(local);
- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
- }
- mutex_unlock(&local->mtx);
-
- flush_work(&local->hw_roc_start);
- flush_work(&local->hw_roc_done);
-
flush_work(&sdata->work);
/*
* When we get here, the interface is marked down.
@@ -823,7 +934,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
- return ieee80211_select_queue_80211(local, skb, hdr);
+ return ieee80211_select_queue_80211(sdata, skb, hdr);
}
static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -1238,7 +1349,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
/* not a contiguous mask ... not handled now! */
- printk(KERN_DEBUG "not contiguous\n");
+ pr_info("not contiguous\n");
break;
}
@@ -1284,7 +1395,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
}
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- struct net_device **new_dev, enum nl80211_iftype type,
+ struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params)
{
struct net_device *ndev;
@@ -1364,6 +1475,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->u.mgd.use_4addr = params->use_4addr;
}
+ ndev->features |= local->hw.netdev_features;
+
ret = register_netdevice(ndev);
if (ret)
goto fail;
@@ -1372,8 +1485,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
list_add_tail_rcu(&sdata->list, &local->interfaces);
mutex_unlock(&local->iflist_mtx);
- if (new_dev)
- *new_dev = ndev;
+ if (new_wdev)
+ *new_wdev = &sdata->wdev;
return 0;
@@ -1421,138 +1534,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_del(&unreg_list);
}
-static u32 ieee80211_idle_off(struct ieee80211_local *local,
- const char *reason)
-{
- if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
- return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
-#endif
-
- local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
- return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-static u32 ieee80211_idle_on(struct ieee80211_local *local)
-{
- if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
- return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "device now idle\n");
-#endif
-
- drv_flush(local, false);
-
- local->hw.conf.flags |= IEEE80211_CONF_IDLE;
- return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
- int count = 0;
- bool working = false, scanning = false, hw_roc = false;
- struct ieee80211_work *wk;
- unsigned int led_trig_start = 0, led_trig_stop = 0;
-
-#ifdef CONFIG_PROVE_LOCKING
- WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
- !lockdep_is_held(&local->iflist_mtx));
-#endif
- lockdep_assert_held(&local->mtx);
-
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata)) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
-
- sdata->old_idle = sdata->vif.bss_conf.idle;
-
- /* do not count disabled managed interfaces */
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated &&
- !sdata->u.mgd.auth_data &&
- !sdata->u.mgd.assoc_data) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
- /* do not count unused IBSS interfaces */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- !sdata->u.ibss.ssid_len) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
- /* count everything else */
- sdata->vif.bss_conf.idle = false;
- count++;
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- working = true;
- wk->sdata->vif.bss_conf.idle = false;
- }
-
- if (local->scan_sdata &&
- !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
- scanning = true;
- local->scan_sdata->vif.bss_conf.idle = false;
- }
-
- if (local->hw_roc_channel)
- hw_roc = true;
-
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
- if (sdata->old_idle == sdata->vif.bss_conf.idle)
- continue;
- if (!ieee80211_sdata_running(sdata))
- continue;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
- }
-
- if (working || scanning || hw_roc)
- led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
- else
- led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
-
- if (count)
- led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
- else
- led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
-
- ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
-
- if (hw_roc)
- return ieee80211_idle_off(local, "hw remain-on-channel");
- if (working)
- return ieee80211_idle_off(local, "working");
- if (scanning)
- return ieee80211_idle_off(local, "scanning");
- if (!count)
- return ieee80211_idle_on(local);
- else
- return ieee80211_idle_off(local, "in use");
-
- return 0;
-}
-
-void ieee80211_recalc_idle(struct ieee80211_local *local)
-{
- u32 chg;
-
- mutex_lock(&local->iflist_mtx);
- chg = __ieee80211_recalc_idle(local);
- mutex_unlock(&local->iflist_mtx);
- if (chg)
- ieee80211_hw_config(local, chg);
-}
-
static int netdev_notify(struct notifier_block *nb,
unsigned long state,
void *ndev)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5bb600d93d7..7ae678ba5d6 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
}
if (ret != -ENOSPC && ret != -EOPNOTSUPP)
- wiphy_err(key->local->hw.wiphy,
+ sdata_err(sdata,
"failed to set key (%d, %pM) to hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
@@ -186,7 +186,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta ? &sta->sta : NULL, &key->conf);
if (ret)
- wiphy_err(key->local->hw.wiphy,
+ sdata_err(sdata,
"failed to remove key (%d, %pM) from hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
@@ -194,26 +194,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
}
-void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
-{
- struct ieee80211_key *key;
-
- key = container_of(key_conf, struct ieee80211_key, conf);
-
- might_sleep();
- assert_key_lock(key->local);
-
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- /*
- * Flush TX path to avoid attempts to use this key
- * after this function returns. Until then, drivers
- * must be prepared to handle the key.
- */
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(ieee80211_key_removed);
-
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
int idx, bool uni, bool multi)
{
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 1bf7903496f..bcffa690312 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -276,7 +276,7 @@ static void ieee80211_stop_tpt_led_trig(struct ieee80211_local *local)
read_lock(&tpt_trig->trig.leddev_list_lock);
list_for_each_entry(led_cdev, &tpt_trig->trig.led_cdevs, trig_list)
- led_brightness_set(led_cdev, LED_OFF);
+ led_set_brightness(led_cdev, LED_OFF);
read_unlock(&tpt_trig->trig.leddev_list_lock);
}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f5548e95325..c26e231c733 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -322,7 +322,8 @@ static void ieee80211_restart_work(struct work_struct *work)
mutex_lock(&local->mtx);
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- local->sched_scanning,
+ rcu_dereference_protected(local->sched_scan_sdata,
+ lockdep_is_held(&local->mtx)),
"%s called with hardware scan in progress\n", __func__);
mutex_unlock(&local->mtx);
@@ -345,6 +346,13 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+ /*
+ * Stop all Rx during the reconfig. We don't want state changes
+ * or driver callbacks while this is in progress.
+ */
+ local->in_reconfig = true;
+ barrier();
+
schedule_work(&local->restart_work);
}
EXPORT_SYMBOL(ieee80211_restart_hw);
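
The comment in the hunk above explains the intent: set local->in_reconfig before scheduling the restart work so that any RX path running afterwards sees the flag and drops frames. A minimal userspace sketch of that "flag, compiler barrier, then hand off" pattern, with invented names (rx_one_frame is not a mac80211 function):

#include <stdbool.h>
#include <stdio.h>

static volatile bool in_reconfig;

#define barrier() __asm__ __volatile__("" ::: "memory")

static void rx_one_frame(int seq)
{
	if (in_reconfig) {		/* fast path drops RX during reconfig */
		printf("frame %d dropped (reconfig)\n", seq);
		return;
	}
	printf("frame %d processed\n", seq);
}

int main(void)
{
	rx_one_frame(1);		/* processed */
	in_reconfig = true;		/* as in ieee80211_restart_hw() */
	barrier();			/* keep the store ahead of the hand-off */
	rx_one_frame(2);		/* dropped */
	return 0;
}
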
@@ -455,7 +463,9 @@ static const struct ieee80211_txrx_stypes
ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
.tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4),
},
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
@@ -578,7 +588,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
- BUG_ON(!ops->tx && !ops->tx_frags);
+ BUG_ON(!ops->tx);
BUG_ON(!ops->start);
BUG_ON(!ops->stop);
BUG_ON(!ops->config);
@@ -625,8 +635,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
- ieee80211_work_init(local);
-
INIT_WORK(&local->restart_work, ieee80211_restart_work);
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
@@ -669,7 +677,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
ieee80211_led_names(local);
- ieee80211_hw_roc_setup(local);
+ ieee80211_roc_setup(local);
return &local->hw;
}
@@ -681,7 +689,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int result, i;
enum ieee80211_band band;
int channels, max_bitrates;
- bool supp_ht;
+ bool supp_ht, supp_vht;
+ netdev_features_t feature_whitelist;
static const u32 cipher_suites[] = {
/* keep WEP first, it may be removed below */
WLAN_CIPHER_SUITE_WEP40,
@@ -698,16 +707,21 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.offchannel_tx_hw_queue >= local->hw.queues))
return -EINVAL;
- if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
#ifdef CONFIG_PM
- && (!local->ops->suspend || !local->ops->resume)
-#endif
- )
+ if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) &&
+ (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
+#endif
if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
return -EINVAL;
+ /* Only HW csum features are currently compatible with mac80211 */
+ feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM;
+ if (WARN_ON(hw->netdev_features & ~feature_whitelist))
+ return -EINVAL;
+
if (hw->max_report_rates == 0)
hw->max_report_rates = hw->max_rates;
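
The whitelist check added above is the usual bitmask validation idiom: a driver-supplied feature bitmap is acceptable only if every set bit is also in the whitelist, i.e. (features & ~whitelist) == 0. A small sketch with invented feature values:

#include <stdint.h>
#include <stdio.h>

#define F_IP_CSUM	(1u << 0)
#define F_IPV6_CSUM	(1u << 1)
#define F_HW_CSUM	(1u << 2)
#define F_TSO		(1u << 3)	/* not whitelisted */

int main(void)
{
	uint32_t whitelist = F_IP_CSUM | F_IPV6_CSUM | F_HW_CSUM;
	uint32_t ok  = F_IP_CSUM;
	uint32_t bad = F_IP_CSUM | F_TSO;

	/* any bit outside the whitelist makes the expression non-zero */
	printf("ok:  %s\n", (ok & ~whitelist) ? "rejected" : "accepted");
	printf("bad: %s\n", (bad & ~whitelist) ? "rejected" : "accepted");
	return 0;
}
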
@@ -719,6 +733,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
channels = 0;
max_bitrates = 0;
supp_ht = false;
+ supp_vht = false;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -736,6 +751,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
}
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -811,6 +827,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (supp_ht)
local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
+ if (supp_vht)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_capabilities);
+
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
local->hw.wiphy->max_scan_ssids = 4;
@@ -1009,12 +1029,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
rtnl_unlock();
- /*
- * Now all work items will be gone, but the
- * timer might still be armed, so delete it
- */
- del_timer_sync(&local->work_timer);
-
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2913113c583..6fac18c0423 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -133,7 +133,7 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
}
/**
- * mesh_accept_plinks_update: update accepting_plink in local mesh beacons
+ * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
*
* @sdata: mesh interface in which mesh beacons are going to be updated
*/
@@ -443,7 +443,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
{
- if (ifmsh->mshcfg.dot11MeshHWMPRootMode)
+ if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
else {
clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -523,11 +523,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
{
bool free_plinks;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: running mesh housekeeping\n",
- sdata->name);
-#endif
-
ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
mesh_path_expire(sdata);
@@ -542,11 +537,17 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u32 interval;
mesh_path_tx_root_frame(sdata);
+
+ if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN)
+ interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ else
+ interval = ifmsh->mshcfg.dot11MeshHWMProotInterval;
+
mod_timer(&ifmsh->mesh_path_root_timer,
- round_jiffies(TU_TO_EXP_TIME(
- ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
+ round_jiffies(TU_TO_EXP_TIME(interval)));
}
#ifdef CONFIG_PM
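
The rootpath hunk above rearms the timer with TU_TO_EXP_TIME(interval). One TU (IEEE 802.11 time unit) is 1024 microseconds, so this amounts to "now + interval * 1024 us" — assuming the mac80211 macro expands to jiffies + usecs_to_jiffies(tu * 1024). A worked example under that assumption (HZ and the 5000 TU interval are illustrative):

#include <stdio.h>

#define HZ 250					/* assumed tick rate */

static unsigned long usecs_to_jiffies(unsigned long us)
{
	return (us * HZ + 999999) / 1000000;	/* round up, like the kernel */
}

int main(void)
{
	unsigned long interval_tu = 5000;	/* e.g. a RANN interval in TU */
	unsigned long us = interval_tu * 1024;	/* 1 TU = 1024 us */

	printf("%lu TU = %lu us = %lu jiffies at HZ=%d\n",
	       interval_tu, us, usecs_to_jiffies(us), HZ);
	return 0;
}
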
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e3642756f8f..faaa39bcfd1 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -104,6 +104,7 @@ enum mesh_deferred_task_flags {
* an mpath to a hash bucket on a path table.
* @rann_snd_addr: the RANN sender address
* @rann_metric: the aggregated path metric towards the root node
+ * @last_preq_to_root: Timestamp of last PREQ sent to root
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
*
@@ -131,6 +132,7 @@ struct mesh_path {
spinlock_t state_lock;
u8 rann_snd_addr[ETH_ALEN];
u32 rann_metric;
+ unsigned long last_preq_to_root;
bool is_root;
bool is_gate;
};
@@ -245,7 +247,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *stainfo, struct sk_buff *skb);
+ struct sta_info *sta, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9b59658e865..494bc39f61a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -13,13 +13,6 @@
#include "wme.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) \
- printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
-#else
-#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
#define TEST_FRAME_LEN 8192
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
@@ -98,6 +91,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
+#define root_path_confirmation_jiffies(s) \
+ msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
enum mpath_frame_type {
MPATH_PREQ = 0,
@@ -142,19 +137,19 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
switch (action) {
case MPATH_PREQ:
- mhwmp_dbg("sending PREQ to %pM", target);
+ mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
- mhwmp_dbg("sending PREP to %pM", target);
+ mhwmp_dbg(sdata, "sending PREP to %pM\n", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
- mhwmp_dbg("sending RANN from %pM", orig_addr);
+ mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
@@ -303,7 +298,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
}
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *stainfo, struct sk_buff *skb)
+ struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -315,15 +310,14 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100 */
- stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
- if (stainfo->fail_avg > 95)
- mesh_plink_broken(stainfo);
+ sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
+ if (sta->fail_avg > 95)
+ mesh_plink_broken(sta);
}
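
The "moving average, scaled to 100" above is an integer EWMA: each sample pulls fail_avg 20% of the way toward 0 (ACKed) or 100 (failed), and once it exceeds 95 the peer link is declared broken. A standalone sketch showing how quickly persistent failures trip the threshold:

#include <stdio.h>

int main(void)
{
	unsigned int fail_avg = 0;

	for (int i = 1; i <= 20; i++) {
		int failed = 1;		/* every frame unACKed */
		fail_avg = (80 * fail_avg + 5) / 100 + 20 * failed;
		printf("sample %2d: fail_avg=%u%s\n", i, fail_avg,
		       fail_avg > 95 ? "  -> mesh_plink_broken()" : "");
	}
	return 0;
}

With all-failed samples the average climbs 20, 36, 48, ... and crosses 95 around the 17th sample, then saturates at 96 due to integer truncation.
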
static u32 airtime_link_metric_get(struct ieee80211_local *local,
struct sta_info *sta)
{
- struct ieee80211_supported_band *sband;
struct rate_info rinfo;
/* This should be adjusted for each device */
int device_constant = 1 << ARITH_SHIFT;
@@ -333,8 +327,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
u32 tx_time, estimated_retx;
u64 result;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
if (sta->fail_avg >= 100)
return MAX_METRIC;
@@ -519,10 +511,11 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath = NULL;
u8 *target_addr, *orig_addr;
const u8 *da;
- u8 target_flags, ttl;
- u32 orig_sn, target_sn, lifetime;
+ u8 target_flags, ttl, flags;
+ u32 orig_sn, target_sn, lifetime, orig_metric;
bool reply = false;
bool forward = true;
+ bool root_is_gate;
/* Update target SN, if present */
target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
@@ -530,11 +523,15 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
target_sn = PREQ_IE_TARGET_SN(preq_elem);
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
+ orig_metric = metric;
+ /* Proactive PREQ gate announcements */
+ flags = PREQ_IE_FLAGS(preq_elem);
+ root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
- mhwmp_dbg("received PREQ from %pM", orig_addr);
+ mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
if (ether_addr_equal(target_addr, sdata->vif.addr)) {
- mhwmp_dbg("PREQ is for us");
+ mhwmp_dbg(sdata, "PREQ is for us\n");
forward = false;
reply = true;
metric = 0;
@@ -544,6 +541,22 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
target_sn = ++ifmsh->sn;
ifmsh->last_sn_update = jiffies;
}
+ } else if (is_broadcast_ether_addr(target_addr) &&
+ (target_flags & IEEE80211_PREQ_TO_FLAG)) {
+ rcu_read_lock();
+ mpath = mesh_path_lookup(orig_addr, sdata);
+ if (mpath) {
+ if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+ reply = true;
+ target_addr = sdata->vif.addr;
+ target_sn = ++ifmsh->sn;
+ metric = 0;
+ ifmsh->last_sn_update = jiffies;
+ }
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+ }
+ rcu_read_unlock();
} else {
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
@@ -570,19 +583,20 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
- mhwmp_dbg("replying to the PREQ");
+ mhwmp_dbg(sdata, "replying to the PREQ\n");
mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
cpu_to_le32(orig_sn), 0, target_addr,
cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
- } else
+ } else {
ifmsh->mshstats.dropped_frames_ttl++;
+ }
}
if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
u32 preq_id;
- u8 hopcount, flags;
+ u8 hopcount;
ttl = PREQ_IE_TTL(preq_elem);
lifetime = PREQ_IE_LIFETIME(preq_elem);
@@ -590,13 +604,19 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
- mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
+ mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
--ttl;
- flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
da = (mpath && mpath->is_root) ?
mpath->rann_snd_addr : broadcast_addr;
+
+ if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+ target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
+ target_sn = PREQ_IE_TARGET_SN(preq_elem);
+ metric = orig_metric;
+ }
+
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
cpu_to_le32(orig_sn), target_flags, target_addr,
cpu_to_le32(target_sn), da,
@@ -631,7 +651,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
- mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
+ mhwmp_dbg(sdata, "received PREP from %pM\n",
+ PREP_IE_ORIG_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -744,11 +765,6 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
bool root_is_gate;
ttl = rann->rann_ttl;
- if (ttl <= 1) {
- ifmsh->mshstats.dropped_frames_ttl++;
- return;
- }
- ttl--;
flags = rann->rann_flags;
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
@@ -762,8 +778,9 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
- mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
- orig_addr, mgmt->sa, root_is_gate);
+ mhwmp_dbg(sdata,
+ "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
+ orig_addr, mgmt->sa, root_is_gate);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
@@ -785,34 +802,50 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
}
+ if (!(SN_LT(mpath->sn, orig_sn)) &&
+ !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+ rcu_read_unlock();
+ return;
+ }
+
if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
- time_after(jiffies, mpath->exp_time - 1*HZ)) &&
- !(mpath->flags & MESH_PATH_FIXED)) {
- mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
- orig_addr);
+ (time_after(jiffies, mpath->last_preq_to_root +
+ root_path_confirmation_jiffies(sdata)) ||
+ time_before(jiffies, mpath->last_preq_to_root))) &&
+ !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
+ mhwmp_dbg(sdata,
+ "time to refresh root mpath %pM\n",
+ orig_addr);
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ mpath->last_preq_to_root = jiffies;
}
- if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
- metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
+ mpath->sn = orig_sn;
+ mpath->rann_metric = metric + metric_txsta;
+ mpath->is_root = true;
+ /* Record the RANN sender's address to send individually
+ * addressed PREQs destined for the root mesh STA */
+ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
+
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+
+ if (ttl <= 1) {
+ ifmsh->mshstats.dropped_frames_ttl++;
+ rcu_read_unlock();
+ return;
+ }
+ ttl--;
+
+ if (ifmsh->mshcfg.dot11MeshForwarding) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
hopcount, ttl, cpu_to_le32(interval),
cpu_to_le32(metric + metric_txsta),
0, sdata);
- mpath->sn = orig_sn;
- mpath->rann_metric = metric + metric_txsta;
- /* Recording RANNs sender address to send individually
- * addressed PREQs destined for root mesh STA */
- memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
}
- mpath->is_root = true;
-
- if (root_is_gate)
- mesh_path_add_gate(mpath);
-
rcu_read_unlock();
}
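
The RANN freshness test added above leans on circular sequence-number comparison. A sketch, assuming mesh.h defines SN_LT(x, y) roughly as (s32)(x - y) < 0, which keeps ordering correct across u32 wraparound:

#include <stdint.h>
#include <stdio.h>

#define SN_LT(x, y) ((int32_t)((uint32_t)(x) - (uint32_t)(y)) < 0)

int main(void)
{
	/* plain case: 10 is older than 20 */
	printf("SN_LT(10, 20)        = %d\n", SN_LT(10, 20));
	/* wraparound: 0xfffffff0 is older than 5 */
	printf("SN_LT(0xfffffff0, 5) = %d\n", SN_LT(0xfffffff0u, 5));
	printf("SN_LT(5, 0xfffffff0) = %d\n", SN_LT(5, 0xfffffff0u));
	return 0;
}

Under this reading, the new check drops a RANN unless it carries a strictly newer SN, or the same SN with a better (lower) metric.
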
@@ -889,7 +922,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- mhwmp_dbg("could not allocate PREQ node");
+ mhwmp_dbg(sdata, "could not allocate PREQ node\n");
return;
}
@@ -898,7 +931,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- mhwmp_dbg("PREQ node queue full");
+ mhwmp_dbg(sdata, "PREQ node queue full\n");
return;
}
@@ -1021,12 +1054,15 @@ enddiscovery:
kfree(preq_node);
}
-/* mesh_nexthop_resolve - lookup next hop for given skb and start path
- * discovery if no forwarding information is found.
+/**
+ * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
+ * Lookup next hop for given skb and start path discovery if no
+ * forwarding information is found.
+ *
* Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
*/
@@ -1146,7 +1182,7 @@ void mesh_path_timer(unsigned long data)
if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
ret = mesh_path_send_to_gates(mpath);
if (ret)
- mhwmp_dbg("no gate was reachable");
+ mhwmp_dbg(sdata, "no gate was reachable\n");
} else
mesh_path_flush_pending(mpath);
}
@@ -1157,13 +1193,34 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
- u8 flags;
+ u8 flags, target_flags = 0;
flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
? RANN_FLAG_IS_GATE : 0;
- mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
+
+ switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
+ case IEEE80211_PROACTIVE_RANN:
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
- 0, sdata->u.mesh.mshcfg.element_ttl,
+ 0, ifmsh->mshcfg.element_ttl,
cpu_to_le32(interval), 0, 0, sdata);
+ break;
+ case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
+ flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+ case IEEE80211_PROACTIVE_PREQ_NO_PREP:
+ interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
+ target_flags |= IEEE80211_PREQ_TO_FLAG |
+ IEEE80211_PREQ_USN_FLAG;
+ mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
+ cpu_to_le32(++ifmsh->sn), target_flags,
+ (u8 *) broadcast_addr, 0, broadcast_addr,
+ 0, ifmsh->mshcfg.element_ttl,
+ cpu_to_le32(interval),
+ 0, cpu_to_le32(ifmsh->preq_id++), sdata);
+ break;
+ default:
+ mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
+ return;
+ }
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b39224d8255..075bc535c60 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,12 +18,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
-#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
@@ -322,9 +316,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
skb_queue_splice(&gateq, &gate_mpath->frame_queue);
- mpath_dbg("Mpath queue for gate %pM has %d frames\n",
- gate_mpath->dst,
- skb_queue_len(&gate_mpath->frame_queue));
+ mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
+ gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
if (!copy)
@@ -446,9 +439,9 @@ int mesh_path_add_gate(struct mesh_path *mpath)
hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
spin_unlock_bh(&tbl->gates_lock);
rcu_read_unlock();
- mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
- mpath->sdata->name, mpath->dst,
- mpath->sdata->u.mesh.num_gates);
+ mpath_dbg(mpath->sdata,
+ "Mesh path: Recorded new gate: %pM. %d known gates\n",
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
return 0;
err_rcu:
rcu_read_unlock();
@@ -477,8 +470,8 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
spin_unlock_bh(&tbl->gates_lock);
mpath->sdata->u.mesh.num_gates--;
mpath->is_gate = false;
- mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
- "%d known gates\n", mpath->sdata->name,
+ mpath_dbg(mpath->sdata,
+ "Mesh path: Deleted gate: %pM. %d known gates\n",
mpath->dst, mpath->sdata->u.mesh.num_gates);
break;
}
@@ -785,7 +778,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
- * @sta - mesh peer to match
+ * @sta: mesh peer to match
*
* RCU notes: this function is called when a mesh plink transitions from
* PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
@@ -840,7 +833,7 @@ static void table_flush_by_iface(struct mesh_table *tbl,
*
* This function deletes both mesh paths as well as mesh portal paths.
*
- * @sdata - interface data to match
+ * @sdata: interface data to match
*
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
@@ -946,19 +939,20 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
continue;
if (gate->mpath->flags & MESH_PATH_ACTIVE) {
- mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+ mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
from_mpath = gate->mpath;
copy = true;
} else {
- mpath_dbg("Not forwarding %p\n", gate->mpath);
- mpath_dbg("flags %x\n", gate->mpath->flags);
+ mpath_dbg(sdata,
+ "Not forwarding %p (flags %#x)\n",
+ gate->mpath, gate->mpath->flags);
}
}
hlist_for_each_entry_rcu(gate, n, known_gates, list)
if (gate->mpath->sdata == sdata) {
- mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+ mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
mesh_path_tx_pending(gate->mpath);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60ef235c9d9..af671b984df 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,12 +13,6 @@
#include "rate.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
-#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
#define PLINK_GET_LLID(p) (p + 2)
#define PLINK_GET_PLID(p) (p + 4)
@@ -105,7 +99,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-/*
+/**
* mesh_set_ht_prot_mode - set correct HT protection mode
*
* Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
@@ -134,12 +128,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
switch (sta->ch_type) {
case NL80211_CHAN_NO_HT:
- mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: nonHT sta (%pM) is present\n",
sdata->vif.addr, sta->sta.addr);
non_ht_sta = true;
goto out;
case NL80211_CHAN_HT20:
- mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: HT20 sta (%pM) is present\n",
sdata->vif.addr, sta->sta.addr);
ht20_sta = true;
default:
@@ -160,7 +156,8 @@ out:
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
changed = BSS_CHANGED_HT;
- mpl_dbg("mesh_plink %pM: protection mode changed to %d",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: protection mode changed to %d\n",
sdata->vif.addr, ht_opmode);
}
@@ -261,8 +258,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata))
@@ -323,7 +320,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
return 0;
}
-/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
+/**
+ * mesh_peer_init - initialize new mesh peer and return resulting sta_info
*
* @sdata: local meshif
* @addr: peer's address
@@ -437,7 +435,8 @@ static void mesh_plink_timer(unsigned long data)
spin_unlock_bh(&sta->lock);
return;
}
- mpl_dbg("Mesh plink timer for %pM fired on state %d\n",
+ mpl_dbg(sta->sdata,
+ "Mesh plink timer for %pM fired on state %d\n",
sta->sta.addr, sta->plink_state);
reason = 0;
llid = sta->llid;
@@ -450,7 +449,8 @@ static void mesh_plink_timer(unsigned long data)
/* retry timer */
if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
u32 rand;
- mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n",
+ mpl_dbg(sta->sdata,
+ "Mesh plink for %pM (retry, timeout): %d %d\n",
sta->sta.addr, sta->plink_retries,
sta->plink_timeout);
get_random_bytes(&rand, sizeof(u32));
@@ -530,7 +530,8 @@ int mesh_plink_open(struct sta_info *sta)
sta->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mpl_dbg("Mesh plink: starting establishment with %pM\n",
+ mpl_dbg(sdata,
+ "Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
@@ -565,7 +566,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
u8 *baseaddr;
u32 changed = 0;
__le16 plid, llid, reason;
-#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
static const char *mplstates[] = {
[NL80211_PLINK_LISTEN] = "LISTEN",
[NL80211_PLINK_OPN_SNT] = "OPN-SNT",
@@ -575,14 +575,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
[NL80211_PLINK_HOLDING] = "HOLDING",
[NL80211_PLINK_BLOCKED] = "BLOCKED"
};
-#endif
/* need action_code, aux */
if (len < IEEE80211_MIN_ACTION_SIZE + 3)
return;
if (is_multicast_ether_addr(mgmt->da)) {
- mpl_dbg("Mesh plink: ignore frame from multicast address");
+ mpl_dbg(sdata,
+ "Mesh plink: ignore frame from multicast address\n");
return;
}
@@ -595,12 +595,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
if (!elems.peering) {
- mpl_dbg("Mesh plink: missing necessary peer link ie\n");
+ mpl_dbg(sdata,
+ "Mesh plink: missing necessary peer link ie\n");
return;
}
if (elems.rsn_len &&
sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
- mpl_dbg("Mesh plink: can't establish link with secure peer\n");
+ mpl_dbg(sdata,
+ "Mesh plink: can't establish link with secure peer\n");
return;
}
@@ -610,14 +612,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
(ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
(ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
&& ie_len != 8)) {
- mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
- ftype, ie_len);
+ mpl_dbg(sdata,
+ "Mesh plink: incorrect plink ie length %d %d\n",
+ ftype, ie_len);
return;
}
if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
(!elems.mesh_id || !elems.mesh_config)) {
- mpl_dbg("Mesh plink: missing necessary ie\n");
+ mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
@@ -632,21 +635,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta = sta_info_get(sdata, mgmt->sa);
if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
- mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
+ mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
return;
}
if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
!rssi_threshold_check(sta, sdata)) {
- mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n",
+ mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n",
mgmt->sa);
rcu_read_unlock();
return;
}
if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
- mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
+ mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
return;
}
@@ -683,7 +686,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (!sta) {
/* ftype == WLAN_SP_MESH_PEERING_OPEN */
if (!mesh_plink_free_count(sdata)) {
- mpl_dbg("Mesh plink error: no more free plinks\n");
+ mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
rcu_read_unlock();
return;
}
@@ -724,7 +727,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
event = CLS_ACPT;
break;
default:
- mpl_dbg("Mesh plink: unknown frame subtype\n");
+ mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n");
rcu_read_unlock();
return;
}
@@ -734,13 +737,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* allocate sta entry if necessary and update info */
sta = mesh_peer_init(sdata, mgmt->sa, &elems);
if (!sta) {
- mpl_dbg("Mesh plink: failed to init peer!\n");
+ mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
rcu_read_unlock();
return;
}
}
- mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
+ mpl_dbg(sdata,
+ "Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
mgmt->sa, mplstates[sta->plink_state],
le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
event);
@@ -851,7 +855,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= BSS_CHANGED_BEACON;
- mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
break;
default:
@@ -887,7 +891,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= BSS_CHANGED_BEACON;
- mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
mesh_plink_frame_tx(sdata,
WLAN_SP_MESH_PEERING_CONFIRM,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 38d30e8ce6d..accfa00ffcd 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -12,13 +12,6 @@
#include "mesh.h"
#include "driver-ops.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
-#define msync_dbg(fmt, args...) \
- printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
-#else
-#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
/* This is not in the standard. It represents a tolerable tbtt drift below
* which we do no TSF adjustment.
*/
@@ -65,14 +58,14 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
- msync_dbg("TBTT : max clockdrift=%lld; adjusting",
- (long long) ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
+ (long long) ifmsh->sync_offset_clockdrift_max);
tsfdelta = -ifmsh->sync_offset_clockdrift_max;
ifmsh->sync_offset_clockdrift_max = 0;
} else {
- msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
- (long long) ifmsh->sync_offset_clockdrift_max,
- (unsigned long long) beacon_int_fraction);
+ msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
+ (long long) ifmsh->sync_offset_clockdrift_max,
+ (unsigned long long) beacon_int_fraction);
tsfdelta = -beacon_int_fraction;
ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
}
@@ -120,7 +113,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
- msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
+ msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr);
goto no_sync;
}
@@ -169,7 +162,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
s64 t_clockdrift = sta->t_offset_setpoint
- sta->t_offset;
- msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
+ msync_dbg(sdata,
+ "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
sta->sta.addr,
(long long) sta->t_offset,
(long long)
@@ -178,7 +172,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
- msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
+ msync_dbg(sdata,
+ "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
sta->sta.addr,
(long long) t_clockdrift);
clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
@@ -197,8 +192,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
} else {
sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
- msync_dbg("STA %pM : offset was invalid, "
- " sta->t_offset=%lld",
+ msync_dbg(sdata,
+ "STA %pM : offset was invalid, sta->t_offset=%lld\n",
sta->sta.addr,
(long long) sta->t_offset);
rcu_read_unlock();
@@ -226,17 +221,15 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
* to the driver tsf setter, we punt
* the tsf adjustment to the mesh tasklet
*/
- msync_dbg("TBTT : kicking off TBTT "
- "adjustment with "
- "clockdrift_max=%lld",
- ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata,
+ "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
+ ifmsh->sync_offset_clockdrift_max);
set_bit(MESH_WORK_DRIFT_ADJUST,
&ifmsh->wrkq_flags);
} else {
- msync_dbg("TBTT : max clockdrift=%lld; "
- "too small to adjust",
- (long long)
- ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata,
+ "TBTT : max clockdrift=%lld; too small to adjust\n",
+ (long long)ifmsh->sync_offset_clockdrift_max);
ifmsh->sync_offset_clockdrift_max = 0;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
@@ -268,7 +261,7 @@ static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
const u8 *oui;
WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
- msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
+ msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
oui = mesh_get_vendor_oui(sdata);
/* here you would implement the vendor offset tracking for this oui */
}
@@ -278,7 +271,7 @@ static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
const u8 *oui;
WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
- msync_dbg("called mesh_sync_vendor_adjust_tbtt");
+ msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
oui = mesh_get_vendor_oui(sdata);
/* here you would implement the vendor tsf adjustment for this oui */
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0db5d34a06b..cef0c9e79ab 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -258,12 +258,11 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
}
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, const u8 *ht_oper_ie,
+ struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps)
{
- struct ieee80211_ht_operation *ht_oper;
u8 *pos;
u32 flags = channel->flags;
u16 cap;
@@ -271,21 +270,13 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
- if (!ht_oper_ie)
- return;
-
- if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
- return;
-
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
- ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
-
/* determine capability flags */
cap = ht_cap.cap;
- switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -509,7 +500,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
- ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
+ ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
sband, local->oper_channel, ifmgd->ap_smps);
/* if present, add any custom non-vendor IEs that go after HT */
@@ -550,6 +541,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
memcpy(pos, assoc_data->ie + offset, noffset - offset);
}
+ drv_mgd_prepare_tx(local, sdata);
+
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
@@ -589,6 +582,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
IEEE80211_SKB_CB(skb)->flags |=
IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+ drv_mgd_prepare_tx(local, sdata);
+
ieee80211_tx_skb(sdata, skb);
}
}
@@ -911,9 +907,6 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
if (!mgd->associated)
return false;
- if (!mgd->associated->beacon_ies)
- return false;
-
if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL))
return false;
@@ -939,11 +932,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
return;
}
- if (!list_empty(&local->work_list)) {
- local->ps_sdata = NULL;
- goto change;
- }
-
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
@@ -1016,7 +1004,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
local->ps_sdata = NULL;
}
- change:
ieee80211_change_ps(local);
}
@@ -1121,7 +1108,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
}
/* MLME */
-static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
+static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u8 *wmm_param, size_t wmm_param_len)
{
@@ -1132,23 +1119,23 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
u8 *pos, uapsd_queues = 0;
if (!local->ops->conf_tx)
- return;
+ return false;
if (local->hw.queues < IEEE80211_NUM_ACS)
- return;
+ return false;
if (!wmm_param)
- return;
+ return false;
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
- return;
+ return false;
if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
uapsd_queues = ifmgd->uapsd_queues;
count = wmm_param[6] & 0x0f;
if (count == ifmgd->wmm_last_param_set)
- return;
+ return false;
ifmgd->wmm_last_param_set = count;
pos = wmm_param + 8;
@@ -1156,7 +1143,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
memset(&params, 0, sizeof(params));
- local->wmm_acm = 0;
+ sdata->wmm_acm = 0;
for (; left >= 4; left -= 4, pos += 4) {
int aci = (pos[0] >> 5) & 0x03;
int acm = (pos[0] >> 4) & 0x01;
@@ -1167,21 +1154,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
case 1: /* AC_BK */
queue = 3;
if (acm)
- local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
+ sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd = true;
break;
case 2: /* AC_VI */
queue = 1;
if (acm)
- local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
+ sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd = true;
break;
case 3: /* AC_VO */
queue = 0;
if (acm)
- local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
+ sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd = true;
break;
@@ -1189,7 +1176,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
default:
queue = 2;
if (acm)
- local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
+ sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd = true;
break;
@@ -1201,23 +1188,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.txop = get_unaligned_le16(pos + 2);
params.uapsd = uapsd;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "WMM queue=%d aci=%d acm=%d aifs=%d "
- "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
- queue, aci, acm,
- params.aifs, params.cw_min, params.cw_max,
- params.txop, params.uapsd);
-#endif
+ mlme_dbg(sdata,
+ "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
+ queue, aci, acm,
+ params.aifs, params.cw_min, params.cw_max,
+ params.txop, params.uapsd);
sdata->tx_conf[queue] = params;
if (drv_conf_tx(local, sdata, queue, &params))
- wiphy_debug(local->hw.wiphy,
- "failed to set TX queue parameters for queue %d\n",
- queue);
+ sdata_err(sdata,
+ "failed to set TX queue parameters for queue %d\n",
+ queue);
}
/* enable WMM or activate new settings */
sdata->vif.bss_conf.qos = true;
+ return true;
}
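
The WMM loop above decodes one 4-byte AC parameter record per iteration: ACI sits in bits 5-6 and ACM in bit 4 of the first byte, while the parameter-set count lives in the low nibble of the QoS info byte. A sketch of the same bit extraction with invented sample bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t qos_info = 0x83;			/* low nibble: count = 3 */
	uint8_t ac_rec[4] = { 0x51, 0xa4, 0x00, 0x00 };	/* one AC record */

	int count = qos_info & 0x0f;
	int aci = (ac_rec[0] >> 5) & 0x03;	/* 2 -> AC_VI */
	int acm = (ac_rec[0] >> 4) & 0x01;	/* 1 -> admission control required */
	int aifsn = ac_rec[0] & 0x0f;		/* low nibble: AIFSN */

	printf("param set count=%d aci=%d acm=%d aifsn=%d\n",
	       count, aci, acm, aifsn);
	return 0;
}

Skipping the update when the count matches wmm_last_param_set is what makes reprocessing identical beacons cheap.
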
static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
@@ -1284,13 +1269,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
bss_info_changed |= BSS_CHANGED_ASSOC;
- /* set timing information */
- bss_conf->beacon_int = cbss->beacon_interval;
- bss_conf->last_tsf = cbss->tsf;
-
- bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
- cbss->capability, bss->has_erp_value, bss->erp_value);
+ bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);
sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
@@ -1380,6 +1360,21 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
mutex_unlock(&local->sta_mtx);
+ /*
+ * if we want to get out of ps before disassoc (why?) we have
+ * to do it before sending disassoc, as otherwise the null-packet
+ * won't be valid.
+ */
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+ local->ps_sdata = NULL;
+
+ /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
+ if (tx)
+ drv_flush(local, false);
+
/* deauthenticate/disassociate now */
if (tx || frame_buf)
ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
@@ -1411,12 +1406,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
- local->ps_sdata = NULL;
-
/* Disable ARP filtering */
if (sdata->vif.bss_conf.arp_filter_enabled) {
sdata->vif.bss_conf.arp_filter_enabled = false;
@@ -1581,11 +1570,12 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
goto out;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (beacon)
- net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
- sdata->name);
-#endif
+ mlme_dbg_ratelimited(sdata,
+ "detected beacon loss from AP - sending probe request\n");
+
+ ieee80211_cqm_rssi_notify(&sdata->vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
/*
* The driver/our work has already reported this event or the
@@ -1627,6 +1617,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct cfg80211_bss *cbss;
struct sk_buff *skb;
const u8 *ssid;
int ssid_len;
@@ -1636,16 +1627,22 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
ASSERT_MGD_MTX(ifmgd);
- if (!ifmgd->associated)
+ if (ifmgd->associated)
+ cbss = ifmgd->associated;
+ else if (ifmgd->auth_data)
+ cbss = ifmgd->auth_data->bss;
+ else if (ifmgd->assoc_data)
+ cbss = ifmgd->assoc_data->bss;
+ else
return NULL;
- ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
if (WARN_ON_ONCE(ssid == NULL))
ssid_len = 0;
else
ssid_len = ssid[1];
- skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
+ skb = ieee80211_build_probe_req(sdata, cbss->bssid,
(u32) -1, ssid + 2, ssid_len,
NULL, 0, true);
@@ -1668,8 +1665,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
- sdata->name, bssid);
+ sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1765,6 +1761,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
if (!elems.challenge)
return;
auth_data->expected_transaction = 4;
+ drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_auth(sdata, 3, auth_data->algorithm,
elems.challenge - 2, elems.challenge_len + 2,
auth_data->bss->bssid, auth_data->bss->bssid,
@@ -1803,9 +1800,10 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
- sdata->name, mgmt->sa, status_code);
- goto out;
+ sdata_info(sdata, "%pM denied authentication (status %d)\n",
+ mgmt->sa, status_code);
+ ieee80211_destroy_auth_data(sdata, false);
+ return RX_MGMT_CFG80211_RX_AUTH;
}
switch (ifmgd->auth_data->algorithm) {
@@ -1826,8 +1824,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
}
- printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
- out:
+ sdata_info(sdata, "authenticated\n");
ifmgd->auth_data->done = true;
ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -1840,8 +1837,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
goto out_err;
}
if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
- printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
- sdata->name, bssid);
+ sdata_info(sdata, "failed moving %pM to auth\n", bssid);
goto out_err;
}
mutex_unlock(&sdata->local->sta_mtx);
@@ -1875,8 +1871,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
- printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
- sdata->name, bssid, reason_code);
+ sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
+ bssid, reason_code);
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
@@ -1906,8 +1902,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
- sdata->name, mgmt->sa, reason_code);
+ sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
+ mgmt->sa, reason_code);
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
@@ -1999,17 +1995,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
- printk(KERN_DEBUG
- "%s: invalid AID value 0x%x; bits 15:14 not set\n",
- sdata->name, aid);
+ sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
+ aid);
aid &= ~(BIT(15) | BIT(14));
ifmgd->broken_ap = false;
if (aid == 0 || aid > IEEE80211_MAX_AID) {
- printk(KERN_DEBUG
- "%s: invalid AID value %d (out of range), turn off PS\n",
- sdata->name, aid);
+ sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n",
+ aid);
aid = 0;
ifmgd->broken_ap = true;
}
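
For context on the AID handling above: in an AssocResp the 16-bit AID field carries the association ID in bits 0-13, with bits 15:14 required to be set; the code warns when they are not, masks them off, and range-checks the result. A sketch with an invented value:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define IEEE80211_MAX_AID 2007

int main(void)
{
	uint16_t aid = 0xc005;		/* bits 15:14 set, AID = 5 */

	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
		printf("invalid AID value 0x%x; bits 15:14 not set\n", aid);

	aid &= ~(BIT(15) | BIT(14));	/* keep only bits 0-13 */
	printf("AID = %u (%s)\n", aid,
	       (aid == 0 || aid > IEEE80211_MAX_AID) ? "broken AP" : "ok");
	return 0;
}
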
@@ -2018,8 +2012,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
if (!elems.supp_rates) {
- printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
- sdata->name);
+ sdata_info(sdata, "no SuppRates element in AssocResp\n");
return false;
}
@@ -2059,9 +2052,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
if (err) {
- printk(KERN_DEBUG
- "%s: failed to move station %pM to desired state\n",
- sdata->name, sta->sta.addr);
+ sdata_info(sdata,
+ "failed to move station %pM to desired state\n",
+ sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
mutex_unlock(&sdata->local->sta_mtx);
return false;
@@ -2144,10 +2137,10 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
- printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
- "status=%d aid=%d)\n",
- sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
- capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
+ sdata_info(sdata,
+ "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
+ reassoc ? "Rea" : "A", mgmt->sa,
+ capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -2158,9 +2151,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
u32 tu, ms;
tu = get_unaligned_le32(elems.timeout_int + 1);
ms = tu * 1024 / 1000;
- printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
- "comeback duration %u TU (%u ms)\n",
- sdata->name, mgmt->sa, tu, ms);
+ sdata_info(sdata,
+ "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
+ mgmt->sa, tu, ms);
assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
if (ms > IEEE80211_ASSOC_TIMEOUT)
run_again(ifmgd, assoc_data->timeout);
@@ -2170,8 +2163,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
*bss = assoc_data->bss;
if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
- sdata->name, mgmt->sa, status_code);
+ sdata_info(sdata, "%pM denied association (code=%d)\n",
+ mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
} else {
if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
@@ -2180,7 +2173,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
cfg80211_put_bss(*bss);
return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
}
- printk(KERN_DEBUG "%s: associated\n", sdata->name);
+ sdata_info(sdata, "associated\n");
/*
* destroy assoc_data afterwards, as otherwise an idle
@@ -2280,7 +2273,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
/* got probe response, continue with auth */
- printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+ sdata_info(sdata, "direct probe responded\n");
ifmgd->auth_data->tries = 0;
ifmgd->auth_data->timeout = jiffies;
run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -2416,10 +2409,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
- sdata->name);
-#endif
+ mlme_dbg_ratelimited(sdata,
+ "cancelling probereq poll due to a received beacon\n");
mutex_lock(&local->mtx);
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
ieee80211_run_deferred_scan(local);
@@ -2445,14 +2436,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
ifmgd->aid);
- if (ncrc != ifmgd->beacon_crc || !ifmgd->beacon_crc_valid) {
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
- true);
-
- ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len);
- }
-
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
if (directed_tim) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -2483,6 +2466,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->beacon_crc = ncrc;
ifmgd->beacon_crc_valid = true;
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
+ true);
+
+ if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+ elems.wmm_param_len))
+ changed |= BSS_CHANGED_QOS;
+
if (elems.erp_info && elems.erp_info_len >= 1) {
erp_valid = true;
erp_value = elems.erp_info[0];
@@ -2642,8 +2632,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
auth_data->tries++;
if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
- sdata->name, auth_data->bss->bssid);
+ sdata_info(sdata, "authentication with %pM timed out\n",
+ auth_data->bss->bssid);
/*
* Most likely AP is not in the range so remove the
@@ -2654,10 +2644,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
return -ETIMEDOUT;
}
+ drv_mgd_prepare_tx(local, sdata);
+
if (auth_data->bss->proberesp_ies) {
- printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
- sdata->name, auth_data->bss->bssid, auth_data->tries,
- IEEE80211_AUTH_MAX_TRIES);
+ sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
+ auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
auth_data->expected_transaction = 2;
ieee80211_send_auth(sdata, 1, auth_data->algorithm,
@@ -2667,9 +2659,9 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
} else {
const u8 *ssidie;
- printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
- sdata->name, auth_data->bss->bssid, auth_data->tries,
- IEEE80211_AUTH_MAX_TRIES);
+ sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
+ auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
if (!ssidie)
@@ -2697,8 +2689,8 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
assoc_data->tries++;
if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
- printk(KERN_DEBUG "%s: association with %pM timed out\n",
- sdata->name, assoc_data->bss->bssid);
+ sdata_info(sdata, "association with %pM timed out\n",
+ assoc_data->bss->bssid);
/*
* Most likely AP is not in the range so remove the
@@ -2709,9 +2701,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
return -ETIMEDOUT;
}
- printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
- sdata->name, assoc_data->bss->bssid, assoc_data->tries,
- IEEE80211_ASSOC_MAX_TRIES);
+ sdata_info(sdata, "associate with %pM (try %d/%d)\n",
+ assoc_data->bss->bssid, assoc_data->tries,
+ IEEE80211_ASSOC_MAX_TRIES);
ieee80211_send_assoc(sdata);
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@ -2784,45 +2776,31 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
ieee80211_reset_ap_probe(sdata);
else if (ifmgd->nullfunc_failed) {
if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No ack for nullfunc frame to"
- " AP %pM, try %d/%i\n",
- sdata->name, bssid,
- ifmgd->probe_send_count, max_tries);
-#endif
+ mlme_dbg(sdata,
+ "No ack for nullfunc frame to AP %pM, try %d/%i\n",
+ bssid, ifmgd->probe_send_count,
+ max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No ack for nullfunc frame to"
- " AP %pM, disconnecting.\n",
- sdata->name, bssid);
-#endif
+ mlme_dbg(sdata,
+ "No ack for nullfunc frame to AP %pM, disconnecting.\n",
+ bssid);
ieee80211_sta_connection_lost(sdata, bssid,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
}
} else if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(ifmgd, ifmgd->probe_timeout);
else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: Failed to send nullfunc to AP %pM"
- " after %dms, disconnecting.\n",
- sdata->name,
- bssid, probe_wait_ms);
-#endif
+ mlme_dbg(sdata,
+ "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
+ bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata, bssid,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
} else if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No probe response from AP %pM"
- " after %dms, try %d/%i\n",
- sdata->name,
- bssid, probe_wait_ms,
- ifmgd->probe_send_count, max_tries);
-#endif
+ mlme_dbg(sdata,
+ "No probe response from AP %pM after %dms, try %d/%i\n",
+ bssid, probe_wait_ms,
+ ifmgd->probe_send_count, max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
/*
@@ -2937,11 +2915,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(sdata->local->hw.wiphy,
- "%s: driver requested disconnect after resume.\n",
- sdata->name);
-#endif
+ mlme_dbg(sdata,
+ "driver requested disconnect after resume\n");
ieee80211_sta_connection_lost(sdata,
ifmgd->associated->bssid,
WLAN_REASON_UNSPECIFIED);
@@ -2999,7 +2974,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
/* scan finished notification */
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ struct ieee80211_sub_if_data *sdata;
/* Restart STA timers */
rcu_read_lock();
@@ -3029,7 +3004,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)cbss->priv;
- struct sta_info *sta;
+ struct sta_info *sta = NULL;
bool have_sta = false;
int err;
int ht_cfreq;
@@ -3082,13 +3057,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
* since we look at probe response/beacon data here
* it should be OK.
*/
- printk(KERN_DEBUG
- "%s: Wrong control channel: center-freq: %d"
- " ht-cfreq: %d ht->primary_chan: %d"
- " band: %d. Disabling HT.\n",
- sdata->name, cbss->channel->center_freq,
- ht_cfreq, ht_oper->primary_chan,
- cbss->channel->band);
+ sdata_info(sdata,
+ "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+ cbss->channel->center_freq,
+ ht_cfreq, ht_oper->primary_chan,
+ cbss->channel->band);
ht_oper = NULL;
}
}
@@ -3112,9 +3085,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
/* can only fail due to HT40+/- mismatch */
channel_type = NL80211_CHAN_HT20;
- printk(KERN_DEBUG
- "%s: disabling 40 MHz due to multi-vif mismatch\n",
- sdata->name);
+ sdata_info(sdata,
+ "disabling 40 MHz due to multi-vif mismatch\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
WARN_ON(!ieee80211_set_channel_type(local, sdata,
channel_type));
@@ -3123,7 +3095,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
local->oper_channel = cbss->channel;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (!have_sta) {
+ if (sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
@@ -3143,9 +3115,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
* we can connect -- with a warning.
*/
if (!basic_rates && min_rate_index >= 0) {
- printk(KERN_DEBUG
- "%s: No basic rates, using min rate instead.\n",
- sdata->name);
+ sdata_info(sdata,
+ "No basic rates, using min rate instead\n");
basic_rates = BIT(min_rate_index);
}
@@ -3161,9 +3132,15 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
- /* tell driver about BSSID and basic rates */
+ /* set timing information */
+ sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
+ sdata->vif.bss_conf.sync_tsf = cbss->tsf;
+ sdata->vif.bss_conf.sync_device_ts = bss->device_ts;
+
+ /* tell driver about BSSID, basic rates and timing */
ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES);
+ BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_BEACON_INT);
if (assoc)
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
@@ -3171,9 +3148,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
err = sta_info_insert(sta);
sta = NULL;
if (err) {
- printk(KERN_DEBUG
- "%s: failed to insert STA entry for the AP (error %d)\n",
- sdata->name, err);
+ sdata_info(sdata,
+ "failed to insert STA entry for the AP (error %d)\n",
+ err);
return err;
}
} else
@@ -3251,8 +3228,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated)
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- printk(KERN_DEBUG "%s: authenticate with %pM\n",
- sdata->name, req->bss->bssid);
+ sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
err = ieee80211_prep_connection(sdata, req->bss, false);
if (err)
@@ -3287,7 +3263,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)req->bss->priv;
struct ieee80211_mgd_assoc_data *assoc_data;
struct ieee80211_supported_band *sband;
- const u8 *ssidie;
+ const u8 *ssidie, *ht_ie;
int i, err;
ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
@@ -3335,11 +3311,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* We can set this to true for non-11n hardware, that'll be checked
* separately along with the peer capabilities.
*/
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ netdev_info(sdata->dev,
+ "disabling HT due to WEP/TKIP use\n");
+ }
+ }
if (req->flags & ASSOC_REQ_DISABLE_HT)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@ -3347,8 +3327,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ netdev_info(sdata->dev,
+ "disabling HT as WMM/QoS is not supported\n");
+ }
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
@@ -3374,8 +3357,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
(local->hw.queues >= IEEE80211_NUM_ACS);
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
- assoc_data->ht_operation_ie =
- ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+
+ ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+ if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
+ assoc_data->ap_ht_param =
+ ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
+ else
+ ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
if (bss->wmm_used && bss->uapsd_supported &&
(sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3422,8 +3410,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* Wait up to one beacon interval ...
* should this be more if we miss one?
*/
- printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
- sdata->name, ifmgd->bssid);
+ sdata_info(sdata, "waiting for beacon from %pM\n",
+ ifmgd->bssid);
assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
} else {
assoc_data->have_beacon = true;
@@ -3442,8 +3430,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
corrupt_type = "beacon";
} else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
corrupt_type = "probe response";
- printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
- sdata->name, corrupt_type);
+ sdata_info(sdata, "associating with AP with corrupt %s\n",
+ corrupt_type);
}
err = 0;
@@ -3472,9 +3460,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
return 0;
}
- printk(KERN_DEBUG
- "%s: deauthenticating from %pM by local choice (reason=%d)\n",
- sdata->name, req->bssid, req->reason_code);
+ sdata_info(sdata,
+ "deauthenticating from %pM by local choice (reason=%d)\n",
+ req->bssid, req->reason_code);
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid))
@@ -3516,8 +3504,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return -ENOLINK;
}
- printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
- sdata->name, req->bss->bssid, req->reason_code);
+ sdata_info(sdata,
+ "disassociating from %pM by local choice (reason=%d)\n",
+ req->bss->bssid, req->reason_code);
memcpy(bssid, req->bss->bssid, ETH_ALEN);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
@@ -3558,10 +3547,3 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
-
-unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- return sdata->dev->operstate;
-}
-EXPORT_SYMBOL(ieee80211_get_operstate);
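[Note: the assoc path above trusts ieee80211_bss_get_ie() to return a raw information element and then validates its length byte (ht_ie[1] >= sizeof(...)) before touching the payload. A minimal userspace sketch of that id/len/value walk follows; find_ie() and EID_HT_OPERATION are illustrative stand-ins, not the kernel helpers.]

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EID_HT_OPERATION 61	/* illustrative; mirrors WLAN_EID_HT_OPERATION */

/* Walk a buffer of 802.11 IEs laid out as [id][len][len bytes of data]. */
static const uint8_t *find_ie(uint8_t eid, const uint8_t *ies, size_t len)
{
	while (len >= 2 && len >= 2u + ies[1]) {
		if (ies[0] == eid)
			return ies;
		len -= 2u + ies[1];
		ies += 2u + ies[1];
	}
	return NULL;
}

int main(void)
{
	/* an SSID IE (id 0, len 4, "test") followed by a one-byte HT stub */
	const uint8_t ies[] = { 0, 4, 't', 'e', 's', 't',
				EID_HT_OPERATION, 1, 0x05 };
	const uint8_t *ht = find_ie(EID_HT_OPERATION, ies, sizeof(ies));

	/* as in the hunk above: check the length byte before reading data */
	if (ht && ht[1] >= 1)
		printf("ht_param=0x%02x\n", ht[2]);
	return 0;
}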
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 935aa4b6dee..635c3250c66 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -15,7 +15,7 @@
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "driver-ops.h"
/*
* Tell our hardware to disable PS.
@@ -24,8 +24,7 @@
* because we *may* be doing work on-operating channel, and want our
* hardware unconditionally awake, but still let the AP send us normal frames.
*/
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
- bool tell_ap)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -46,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (tell_ap && (!local->offchannel_ps_enabled ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
+ if (!local->offchannel_ps_enabled ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -132,7 +131,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
if (offchannel_ps_enable &&
(sdata->vif.type == NL80211_IFTYPE_STATION) &&
sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata, true);
+ ieee80211_offchannel_ps_enable(sdata);
}
}
mutex_unlock(&local->iflist_mtx);
@@ -181,34 +180,58 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
mutex_unlock(&local->iflist_mtx);
}
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
+{
+ if (roc->notified)
+ return;
+
+ if (roc->mgmt_tx_cookie) {
+ if (!WARN_ON(!roc->frame)) {
+ ieee80211_tx_skb(roc->sdata, roc->frame);
+ roc->frame = NULL;
+ }
+ } else {
+ cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc,
+ roc->chan, roc->chan_type,
+ roc->req_duration, GFP_KERNEL);
+ }
+
+ roc->notified = true;
+}
+
static void ieee80211_hw_roc_start(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_start);
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_roc_work *roc, *dep, *tmp;
mutex_lock(&local->mtx);
- if (!local->hw_roc_channel) {
- mutex_unlock(&local->mtx);
- return;
- }
+ if (list_empty(&local->roc_list))
+ goto out_unlock;
- if (local->hw_roc_skb) {
- sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
- ieee80211_tx_skb(sdata, local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- } else {
- cfg80211_ready_on_channel(local->hw_roc_dev,
- local->hw_roc_cookie,
- local->hw_roc_channel,
- local->hw_roc_channel_type,
- local->hw_roc_duration,
- GFP_KERNEL);
- }
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
+
+ if (!roc->started)
+ goto out_unlock;
- ieee80211_recalc_idle(local);
+ roc->hw_begun = true;
+ roc->hw_start_time = local->hw_roc_start_time;
+ ieee80211_handle_roc_started(roc);
+ list_for_each_entry_safe(dep, tmp, &roc->dependents, list) {
+ ieee80211_handle_roc_started(dep);
+
+ if (dep->duration > roc->duration) {
+ u32 dur = dep->duration;
+ dep->duration = dur - roc->duration;
+ roc->duration = dur;
+ list_del(&dep->list);
+ list_add(&dep->list, &roc->list);
+ }
+ }
+ out_unlock:
mutex_unlock(&local->mtx);
}
@@ -216,52 +239,181 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ local->hw_roc_start_time = jiffies;
+
trace_api_ready_on_channel(local);
ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
-static void ieee80211_hw_roc_done(struct work_struct *work)
+void ieee80211_start_next_roc(struct ieee80211_local *local)
{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, hw_roc_done);
+ struct ieee80211_roc_work *roc;
- mutex_lock(&local->mtx);
+ lockdep_assert_held(&local->mtx);
- if (!local->hw_roc_channel) {
- mutex_unlock(&local->mtx);
+ if (list_empty(&local->roc_list)) {
+ ieee80211_run_deferred_scan(local);
return;
}
- /* was never transmitted */
- if (local->hw_roc_skb) {
- u64 cookie;
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
- cookie = local->hw_roc_cookie ^ 2;
+ if (WARN_ON_ONCE(roc->started))
+ return;
+
+ if (local->ops->remain_on_channel) {
+ int ret, duration = roc->duration;
+
+ /* XXX: duplicated, see ieee80211_start_roc_work() */
+ if (!duration)
+ duration = 10;
+
+ ret = drv_remain_on_channel(local, roc->chan,
+ roc->chan_type,
+ duration);
+
+ roc->started = true;
+
+ if (ret) {
+ wiphy_warn(local->hw.wiphy,
+ "failed to start next HW ROC (%d)\n", ret);
+ /*
+ * queue the work struct again to avoid recursion
+ * when multiple failures occur
+ */
+ ieee80211_remain_on_channel_expired(&local->hw);
+ }
+ } else {
+ /* delay it a bit */
+ ieee80211_queue_delayed_work(&local->hw, &roc->work,
+ round_jiffies_relative(HZ/2));
+ }
+}
- cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
- local->hw_roc_skb->data,
- local->hw_roc_skb->len, false,
- GFP_KERNEL);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
+{
+ struct ieee80211_roc_work *dep, *tmp;
- kfree_skb(local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- local->hw_roc_skb_for_status = NULL;
+ /* was never transmitted */
+ if (roc->frame) {
+ cfg80211_mgmt_tx_status(&roc->sdata->wdev,
+ (unsigned long)roc->frame,
+ roc->frame->data, roc->frame->len,
+ false, GFP_KERNEL);
+ kfree_skb(roc->frame);
}
- if (!local->hw_roc_for_tx)
- cfg80211_remain_on_channel_expired(local->hw_roc_dev,
- local->hw_roc_cookie,
- local->hw_roc_channel,
- local->hw_roc_channel_type,
+ if (!roc->mgmt_tx_cookie)
+ cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
+ (unsigned long)roc,
+ roc->chan, roc->chan_type,
GFP_KERNEL);
- local->hw_roc_channel = NULL;
- local->hw_roc_cookie = 0;
+ list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
+ ieee80211_roc_notify_destroy(dep);
+
+ kfree(roc);
+}
+
+void ieee80211_sw_roc_work(struct work_struct *work)
+{
+ struct ieee80211_roc_work *roc =
+ container_of(work, struct ieee80211_roc_work, work.work);
+ struct ieee80211_sub_if_data *sdata = roc->sdata;
+ struct ieee80211_local *local = sdata->local;
+ bool started;
+
+ mutex_lock(&local->mtx);
+
+ if (roc->abort)
+ goto finish;
+
+ if (WARN_ON(list_empty(&local->roc_list)))
+ goto out_unlock;
+
+ if (WARN_ON(roc != list_first_entry(&local->roc_list,
+ struct ieee80211_roc_work,
+ list)))
+ goto out_unlock;
- ieee80211_recalc_idle(local);
+ if (!roc->started) {
+ struct ieee80211_roc_work *dep;
+ /* start this ROC */
+
+ /* switch channel etc */
+ ieee80211_recalc_idle(local);
+
+ local->tmp_channel = roc->chan;
+ local->tmp_channel_type = roc->chan_type;
+ ieee80211_hw_config(local, 0);
+
+ /* tell userspace or send frame */
+ ieee80211_handle_roc_started(roc);
+ list_for_each_entry(dep, &roc->dependents, list)
+ ieee80211_handle_roc_started(dep);
+
+ /* if it was pure TX, just finish right away */
+ if (!roc->duration)
+ goto finish;
+
+ roc->started = true;
+ ieee80211_queue_delayed_work(&local->hw, &roc->work,
+ msecs_to_jiffies(roc->duration));
+ } else {
+ /* finish this ROC */
+ finish:
+ list_del(&roc->list);
+ started = roc->started;
+ ieee80211_roc_notify_destroy(roc);
+
+ if (started) {
+ drv_flush(local, false);
+
+ local->tmp_channel = NULL;
+ ieee80211_hw_config(local, 0);
+
+ ieee80211_offchannel_return(local, true);
+ }
+
+ ieee80211_recalc_idle(local);
+
+ if (started)
+ ieee80211_start_next_roc(local);
+ }
+
+ out_unlock:
+ mutex_unlock(&local->mtx);
+}
+
+static void ieee80211_hw_roc_done(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+ struct ieee80211_roc_work *roc;
+
+ mutex_lock(&local->mtx);
+
+ if (list_empty(&local->roc_list))
+ goto out_unlock;
+
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
+
+ if (!roc->started)
+ goto out_unlock;
+
+ list_del(&roc->list);
+
+ ieee80211_roc_notify_destroy(roc);
+
+ /* if there's another roc, start it now */
+ ieee80211_start_next_roc(local);
+
+ out_unlock:
mutex_unlock(&local->mtx);
}
@@ -275,8 +427,47 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
-void ieee80211_hw_roc_setup(struct ieee80211_local *local)
+void ieee80211_roc_setup(struct ieee80211_local *local)
{
INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+ INIT_LIST_HEAD(&local->roc_list);
+}
+
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_roc_work *roc, *tmp;
+ LIST_HEAD(tmp_list);
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (roc->sdata != sdata)
+ continue;
+
+ if (roc->started && local->ops->remain_on_channel) {
+ /* can race, so ignore return value */
+ drv_cancel_remain_on_channel(local);
+ }
+
+ list_move_tail(&roc->list, &tmp_list);
+ roc->abort = true;
+ }
+
+ ieee80211_start_next_roc(local);
+ mutex_unlock(&local->mtx);
+
+ list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
+ if (local->ops->remain_on_channel) {
+ list_del(&roc->list);
+ ieee80211_roc_notify_destroy(roc);
+ } else {
+ ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+
+ /* work will clean up etc */
+ flush_delayed_work(&roc->work);
+ }
+ }
+
+ WARN_ON_ONCE(!list_empty(&tmp_list));
}
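[Note: the rework above replaces the single hw_roc_* slot with a FIFO of ieee80211_roc_work items; only the list head may be started, and destroying the head kicks off the next request. A toy model of that discipline, in plain userspace C with an ad-hoc singly linked list instead of the kernel's list_head, and with error handling omitted.]

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct roc_work {
	int duration_ms;
	bool started;
	struct roc_work *next;
};

static struct roc_work *roc_list;	/* FIFO head */

static void start_next_roc(void)
{
	if (!roc_list)
		return;		/* queue drained: a deferred scan could run now */
	roc_list->started = true;
	printf("on channel for %d ms\n", roc_list->duration_ms);
}

static void roc_done(void)
{
	struct roc_work *head = roc_list;

	if (!head || !head->started)
		return;
	roc_list = head->next;	/* pop the finished request... */
	free(head);
	start_next_roc();	/* ...and immediately start its successor */
}

static void roc_queue(int duration_ms)
{
	struct roc_work *roc = calloc(1, sizeof(*roc));
	struct roc_work **p = &roc_list;

	roc->duration_ms = duration_ms;
	while (*p)
		p = &(*p)->next;
	*p = roc;			/* append at the tail */
	if (roc_list == roc)
		start_next_roc();	/* queue was idle: start right away */
}

int main(void)
{
	roc_queue(50);
	roc_queue(200);
	roc_done();	/* finishes the 50 ms request, starts the 200 ms one */
	roc_done();
	return 0;
}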
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index af1c4e26e96..5c572e7a1a7 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -77,6 +77,17 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
int err = drv_suspend(local, wowlan);
if (err < 0) {
local->quiescing = false;
+ local->wowlan = false;
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta,
+ &local->sta_list, list) {
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ }
+ mutex_unlock(&local->sta_mtx);
+ }
+ ieee80211_wake_queues_by_reason(hw,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND);
return err;
} else if (err > 0) {
WARN_ON(err != 1);
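[Note: the pm.c hunk teaches the suspend path to undo its own preparation when drv_suspend() fails: aggregation blocking is lifted and the queues are woken again before the error is returned. The general unwind shape, sketched with stand-in flags rather than the real mac80211 state.]

#include <stdio.h>
#include <stdbool.h>

static bool quiescing, queues_stopped, ba_blocked;

static int drv_suspend(void)
{
	return -5;	/* pretend the driver rejected the suspend (-EIO) */
}

static int do_suspend(void)
{
	int err;

	/* preparation done before asking the driver */
	quiescing = true;
	ba_blocked = true;
	queues_stopped = true;

	err = drv_suspend();
	if (err < 0) {
		/* failure: roll back every bit of state set above */
		quiescing = false;
		ba_blocked = false;
		queues_stopped = false;
		return err;
	}
	return 0;
}

int main(void)
{
	int err = do_suspend();

	printf("suspend=%d queues_stopped=%d\n", err, queues_stopped);
	return 0;
}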
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index f9e51ef8dfa..fb1d4aa65e8 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -626,8 +626,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
- if (mp->fixed_rate_idx != -1)
- sample_idx = mp->fixed_rate_idx;
+ if (mp->fixed_rate_idx != -1) {
+ mi->max_tp_rate = mp->fixed_rate_idx;
+ mi->max_tp_rate2 = mp->fixed_rate_idx;
+ mi->max_prob_rate = mp->fixed_rate_idx;
+ sample_idx = -1;
+ }
#endif
if (sample_idx >= 0) {
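[Note: the minstrel_ht change is visible in full above. A debugfs-fixed rate index used to override only the sampling rate; it now pins all three selection slots, so the override holds on every transmission. Reduced to its essentials as a standalone sketch.]

#include <stdio.h>

struct minstrel_ht_state {
	int max_tp_rate;	/* primary (best-throughput) rate */
	int max_tp_rate2;	/* fallback rate */
	int max_prob_rate;	/* most-reliable rate */
};

/* Apply a debugfs-style fixed index; -1 means no override configured. */
static void apply_fixed_rate(struct minstrel_ht_state *mi, int fixed_idx,
			     int *sample_idx)
{
	if (fixed_idx == -1)
		return;
	mi->max_tp_rate = fixed_idx;
	mi->max_tp_rate2 = fixed_idx;
	mi->max_prob_rate = fixed_idx;
	*sample_idx = -1;	/* and suppress sampling altogether */
}

int main(void)
{
	struct minstrel_ht_state mi = { 0, 1, 2 };
	int sample_idx = 3;

	apply_fixed_rate(&mi, 7, &sample_idx);
	printf("rates=%d/%d/%d sample=%d\n",
	       mi.max_tp_rate, mi.max_tp_rate2, mi.max_prob_rate, sample_idx);
	return 0;
}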
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 965e6ec0adb..0cb4edee6af 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -94,7 +94,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
return len;
}
-/*
+/**
* ieee80211_add_rx_radiotap_header - add radiotap header
*
* add a radiotap header containing all the fields which the hardware provided.
@@ -413,29 +413,6 @@ static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
/* rx handlers */
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_local *local = rx->local;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- struct sk_buff *skb = rx->skb;
-
- if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !local->sched_scanning))
- return RX_CONTINUE;
-
- if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning) ||
- test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
- local->sched_scanning)
- return ieee80211_scan_rx(rx->sdata, skb);
-
- /* scanning finished during invoking of handlers */
- I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
- return RX_DROP_UNUSABLE;
-}
-
-
static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -554,11 +531,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
}
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
int index)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
struct ieee80211_rx_status *status;
@@ -578,7 +555,7 @@ no_frame:
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
u16 head_seq_num)
{
@@ -589,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
}
}
@@ -604,7 +581,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
*/
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
-static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx)
{
int index, j;
@@ -632,12 +609,9 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
HT_RX_REORDER_BUF_TIMEOUT))
goto set_release_timer;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- wiphy_debug(hw->wiphy,
- "release an RX reorder frame due to timeout on earlier frames\n");
-#endif
- ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
+ ht_dbg_ratelimited(sdata,
+ "release an RX reorder frame due to timeout on earlier frames\n");
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
/*
* Increment the head seq# also for the skipped slots.
@@ -647,7 +621,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
skipped = 0;
}
} else while (tid_agg_rx->reorder_buf[index]) {
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
}
@@ -677,7 +651,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
* rcu_read_lock protection. It returns false if the frame
* can be processed immediately, true if it was consumed.
*/
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb)
{
@@ -706,7 +680,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
+ ieee80211_release_reorder_frames(sdata, tid_agg_rx,
+ head_seq_num);
}
/* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +711,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
- ieee80211_sta_reorder_release(hw, tid_agg_rx);
+ ieee80211_sta_reorder_release(sdata, tid_agg_rx);
out:
spin_unlock(&tid_agg_rx->reorder_lock);
@@ -751,7 +726,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_local *local = rx->local;
- struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct sta_info *sta = rx->sta;
@@ -813,7 +787,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
* sure that we cannot get to it any more before doing
* anything with it.
*/
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
+ if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
return;
dont_reorder:
@@ -1136,24 +1110,18 @@ static void ap_sta_ps_start(struct sta_info *sta)
set_sta_flag(sta, WLAN_STA_PS_STA);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
- sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
+ sta->sta.addr, sta->sta.aid);
}
static void ap_sta_ps_end(struct sta_info *sta)
{
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
- sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
+ sta->sta.addr, sta->sta.aid);
if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
- sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
+ sta->sta.addr, sta->sta.aid);
return;
}
@@ -1383,19 +1351,8 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
sdata->fragment_next = 0;
- if (!skb_queue_empty(&entry->skb_list)) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) entry->skb_list.next->data;
- printk(KERN_DEBUG "%s: RX reassembly removed oldest "
- "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
- "addr1=%pM addr2=%pM\n",
- sdata->name, idx,
- jiffies - entry->first_frag_time, entry->seq,
- entry->last_frag, hdr->addr1, hdr->addr2);
-#endif
+ if (!skb_queue_empty(&entry->skb_list))
__skb_queue_purge(&entry->skb_list);
- }
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -1753,7 +1710,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
if (!xmit_skb)
- net_dbg_ratelimited("%s: failed to clone multicast frame\n",
+ net_info_ratelimited("%s: failed to clone multicast frame\n",
dev->name);
} else {
dsta = sta_info_get(sdata, skb->data);
@@ -1937,7 +1894,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
- q = ieee80211_select_queue_80211(local, skb, hdr);
+ q = ieee80211_select_queue_80211(sdata, skb, hdr);
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
@@ -1957,7 +1914,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
- net_dbg_ratelimited("%s: failed to clone mesh frame\n",
+ net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
goto out;
}
@@ -2060,8 +2017,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
{
- struct ieee80211_local *local = rx->local;
- struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb = rx->skb;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct tid_ampdu_rx *tid_agg_rx;
@@ -2098,7 +2053,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
spin_lock(&tid_agg_rx->reorder_lock);
/* release stored frames up to start of BAR */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+ ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
+ start_seq_num);
spin_unlock(&tid_agg_rx->reorder_lock);
kfree_skb(skb);
@@ -2425,7 +2381,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
sig = status->signal;
- if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
+ if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
rx->skb->data, rx->skb->len,
GFP_ATOMIC)) {
if (rx->sta)
@@ -2716,7 +2672,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
goto rxh_next; \
} while (0);
- CALL_RXH(ieee80211_rx_h_passive_scan)
CALL_RXH(ieee80211_rx_h_check)
ieee80211_rx_reorder_ampdu(rx);
@@ -2752,7 +2707,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
return;
spin_lock(&tid_agg_rx->reorder_lock);
- ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
+ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
spin_unlock(&tid_agg_rx->reorder_lock);
ieee80211_rx_handlers(&rx);
@@ -2786,11 +2741,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
if (ieee80211_is_beacon(hdr->frame_control)) {
return 1;
- }
- else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
- return 0;
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+ } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+ return 0;
} else if (!multicast &&
!ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
@@ -2828,11 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
* and location updates. Note that mac80211
* itself never looks at these frames.
*/
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- ieee80211_is_public_action(hdr, skb->len))
+ if (ieee80211_is_public_action(hdr, skb->len))
return 1;
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2898,7 +2848,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr;
@@ -2916,11 +2865,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
local->dot11ReceivedFragmentCount++;
- if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning)))
- status->rx_flags |= IEEE80211_RX_IN_SCAN;
-
if (ieee80211_is_mgmt(fc))
err = skb_linearize(skb);
else
@@ -2935,6 +2879,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ ieee80211_scan_rx(local, skb);
+
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
@@ -3032,6 +2980,10 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(local->quiescing || local->suspended))
goto drop;
+ /* We might be during a HW reconfig, prevent Rx for the same reason */
+ if (unlikely(local->in_reconfig))
+ goto drop;
+
/*
* The same happens when we're not even started,
* but that's worth a warning.
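[Note: ieee80211_release_reorder_frames() and friends above lean on seq_less()/seq_sub(), the modulo-4096 arithmetic that 802.11 sequence numbers require. A standalone rendering of those two helpers, with the same logic as the definitions near the seq_sub() context line above.]

#include <stdio.h>
#include <stdint.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

/* "less than" in the modular space: true if sq1 precedes sq2 */
static int seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

/* distance from sq2 to sq1, wrapping at 4096 */
static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
	printf("%d\n", seq_less(0xffe, 0x002));	/* 1: 0xffe precedes 0x002 after wrap */
	printf("%u\n", seq_sub(0x002, 0xffe));	/* 4: distance across the wrap */
	return 0;
}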
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 169da0742c8..bcaee5d1283 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -83,13 +83,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
mgmt, len, signal, GFP_ATOMIC);
-
if (!cbss)
return NULL;
cbss->free_priv = ieee80211_rx_bss_free;
bss = (void *)cbss->priv;
+ bss->device_ts = rx_status->device_timestamp;
+
if (elems->parse_error) {
if (beacon)
bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
@@ -114,8 +115,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
if (elems->tim && (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
- struct ieee80211_tim_ie *tim_ie =
- (struct ieee80211_tim_ie *)elems->tim;
+ struct ieee80211_tim_ie *tim_ie = elems->tim;
bss->dtim_period = tim_ie->dtim_period;
if (!elems->parse_error)
bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
@@ -165,52 +165,47 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
return bss;
}
-ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_mgmt *mgmt;
+ struct ieee80211_sub_if_data *sdata1, *sdata2;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
u8 *elements;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
- __le16 fc;
- bool presp, beacon = false;
+ bool beacon;
struct ieee802_11_elems elems;
- if (skb->len < 2)
- return RX_DROP_UNUSABLE;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = mgmt->frame_control;
+ if (skb->len < 24 ||
+ (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control)))
+ return;
- if (ieee80211_is_ctl(fc))
- return RX_CONTINUE;
+ sdata1 = rcu_dereference(local->scan_sdata);
+ sdata2 = rcu_dereference(local->sched_scan_sdata);
- if (skb->len < 24)
- return RX_CONTINUE;
+ if (likely(!sdata1 && !sdata2))
+ return;
- presp = ieee80211_is_probe_resp(fc);
- if (presp) {
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
/* ignore ProbeResp to foreign address */
- if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
- return RX_DROP_MONITOR;
+ if ((!sdata1 || !ether_addr_equal(mgmt->da, sdata1->vif.addr)) &&
+ (!sdata2 || !ether_addr_equal(mgmt->da, sdata2->vif.addr)))
+ return;
- presp = true;
elements = mgmt->u.probe_resp.variable;
baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ beacon = false;
} else {
- beacon = ieee80211_is_beacon(fc);
baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
elements = mgmt->u.beacon.variable;
+ beacon = true;
}
- if (!presp && !beacon)
- return RX_CONTINUE;
-
if (baselen > skb->len)
- return RX_DROP_MONITOR;
+ return;
ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
@@ -220,22 +215,16 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
else
freq = rx_status->freq;
- channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return RX_DROP_MONITOR;
+ return;
- bss = ieee80211_bss_info_update(sdata->local, rx_status,
+ bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len, &elems,
channel, beacon);
if (bss)
- ieee80211_rx_bss_put(sdata->local, bss);
-
- if (channel == sdata->local->oper_channel)
- return RX_CONTINUE;
-
- dev_kfree_skb(skb);
- return RX_QUEUED;
+ ieee80211_rx_bss_put(local, bss);
}
/* return false if no more work */
@@ -293,7 +282,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
return;
if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
- int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req);
+ int rc;
+
+ rc = drv_hw_scan(local,
+ rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx)),
+ local->hw_scan_req);
+
if (rc == 0)
return;
}
@@ -323,7 +318,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
ieee80211_mesh_notify_scan_completed(local);
- ieee80211_queue_work(&local->hw, &local->work_work);
+ ieee80211_start_next_roc(local);
}
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
@@ -376,7 +371,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- if (!list_empty(&local->work_list))
+ if (!list_empty(&local->roc_list))
return false;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -394,7 +389,10 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
if (!local->scan_req || local->scanning)
return;
- if (!ieee80211_can_scan(local, local->scan_sdata))
+ if (!ieee80211_can_scan(local,
+ rcu_dereference_protected(
+ local->scan_sdata,
+ lockdep_is_held(&local->mtx))))
return;
ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
@@ -405,9 +403,12 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
unsigned long *next_delay)
{
int i;
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ struct ieee80211_sub_if_data *sdata;
enum ieee80211_band band = local->hw.conf.channel->band;
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
+
for (i = 0; i < local->scan_req->n_ssids; i++)
ieee80211_send_probe_req(
sdata, NULL,
@@ -439,7 +440,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_can_scan(local, sdata)) {
/* wait for the work to finish/time out */
local->scan_req = req;
- local->scan_sdata = sdata;
+ rcu_assign_pointer(local->scan_sdata, sdata);
return 0;
}
@@ -473,7 +474,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
}
local->scan_req = req;
- local->scan_sdata = sdata;
+ rcu_assign_pointer(local->scan_sdata, sdata);
if (local->ops->hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
@@ -533,7 +534,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
}
return rc;
@@ -720,7 +721,8 @@ void ieee80211_scan_work(struct work_struct *work)
mutex_lock(&local->mtx);
- sdata = local->scan_sdata;
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
/* When scanning on-channel, the first-callback means completed. */
if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
@@ -741,7 +743,7 @@ void ieee80211_scan_work(struct work_struct *work)
int rc;
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
rc = __ieee80211_start_scan(sdata, req);
if (rc) {
@@ -893,7 +895,9 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
if (local->ops->cancel_hw_scan)
- drv_cancel_hw_scan(local, local->scan_sdata);
+ drv_cancel_hw_scan(local,
+ rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx)));
goto out;
}
@@ -915,9 +919,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
int ret, i;
- mutex_lock(&sdata->local->mtx);
+ mutex_lock(&local->mtx);
- if (local->sched_scanning) {
+ if (rcu_access_pointer(local->sched_scan_sdata)) {
ret = -EBUSY;
goto out;
}
@@ -928,6 +932,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
}
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ if (!local->hw.wiphy->bands[i])
+ continue;
+
local->sched_scan_ies.ie[i] = kzalloc(2 +
IEEE80211_MAX_SSID_LEN +
local->scan_ies_len +
@@ -948,7 +955,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ret = drv_sched_scan_start(local, sdata, req,
&local->sched_scan_ies);
if (ret == 0) {
- local->sched_scanning = true;
+ rcu_assign_pointer(local->sched_scan_sdata, sdata);
goto out;
}
@@ -956,7 +963,7 @@ out_free:
while (i > 0)
kfree(local->sched_scan_ies.ie[--i]);
out:
- mutex_unlock(&sdata->local->mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -965,22 +972,22 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
int ret = 0, i;
- mutex_lock(&sdata->local->mtx);
+ mutex_lock(&local->mtx);
if (!local->ops->sched_scan_stop) {
ret = -ENOTSUPP;
goto out;
}
- if (local->sched_scanning) {
+ if (rcu_access_pointer(local->sched_scan_sdata)) {
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
kfree(local->sched_scan_ies.ie[i]);
drv_sched_scan_stop(local, sdata);
- local->sched_scanning = false;
+ rcu_assign_pointer(local->sched_scan_sdata, NULL);
}
out:
- mutex_unlock(&sdata->local->mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -1004,7 +1011,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
mutex_lock(&local->mtx);
- if (!local->sched_scanning) {
+ if (!rcu_access_pointer(local->sched_scan_sdata)) {
mutex_unlock(&local->mtx);
return;
}
@@ -1012,7 +1019,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
kfree(local->sched_scan_ies.ie[i]);
- local->sched_scanning = false;
+ rcu_assign_pointer(local->sched_scan_sdata, NULL);
mutex_unlock(&local->mtx);
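[Note: throughout the scan.c hunks, local->scan_sdata and local->sched_scan_sdata become RCU-managed pointers: writers publish with rcu_assign_pointer() under local->mtx, lock-holders read with rcu_dereference_protected(), and paths that only care whether a scan is pending use rcu_access_pointer(). A rough userspace model of that split, using C11 atomics in place of RCU; the grace-period machinery of real RCU is deliberately glossed over.]

#include <stdatomic.h>
#include <stdio.h>

struct sdata { const char *name; };

static _Atomic(struct sdata *) scan_sdata;

/* ~ rcu_assign_pointer(): publish with release semantics */
static void set_scan_sdata(struct sdata *s)
{
	atomic_store_explicit(&scan_sdata, s, memory_order_release);
}

/* ~ rcu_access_pointer(): value only used for a NULL test */
static struct sdata *peek_scan_sdata(void)
{
	return atomic_load_explicit(&scan_sdata, memory_order_relaxed);
}

int main(void)
{
	struct sdata wlan0 = { "wlan0" };

	set_scan_sdata(&wlan0);
	printf("scan pending: %d\n", peek_scan_sdata() != NULL);
	set_scan_sdata(NULL);
	printf("scan pending: %d\n", peek_scan_sdata() != NULL);
	return 0;
}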
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index de455f8bbb9..06fa75ceb02 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -169,9 +169,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
if (sta->rate_ctrl)
rate_control_free_sta(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
kfree(sta);
}
@@ -278,9 +276,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
#ifdef CONFIG_MAC80211_MESH
sta->plink_state = NL80211_PLINK_LISTEN;
@@ -333,9 +329,9 @@ static int sta_info_insert_drv_state(struct ieee80211_local *local,
}
if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- printk(KERN_DEBUG
- "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n",
- sdata->name, sta->sta.addr, state + 1, err);
+ sdata_info(sdata,
+ "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
+ sta->sta.addr, state + 1, err);
err = 0;
}
@@ -390,9 +386,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
sinfo.generation = local->sta_generation;
cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
/* move reference to rcu-protected */
rcu_read_lock();
@@ -618,10 +612,8 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
break;
local->total_ps_buffered--;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
+ ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
sta->sta.addr);
-#endif
dev_kfree_skb(skb);
}
@@ -747,9 +739,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
mesh_accept_plinks_update(sdata);
#endif
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
+
cancel_work_sync(&sta->drv_unblock_wk);
cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
@@ -889,10 +880,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
continue;
if (time_after(jiffies, sta->last_rx + exp_time)) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
- sdata->name, sta->sta.addr);
-#endif
+ ibss_dbg(sdata, "expiring inactive STA %pM\n",
+ sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
}
}
@@ -990,11 +979,9 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
sta_info_recalc_tim(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
- "since STA not sleeping anymore\n", sdata->name,
+ ps_dbg(sdata,
+ "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
sta->sta.addr, sta->sta.aid, filtered, buffered);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1384,10 +1371,8 @@ int sta_info_move_state(struct sta_info *sta,
return -EINVAL;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
- sta->sdata->name, sta->sta.addr, new_state);
-#endif
+ sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
+ sta->sta.addr, new_state);
/*
* notify the driver before the actual changes so it can
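[Note: ieee80211_sta_expire() above keeps its time_after(jiffies, sta->last_rx + exp_time) test. That helper is wraparound-safe because it compares in signed arithmetic rather than with a plain >. A self-contained illustration, with time_after() reimplemented here the way the kernel defines it.]

#include <stdio.h>

typedef unsigned long jiffies_t;

/* true if a is after b, even when the tick counter has wrapped */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = 5;			/* counter wrapped recently */
	jiffies_t last_rx = (jiffies_t)-10;	/* just before the wrap */
	jiffies_t exp_time = 3;

	/* last_rx + exp_time wraps to a huge value, yet the STA still expires */
	printf("expire: %d\n", time_after(now, last_rx + exp_time));
	return 0;
}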
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 28cfa981cfb..8cd72914cda 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -155,13 +155,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
return;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit())
- wiphy_debug(local->hw.wiphy,
- "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
- skb_queue_len(&sta->tx_filtered[ac]),
- !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
-#endif
+ ps_dbg_ratelimited(sta->sdata,
+ "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
dev_kfree_skb(skb);
}
@@ -520,36 +517,21 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = (unsigned long)skb;
+ acked = info->flags & IEEE80211_TX_STAT_ACK;
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- acked = info->flags & IEEE80211_TX_STAT_ACK;
+ /*
+ * TODO: When we have non-netdev frame TX,
+ * we cannot use skb->dev->ieee80211_ptr
+ */
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
cfg80211_probe_status(skb->dev, hdr->addr1,
cookie, acked, GFP_ATOMIC);
- } else {
- struct ieee80211_work *wk;
-
- rcu_read_lock();
- list_for_each_entry_rcu(wk, &local->work_list, list) {
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
- if (wk->offchan_tx.frame != skb)
- continue;
- wk->offchan_tx.status = true;
- break;
- }
- rcu_read_unlock();
- if (local->hw_roc_skb_for_status == skb) {
- cookie = local->hw_roc_cookie ^ 2;
- local->hw_roc_skb_for_status = NULL;
- }
-
+ else
cfg80211_mgmt_tx_status(
- skb->dev, cookie, skb->data, skb->len,
- !!(info->flags & IEEE80211_TX_STAT_ACK),
- GFP_ATOMIC);
- }
+ skb->dev->ieee80211_ptr, cookie, skb->data,
+ skb->len, acked, GFP_ATOMIC);
}
if (unlikely(info->ack_frame_id)) {
@@ -589,7 +571,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* send frame to monitor interfaces now */
rtap_len = ieee80211_tx_radiotap_len(info);
if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
- printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
+ pr_err("ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
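[Note: the status.c hunk keeps the existing scheme where u64 cookie = (unsigned long)skb: the frame's own skb address is the opaque cookie userspace received at TX time, so reporting status needs no lookup table. Sketched with stand-in types; sk_buff here is a placeholder struct, not the kernel's.]

#include <stdint.h>
#include <stdio.h>

struct sk_buff { int len; };	/* placeholder, not the kernel struct */

/* derive the status cookie as the hunk does: from the pointer value */
static uint64_t tx_cookie(const struct sk_buff *skb)
{
	return (uint64_t)(uintptr_t)skb;
}

int main(void)
{
	struct sk_buff frame = { .len = 42 };

	printf("cookie=0x%llx\n", (unsigned long long)tx_cookie(&frame));
	return 0;
}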
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 51077a956a8..57e14d59e12 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -260,17 +260,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
keyid = pos[3];
iv32 = get_unaligned_le32(pos + 4);
pos += 8;
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
- for (i = 0; i < payload_len; i++)
- printk(" %02x", payload[i]);
- printk("\n");
- printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
- iv16, iv32);
- }
-#endif
if (!(keyid & (1 << 5)))
return TKIP_DECRYPT_NO_EXT_IV;
@@ -281,16 +270,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
(iv32 < key->u.tkip.rx[queue].iv32 ||
(iv32 == key->u.tkip.rx[queue].iv32 &&
- iv16 <= key->u.tkip.rx[queue].iv16))) {
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- printk(KERN_DEBUG "TKIP replay detected for RX frame from "
- "%pM (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
- ta,
- iv32, iv16, key->u.tkip.rx[queue].iv32,
- key->u.tkip.rx[queue].iv16);
-#endif
+ iv16 <= key->u.tkip.rx[queue].iv16)))
return TKIP_DECRYPT_REPLAY;
- }
if (only_iv) {
res = TKIP_DECRYPT_OK;
@@ -302,22 +283,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
key->u.tkip.rx[queue].iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
- printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%pM"
- " TK=", ta);
- for (i = 0; i < 16; i++)
- printk("%02x ",
- key->conf.key[key_offset + i]);
- printk("\n");
- printk(KERN_DEBUG "TKIP decrypt: P1K=");
- for (i = 0; i < 5; i++)
- printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
- printk("\n");
- }
-#endif
}
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
@@ -333,15 +298,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
}
tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
- for (i = 0; i < 16; i++)
- printk("%02x ", rc4key[i]);
- printk("\n");
- }
-#endif
res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
done:
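[Note: with the hexdump blocks gone, the replay check in ieee80211_tkip_decrypt_data() is easier to read: a frame is a replay unless its 48-bit TSC, split as (iv32, iv16), is strictly greater than the last accepted value. The comparison on its own.]

#include <stdint.h>
#include <stdio.h>

/* lexicographic compare of the (iv32, iv16) TKIP sequence counter */
static int tkip_is_replay(uint32_t iv32, uint16_t iv16,
			  uint32_t last32, uint16_t last16)
{
	return iv32 < last32 || (iv32 == last32 && iv16 <= last16);
}

int main(void)
{
	printf("%d\n", tkip_is_replay(10, 5, 10, 5));	/* 1: reused counter */
	printf("%d\n", tkip_is_replay(10, 6, 10, 5));	/* 0: fresh frame */
	printf("%d\n", tkip_is_replay(11, 0, 10, 5));	/* 0: iv32 advanced */
	return 0;
}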
diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c
new file mode 100644
index 00000000000..386e45d8a95
--- /dev/null
+++ b/net/mac80211/trace.c
@@ -0,0 +1,75 @@
+/* bug in tracepoint.h, it should include this */
+#include <linux/module.h>
+
+/* sparse isn't too happy with all macros... */
+#ifndef __CHECKER__
+#include <net/cfg80211.h>
+#include "driver-ops.h"
+#include "debug.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+void __sdata_info(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ pr_info("%pV", &vaf);
+ trace_mac80211_info(&vaf);
+ va_end(args);
+}
+
+void __sdata_dbg(bool print, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (print)
+ pr_debug("%pV", &vaf);
+ trace_mac80211_dbg(&vaf);
+ va_end(args);
+}
+
+void __sdata_err(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ pr_err("%pV", &vaf);
+ trace_mac80211_err(&vaf);
+ va_end(args);
+}
+
+void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (print)
+ wiphy_dbg(wiphy, "%pV", &vaf);
+ trace_mac80211_dbg(&vaf);
+ va_end(args);
+}
+#endif
+#endif
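[Note: the helpers added in trace.c format a message once through a va_format and feed it to two sinks: the console (pr_info()/pr_debug()/wiphy_dbg()) and a tracepoint. A userspace approximation, with fprintf() standing in for both sinks and the same 100-byte cap the tracepoint's dynamic array uses.]

#include <stdarg.h>
#include <stdio.h>

#define MAX_MSG_LEN 100

static void trace_sink(const char *msg)	/* stand-in for the tracepoint */
{
	fprintf(stderr, "[trace] %s", msg);
}

static void sdata_dbg(int print, const char *fmt, ...)
{
	char buf[MAX_MSG_LEN];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (print)
		printf("%s", buf);	/* console sink, optional */
	trace_sink(buf);		/* trace sink, unconditional */
}

int main(void)
{
	sdata_dbg(1, "wlan0: associated with %s\n", "00:11:22:33:44:55");
	return 0;
}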
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/trace.h
index 6de00b2c268..c6d33b55b2d 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/trace.h
@@ -306,7 +306,8 @@ TRACE_EVENT(drv_bss_info_changed,
__field(u8, dtimper)
__field(u16, bcnint)
__field(u16, assoc_cap)
- __field(u64, timestamp)
+ __field(u64, sync_tsf)
+ __field(u32, sync_device_ts)
__field(u32, basic_rates)
__field(u32, changed)
__field(bool, enable_beacon)
@@ -325,7 +326,8 @@ TRACE_EVENT(drv_bss_info_changed,
__entry->dtimper = info->dtim_period;
__entry->bcnint = info->beacon_int;
__entry->assoc_cap = info->assoc_capability;
- __entry->timestamp = info->last_tsf;
+ __entry->sync_tsf = info->sync_tsf;
+ __entry->sync_device_ts = info->sync_device_ts;
__entry->basic_rates = info->basic_rates;
__entry->enable_beacon = info->enable_beacon;
__entry->ht_operation_mode = info->ht_operation_mode;
@@ -1218,6 +1220,39 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
TP_ARGS(local, sta, tids, num_frames, reason, more_data)
);
+TRACE_EVENT(drv_get_rssi,
+ TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta,
+ s8 rssi, int ret),
+
+ TP_ARGS(local, sta, rssi, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(s8, rssi)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->rssi = rssi;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " rssi:%d ret:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->rssi, __entry->ret
+ )
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(local, sdata)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1606,10 +1641,49 @@ TRACE_EVENT(stop_queue,
LOCAL_PR_ARG, __entry->queue, __entry->reason
)
);
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mac80211_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(mac80211_msg_event,
+ TP_PROTO(struct va_format *vaf),
+
+ TP_ARGS(vaf),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(mac80211_msg_event, mac80211_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+#endif
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE driver-trace
+#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
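
Renaming driver-trace.h to trace.h only works because define_trace.h re-includes the header by the name given in TRACE_INCLUDE_FILE, which the hunk above updates to match. The skeleton every such self-including tracepoint header follows (subsystem name "foo" is illustrative):

/* skeleton of the self-including tracepoint header convention */
#if !defined(__FOO_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __FOO_TRACE_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

TRACE_EVENT(foo_event,
	TP_PROTO(int val),
	TP_ARGS(val),
	TP_STRUCT__entry(__field(int, val)),
	TP_fast_assign(__entry->val = val;),
	TP_printk("val=%d", __entry->val)
);

#endif /* !__FOO_TRACE_H || TRACE_HEADER_MULTI_READ */

/* must name this very file so define_trace.h can re-include it */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>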
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e453212fa17..acf712ffb5e 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -140,6 +140,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (r->flags & IEEE80211_RATE_MANDATORY_A)
mrate = r->bitrate;
break;
+ case IEEE80211_BAND_60GHZ:
+ /* TODO, for now fall through */
case IEEE80211_NUM_BANDS:
WARN_ON(1);
break;
@@ -175,12 +177,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
return cpu_to_le16(dur);
}
-static inline int is_ieee80211_device(struct ieee80211_local *local,
- struct net_device *dev)
-{
- return local == wdev_priv(dev->ieee80211_ptr);
-}
-
/* tx handlers */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
@@ -297,10 +293,10 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(!assoc &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: dropped data frame to not "
- "associated station %pM\n",
- tx->sdata->name, hdr->addr1);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sdata_info(tx->sdata,
+ "dropped data frame to not associated station %pM\n",
+ hdr->addr1);
+#endif
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
}
@@ -367,10 +363,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
rcu_read_unlock();
local->total_ps_buffered = total;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
- purged);
-#endif
+ ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}
static ieee80211_tx_result
@@ -412,10 +405,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
- tx->sdata->name);
-#endif
+ ps_dbg(tx->sdata,
+ "BC TX buffer full - dropping the oldest frame\n");
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
tx->local->total_ps_buffered++;
@@ -466,18 +457,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
+ ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr, ac);
-#endif
+ ps_dbg(tx->sdata,
+ "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ sta->sta.addr, ac);
dev_kfree_skb(old);
} else
tx->local->total_ps_buffered++;
@@ -499,14 +487,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
sta_info_recalc_tim(sta);
return TX_QUEUED;
+ } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
+ ps_dbg(tx->sdata,
+ "STA %pM in PS mode, but polling/in SP -> send frame\n",
+ sta->sta.addr);
}
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
- printk(KERN_DEBUG
- "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
- tx->sdata->name, sta->sta.addr);
- }
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
return TX_CONTINUE;
}
@@ -538,7 +523,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
- struct ieee80211_key *key = NULL;
+ struct ieee80211_key *key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
@@ -557,16 +542,23 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
- else if (tx->sdata->drop_unencrypted &&
- (tx->skb->protocol != tx->sdata->control_port_protocol) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!ieee80211_is_robust_mgmt_frame(hdr) ||
- (ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
+ else if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ tx->key = NULL;
+ else if (!tx->sdata->drop_unencrypted)
+ tx->key = NULL;
+ else if (tx->skb->protocol == tx->sdata->control_port_protocol)
+ tx->key = NULL;
+ else if (ieee80211_is_robust_mgmt_frame(hdr) &&
+ !(ieee80211_is_action(hdr->frame_control) &&
+ tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
+ tx->key = NULL;
+ else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_robust_mgmt_frame(hdr))
+ tx->key = NULL;
+ else {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
- } else
- tx->key = NULL;
+ }
if (tx->key) {
bool skip_hw = false;
@@ -974,8 +966,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
info->control.rates[1].idx = -1;
info->control.rates[2].idx = -1;
info->control.rates[3].idx = -1;
- info->control.rates[4].idx = -1;
- BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
hdr->frame_control &= ~morefrags;
@@ -1310,11 +1301,8 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
- if (local->ops->tx_frags)
- drv_tx_frags(local, vif, pubsta, skbs);
- else
- result = ieee80211_tx_frags(local, vif, pubsta, skbs,
- txpending);
+ result = ieee80211_tx_frags(local, vif, pubsta, skbs,
+ txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
ieee80211_led_tx(local, 1);
@@ -1836,6 +1824,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
/* RA TA mDA mSA AE:DA SA */
mesh_da = mppath->mpp;
is_mesh_mcast = 0;
+ } else if (mpath) {
+ mesh_da = mpath->dst;
+ is_mesh_mcast = 0;
} else {
/* DA TA mSA AE:SA */
mesh_da = bcast;
@@ -1965,7 +1956,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
!ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
+ net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
dev->name, hdr.addr1);
#endif
@@ -2437,9 +2428,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true) ||
mesh_add_ds_params_ie(skb, sdata) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_ht_cap_ie(skb, sdata) ||
mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2733,7 +2724,7 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid)
{
- int ac = ieee802_1d_to_ac[tid];
+ int ac = ieee802_1d_to_ac[tid & 7];
skb_set_mac_header(skb, 0);
skb_set_network_header(skb, 0);
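
ieee802_1d_to_ac[] has exactly eight entries, one per 802.1D user priority, so masking the caller's TID with 7 keeps ieee80211_tx_skb_tid() in bounds even for TIDs 8-15. A standalone sketch of the clamp (the AC numbering here is illustrative, not mac80211's enum):

#include <assert.h>

/* 802.1D UP -> AC mapping, same shape as mac80211's ieee802_1d_to_ac[8] */
static const int up_to_ac[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };

static int select_ac(unsigned int tid)
{
	return up_to_ac[tid & 7];	/* mask clamps e.g. tid 8 -> 0 */
}

int main(void)
{
	assert(select_ac(6) == 3);
	assert(select_ac(8) == up_to_ac[0]);	/* out-of-range TID wraps */
	return 0;
}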
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8dd4712620f..39b82fee490 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -268,6 +268,10 @@ EXPORT_SYMBOL(ieee80211_ctstoself_duration);
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
{
struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
@@ -279,7 +283,7 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
continue;
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ for (ac = 0; ac < n_acs; ac++) {
int ac_queue = sdata->vif.hw_queue[ac];
if (ac_queue == queue ||
@@ -341,6 +345,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
trace_stop_queue(local, queue, reason);
@@ -352,11 +357,14 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
__set_bit(reason, &local->queue_stop_reasons[queue]);
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
sdata->vif.cab_queue == queue)
netif_stop_subqueue(sdata->dev, ac);
@@ -521,6 +529,11 @@ void ieee80211_iterate_active_interfaces(
&sdata->vif);
}
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
+ if (sdata)
+ iterator(data, sdata->vif.addr, &sdata->vif);
+
mutex_unlock(&local->iflist_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
@@ -549,6 +562,10 @@ void ieee80211_iterate_active_interfaces_atomic(
&sdata->vif);
}
+ sdata = rcu_dereference(local->monitor_sdata);
+ if (sdata)
+ iterator(data, sdata->vif.addr, &sdata->vif);
+
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
@@ -804,7 +821,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_queue_params qparam;
int ac;
- bool use_11b;
+ bool use_11b, enable_qos;
int aCWmin, aCWmax;
if (!local->ops->conf_tx)
@@ -818,6 +835,13 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
!(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
+ /*
+ * By default disable QoS in STA mode for old access points, which do
+ * not support 802.11e. New APs will provide proper queue parameters
+ * that we will configure later.
+ */
+ enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
/* Set defaults according to 802.11-2007 Table 7-37 */
aCWmax = 1023;
@@ -826,38 +850,47 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
else
aCWmin = 15;
- switch (ac) {
- case IEEE80211_AC_BK:
- qparam.cw_max = aCWmax;
- qparam.cw_min = aCWmin;
- qparam.txop = 0;
- qparam.aifs = 7;
- break;
- default: /* never happens but let's not leave undefined */
- case IEEE80211_AC_BE:
+ if (enable_qos) {
+ switch (ac) {
+ case IEEE80211_AC_BK:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 7;
+ break;
+ /* never happens but let's not leave undefined */
+ default:
+ case IEEE80211_AC_BE:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 3;
+ break;
+ case IEEE80211_AC_VI:
+ qparam.cw_max = aCWmin;
+ qparam.cw_min = (aCWmin + 1) / 2 - 1;
+ if (use_11b)
+ qparam.txop = 6016/32;
+ else
+ qparam.txop = 3008/32;
+ qparam.aifs = 2;
+ break;
+ case IEEE80211_AC_VO:
+ qparam.cw_max = (aCWmin + 1) / 2 - 1;
+ qparam.cw_min = (aCWmin + 1) / 4 - 1;
+ if (use_11b)
+ qparam.txop = 3264/32;
+ else
+ qparam.txop = 1504/32;
+ qparam.aifs = 2;
+ break;
+ }
+ } else {
+ /* Configure old 802.11b/g medium access rules. */
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
- qparam.aifs = 3;
- break;
- case IEEE80211_AC_VI:
- qparam.cw_max = aCWmin;
- qparam.cw_min = (aCWmin + 1) / 2 - 1;
- if (use_11b)
- qparam.txop = 6016/32;
- else
- qparam.txop = 3008/32;
- qparam.aifs = 2;
- break;
- case IEEE80211_AC_VO:
- qparam.cw_max = (aCWmin + 1) / 2 - 1;
- qparam.cw_min = (aCWmin + 1) / 4 - 1;
- if (use_11b)
- qparam.txop = 3264/32;
- else
- qparam.txop = 1504/32;
qparam.aifs = 2;
- break;
}
qparam.uapsd = false;
@@ -866,12 +899,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
drv_conf_tx(local, sdata, ac, &qparam);
}
- /* after reinitialize QoS TX queues setting to default,
- * disable QoS at all */
-
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
- sdata->vif.bss_conf.qos =
- sdata->vif.type != NL80211_IFTYPE_STATION;
+ sdata->vif.bss_conf.qos = enable_qos;
if (bss_notify)
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_QOS);
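
The restructured branch hard-codes the EDCA defaults from 802.11-2007 Table 7-37. The same arithmetic, condensed into a standalone sketch (struct and enum names are illustrative):

#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO };

struct edca { int cw_min, cw_max, txop, aifs; };

/* 802.11-2007 Table 7-37 defaults, as in the patched function;
 * aCWmin is 31 for 802.11b-only rates, 15 otherwise */
static struct edca edca_default(int ac, int use_11b)
{
	int aCWmin = use_11b ? 31 : 15, aCWmax = 1023;

	switch (ac) {
	case AC_BK: return (struct edca){ aCWmin, aCWmax, 0, 7 };
	case AC_VI: return (struct edca){ (aCWmin + 1) / 2 - 1, aCWmin,
					  use_11b ? 6016/32 : 3008/32, 2 };
	case AC_VO: return (struct edca){ (aCWmin + 1) / 4 - 1,
					  (aCWmin + 1) / 2 - 1,
					  use_11b ? 3264/32 : 1504/32, 2 };
	case AC_BE:
	default:    return (struct edca){ aCWmin, aCWmax, 0, 3 };
	}
}

int main(void)
{
	struct edca vo = edca_default(AC_VO, 0);
	printf("VO: cw %d-%d txop %d aifs %d\n",
	       vo.cw_min, vo.cw_max, vo.txop, vo.aifs);
	return 0;
}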
@@ -979,6 +1008,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
int ext_rates_len;
sband = local->hw.wiphy->bands[band];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
pos = buffer;
@@ -1060,6 +1091,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
pos += noffset - offset;
}
+ if (sband->vht_cap.vht_supported)
+ pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
+
return pos - buffer;
}
@@ -1267,14 +1302,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add STAs back */
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- if (sta->uploaded) {
- enum ieee80211_sta_state state;
+ enum ieee80211_sta_state state;
- for (state = IEEE80211_STA_NOTEXIST;
- state < sta->sta_state; state++)
- WARN_ON(drv_sta_state(local, sta->sdata, sta,
- state, state + 1));
- }
+ if (!sta->uploaded)
+ continue;
+
+ /* AP-mode stations will be added later */
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ continue;
+
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+ state + 1));
}
mutex_unlock(&local->sta_mtx);
@@ -1371,12 +1411,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ /* APs are now beaconing, add back stations */
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ enum ieee80211_sta_state state;
+
+ if (!sta->uploaded)
+ continue;
+
+ if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+ continue;
+
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+ state + 1));
+ }
+ mutex_unlock(&local->sta_mtx);
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);
wake_up:
+ local->in_reconfig = false;
+ barrier();
+
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1661,6 +1722,27 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
return pos;
}
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap)
+{
+ __le32 tmp;
+
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_capabilities);
+ memset(pos, 0, sizeof(struct ieee80211_vht_capabilities));
+
+ /* capability flags */
+ tmp = cpu_to_le32(cap);
+ memcpy(pos, &tmp, sizeof(u32));
+ pos += sizeof(u32);
+
+ /* VHT MCS set */
+ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
+ pos += sizeof(vht_cap->vht_mcs);
+
+ return pos;
+}
+
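The new ieee80211_ie_build_vht_cap() emits a fixed-layout TLV: element ID, length, a little-endian 32-bit capability word, then the 8-byte supported-MCS set. A standalone sketch of that byte layout (EID 191 per 802.11ac; the constant names here are illustrative):

#include <stdint.h>
#include <string.h>

#define WLAN_EID_VHT_CAP 191	/* VHT Capabilities element ID */
#define VHT_MCS_SET_LEN  8	/* supported VHT-MCS/NSS set */

/* mirrors the shape of ieee80211_ie_build_vht_cap() above */
static uint8_t *build_vht_cap_ie(uint8_t *pos, uint32_t cap,
				 const uint8_t mcs[VHT_MCS_SET_LEN])
{
	*pos++ = WLAN_EID_VHT_CAP;
	*pos++ = 4 + VHT_MCS_SET_LEN;

	/* capability flags, little-endian on the air */
	for (int i = 0; i < 4; i++)
		*pos++ = (cap >> (8 * i)) & 0xff;

	memcpy(pos, mcs, VHT_MCS_SET_LEN);
	return pos + VHT_MCS_SET_LEN;
}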
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
@@ -1726,15 +1808,14 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
return channel_type;
}
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
+int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, rates, *pos;
- u32 basic_rates = vif->bss_conf.basic_rates;
+ u32 basic_rates = sdata->vif.bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
rates = sband->n_bitrates;
@@ -1758,15 +1839,14 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
return 0;
}
-int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
+int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, exrates, *pos;
- u32 basic_rates = vif->bss_conf.basic_rates;
+ u32 basic_rates = sdata->vif.bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
exrates = sband->n_bitrates;
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index c3d643a6536..cea06e9f26f 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,11 +52,11 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
-static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
/* in case we are a client verify acm is not set for this ac */
- while (unlikely(local->wmm_acm & BIT(skb->priority))) {
+ while (unlikely(sdata->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
/*
* This should not really happen. The AP has marked all
@@ -73,10 +73,11 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
}
/* Indicate which queue to use for this fully formed 802.11 frame */
-u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr)
{
+ struct ieee80211_local *local = sdata->local;
u8 *p;
if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -94,7 +95,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
- return ieee80211_downgrade_queue(local, skb);
+ return ieee80211_downgrade_queue(sdata, skb);
}
/* Indicate which queue to use. */
@@ -156,7 +157,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
- return ieee80211_downgrade_queue(local, skb);
+ return ieee80211_downgrade_queue(sdata, skb);
}
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index ca80818b7b6..7fea4bb8acb 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,7 +15,7 @@
extern const int ieee802_1d_to_ac[8];
-u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr);
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
deleted file mode 100644
index b2650a9d45f..00000000000
--- a/net/mac80211/work.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * mac80211 work implementation
- *
- * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
- * Copyright 2004, Instant802 Networks, Inc.
- * Copyright 2005, Devicescape Software, Inc.
- * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
- * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/crc32.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-
-#include "ieee80211_i.h"
-#include "rate.h"
-#include "driver-ops.h"
-
-enum work_action {
- WORK_ACT_NONE,
- WORK_ACT_TIMEOUT,
-};
-
-
-/* utils */
-static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
-{
- lockdep_assert_held(&local->mtx);
-}
-
-/*
- * We can have multiple work items (and connection probing)
- * scheduling this timer, but we need to take care to only
- * reschedule it when it should fire _earlier_ than it was
- * asked for before, or if it's not pending right now. This
- * function ensures that. Note that it then is required to
- * run this function for all timeouts after the first one
- * has happened -- the work that runs from this timer will
- * do that.
- */
-static void run_again(struct ieee80211_local *local,
- unsigned long timeout)
-{
- ASSERT_WORK_MTX(local);
-
- if (!timer_pending(&local->work_timer) ||
- time_before(timeout, local->work_timer.expires))
- mod_timer(&local->work_timer, timeout);
-}
-
-void free_work(struct ieee80211_work *wk)
-{
- kfree_rcu(wk, rcu_head);
-}
-
-static enum work_action __must_check
-ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
-{
- /*
- * First time we run, do nothing -- the generic code will
- * have switched to the right channel etc.
- */
- if (!wk->started) {
- wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
-
- cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
- wk->chan, wk->chan_type,
- wk->remain.duration, GFP_KERNEL);
-
- return WORK_ACT_NONE;
- }
-
- return WORK_ACT_TIMEOUT;
-}
-
-static enum work_action __must_check
-ieee80211_offchannel_tx(struct ieee80211_work *wk)
-{
- if (!wk->started) {
- wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait);
-
- /*
- * After this, offchan_tx.frame remains but now is no
- * longer a valid pointer -- we still need it as the
- * cookie for canceling this work/status matching.
- */
- ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
-
- return WORK_ACT_NONE;
- }
-
- return WORK_ACT_TIMEOUT;
-}
-
-static void ieee80211_work_timer(unsigned long data)
-{
- struct ieee80211_local *local = (void *) data;
-
- if (local->quiescing)
- return;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-}
-
-static void ieee80211_work_work(struct work_struct *work)
-{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, work_work);
- struct ieee80211_work *wk, *tmp;
- LIST_HEAD(free_work);
- enum work_action rma;
- bool remain_off_channel = false;
-
- /*
- * ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the rest.
- */
- if (WARN(local->suspended, "work scheduled while going to suspend\n"))
- return;
-
- mutex_lock(&local->mtx);
-
- if (local->scanning) {
- mutex_unlock(&local->mtx);
- return;
- }
-
- ieee80211_recalc_idle(local);
-
- list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
- bool started = wk->started;
-
- /* mark work as started if it's on the current off-channel */
- if (!started && local->tmp_channel &&
- wk->chan == local->tmp_channel &&
- wk->chan_type == local->tmp_channel_type) {
- started = true;
- wk->timeout = jiffies;
- }
-
- if (!started && !local->tmp_channel) {
- ieee80211_offchannel_stop_vifs(local, true);
-
- local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk->chan_type;
-
- ieee80211_hw_config(local, 0);
-
- started = true;
- wk->timeout = jiffies;
- }
-
- /* don't try to work with items that aren't started */
- if (!started)
- continue;
-
- if (time_is_after_jiffies(wk->timeout)) {
- /*
- * This work item isn't supposed to be worked on
- * right now, but take care to adjust the timer
- * properly.
- */
- run_again(local, wk->timeout);
- continue;
- }
-
- switch (wk->type) {
- default:
- WARN_ON(1);
- /* nothing */
- rma = WORK_ACT_NONE;
- break;
- case IEEE80211_WORK_ABORT:
- rma = WORK_ACT_TIMEOUT;
- break;
- case IEEE80211_WORK_REMAIN_ON_CHANNEL:
- rma = ieee80211_remain_on_channel_timeout(wk);
- break;
- case IEEE80211_WORK_OFFCHANNEL_TX:
- rma = ieee80211_offchannel_tx(wk);
- break;
- }
-
- wk->started = started;
-
- switch (rma) {
- case WORK_ACT_NONE:
- /* might have changed the timeout */
- run_again(local, wk->timeout);
- break;
- case WORK_ACT_TIMEOUT:
- list_del_rcu(&wk->list);
- synchronize_rcu();
- list_add(&wk->list, &free_work);
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- if (!wk->started)
- continue;
- if (wk->chan != local->tmp_channel ||
- wk->chan_type != local->tmp_channel_type)
- continue;
- remain_off_channel = true;
- }
-
- if (!remain_off_channel && local->tmp_channel) {
- local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
-
- ieee80211_offchannel_return(local, true);
-
- /* give connection some time to breathe */
- run_again(local, jiffies + HZ/2);
- }
-
- ieee80211_recalc_idle(local);
- ieee80211_run_deferred_scan(local);
-
- mutex_unlock(&local->mtx);
-
- list_for_each_entry_safe(wk, tmp, &free_work, list) {
- wk->done(wk, NULL);
- list_del(&wk->list);
- kfree(wk);
- }
-}
-
-void ieee80211_add_work(struct ieee80211_work *wk)
-{
- struct ieee80211_local *local;
-
- if (WARN_ON(!wk->chan))
- return;
-
- if (WARN_ON(!wk->sdata))
- return;
-
- if (WARN_ON(!wk->done))
- return;
-
- if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
- return;
-
- wk->started = false;
-
- local = wk->sdata->local;
- mutex_lock(&local->mtx);
- list_add_tail(&wk->list, &local->work_list);
- mutex_unlock(&local->mtx);
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-}
-
-void ieee80211_work_init(struct ieee80211_local *local)
-{
- INIT_LIST_HEAD(&local->work_list);
- setup_timer(&local->work_timer, ieee80211_work_timer,
- (unsigned long)local);
- INIT_WORK(&local->work_work, ieee80211_work_work);
-}
-
-void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk;
- bool cleanup = false;
-
- mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
- cleanup = true;
- wk->type = IEEE80211_WORK_ABORT;
- wk->started = true;
- wk->timeout = jiffies;
- }
- mutex_unlock(&local->mtx);
-
- /* run cleanups etc. */
- if (cleanup)
- ieee80211_work_work(&local->work_work);
-
- mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
- WARN_ON(1);
- break;
- }
- mutex_unlock(&local->mtx);
-}
-
-static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
- struct sk_buff *skb)
-{
- /*
- * We are done serving the remain-on-channel command.
- */
- cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
- wk->chan, wk->chan_type,
- GFP_KERNEL);
-
- return WORK_DONE_DESTROY;
-}
-
-int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int duration, u64 *cookie)
-{
- struct ieee80211_work *wk;
-
- wk = kzalloc(sizeof(*wk), GFP_KERNEL);
- if (!wk)
- return -ENOMEM;
-
- wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
- wk->chan = chan;
- wk->chan_type = channel_type;
- wk->sdata = sdata;
- wk->done = ieee80211_remain_done;
-
- wk->remain.duration = duration;
-
- *cookie = (unsigned long) wk;
-
- ieee80211_add_work(wk);
-
- return 0;
-}
-
-int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- u64 cookie)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk, *tmp;
- bool found = false;
-
- mutex_lock(&local->mtx);
- list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
- if ((unsigned long) wk == cookie) {
- wk->timeout = jiffies;
- found = true;
- break;
- }
- }
- mutex_unlock(&local->mtx);
-
- if (!found)
- return -ENOENT;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-
- return 0;
-}
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index ec1bd3fc127..57cf5d1a2e4 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_MAC802154) += mac802154.o
-mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
+mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e3edfb0661b..e748aed290a 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -140,6 +140,10 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
name, mac802154_monitor_setup);
break;
+ case IEEE802154_DEV_WPAN:
+ dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
+ name, mac802154_wpan_setup);
+ break;
default:
dev = NULL;
err = -EINVAL;
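
As with the monitor type, the WPAN case hands alloc_netdev() a setup callback that runs before registration and seeds the netdev_priv() area. A minimal sketch of that pairing, modeled on mac802154_wpan_setup() (demo_* names are illustrative, not kernel symbols):

#include <linux/netdevice.h>

struct demo_priv {
	int type;
};

static void demo_setup(struct net_device *dev)
{
	struct demo_priv *priv = netdev_priv(dev);

	dev->mtu = 127;			/* IEEE802154_MTU */
	dev->flags = IFF_NOARP | IFF_BROADCAST;
	dev->destructor = free_netdev;	/* freed on unregister */
	priv->type = 1;			/* illustrative */
}

/* caller: dev = alloc_netdev(sizeof(struct demo_priv), name, demo_setup); */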
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 789d9c948ae..a4dcaf1dd4b 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -93,6 +93,7 @@ struct mac802154_sub_if_data {
#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
+extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
int mac802154_slave_open(struct net_device *dev);
int mac802154_slave_close(struct net_device *dev);
@@ -100,10 +101,18 @@ int mac802154_slave_close(struct net_device *dev);
void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
void mac802154_monitor_setup(struct net_device *dev);
+void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb);
+void mac802154_wpan_setup(struct net_device *dev);
+
netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
u8 page, u8 chan);
/* MIB callbacks */
+void mac802154_dev_set_short_addr(struct net_device *dev, u16 val);
+u16 mac802154_dev_get_short_addr(const struct net_device *dev);
void mac802154_dev_set_ieee_addr(struct net_device *dev);
+u16 mac802154_dev_get_pan_id(const struct net_device *dev);
+void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
+void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index 7a5d0e052cd..d8d27700608 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -25,13 +25,37 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
+#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
+#include <net/nl802154.h>
#include "mac802154.h"
-struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+static int mac802154_mlme_start_req(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ u8 channel, u8 page,
+ u8 bcn_ord, u8 sf_ord,
+ u8 pan_coord, u8 blx,
+ u8 coord_realign)
+{
+ BUG_ON(addr->addr_type != IEEE802154_ADDR_SHORT);
+
+ mac802154_dev_set_pan_id(dev, addr->pan_id);
+ mac802154_dev_set_short_addr(dev, addr->short_addr);
+ mac802154_dev_set_ieee_addr(dev);
+ mac802154_dev_set_page_channel(dev, page, channel);
+
+ /* FIXME: add validation for unused parameters to be sane
+ * for SoftMAC
+ */
+ ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
+
+ return 0;
+}
+
+static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -43,3 +67,10 @@ struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
.get_phy = mac802154_get_phy,
};
+
+struct ieee802154_mlme_ops mac802154_mlme_wpan = {
+ .get_phy = mac802154_get_phy,
+ .start_req = mac802154_mlme_start_req,
+ .get_pan_id = mac802154_dev_get_pan_id,
+ .get_short_addr = mac802154_dev_get_short_addr,
+};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index ab59821ec72..f47781ab0cc 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -28,13 +28,18 @@
#include "mac802154.h"
+struct phy_chan_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
struct hw_addr_filt_notify_work {
struct work_struct work;
struct net_device *dev;
unsigned long changed;
};
-struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
+static struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -78,6 +83,37 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
return;
}
+void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->short_addr = val;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if ((priv->hw->ops->set_hw_addr_filt) &&
+ (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) {
+ priv->hw->hw.hw_filt.short_addr = priv->short_addr;
+ set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED);
+ }
+}
+
+u16 mac802154_dev_get_short_addr(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ u16 ret;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ ret = priv->short_addr;
+ spin_unlock_bh(&priv->mib_lock);
+
+ return ret;
+}
+
void mac802154_dev_set_ieee_addr(struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -91,3 +127,73 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev)
set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
}
}
+
+u16 mac802154_dev_get_pan_id(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ u16 ret;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ ret = priv->pan_id;
+ spin_unlock_bh(&priv->mib_lock);
+
+ return ret;
+}
+
+void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->pan_id = val;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if ((priv->hw->ops->set_hw_addr_filt) &&
+ (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) {
+ priv->hw->hw.hw_filt.pan_id = priv->pan_id;
+ set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED);
+ }
+}
+
+static void phy_chan_notify(struct work_struct *work)
+{
+ struct phy_chan_notify_work *nw = container_of(work,
+ struct phy_chan_notify_work, work);
+ struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
+ struct mac802154_sub_if_data *priv = netdev_priv(nw->dev);
+ int res;
+
+ res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
+ if (res)
+ pr_debug("set_channel failed\n");
+
+ kfree(nw);
+}
+
+void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct phy_chan_notify_work *work;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->page = page;
+ priv->chan = chan;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if (priv->hw->phy->current_channel != priv->chan ||
+ priv->hw->phy->current_page != priv->page) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, phy_chan_notify);
+ work->dev = dev;
+ queue_work(priv->hw->dev_workqueue, &work->work);
+ }
+}
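mac802154_dev_set_page_channel() cannot call the driver's set_channel() while holding the MIB spinlock, so it defers through a heap-allocated, self-freeing work item. The pattern in isolation (a sketch; oneshot_* names are illustrative):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct oneshot_work {
	struct work_struct work;
	int arg;
};

static void oneshot_fn(struct work_struct *work)
{
	struct oneshot_work *ow =
		container_of(work, struct oneshot_work, work);

	pr_debug("deferred action, arg=%d\n", ow->arg);
	kfree(ow);		/* the item owns itself */
}

static int oneshot_queue(struct workqueue_struct *wq, int arg)
{
	struct oneshot_work *ow = kzalloc(sizeof(*ow), GFP_ATOMIC);

	if (!ow)
		return -ENOMEM;
	INIT_WORK(&ow->work, oneshot_fn);
	ow->arg = arg;
	queue_work(wq, &ow->work);
	return 0;
}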
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 4a7d76d4f8b..38548ec2098 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -77,6 +77,7 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
}
mac802154_monitors_rx(priv, skb);
+ mac802154_wpans_rx(priv, skb);
out:
dev_kfree_skb(skb);
return;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 434b6873b35..1a4df39c722 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -88,6 +88,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
+
if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
u16 crc = crc_ccitt(0, skb->data, skb->len);
u8 *data = skb_put(skb, 2);
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
new file mode 100644
index 00000000000..f30f6d4beea
--- /dev/null
+++ b/net/mac802154/wpan.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+
+#include <net/rtnetlink.h>
+#include <linux/nl802154.h>
+#include <net/af_ieee802154.h>
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ieee802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
+
+ *val = skb->data[0];
+ skb_pull(skb, 1);
+
+ return 0;
+}
+
+static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
+
+ *val = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+
+ return 0;
+}
+
+static inline void mac802154_haddr_copy_swap(u8 *dest, const u8 *src)
+{
+ int i;
+ for (i = 0; i < IEEE802154_ADDR_LEN; i++)
+ dest[IEEE802154_ADDR_LEN - i - 1] = src[i];
+}
+
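Both helpers above encode 802.15.4 wire rules: multi-byte fields arrive little-endian, and extended addresses are byte-reversed relative to their in-memory form. A userspace-testable rendering of the same two rules:

#include <assert.h>
#include <stdint.h>

#define ADDR_LEN 8

static uint16_t fetch_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);	/* wire order is little-endian */
}

static void haddr_copy_swap(uint8_t *dest, const uint8_t *src)
{
	for (int i = 0; i < ADDR_LEN; i++)
		dest[ADDR_LEN - i - 1] = src[i];
}

int main(void)
{
	const uint8_t wire[2] = { 0x34, 0x12 };
	uint8_t a[ADDR_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8 }, b[ADDR_LEN];

	assert(fetch_le16(wire) == 0x1234);
	haddr_copy_swap(b, a);
	assert(b[0] == 8 && b[7] == 1);
	return 0;
}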
+static int
+mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct sockaddr_ieee802154 *sa =
+ (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+ int err = -ENOIOCTLCMD;
+
+ spin_lock_bh(&priv->mib_lock);
+
+ switch (cmd) {
+ case SIOCGIFADDR:
+ if (priv->pan_id == IEEE802154_PANID_BROADCAST ||
+ priv->short_addr == IEEE802154_ADDR_BROADCAST) {
+ err = -EADDRNOTAVAIL;
+ break;
+ }
+
+ sa->family = AF_IEEE802154;
+ sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+ sa->addr.pan_id = priv->pan_id;
+ sa->addr.short_addr = priv->short_addr;
+
+ err = 0;
+ break;
+ case SIOCSIFADDR:
+ dev_warn(&dev->dev,
+ "Using DEBUGing ioctl SIOCSIFADDR isn't recommened!\n");
+ if (sa->family != AF_IEEE802154 ||
+ sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
+ sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
+ err = -EINVAL;
+ break;
+ }
+
+ priv->pan_id = sa->addr.pan_id;
+ priv->short_addr = sa->addr.short_addr;
+
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(&priv->mib_lock);
+ return err;
+}
+
+static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* FIXME: validate addr */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ mac802154_dev_set_ieee_addr(dev);
+ return 0;
+}
+
+static int mac802154_header_create(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ const void *_daddr,
+ const void *_saddr,
+ unsigned len)
+{
+ const struct ieee802154_addr *saddr = _saddr;
+ const struct ieee802154_addr *daddr = _daddr;
+ struct ieee802154_addr dev_addr;
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ int pos = 2;
+ u8 *head;
+ u16 fc;
+
+ if (!daddr)
+ return -EINVAL;
+
+ head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
+ if (head == NULL)
+ return -ENOMEM;
+
+ head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
+ fc = mac_cb_type(skb);
+
+ if (!saddr) {
+ spin_lock_bh(&priv->mib_lock);
+
+ if (priv->short_addr == IEEE802154_ADDR_BROADCAST ||
+ priv->short_addr == IEEE802154_ADDR_UNDEF ||
+ priv->pan_id == IEEE802154_PANID_BROADCAST) {
+ dev_addr.addr_type = IEEE802154_ADDR_LONG;
+ memcpy(dev_addr.hwaddr, dev->dev_addr,
+ IEEE802154_ADDR_LEN);
+ } else {
+ dev_addr.addr_type = IEEE802154_ADDR_SHORT;
+ dev_addr.short_addr = priv->short_addr;
+ }
+
+ dev_addr.pan_id = priv->pan_id;
+ saddr = &dev_addr;
+
+ spin_unlock_bh(&priv->mib_lock);
+ }
+
+ if (daddr->addr_type != IEEE802154_ADDR_NONE) {
+ fc |= (daddr->addr_type << IEEE802154_FC_DAMODE_SHIFT);
+
+ head[pos++] = daddr->pan_id & 0xff;
+ head[pos++] = daddr->pan_id >> 8;
+
+ if (daddr->addr_type == IEEE802154_ADDR_SHORT) {
+ head[pos++] = daddr->short_addr & 0xff;
+ head[pos++] = daddr->short_addr >> 8;
+ } else {
+ mac802154_haddr_copy_swap(head + pos, daddr->hwaddr);
+ pos += IEEE802154_ADDR_LEN;
+ }
+ }
+
+ if (saddr->addr_type != IEEE802154_ADDR_NONE) {
+ fc |= (saddr->addr_type << IEEE802154_FC_SAMODE_SHIFT);
+
+ if ((saddr->pan_id == daddr->pan_id) &&
+ (saddr->pan_id != IEEE802154_PANID_BROADCAST)) {
+ /* PANID compression/intra PAN */
+ fc |= IEEE802154_FC_INTRA_PAN;
+ } else {
+ head[pos++] = saddr->pan_id & 0xff;
+ head[pos++] = saddr->pan_id >> 8;
+ }
+
+ if (saddr->addr_type == IEEE802154_ADDR_SHORT) {
+ head[pos++] = saddr->short_addr & 0xff;
+ head[pos++] = saddr->short_addr >> 8;
+ } else {
+ mac802154_haddr_copy_swap(head + pos, saddr->hwaddr);
+ pos += IEEE802154_ADDR_LEN;
+ }
+ }
+
+ head[0] = fc;
+ head[1] = fc >> 8;
+
+ memcpy(skb_push(skb, pos), head, pos);
+ kfree(head);
+
+ return pos;
+}
+
+static int
+mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ const u8 *hdr = skb_mac_header(skb);
+ const u8 *tail = skb_tail_pointer(skb);
+ struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
+ u16 fc;
+ int da_type;
+
+ if (hdr + 3 > tail)
+ goto malformed;
+
+ fc = hdr[0] | (hdr[1] << 8);
+
+ hdr += 3;
+
+ da_type = IEEE802154_FC_DAMODE(fc);
+ addr->addr_type = IEEE802154_FC_SAMODE(fc);
+
+ switch (da_type) {
+ case IEEE802154_ADDR_NONE:
+ if (fc & IEEE802154_FC_INTRA_PAN)
+ goto malformed;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (fc & IEEE802154_FC_INTRA_PAN) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + IEEE802154_ADDR_LEN > tail)
+ goto malformed;
+
+ hdr += IEEE802154_ADDR_LEN;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (fc & IEEE802154_FC_INTRA_PAN) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + 2 > tail)
+ goto malformed;
+
+ hdr += 2;
+ break;
+ default:
+ goto malformed;
+ }
+
+ switch (addr->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (!(fc & IEEE802154_FC_INTRA_PAN)) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + IEEE802154_ADDR_LEN > tail)
+ goto malformed;
+
+ mac802154_haddr_copy_swap(addr->hwaddr, hdr);
+ hdr += IEEE802154_ADDR_LEN;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (!(fc & IEEE802154_FC_INTRA_PAN)) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + 2 > tail)
+ goto malformed;
+
+ addr->short_addr = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ break;
+ default:
+ goto malformed;
+ }
+
+ return sizeof(struct ieee802154_addr);
+
+malformed:
+ pr_debug("malformed packet\n");
+ return 0;
+}
+
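mac802154_header_parse() and mac802154_parse_frame_start() both key off the 16-bit frame control word. A standalone decoder for the fields used here, assuming the 802.15.4-2006 bit layout (frame type in bits 0-2, PAN ID compression in bit 6, addressing modes in bits 10-11 and 14-15):

#include <stdint.h>
#include <stdio.h>

/* IEEE 802.15.4 frame control decoding -- illustrative sketch */
#define FC_TYPE(fc)       ((fc) & 0x7)		/* bits 0-2   */
#define FC_INTRA_PAN(fc)  (((fc) >> 6) & 0x1)	/* bit 6      */
#define FC_DAMODE(fc)     (((fc) >> 10) & 0x3)	/* bits 10-11 */
#define FC_SAMODE(fc)     (((fc) >> 14) & 0x3)	/* bits 14-15 */

int main(void)
{
	uint16_t fc = 0x8841;	/* data frame, intra-PAN, short DA/SA */

	printf("type=%u intra=%u da=%u sa=%u\n",
	       FC_TYPE(fc), FC_INTRA_PAN(fc), FC_DAMODE(fc), FC_SAMODE(fc));
	return 0;
}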
+static netdev_tx_t
+mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ u8 chan, page;
+
+ priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->mib_lock);
+ chan = priv->chan;
+ page = priv->page;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if (chan == MAC802154_CHAN_NONE ||
+ page >= WPAN_NUM_PAGES ||
+ chan >= WPAN_NUM_CHANNELS)
+ return NETDEV_TX_OK;
+
+ skb->skb_iif = dev->ifindex;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return mac802154_tx(priv->hw, skb, page, chan);
+}
+
+static struct header_ops mac802154_header_ops = {
+ .create = mac802154_header_create,
+ .parse = mac802154_header_parse,
+};
+
+static const struct net_device_ops mac802154_wpan_ops = {
+ .ndo_open = mac802154_slave_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = mac802154_wpan_xmit,
+ .ndo_do_ioctl = mac802154_wpan_ioctl,
+ .ndo_set_mac_address = mac802154_wpan_mac_addr,
+};
+
+void mac802154_wpan_setup(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+
+ dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
+ dev->header_ops = &mac802154_header_ops;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = IEEE802154_MTU;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->destructor = free_netdev;
+ dev->netdev_ops = &mac802154_wpan_ops;
+ dev->ml_priv = &mac802154_mlme_wpan;
+
+ priv = netdev_priv(dev);
+ priv->type = IEEE802154_DEV_WPAN;
+
+ priv->chan = MAC802154_CHAN_NONE;
+ priv->page = 0;
+
+ spin_lock_init(&priv->mib_lock);
+
+ get_random_bytes(&priv->bsn, 1);
+ get_random_bytes(&priv->dsn, 1);
+
+ priv->pan_id = IEEE802154_PANID_BROADCAST;
+ priv->short_addr = IEEE802154_ADDR_BROADCAST;
+}
+
+static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
+{
+ return netif_rx(skb);
+}
+
+static int
+mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
+{
+ pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
+
+ spin_lock_bh(&sdata->mib_lock);
+
+ switch (mac_cb(skb)->da.addr_type) {
+ case IEEE802154_ADDR_NONE:
+ if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE)
+ /* FIXME: check if we are PAN coordinator */
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ /* ACK comes with both addresses empty */
+ skb->pkt_type = PACKET_HOST;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
+ mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (!memcmp(mac_cb(skb)->da.hwaddr, sdata->dev->dev_addr,
+ IEEE802154_ADDR_LEN))
+ skb->pkt_type = PACKET_HOST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
+ mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (mac_cb(skb)->da.short_addr == sdata->short_addr)
+ skb->pkt_type = PACKET_HOST;
+ else if (mac_cb(skb)->da.short_addr ==
+ IEEE802154_ADDR_BROADCAST)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&sdata->mib_lock);
+
+ skb->dev = sdata->dev;
+
+ sdata->dev->stats.rx_packets++;
+ sdata->dev->stats.rx_bytes += skb->len;
+
+ switch (mac_cb_type(skb)) {
+ case IEEE802154_FC_TYPE_DATA:
+ return mac802154_process_data(sdata->dev, skb);
+ default:
+ pr_warning("ieee802154: bad frame received (type = %d)\n",
+ mac_cb_type(skb));
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+}
+
+static int mac802154_parse_frame_start(struct sk_buff *skb)
+{
+ u8 *head = skb->data;
+ u16 fc;
+
+ if (mac802154_fetch_skb_u16(skb, &fc) ||
+ mac802154_fetch_skb_u8(skb, &(mac_cb(skb)->seq)))
+ goto err;
+
+ pr_debug("fc: %04x dsn: %02x\n", fc, head[2]);
+
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE(fc);
+ mac_cb(skb)->sa.addr_type = IEEE802154_FC_SAMODE(fc);
+ mac_cb(skb)->da.addr_type = IEEE802154_FC_DAMODE(fc);
+
+ if (fc & IEEE802154_FC_INTRA_PAN)
+ mac_cb(skb)->flags |= MAC_CB_FLAG_INTRAPAN;
+
+ if (mac_cb(skb)->da.addr_type != IEEE802154_ADDR_NONE) {
+ if (mac802154_fetch_skb_u16(skb, &(mac_cb(skb)->da.pan_id)))
+ goto err;
+
+ /* source PAN id compression */
+ if (mac_cb_is_intrapan(skb))
+ mac_cb(skb)->sa.pan_id = mac_cb(skb)->da.pan_id;
+
+ pr_debug("dest PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
+
+ if (mac_cb(skb)->da.addr_type == IEEE802154_ADDR_SHORT) {
+ u16 *da = &(mac_cb(skb)->da.short_addr);
+
+ if (mac802154_fetch_skb_u16(skb, da))
+ goto err;
+
+ pr_debug("destination address is short: %04x\n",
+ mac_cb(skb)->da.short_addr);
+ } else {
+ if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
+ goto err;
+
+ mac802154_haddr_copy_swap(mac_cb(skb)->da.hwaddr,
+ skb->data);
+ skb_pull(skb, IEEE802154_ADDR_LEN);
+
+ pr_debug("destination address is hardware\n");
+ }
+ }
+
+ if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE) {
+ /* no PAN ID compression, fetch the source PAN id */
+ if (!(mac_cb_is_intrapan(skb))) {
+ u16 *sa_pan = &(mac_cb(skb)->sa.pan_id);
+
+ if (mac802154_fetch_skb_u16(skb, sa_pan))
+ goto err;
+ }
+
+ pr_debug("source PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
+
+ if (mac_cb(skb)->sa.addr_type == IEEE802154_ADDR_SHORT) {
+ u16 *sa = &(mac_cb(skb)->sa.short_addr);
+
+ if (mac802154_fetch_skb_u16(skb, sa))
+ goto err;
+
+ pr_debug("source address is short: %04x\n",
+ mac_cb(skb)->sa.short_addr);
+ } else {
+ if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
+ goto err;
+
+ mac802154_haddr_copy_swap(mac_cb(skb)->sa.hwaddr,
+ skb->data);
+ skb_pull(skb, IEEE802154_ADDR_LEN);
+
+ pr_debug("source address is hardware\n");
+ }
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
+{
+ int ret;
+ struct sk_buff *sskb;
+ struct mac802154_sub_if_data *sdata;
+
+ ret = mac802154_parse_frame_start(skb);
+ if (ret) {
+ pr_debug("got invalid frame\n");
+ return;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &priv->slaves, list) {
+ if (sdata->type != IEEE802154_DEV_WPAN)
+ continue;
+
+ sskb = skb_clone(skb, GFP_ATOMIC);
+ if (sskb)
+ mac802154_subif_frame(sdata, sskb);
+ }
+ rcu_read_unlock();
+}
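mac802154_wpans_rx() clones the skb once per WPAN slave, so every interface gets an independent copy while the caller keeps (and later frees) the original. The fan-out pattern in isolation (a sketch; demo_* names are illustrative):

#include <linux/skbuff.h>
#include <linux/rculist.h>

struct demo_sub {
	struct list_head list;
	struct net_device *dev;
};

static void demo_fanout(struct list_head *subs, struct sk_buff *skb,
			void (*deliver)(struct demo_sub *, struct sk_buff *))
{
	struct demo_sub *sub;
	struct sk_buff *clone;

	rcu_read_lock();
	list_for_each_entry_rcu(sub, subs, list) {
		clone = skb_clone(skb, GFP_ATOMIC);
		if (clone)
			deliver(sub, clone);	/* callee owns the clone */
	}
	rcu_read_unlock();
}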
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 209c1ed4336..c19b214ffd5 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -335,6 +335,27 @@ config NF_CT_NETLINK_TIMEOUT
If unsure, say `N'.
+config NF_CT_NETLINK_HELPER
+ tristate 'Connection tracking helpers in user-space via Netlink'
+ select NETFILTER_NETLINK
+ depends on NF_CT_NETLINK
+ depends on NETFILTER_NETLINK_QUEUE
+ depends on NETFILTER_NETLINK_QUEUE_CT
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables the user-space connection tracking helpers
+ infrastructure.
+
+ If unsure, say `N'.
+
+config NETFILTER_NETLINK_QUEUE_CT
+ bool "NFQUEUE integration with Connection Tracking"
+ default n
+ depends on NETFILTER_NETLINK_QUEUE
+ help
+ If this option is enabled, NFQUEUE can include Connection Tracking
+ information together with the packet that is enqueued via NFNETLINK.
+
endif # NF_CONNTRACK
# transparent proxy support
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4e7960cc7b9..1c5160f2278 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
+nfnetlink_queue-y := nfnetlink_queue_core.o
+nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
@@ -24,6 +26,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
+obj-$(CONFIG_NF_CT_NETLINK_HELPER) += nfnetlink_cthelper.o
# connection tracking helpers
nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e19f3653db2..0bc6b60db4d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -264,6 +264,13 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
+
+struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfq_ct_hook);
+
+struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
+
#endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a54b018c6ee..b54eccef40b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1742,7 +1742,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 2,
},
@@ -1752,7 +1752,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_remote_request4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 1,
},
@@ -1760,7 +1760,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 1,
},
@@ -1768,7 +1768,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_request4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 2,
},
@@ -1777,7 +1777,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_forward_icmp,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
@@ -1785,7 +1785,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
@@ -1794,7 +1794,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 2,
},
@@ -1804,7 +1804,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_remote_request6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 1,
},
@@ -1812,7 +1812,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
@@ -1820,7 +1820,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_request6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 2,
},
@@ -1829,7 +1829,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_forward_icmp_v6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
@@ -1837,7 +1837,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
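
The .pf conversion is a no-op at runtime: the NFPROTO_* constants alias the corresponding PF_* values, but name the netfilter hook family rather than a socket family. A compile-time check of that equivalence (a sketch, not part of the patch):

#include <linux/netfilter.h>
#include <linux/bug.h>
#include <linux/socket.h>

static inline void check_nfproto_aliases(void)
{
	BUILD_BUG_ON(NFPROTO_IPV4 != PF_INET);	/* both 2 */
	BUILD_BUG_ON(NFPROTO_IPV6 != PF_INET6);	/* both 10 */
}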
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 7fd66dec859..65b616ae171 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -797,7 +797,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_put;
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
df |= (old_iph->frag_off & htons(IP_DF));
@@ -823,7 +823,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = new_skb;
old_iph = ip_hdr(skb);
}
@@ -913,7 +913,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_put;
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
!skb_is_gso(skb)) {
@@ -942,7 +942,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = new_skb;
old_iph = ipv6_hdr(skb);
}
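
Two separate cleanups in ip_vs_xmit.c: dst_ops->update_pmtu() grew struct sock * and struct sk_buff * parameters (IPVS passes NULL and the skb), and the old skb released after a successful skb_realloc_headroom() is freed with consume_skb(), which, unlike kfree_skb(), is not reported as a packet drop by the skb tracepoints. A hedged sketch of the second idiom; grow_headroom() is an illustrative wrapper, not kernel API:

    /* Expand headroom; on success the old skb was consumed, not dropped. */
    static struct sk_buff *grow_headroom(struct sk_buff *skb,
                                         unsigned int headroom)
    {
            struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);

            if (!nskb)
                    return NULL;    /* caller handles the error path */
            consume_skb(skb);       /* normal release: no drop event */
            return nskb;
    }
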
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ac3af97cc46..cf4875565d6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -531,7 +531,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
if (skb->tstamp.tv64 == 0)
- __net_timestamp((struct sk_buff *)skb);
+ __net_timestamp(skb);
tstamp->start = ktime_to_ns(skb->tstamp);
}
@@ -819,7 +819,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = exp->master;
if (exp->helper) {
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, exp->helper,
+ GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
@@ -1333,7 +1334,6 @@ static void nf_conntrack_cleanup_init_net(void)
while (untrack_refs() > 0)
schedule();
- nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
@@ -1372,7 +1372,7 @@ void nf_conntrack_cleanup(struct net *net)
netfilter framework. Roll on, two-stage module
delete... */
synchronize_net();
-
+ nf_conntrack_proto_fini(net);
nf_conntrack_cleanup_net(net);
if (net_eq(net, &init_net)) {
@@ -1496,11 +1496,6 @@ static int nf_conntrack_init_init_net(void)
printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
-
- ret = nf_conntrack_proto_init();
- if (ret < 0)
- goto err_proto;
-
#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
@@ -1518,9 +1513,7 @@ static int nf_conntrack_init_init_net(void)
#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
- nf_conntrack_proto_fini();
#endif
-err_proto:
return ret;
}
@@ -1583,9 +1576,7 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_helper_init(net);
if (ret < 0)
goto err_helper;
-
return 0;
-
err_helper:
nf_conntrack_timeout_fini(net);
err_timeout:
@@ -1622,6 +1613,9 @@ int nf_conntrack_init(struct net *net)
if (ret < 0)
goto out_init_net;
}
+ ret = nf_conntrack_proto_init(net);
+ if (ret < 0)
+ goto out_proto;
ret = nf_conntrack_init_net(net);
if (ret < 0)
goto out_net;
@@ -1637,6 +1631,8 @@ int nf_conntrack_init(struct net *net)
return 0;
out_net:
+ nf_conntrack_proto_fini(net);
+out_proto:
if (net_eq(net, &init_net))
nf_conntrack_cleanup_init_net();
out_init_net:
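
nf_conntrack_core.c rewires the protocol-table lifetime: nf_conntrack_proto_init() and nf_conntrack_proto_fini() now take a struct net and run for every namespace from nf_conntrack_init()/nf_conntrack_cleanup(), instead of once from the init_net-only paths, with the new out_proto label unwinding on failure. init_conntrack() also starts passing the expectation's helper into nf_ct_helper_ext_add(), which is needed to size the variable-length help area (see the nf_conntrack_extend.c and nf_conntrack_helper.c hunks below). The resulting bring-up order, simplified (init_net-only steps from the hunk omitted; example_ct_init is an illustrative name):

    /* Simplified sketch of the per-net ordering after this change. */
    static int example_ct_init(struct net *net)
    {
            int ret = nf_conntrack_proto_init(net); /* per-net now */

            if (ret < 0)
                    return ret;
            ret = nf_conntrack_init_net(net);
            if (ret < 0)
                    nf_conntrack_proto_fini(net);   /* mirrored in cleanup */
            return ret;
    }
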
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 641ff5f9671..1a9545965c0 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -44,7 +44,8 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
EXPORT_SYMBOL(__nf_ct_ext_destroy);
static void *
-nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
+nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp)
{
unsigned int off, len;
struct nf_ct_ext_type *t;
@@ -54,8 +55,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
- len = off + t->len;
- alloc_size = t->alloc_size;
+ len = off + t->len + var_alloc_len;
+ alloc_size = t->alloc_size + var_alloc_len;
rcu_read_unlock();
*ext = kzalloc(alloc_size, gfp);
@@ -68,7 +69,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
return (void *)(*ext) + off;
}
-void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
+void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp)
{
struct nf_ct_ext *old, *new;
int i, newlen, newoff;
@@ -79,7 +81,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
old = ct->ext;
if (!old)
- return nf_ct_ext_create(&ct->ext, id, gfp);
+ return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);
if (__nf_ct_ext_exist(old, id))
return NULL;
@@ -89,7 +91,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
BUG_ON(t == NULL);
newoff = ALIGN(old->len, t->align);
- newlen = newoff + t->len;
+ newlen = newoff + t->len + var_alloc_len;
rcu_read_unlock();
new = __krealloc(old, newlen, gfp);
@@ -117,7 +119,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
memset((void *)new + newoff, 0, newlen - newoff);
return (void *)new + newoff;
}
-EXPORT_SYMBOL(__nf_ct_ext_add);
+EXPORT_SYMBOL(__nf_ct_ext_add_length);
static void update_alloc_size(struct nf_ct_ext_type *type)
{
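
The extension allocator learns variable-length extensions here: __nf_ct_ext_add() becomes __nf_ct_ext_add_length(), and nf_ct_ext_create() adds the caller-supplied var_alloc_len to both the extension length and the allocation size, so a conntrack helper can reserve private data directly behind struct nf_conn_help. Presumably the old fixed-size entry point becomes a header wrapper passing 0; a hedged sketch (the real wrappers live in nf_conntrack_extend.h, outside this diff):

    /* Hedged sketch: fixed-size callers map onto the new API with
     * var_alloc_len == 0; my_ext_add is illustrative. */
    static inline void *my_ext_add(struct nf_conn *ct,
                                   enum nf_ct_ext_id id, gfp_t gfp)
    {
            return __nf_ct_ext_add_length(ct, id, 0, gfp);
    }
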
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c5c95c6d34..4bb771d1f57 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -358,7 +358,7 @@ static int help(struct sk_buff *skb,
u32 seq;
int dir = CTINFO2DIR(ctinfo);
unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
- struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
+ struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
union nf_inet_addr *daddr;
struct nf_conntrack_man cmd = {};
@@ -512,7 +512,6 @@ out_update_nl:
}
static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
-static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy ftp_exp_policy = {
.max_expected = 1,
@@ -541,7 +540,6 @@ static void nf_conntrack_ftp_fini(void)
static int __init nf_conntrack_ftp_init(void)
{
int i, j = -1, ret = 0;
- char *tmpname;
ftp_buffer = kmalloc(65536, GFP_KERNEL);
if (!ftp_buffer)
@@ -556,17 +554,16 @@ static int __init nf_conntrack_ftp_init(void)
ftp[i][0].tuple.src.l3num = PF_INET;
ftp[i][1].tuple.src.l3num = PF_INET6;
for (j = 0; j < 2; j++) {
+ ftp[i][j].data_len = sizeof(struct nf_ct_ftp_master);
ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
ftp[i][j].expect_policy = &ftp_exp_policy;
ftp[i][j].me = THIS_MODULE;
ftp[i][j].help = help;
- tmpname = &ftp_names[i][j][0];
if (ports[i] == FTP_PORT)
- sprintf(tmpname, "ftp");
+ sprintf(ftp[i][j].name, "ftp");
else
- sprintf(tmpname, "ftp-%d", ports[i]);
- ftp[i][j].name = tmpname;
+ sprintf(ftp[i][j].name, "ftp-%d", ports[i]);
pr_debug("nf_ct_ftp: registering helper for pf: %d "
"port: %d\n",
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 31f50bc3a31..4283b207e63 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -114,7 +114,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned char **data, int *datalen, int *dataoff)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
const struct tcphdr *th;
struct tcphdr _tcph;
@@ -617,6 +617,7 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = {
static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
.name = "H.245",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_UNSPEC,
.tuple.dst.protonum = IPPROTO_UDP,
.help = h245_help,
@@ -1169,6 +1170,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
{
.name = "Q.931",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
@@ -1244,7 +1246,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data,
TransportAddress *taddr, int count)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
int i;
@@ -1359,7 +1361,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, RegistrationRequest *rrq)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int ret;
typeof(set_ras_addr_hook) set_ras_addr;
@@ -1394,7 +1396,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, RegistrationConfirm *rcf)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret;
struct nf_conntrack_expect *exp;
@@ -1443,7 +1445,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, UnregistrationRequest *urq)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret;
typeof(set_sig_addr_hook) set_sig_addr;
@@ -1475,7 +1477,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, AdmissionRequest *arq)
{
- const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ const struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
__be16 port;
union nf_inet_addr addr;
@@ -1742,6 +1744,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
{
.name = "RAS",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
@@ -1751,6 +1754,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
{
.name = "RAS",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET6,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
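
All per-call H.323 state moves off the fixed union in struct nf_conn_help (help.ct_h323_info) and onto the variable-length area reached via nfct_help_data(ct), sized by the data_len fields added to each helper above. The accessor itself is defined in nf_conntrack_helper.h, outside this diff; conceptually it is just the flexible tail of the help extension, e.g.:

    /* Conceptual sketch only; the real layout and accessor may differ. */
    struct example_help {
            struct hlist_head expectations;
            /* ... remainder of struct nf_conn_help ... */
            char data[];            /* data_len bytes, see the extend.c hunks */
    };

    static inline void *example_help_data(struct example_help *help)
    {
            return help->data;
    }
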
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4fa2ff961f5..c4bc637feb7 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -30,8 +30,10 @@
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
-static struct hlist_head *nf_ct_helper_hash __read_mostly;
-static unsigned int nf_ct_helper_hsize __read_mostly;
+struct hlist_head *nf_ct_helper_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
+unsigned int nf_ct_helper_hsize __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
static unsigned int nf_ct_helper_count __read_mostly;
static bool nf_ct_auto_assign_helper __read_mostly = true;
@@ -161,11 +163,14 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
+struct nf_conn_help *
+nf_ct_helper_ext_add(struct nf_conn *ct,
+ struct nf_conntrack_helper *helper, gfp_t gfp)
{
struct nf_conn_help *help;
- help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
+ help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER,
+ helper->data_len, gfp);
if (help)
INIT_HLIST_HEAD(&help->expectations);
else
@@ -218,13 +223,19 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
}
if (help == NULL) {
- help = nf_ct_helper_ext_add(ct, flags);
+ help = nf_ct_helper_ext_add(ct, helper, flags);
if (help == NULL) {
ret = -ENOMEM;
goto out;
}
} else {
- memset(&help->help, 0, sizeof(help->help));
+ /* We only allow helper re-assignment of the same sort since
+ * we cannot reallocate the helper extension area.
+ */
+ if (help->helper != helper) {
+ RCU_INIT_POINTER(help->helper, NULL);
+ goto out;
+ }
}
rcu_assign_pointer(help->helper, helper);
@@ -319,6 +330,9 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
+ int ret = 0;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n;
unsigned int h = helper_hash(&me->tuple);
BUG_ON(me->expect_policy == NULL);
@@ -326,11 +340,19 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
mutex_lock(&nf_ct_helper_mutex);
+ hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+ if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
+ cur->tuple.src.l3num == me->tuple.src.l3num &&
+ cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
nf_ct_helper_count++;
+out:
mutex_unlock(&nf_ct_helper_mutex);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
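
Helper registration gets stricter: a second helper with the same name, l3num and protonum now fails with -EEXIST instead of silently shadowing the first entry, and __nf_ct_try_assign_helper() refuses to swap a connection to a different helper because the variable-length help area cannot be re-sized in place. The hash table and its size are exported, presumably for the userspace-helper (cthelper) infrastructure this series leads into. Callers should start checking the return value; a usage fragment (ret and my_helper assumed from context):

    /* Usage sketch: duplicate registration is now an error. */
    ret = nf_conntrack_helper_register(&my_helper);
    if (ret == -EEXIST)
            pr_err("helper %s already registered\n", my_helper.name);
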
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 81366c11827..009c52cfd1e 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -221,7 +221,6 @@ static int help(struct sk_buff *skb, unsigned int protoff,
}
static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
-static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
static struct nf_conntrack_expect_policy irc_exp_policy;
static void nf_conntrack_irc_fini(void);
@@ -229,7 +228,6 @@ static void nf_conntrack_irc_fini(void);
static int __init nf_conntrack_irc_init(void)
{
int i, ret;
- char *tmpname;
if (max_dcc_channels < 1) {
printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n");
@@ -255,12 +253,10 @@ static int __init nf_conntrack_irc_init(void)
irc[i].me = THIS_MODULE;
irc[i].help = help;
- tmpname = &irc_names[i][0];
if (ports[i] == IRC_PORT)
- sprintf(tmpname, "irc");
+ sprintf(irc[i].name, "irc");
else
- sprintf(tmpname, "irc-%u", i);
- irc[i].name = tmpname;
+ sprintf(irc[i].name, "irc-%u", i);
ret = nf_conntrack_helper_register(&irc[i]);
if (ret) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6f4b00a8fc7..14f67a2cbcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -46,6 +46,7 @@
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_helper.h>
#endif
#include <linux/netfilter/nfnetlink.h>
@@ -477,7 +478,6 @@ nla_put_failure:
return -1;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
@@ -564,6 +564,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
;
}
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
@@ -901,7 +902,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
};
static inline int
-ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
+ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
+ struct nlattr **helpinfo)
{
struct nlattr *tb[CTA_HELP_MAX+1];
@@ -912,6 +914,9 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
*helper_name = nla_data(tb[CTA_HELP_NAME]);
+ if (tb[CTA_HELP_INFO])
+ *helpinfo = tb[CTA_HELP_INFO];
+
return 0;
}
@@ -1172,13 +1177,14 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
char *helpname = NULL;
+ struct nlattr *helpinfo = NULL;
int err;
/* don't change helper of sibling connections */
if (ct->master)
return -EBUSY;
- err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+ err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
return err;
@@ -1213,20 +1219,17 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
}
if (help) {
- if (help->helper == helper)
+ if (help->helper == helper) {
+ /* update private helper data if allowed. */
+ if (helper->from_nlattr && helpinfo)
+ helper->from_nlattr(helpinfo, ct);
return 0;
- if (help->helper)
+ } else
return -EBUSY;
- /* need to zero data of old helper */
- memset(&help->help, 0, sizeof(help->help));
- } else {
- /* we cannot set a helper for an existing conntrack */
- return -EOPNOTSUPP;
}
- rcu_assign_pointer(help->helper, helper);
-
- return 0;
+ /* we cannot set a helper for an existing conntrack */
+ return -EOPNOTSUPP;
}
static inline int
@@ -1410,8 +1413,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname = NULL;
-
- err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+ struct nlattr *helpinfo = NULL;
+
+ err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
goto err2;
@@ -1440,11 +1444,14 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
} else {
struct nf_conn_help *help;
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
if (help == NULL) {
err = -ENOMEM;
goto err2;
}
+ /* set private helper data if allowed. */
+ if (helper->from_nlattr && helpinfo)
+ helper->from_nlattr(helpinfo, ct);
/* not in hash table yet so not strictly necessary */
RCU_INIT_POINTER(help->helper, helper);
@@ -1620,6 +1627,288 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
return err;
}
+static int
+ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ __u16 cpu, const struct ip_conntrack_stat *st)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(cpu);
+
+ if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
+ nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
+ nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
+ nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
+ nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
+ nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
+ nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
+ nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
+ nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
+ htonl(st->insert_failed)) ||
+ nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
+ nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
+ nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
+ nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
+ htonl(st->search_restart)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int cpu;
+ struct net *net = sock_net(skb->sk);
+
+ if (cb->args[0] == nr_cpu_ids)
+ return 0;
+
+ for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+ const struct ip_conntrack_stat *st;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ st = per_cpu_ptr(net->ct.stat, cpu);
+ if (ctnetlink_ct_stat_cpu_fill_info(skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ cpu, st) < 0)
+ break;
+ }
+ cb->args[0] = cpu;
+
+ return skb->len;
+}
+
+static int
+ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_ct_stat_cpu_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ return 0;
+}
+
+static int
+ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ struct net *net)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+ unsigned int nr_conntracks = atomic_read(&net->ct.count);
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ struct sk_buff *skb2;
+ int err;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ sock_net(skb->sk));
+ if (err <= 0)
+ goto free;
+
+ err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
+
+ return 0;
+
+free:
+ kfree_skb(skb2);
+out:
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
+}
+
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+static size_t
+ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+{
+ return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ + nla_total_size(0) /* CTA_PROTOINFO */
+ + nla_total_size(0) /* CTA_HELP */
+ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+ + ctnetlink_secctx_size(ct)
+#ifdef CONFIG_NF_NAT_NEEDED
+ + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+ + ctnetlink_proto_size(ct)
+ ;
+}
+
+static int
+ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
+{
+ struct nlattr *nest_parms;
+
+ rcu_read_lock();
+ nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+ if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+
+ nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+ if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+
+ if (nf_ct_zone(ct)) {
+ if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ goto nla_put_failure;
+ }
+
+ if (ctnetlink_dump_id(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_status(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_timeout(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_protoinfo(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_helpinfo(skb, ct) < 0)
+ goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
+ goto nla_put_failure;
+#endif
+ if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if ((ct->status & IPS_SEQ_ADJUST) &&
+ ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
+ goto nla_put_failure;
+#endif
+ rcu_read_unlock();
+ return 0;
+
+nla_put_failure:
+ rcu_read_unlock();
+ return -ENOSPC;
+}
+
+static int
+ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
+{
+ int err;
+
+ if (cda[CTA_TIMEOUT]) {
+ err = ctnetlink_change_timeout(ct, cda);
+ if (err < 0)
+ return err;
+ }
+ if (cda[CTA_STATUS]) {
+ err = ctnetlink_change_status(ct, cda);
+ if (err < 0)
+ return err;
+ }
+ if (cda[CTA_HELP]) {
+ err = ctnetlink_change_helper(ct, cda);
+ if (err < 0)
+ return err;
+ }
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ if (cda[CTA_MARK])
+ ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+#endif
+ return 0;
+}
+
+static int
+ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
+{
+ struct nlattr *cda[CTA_MAX+1];
+
+ nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
+
+ return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+}
+
+static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
+ .build_size = ctnetlink_nfqueue_build_size,
+ .build = ctnetlink_nfqueue_build,
+ .parse = ctnetlink_nfqueue_parse,
+};
+#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+
/***********************************************************************
* EXPECT
***********************************************************************/
@@ -2300,6 +2589,79 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
return err;
}
+static int
+ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
+ const struct ip_conntrack_stat *st)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(cpu);
+
+ if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
+ nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
+ nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int cpu;
+ struct net *net = sock_net(skb->sk);
+
+ if (cb->args[0] == nr_cpu_ids)
+ return 0;
+
+ for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+ const struct ip_conntrack_stat *st;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ st = per_cpu_ptr(net->ct.stat, cpu);
+ if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ cpu, st) < 0)
+ break;
+ }
+ cb->args[0] = cpu;
+
+ return skb->len;
+}
+
+static int
+ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_exp_stat_cpu_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static struct nf_ct_event_notifier ctnl_notifier = {
.fcn = ctnetlink_conntrack_event,
@@ -2323,6 +2685,8 @@ static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
[IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
.attr_count = CTA_MAX,
.policy = ct_nla_policy },
+ [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
+ [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
};
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
@@ -2335,6 +2699,7 @@ static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
[IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
.attr_count = CTA_EXPECT_MAX,
.policy = exp_nla_policy },
+ [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
};
static const struct nfnetlink_subsystem ctnl_subsys = {
@@ -2424,7 +2789,10 @@ static int __init ctnetlink_init(void)
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
-
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ /* setup interaction between nf_queue and nf_conntrack_netlink. */
+ RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
+#endif
return 0;
err_unreg_exp_subsys:
@@ -2442,6 +2810,9 @@ static void __exit ctnetlink_exit(void)
unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ RCU_INIT_POINTER(nfq_ct_hook, NULL);
+#endif
}
module_init(ctnetlink_init);
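
Three features land in nf_conntrack_netlink.c: (1) CTA_HELP_INFO carries helper-private data from userspace into the new helper->from_nlattr() callback, both when creating a conntrack and when re-setting the same helper; (2) IPCTNL_MSG_CT_GET_STATS_CPU, IPCTNL_MSG_CT_GET_STATS and IPCTNL_MSG_EXP_GET_STATS_CPU expose the per-CPU conntrack/expectation counters and the global entry count over nfnetlink; (3) under CONFIG_NETFILTER_NETLINK_QUEUE_CT, an nfq_ct_hook is published so nfnetlink_queue can serialize and parse conntrack attributes. Both stats dumpers use the standard netlink cursor idiom, keeping the next CPU in cb->args[0]. A sketch of that pattern; fill_one_cpu() stands in for the fill_info helpers above:

    static int example_stat_dump(struct sk_buff *skb,
                                 struct netlink_callback *cb)
    {
            int cpu;

            if (cb->args[0] == nr_cpu_ids)  /* previous pass finished */
                    return 0;               /* returning 0 ends the dump */

            for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
                    if (!cpu_possible(cpu))
                            continue;
                    if (fill_one_cpu(skb, cpu) < 0) /* skb full */
                            break;          /* resume here on the next call */
            }
            cb->args[0] = cpu;

            return skb->len;
    }
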
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 31d56b23b9e..6fed9ec3524 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -174,7 +174,7 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
static void pptp_destroy_siblings(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- const struct nf_conn_help *help = nfct_help(ct);
+ const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_conntrack_tuple t;
nf_ct_gre_keymap_destroy(ct);
@@ -182,16 +182,16 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
/* try original (pns->pac) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
- t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
- t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
+ t.src.u.gre.key = ct_pptp_info->pns_call_id;
+ t.dst.u.gre.key = ct_pptp_info->pac_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout original pns->pac ct/exp\n");
/* try reply (pac->pns) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
- t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
- t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
+ t.src.u.gre.key = ct_pptp_info->pac_call_id;
+ t.dst.u.gre.key = ct_pptp_info->pns_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout reply pac->pns ct/exp\n");
}
@@ -269,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ struct nf_ct_pptp_master *info = nfct_help_data(ct);
u_int16_t msg;
__be16 cid = 0, pcid = 0;
typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
@@ -396,7 +396,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ struct nf_ct_pptp_master *info = nfct_help_data(ct);
u_int16_t msg;
__be16 cid = 0, pcid = 0;
typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
@@ -506,7 +506,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
{
int dir = CTINFO2DIR(ctinfo);
- const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ const struct nf_ct_pptp_master *info = nfct_help_data(ct);
const struct tcphdr *tcph;
struct tcphdr _tcph;
const struct pptp_pkt_hdr *pptph;
@@ -592,6 +592,7 @@ static const struct nf_conntrack_expect_policy pptp_exp_policy = {
static struct nf_conntrack_helper pptp __read_mostly = {
.name = "pptp",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_pptp_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 8b631b07a64..0dc63854390 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,28 +36,32 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
static int
-nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
- struct ctl_table *table, unsigned int *users)
+nf_ct_register_sysctl(struct net *net,
+ struct ctl_table_header **header,
+ const char *path,
+ struct ctl_table *table)
{
if (*header == NULL) {
- *header = register_net_sysctl(&init_net, path, table);
+ *header = register_net_sysctl(net, path, table);
if (*header == NULL)
return -ENOMEM;
}
- if (users != NULL)
- (*users)++;
+
return 0;
}
static void
nf_ct_unregister_sysctl(struct ctl_table_header **header,
- struct ctl_table *table, unsigned int *users)
+ struct ctl_table **table,
+ unsigned int users)
{
- if (users != NULL && --*users > 0)
+ if (users > 0)
return;
unregister_net_sysctl_table(*header);
+ kfree(*table);
*header = NULL;
+ *table = NULL;
}
#endif
@@ -161,30 +165,56 @@ static int kill_l4proto(struct nf_conn *i, void *data)
nf_ct_l3num(i) == l4proto->l3proto;
}
-static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
+static struct nf_ip_net *nf_ct_l3proto_net(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
{
- int err = 0;
+ if (l3proto->l3proto == PF_INET)
+ return &net->ct.nf_ct_proto;
+ else
+ return NULL;
+}
-#ifdef CONFIG_SYSCTL
- if (l3proto->ctl_table != NULL) {
- err = nf_ct_register_sysctl(&l3proto->ctl_table_header,
+static int nf_ct_l3proto_register_sysctl(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
+{
+ int err = 0;
+ struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
+ /* nf_conntrack_l3proto_ipv6 doesn't support sysctl */
+ if (in == NULL)
+ return 0;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ if (in->ctl_table != NULL) {
+ err = nf_ct_register_sysctl(net,
+ &in->ctl_table_header,
l3proto->ctl_table_path,
- l3proto->ctl_table, NULL);
+ in->ctl_table);
+ if (err < 0) {
+ kfree(in->ctl_table);
+ in->ctl_table = NULL;
+ }
}
#endif
return err;
}
-static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto)
+static void nf_ct_l3proto_unregister_sysctl(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
{
-#ifdef CONFIG_SYSCTL
- if (l3proto->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(&l3proto->ctl_table_header,
- l3proto->ctl_table, NULL);
+ struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
+
+ if (in == NULL)
+ return;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ if (in->ctl_table_header != NULL)
+ nf_ct_unregister_sysctl(&in->ctl_table_header,
+ &in->ctl_table,
+ 0);
#endif
}
-int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
+static int
+nf_conntrack_l3proto_register_net(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
struct nf_conntrack_l3proto *old;
@@ -203,10 +233,6 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
goto out_unlock;
}
- ret = nf_ct_l3proto_register_sysctl(proto);
- if (ret < 0)
- goto out_unlock;
-
if (proto->nlattr_tuple_size)
proto->nla_size = 3 * proto->nlattr_tuple_size();
@@ -215,13 +241,37 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
+
}
-EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
-void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
+int nf_conntrack_l3proto_register(struct net *net,
+ struct nf_conntrack_l3proto *proto)
{
- struct net *net;
+ int ret = 0;
+
+ if (proto->init_net) {
+ ret = proto->init_net(net);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = nf_ct_l3proto_register_sysctl(net, proto);
+ if (ret < 0)
+ return ret;
+ if (net == &init_net) {
+ ret = nf_conntrack_l3proto_register_net(proto);
+ if (ret < 0)
+ nf_ct_l3proto_unregister_sysctl(net, proto);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
+
+static void
+nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
+{
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
@@ -230,68 +280,107 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
- nf_ct_l3proto_unregister_sysctl(proto);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
+}
+
+void nf_conntrack_l3proto_unregister(struct net *net,
+ struct nf_conntrack_l3proto *proto)
+{
+ if (net == &init_net)
+ nf_conntrack_l3proto_unregister_net(proto);
+
+ nf_ct_l3proto_unregister_sysctl(net, proto);
/* Remove all contrack entries for this protocol */
rtnl_lock();
- for_each_net(net)
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
-static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
+static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
+{
+ if (l4proto->get_net_proto) {
+ /* statically built-in protocols use static per-net */
+ return l4proto->get_net_proto(net);
+ } else if (l4proto->net_id) {
+ /* ... and loadable protocols use dynamic per-net */
+ return net_generic(net, *l4proto->net_id);
+ }
+ return NULL;
+}
+
+static
+int nf_ct_l4proto_register_sysctl(struct net *net,
+ struct nf_proto_net *pn,
+ struct nf_conntrack_l4proto *l4proto)
{
int err = 0;
#ifdef CONFIG_SYSCTL
- if (l4proto->ctl_table != NULL) {
- err = nf_ct_register_sysctl(l4proto->ctl_table_header,
+ if (pn->ctl_table != NULL) {
+ err = nf_ct_register_sysctl(net,
+ &pn->ctl_table_header,
"net/netfilter",
- l4proto->ctl_table,
- l4proto->ctl_table_users);
- if (err < 0)
- goto out;
+ pn->ctl_table);
+ if (err < 0) {
+ if (!pn->users) {
+ kfree(pn->ctl_table);
+ pn->ctl_table = NULL;
+ }
+ }
}
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->ctl_compat_table != NULL) {
- err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
+ if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_table != NULL) {
+ if (err < 0) {
+ nf_ct_kfree_compat_sysctl_table(pn);
+ goto out;
+ }
+ err = nf_ct_register_sysctl(net,
+ &pn->ctl_compat_header,
"net/ipv4/netfilter",
- l4proto->ctl_compat_table, NULL);
+ pn->ctl_compat_table);
if (err == 0)
goto out;
- nf_ct_unregister_sysctl(l4proto->ctl_table_header,
- l4proto->ctl_table,
- l4proto->ctl_table_users);
+
+ nf_ct_kfree_compat_sysctl_table(pn);
+ nf_ct_unregister_sysctl(&pn->ctl_table_header,
+ &pn->ctl_table,
+ pn->users);
}
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
out:
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
return err;
}
-static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto)
+static
+void nf_ct_l4proto_unregister_sysctl(struct net *net,
+ struct nf_proto_net *pn,
+ struct nf_conntrack_l4proto *l4proto)
{
#ifdef CONFIG_SYSCTL
- if (l4proto->ctl_table_header != NULL &&
- *l4proto->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(l4proto->ctl_table_header,
- l4proto->ctl_table,
- l4proto->ctl_table_users);
+ if (pn->ctl_table_header != NULL)
+ nf_ct_unregister_sysctl(&pn->ctl_table_header,
+ &pn->ctl_table,
+ pn->users);
+
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->ctl_compat_table_header != NULL)
- nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header,
- l4proto->ctl_compat_table, NULL);
+ if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_header != NULL)
+ nf_ct_unregister_sysctl(&pn->ctl_compat_header,
+ &pn->ctl_compat_table,
+ 0);
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
}
/* FIXME: Allow NULL functions and sub in pointers to generic for
them. --RR */
-int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
+static int
+nf_conntrack_l4proto_register_net(struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
@@ -333,10 +422,6 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
goto out_unlock;
}
- ret = nf_ct_l4proto_register_sysctl(l4proto);
- if (ret < 0)
- goto out_unlock;
-
l4proto->nla_size = 0;
if (l4proto->nlattr_size)
l4proto->nla_size += l4proto->nlattr_size();
@@ -345,17 +430,48 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
-
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
-void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
+int nf_conntrack_l4proto_register(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
{
- struct net *net;
+ int ret = 0;
+ struct nf_proto_net *pn = NULL;
+ if (l4proto->init_net) {
+ ret = l4proto->init_net(net, l4proto->l3proto);
+ if (ret < 0)
+ goto out;
+ }
+
+ pn = nf_ct_l4proto_net(net, l4proto);
+ if (pn == NULL)
+ goto out;
+
+ ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
+ if (ret < 0)
+ goto out;
+
+ if (net == &init_net) {
+ ret = nf_conntrack_l4proto_register_net(l4proto);
+ if (ret < 0) {
+ nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
+ goto out;
+ }
+ }
+
+ pn->users++;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
+
+static void
+nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
+{
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
@@ -365,41 +481,73 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
- nf_ct_l4proto_unregister_sysctl(l4proto);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
+}
+
+void nf_conntrack_l4proto_unregister(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
+{
+ struct nf_proto_net *pn = NULL;
+
+ if (net == &init_net)
+ nf_conntrack_l4proto_unregister_net(l4proto);
+
+ pn = nf_ct_l4proto_net(net, l4proto);
+ if (pn == NULL)
+ return;
+
+ pn->users--;
+ nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all contrack entries for this protocol */
rtnl_lock();
- for_each_net(net)
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
-int nf_conntrack_proto_init(void)
+int nf_conntrack_proto_init(struct net *net)
{
unsigned int i;
int err;
+ struct nf_proto_net *pn = nf_ct_l4proto_net(net,
+ &nf_conntrack_l4proto_generic);
- err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic);
+ err = nf_conntrack_l4proto_generic.init_net(net,
+ nf_conntrack_l4proto_generic.l3proto);
+ if (err < 0)
+ return err;
+ err = nf_ct_l4proto_register_sysctl(net,
+ pn,
+ &nf_conntrack_l4proto_generic);
if (err < 0)
return err;
- for (i = 0; i < AF_MAX; i++)
- rcu_assign_pointer(nf_ct_l3protos[i],
- &nf_conntrack_l3proto_generic);
+ if (net == &init_net) {
+ for (i = 0; i < AF_MAX; i++)
+ rcu_assign_pointer(nf_ct_l3protos[i],
+ &nf_conntrack_l3proto_generic);
+ }
+
+ pn->users++;
return 0;
}
-void nf_conntrack_proto_fini(void)
+void nf_conntrack_proto_fini(struct net *net)
{
unsigned int i;
-
- nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic);
-
- /* free l3proto protocol tables */
- for (i = 0; i < PF_MAX; i++)
- kfree(nf_ct_protos[i]);
+ struct nf_proto_net *pn = nf_ct_l4proto_net(net,
+ &nf_conntrack_l4proto_generic);
+
+ pn->users--;
+ nf_ct_l4proto_unregister_sysctl(net,
+ pn,
+ &nf_conntrack_l4proto_generic);
+ if (net == &init_net) {
+ /* free l3proto protocol tables */
+ for (i = 0; i < PF_MAX; i++)
+ kfree(nf_ct_protos[i]);
+ }
}
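
nf_conntrack_proto.c splits proto registration into two levels: the global dispatch tables (nf_ct_l3protos[]/nf_ct_protos[]) are touched only for init_net by the *_register_net() halves, while sysctl tables and timeout state are instantiated per namespace through the new init_net()/get_net_proto()/net_id hooks on the proto structs, with pn->users counting how many l4protos share one nf_proto_net. Simplified shape of the l4 path (the sysctl unwind on late failure, shown in the hunk, is dropped here; example_l4_register is illustrative):

    /* Simplified sketch of the two-level registration done above. */
    static int example_l4_register(struct net *net,
                                   struct nf_conntrack_l4proto *p)
    {
            struct nf_proto_net *pn;
            int ret = p->init_net ? p->init_net(net, p->l3proto) : 0;

            if (ret < 0)
                    return ret;
            pn = nf_ct_l4proto_net(net, p); /* static or net_generic() area */
            if (!pn)
                    return 0;
            ret = nf_ct_l4proto_register_sysctl(net, pn, p);
            if (ret == 0 && net == &init_net)
                    ret = nf_conntrack_l4proto_register_net(p);
            if (ret == 0)
                    pn->users++;            /* shared sysctl refcount */
            return ret;
    }
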
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index ef706a485be..6535326cf07 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -387,12 +387,9 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
/* this module per-net specifics */
static int dccp_net_id __read_mostly;
struct dccp_net {
+ struct nf_proto_net pn;
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
- struct ctl_table *sysctl_table;
-#endif
};
static inline struct dccp_net *dccp_pernet(struct net *net)
@@ -715,9 +712,10 @@ static int dccp_nlattr_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
- struct dccp_net *dn = dccp_pernet(&init_net);
+ struct dccp_net *dn = dccp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -817,6 +815,51 @@ static struct ctl_table dccp_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct dccp_net *dn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(dccp_sysctl_table,
+ sizeof(dccp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
+ pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
+ pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
+ pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
+ pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
+ pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
+ pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
+ pn->ctl_table[7].data = &dn->dccp_loose;
+#endif
+ return 0;
+}
+
+static int dccp_init_net(struct net *net, u_int16_t proto)
+{
+ struct dccp_net *dn = dccp_pernet(net);
+ struct nf_proto_net *pn = &dn->pn;
+
+ if (!pn->users) {
+ /* default values */
+ dn->dccp_loose = 1;
+ dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+ }
+
+ return dccp_kmemdup_sysctl_table(pn, dn);
+}
+
static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
@@ -847,6 +890,8 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &dccp_net_id,
+ .init_net = dccp_init_net,
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -879,55 +924,39 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &dccp_net_id,
+ .init_net = dccp_init_net,
};
static __net_init int dccp_net_init(struct net *net)
{
- struct dccp_net *dn = dccp_pernet(net);
-
- /* default values */
- dn->dccp_loose = 1;
- dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
-
-#ifdef CONFIG_SYSCTL
- dn->sysctl_table = kmemdup(dccp_sysctl_table,
- sizeof(dccp_sysctl_table), GFP_KERNEL);
- if (!dn->sysctl_table)
- return -ENOMEM;
-
- dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
- dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
- dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
- dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
- dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
- dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
- dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
- dn->sysctl_table[7].data = &dn->dccp_loose;
-
- dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
- dn->sysctl_table);
- if (!dn->sysctl_header) {
- kfree(dn->sysctl_table);
- return -ENOMEM;
+ int ret = 0;
+ ret = nf_conntrack_l4proto_register(net,
+ &dccp_proto4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_dccp4 :protocol register failed.\n");
+ goto out;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &dccp_proto6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_dccp6 :protocol register failed.\n");
+ goto cleanup_dccp4;
}
-#endif
-
return 0;
+cleanup_dccp4:
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto4);
+out:
+ return ret;
}
static __net_exit void dccp_net_exit(struct net *net)
{
- struct dccp_net *dn = dccp_pernet(net);
-#ifdef CONFIG_SYSCTL
- unregister_net_sysctl_table(dn->sysctl_header);
- kfree(dn->sysctl_table);
-#endif
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto6);
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto4);
}
static struct pernet_operations dccp_net_ops = {
@@ -939,34 +968,12 @@ static struct pernet_operations dccp_net_ops = {
static int __init nf_conntrack_proto_dccp_init(void)
{
- int err;
-
- err = register_pernet_subsys(&dccp_net_ops);
- if (err < 0)
- goto err1;
-
- err = nf_conntrack_l4proto_register(&dccp_proto4);
- if (err < 0)
- goto err2;
-
- err = nf_conntrack_l4proto_register(&dccp_proto6);
- if (err < 0)
- goto err3;
- return 0;
-
-err3:
- nf_conntrack_l4proto_unregister(&dccp_proto4);
-err2:
- unregister_pernet_subsys(&dccp_net_ops);
-err1:
- return err;
+ return register_pernet_subsys(&dccp_net_ops);
}
static void __exit nf_conntrack_proto_dccp_fini(void)
{
unregister_pernet_subsys(&dccp_net_ops);
- nf_conntrack_l4proto_unregister(&dccp_proto6);
- nf_conntrack_l4proto_unregister(&dccp_proto4);
}
module_init(nf_conntrack_proto_dccp_init);
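
DCCP is the first l4proto converted to the new scheme: module init shrinks to register_pernet_subsys(), dccp_net_init() registers both address families for that namespace (unwinding dccp4 if dccp6 fails), dccp_init_net() kmemdup()s the sysctl template with each .data pointer rewired to the per-net timeouts, and defaults are seeded only when pn->users == 0 because proto4 and proto6 share one struct dccp_net. The module skeleton this converges on (the .id/.size members already existed, per the context above; example_* names are illustrative):

    /* Sketch of the resulting module skeleton. */
    static struct pernet_operations example_net_ops = {
            .init = dccp_net_init,  /* registers dccp4 + dccp6 for this net */
            .exit = dccp_net_exit,  /* unregisters them again */
            .id   = &dccp_net_id,
            .size = sizeof(struct dccp_net),
    };

    static int __init example_module_init(void)
    {
            return register_pernet_subsys(&example_net_ops);
    }
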
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d8923d54b35..d25f2937764 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,11 @@
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static inline struct nf_generic_net *generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -42,7 +47,7 @@ static int generic_print_tuple(struct seq_file *s,
static unsigned int *generic_get_timeouts(struct net *net)
{
- return &nf_ct_generic_timeout;
+ return &(generic_pernet(net)->timeout);
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -70,16 +75,18 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_generic_net *gn = generic_pernet(net);
if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
else {
/* Set default generic timeout. */
- *timeout = nf_ct_generic_timeout;
+ *timeout = gn->timeout;
}
return 0;
@@ -106,11 +113,9 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *generic_sysctl_header;
static struct ctl_table generic_sysctl_table[] = {
{
.procname = "nf_conntrack_generic_timeout",
- .data = &nf_ct_generic_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -121,7 +126,6 @@ static struct ctl_table generic_sysctl_table[] = {
static struct ctl_table generic_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_generic_timeout",
- .data = &nf_ct_generic_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -131,6 +135,62 @@ static struct ctl_table generic_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_generic_net *gn)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(generic_sysctl_table,
+ sizeof(generic_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &gn->timeout;
+#endif
+ return 0;
+}
+
+static int generic_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_generic_net *gn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table,
+ sizeof(generic_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &gn->timeout;
+#endif
+#endif
+ return 0;
+}
+
+static int generic_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_proto_net *pn = &gn->pn;
+
+ gn->timeout = nf_ct_generic_timeout;
+
+ ret = generic_kmemdup_compat_sysctl_table(pn, gn);
+ if (ret < 0)
+ return ret;
+
+ ret = generic_kmemdup_sysctl_table(pn, gn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+
+ return ret;
+}
+
+static struct nf_proto_net *generic_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
{
.l3proto = PF_UNSPEC,
@@ -151,11 +211,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &generic_sysctl_header,
- .ctl_table = generic_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = generic_compat_sysctl_table,
-#endif
-#endif
+ .init_net = generic_init_net,
+ .get_net_proto = generic_get_net_proto,
};
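
nf_conntrack_proto_generic.c applies the same conversion to the generic protocol: the global nf_ct_generic_timeout becomes per-net (nf_generic_net.timeout, reached via get_net_proto()), the static ctl_table_header/ctl_table members are dropped from the l4proto, and both the normal and the PROC_COMPAT ("ip_conntrack_*") sysctl tables are kmemdup()ed with .data pointed at the per-net field. The kmemdup-and-repoint idiom in sketch form (example_kmemdup_sysctl is illustrative; timeout is this namespace's field):

    static int example_kmemdup_sysctl(struct nf_proto_net *pn,
                                      unsigned int *timeout)
    {
    #ifdef CONFIG_SYSCTL
            pn->ctl_table = kmemdup(generic_sysctl_table,
                                    sizeof(generic_sysctl_table),
                                    GFP_KERNEL);
            if (!pn->ctl_table)
                    return -ENOMEM;
            pn->ctl_table[0].data = timeout; /* per-net, not the global */
    #endif
            return 0;
    }
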
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 4bf6b4e4b77..b09b7af7f6f 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -54,13 +54,20 @@ static unsigned int gre_timeouts[GRE_CT_MAX] = {
static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
+ struct nf_proto_net nf;
rwlock_t keymap_lock;
struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
};
+static inline struct netns_proto_gre *gre_pernet(struct net *net)
+{
+ return net_generic(net, proto_gre_net_id);
+}
+
void nf_ct_gre_keymap_flush(struct net *net)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km, *tmp;
write_lock_bh(&net_gre->keymap_lock);
@@ -85,7 +92,7 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km;
__be16 key = 0;
@@ -109,11 +116,11 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
- struct nf_conn_help *help = nfct_help(ct);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_ct_gre_keymap **kmp, *km;
- kmp = &help->help.ct_pptp_info.keymap[dir];
+ kmp = &ct_pptp_info->keymap[dir];
if (*kmp) {
/* check whether it's a retransmission */
read_lock_bh(&net_gre->keymap_lock);
@@ -150,20 +157,20 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
- struct nf_conn_help *help = nfct_help(ct);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
enum ip_conntrack_dir dir;
pr_debug("entering for ct %p\n", ct);
write_lock_bh(&net_gre->keymap_lock);
for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
- if (help->help.ct_pptp_info.keymap[dir]) {
+ if (ct_pptp_info->keymap[dir]) {
pr_debug("removing %p from list\n",
- help->help.ct_pptp_info.keymap[dir]);
- list_del(&help->help.ct_pptp_info.keymap[dir]->list);
- kfree(help->help.ct_pptp_info.keymap[dir]);
- help->help.ct_pptp_info.keymap[dir] = NULL;
+ ct_pptp_info->keymap[dir]);
+ list_del(&ct_pptp_info->keymap[dir]->list);
+ kfree(ct_pptp_info->keymap[dir]);
+ ct_pptp_info->keymap[dir] = NULL;
}
}
write_unlock_bh(&net_gre->keymap_lock);
@@ -237,7 +244,7 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
static unsigned int *gre_get_timeouts(struct net *net)
{
- return gre_timeouts;
+ return gre_pernet(net)->gre_timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
@@ -297,13 +304,15 @@ static void gre_destroy(struct nf_conn *ct)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct netns_proto_gre *net_gre = gre_pernet(net);
/* set default timeouts for GRE. */
- timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
- timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
+ timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
@@ -339,6 +348,19 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+static int gre_init_net(struct net *net, u_int16_t proto)
+{
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ int i;
+
+ rwlock_init(&net_gre->keymap_lock);
+ INIT_LIST_HEAD(&net_gre->keymap_list);
+ for (i = 0; i < GRE_CT_MAX; i++)
+ net_gre->gre_timeouts[i] = gre_timeouts[i];
+
+ return 0;
+}
+
/* protocol helper struct */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.l3proto = AF_INET,
@@ -368,20 +390,22 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &proto_gre_net_id,
+ .init_net = gre_init_net,
};
static int proto_gre_net_init(struct net *net)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
-
- rwlock_init(&net_gre->keymap_lock);
- INIT_LIST_HEAD(&net_gre->keymap_list);
-
- return 0;
+ int ret = 0;
+ ret = nf_conntrack_l4proto_register(net, &nf_conntrack_l4proto_gre4);
+ if (ret < 0)
+ pr_err("nf_conntrack_l4proto_gre4 :protocol register failed.\n");
+ return ret;
}
static void proto_gre_net_exit(struct net *net)
{
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_gre4);
nf_ct_gre_keymap_flush(net);
}
@@ -394,20 +418,11 @@ static struct pernet_operations proto_gre_net_ops = {
static int __init nf_ct_proto_gre_init(void)
{
- int rv;
-
- rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
- if (rv < 0)
- return rv;
- rv = register_pernet_subsys(&proto_gre_net_ops);
- if (rv < 0)
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
- return rv;
+ return register_pernet_subsys(&proto_gre_net_ops);
}
static void __exit nf_ct_proto_gre_fini(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
unregister_pernet_subsys(&proto_gre_net_ops);
}
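GRE is the first modular tracker converted here: its global timeouts and keymap move into struct netns_proto_gre, the l4proto registration moves into the pernet init/exit callbacks, and module init collapses to a single register_pernet_subsys(). A sketch of that skeleton, with illustrative demo_* names (the real .init above also registers the l4proto):

	/* Sketch only: per-netns state declared via .id/.size is allocated
	 * by the core before .init runs and found with net_generic(). */
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	static int demo_net_id __read_mostly;

	struct demo_net {
		unsigned int timeouts[2];
	};

	static int demo_net_init(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		/* seed this namespace with the module-wide defaults */
		dn->timeouts[0] = 30 * HZ;
		dn->timeouts[1] = 180 * HZ;
		return 0;
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_net_init,
		.id   = &demo_net_id,
		.size = sizeof(struct demo_net),
	};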
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 996db2fa21f..c746d61f83e 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -127,6 +127,17 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
}
};
+static int sctp_net_id __read_mostly;
+struct sctp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[SCTP_CONNTRACK_MAX];
+};
+
+static inline struct sctp_net *sctp_pernet(struct net *net)
+{
+ return net_generic(net, sctp_net_id);
+}
+
static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -281,7 +292,7 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
static unsigned int *sctp_get_timeouts(struct net *net)
{
- return sctp_timeouts;
+ return sctp_pernet(net)->timeouts;
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
@@ -551,14 +562,16 @@ static int sctp_nlattr_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct sctp_net *sn = sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
- timeouts[i] = sctp_timeouts[i];
+ timeouts[i] = sn->timeouts[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
@@ -599,54 +612,45 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
#ifdef CONFIG_SYSCTL
-static unsigned int sctp_sysctl_table_users;
-static struct ctl_table_header *sctp_sysctl_header;
static struct ctl_table sctp_sysctl_table[] = {
{
.procname = "nf_conntrack_sctp_timeout_closed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_cookie_wait",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_cookie_echoed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_established",
- .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_recd",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -658,49 +662,42 @@ static struct ctl_table sctp_sysctl_table[] = {
static struct ctl_table sctp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_sctp_timeout_closed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_cookie_wait",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_cookie_echoed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_established",
- .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_recd",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -710,6 +707,80 @@ static struct ctl_table sctp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif
+static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct sctp_net *sn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(sctp_sysctl_table,
+ sizeof(sctp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
+ pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
+ pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
+ pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
+ pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
+ pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
+ pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+#endif
+ return 0;
+}
+
+static int sctp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct sctp_net *sn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(sctp_compat_sysctl_table,
+ sizeof(sctp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
+ pn->ctl_compat_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
+ pn->ctl_compat_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
+ pn->ctl_compat_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
+ pn->ctl_compat_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
+ pn->ctl_compat_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
+ pn->ctl_compat_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+#endif
+#endif
+ return 0;
+}
+
+static int sctp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct sctp_net *sn = sctp_pernet(net);
+ struct nf_proto_net *pn = &sn->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
+ sn->timeouts[i] = sctp_timeouts[i];
+ }
+
+ if (proto == AF_INET) {
+ ret = sctp_kmemdup_compat_sysctl_table(pn, sn);
+ if (ret < 0)
+ return ret;
+
+ ret = sctp_kmemdup_sysctl_table(pn, sn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = sctp_kmemdup_sysctl_table(pn, sn);
+
+ return ret;
+}
+
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.l3proto = PF_INET,
.l4proto = IPPROTO_SCTP,
@@ -740,14 +811,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &sctp_sysctl_table_users,
- .ctl_table_header = &sctp_sysctl_header,
- .ctl_table = sctp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = sctp_compat_sysctl_table,
-#endif
-#endif
+ .net_id = &sctp_net_id,
+ .init_net = sctp_init_net,
};
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
@@ -780,40 +845,58 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &sctp_sysctl_table_users,
- .ctl_table_header = &sctp_sysctl_header,
- .ctl_table = sctp_sysctl_table,
-#endif
+ .net_id = &sctp_net_id,
+ .init_net = sctp_init_net,
};
-static int __init nf_conntrack_proto_sctp_init(void)
+static int sctp_net_init(struct net *net)
{
- int ret;
+ int ret = 0;
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4);
- if (ret) {
- pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n");
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_sctp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_sctp4 :protocol register failed.\n");
goto out;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6);
- if (ret) {
- pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n");
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_sctp6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_sctp6 :protocol register failed.\n");
goto cleanup_sctp4;
}
+ return 0;
+cleanup_sctp4:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp4);
+out:
return ret;
+}
- cleanup_sctp4:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
- out:
- return ret;
+static void sctp_net_exit(struct net *net)
+{
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp4);
+}
+
+static struct pernet_operations sctp_net_ops = {
+ .init = sctp_net_init,
+ .exit = sctp_net_exit,
+ .id = &sctp_net_id,
+ .size = sizeof(struct sctp_net),
+};
+
+static int __init nf_conntrack_proto_sctp_init(void)
+{
+ return register_pernet_subsys(&sctp_net_ops);
}
static void __exit nf_conntrack_proto_sctp_fini(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
+ unregister_pernet_subsys(&sctp_net_ops);
}
module_init(nf_conntrack_proto_sctp_init);
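SCTP shares one struct sctp_net between its IPv4 and IPv6 trackers, so sctp_init_net() seeds the default timeouts only while pn->users is still zero (i.e. on the first registration in a namespace) and duplicates the /proc compat table only for AF_INET. A sketch of that guard, with demo_* placeholders standing in for the per-protocol helpers shown in the hunks:

	/* Sketch only: defaults are seeded once per namespace; the compat
	 * table exists only for the AF_INET flavour of the tracker. */
	static int demo_init_net(struct net *net, u_int16_t proto)
	{
		struct demo_net *dn = demo_pernet(net);
		struct nf_proto_net *pn = &dn->pn;
		int ret;

		if (!pn->users) {	/* first of the v4/v6 pair here */
			int i;

			for (i = 0; i < DEMO_CT_MAX; i++)
				dn->timeouts[i] = demo_default_timeouts[i];
		}

		if (proto == AF_INET) {
			ret = demo_kmemdup_compat_sysctl_table(pn, dn);
			if (ret < 0)
				return ret;

			ret = demo_kmemdup_sysctl_table(pn, dn);
			if (ret < 0)	/* undo the compat dup on failure */
				nf_ct_kfree_compat_sysctl_table(pn);
		} else {
			ret = demo_kmemdup_sysctl_table(pn, dn);
		}

		return ret;
	}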
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 21ff1a99f53..a5ac11ebef3 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -270,6 +270,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}
};
+static inline struct nf_tcp_net *tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -516,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
u_int8_t pf)
{
struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -720,7 +726,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
} else {
res = false;
if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
- nf_ct_tcp_be_liberal)
+ tn->tcp_be_liberal)
res = true;
if (!res && LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -815,7 +821,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
static unsigned int *tcp_get_timeouts(struct net *net)
{
- return tcp_timeouts;
+ return tcp_pernet(net)->timeouts;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -828,6 +834,7 @@ static int tcp_packet(struct nf_conn *ct,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
enum ip_conntrack_dir dir;
@@ -1020,7 +1027,7 @@ static int tcp_packet(struct nf_conn *ct,
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
- if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
+ if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
@@ -1065,6 +1072,8 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
enum tcp_conntrack new_state;
const struct tcphdr *th;
struct tcphdr _tcph;
+ struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
@@ -1093,7 +1102,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- } else if (nf_ct_tcp_loose == 0) {
+ } else if (tn->tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
@@ -1251,14 +1260,16 @@ static int tcp_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct nf_tcp_net *tn = tcp_pernet(net);
int i;
/* set default TCP timeouts. */
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
- timeouts[i] = tcp_timeouts[i];
+ timeouts[i] = tn->timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
@@ -1355,96 +1366,81 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int tcp_sysctl_table_users;
-static struct ctl_table_header *tcp_sysctl_header;
static struct ctl_table tcp_sysctl_table[] = {
{
.procname = "nf_conntrack_tcp_timeout_syn_sent",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_syn_recv",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_established",
- .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_fin_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_close_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_last_ack",
- .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_time_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_close",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_max_retrans",
- .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_loose",
- .data = &nf_ct_tcp_loose,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_tcp_be_liberal",
- .data = &nf_ct_tcp_be_liberal,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_tcp_max_retrans",
- .data = &nf_ct_tcp_max_retrans,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
@@ -1456,91 +1452,78 @@ static struct ctl_table tcp_sysctl_table[] = {
static struct ctl_table tcp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_tcp_timeout_syn_sent",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_syn_sent2",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_syn_recv",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_established",
- .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_fin_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_close_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_last_ack",
- .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_time_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_close",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_max_retrans",
- .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_loose",
- .data = &nf_ct_tcp_loose,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_tcp_be_liberal",
- .data = &nf_ct_tcp_be_liberal,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_tcp_max_retrans",
- .data = &nf_ct_tcp_max_retrans,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
@@ -1550,6 +1533,101 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(tcp_sysctl_table,
+ sizeof(tcp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+ pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+ pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+ pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+ pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+ pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+ pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+ pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+ pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
+ pn->ctl_table[10].data = &tn->tcp_loose;
+ pn->ctl_table[11].data = &tn->tcp_be_liberal;
+ pn->ctl_table[12].data = &tn->tcp_max_retrans;
+#endif
+ return 0;
+}
+
+static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
+ sizeof(tcp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+ pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
+ pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+ pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+ pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+ pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+ pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+ pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+ pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+ pn->ctl_compat_table[10].data = &tn->tcp_loose;
+ pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
+ pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
+#endif
+#endif
+ return 0;
+}
+
+static int tcp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_proto_net *pn = &tn->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ tn->timeouts[i] = tcp_timeouts[i];
+
+ tn->tcp_loose = nf_ct_tcp_loose;
+ tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
+ tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
+ }
+
+ if (proto == AF_INET) {
+ ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
+ if (ret < 0)
+ return ret;
+
+ ret = tcp_kmemdup_sysctl_table(pn, tn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = tcp_kmemdup_sysctl_table(pn, tn);
+
+ return ret;
+}
+
+static struct nf_proto_net *tcp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
{
.l3proto = PF_INET,
@@ -1582,14 +1660,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &tcp_sysctl_table_users,
- .ctl_table_header = &tcp_sysctl_header,
- .ctl_table = tcp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = tcp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = tcp_init_net,
+ .get_net_proto = tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
@@ -1625,10 +1697,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &tcp_sysctl_table_users,
- .ctl_table_header = &tcp_sysctl_header,
- .ctl_table = tcp_sysctl_table,
-#endif
+ .init_net = tcp_init_net,
+ .get_net_proto = tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
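For TCP the interesting part is the read side: the former globals nf_ct_tcp_loose, nf_ct_tcp_be_liberal and nf_ct_tcp_max_retrans become fields of struct nf_tcp_net, and every consumer first resolves the namespace of the conntrack entry in hand. A sketch of that lookup, modeled on the tcp_in_window() hunk (demo_is_liberal is an illustrative name):

	/* Sketch only: per-flow code derives the netns from the conntrack
	 * entry, so two namespaces can run different TCP tunables. */
	static bool demo_is_liberal(const struct nf_conn *ct)
	{
		struct net *net = nf_ct_net(ct);	 /* netns owning this flow */
		struct nf_tcp_net *tn = tcp_pernet(net); /* its TCP settings */

		return tn->tcp_be_liberal != 0;
	}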
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7259a6bdeb4..59623cc56e8 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,17 +25,16 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-enum udp_conntrack {
- UDP_CT_UNREPLIED,
- UDP_CT_REPLIED,
- UDP_CT_MAX
-};
-
static unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
[UDP_CT_REPLIED] = 180*HZ,
};
+static inline struct nf_udp_net *udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -73,7 +72,7 @@ static int udp_print_tuple(struct seq_file *s,
static unsigned int *udp_get_timeouts(struct net *net)
{
- return udp_timeouts;
+ return udp_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntrack type */
@@ -157,13 +156,15 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct nf_udp_net *un = udp_pernet(net);
/* set default timeouts for UDP. */
- timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
- timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
+ timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
+ timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
timeouts[UDP_CT_UNREPLIED] =
@@ -200,19 +201,15 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int udp_sysctl_table_users;
-static struct ctl_table_header *udp_sysctl_header;
static struct ctl_table udp_sysctl_table[] = {
{
.procname = "nf_conntrack_udp_timeout",
- .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udp_timeout_stream",
- .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -223,14 +220,12 @@ static struct ctl_table udp_sysctl_table[] = {
static struct ctl_table udp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_udp_timeout",
- .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_udp_timeout_stream",
- .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -240,6 +235,73 @@ static struct ctl_table udp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+ pn->ctl_table = kmemdup(udp_sysctl_table,
+ sizeof(udp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+ pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+ pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+ return 0;
+}
+
+static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table,
+ sizeof(udp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+ pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+#endif
+ return 0;
+}
+
+static int udp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_udp_net *un = udp_pernet(net);
+ struct nf_proto_net *pn = &un->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < UDP_CT_MAX; i++)
+ un->timeouts[i] = udp_timeouts[i];
+ }
+
+ if (proto == AF_INET) {
+ ret = udp_kmemdup_compat_sysctl_table(pn, un);
+ if (ret < 0)
+ return ret;
+
+ ret = udp_kmemdup_sysctl_table(pn, un);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = udp_kmemdup_sysctl_table(pn, un);
+
+ return ret;
+}
+
+static struct nf_proto_net *udp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
{
.l3proto = PF_INET,
@@ -267,14 +329,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udp_sysctl_table_users,
- .ctl_table_header = &udp_sysctl_header,
- .ctl_table = udp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = udp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = udp_init_net,
+ .get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
@@ -305,10 +361,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udp_sysctl_table_users,
- .ctl_table_header = &udp_sysctl_header,
- .ctl_table = udp_sysctl_table,
-#endif
+ .init_net = udp_init_net,
+ .get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
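Note the two storage strategies in this series: the built-in trackers (generic, TCP, UDP) keep their nf_proto_net embedded at a fixed slot in net->ct.nf_ct_proto and expose it via get_net_proto, while the modular ones (GRE, SCTP, UDPLITE) allocate a dynamic net_generic() slot through .net_id/.size. Side by side, as a sketch (demo_* names are illustrative):

	/* Sketch only: fixed-slot lookup for built-ins vs. dynamic-id
	 * lookup for modules; udplite_net_id stands in for any .net_id a
	 * module hands to the conntrack core. */
	static inline struct nf_udp_net *demo_embedded(struct net *net)
	{
		return &net->ct.nf_ct_proto.udp;	  /* compiled-in slot */
	}

	static inline struct udplite_net *demo_dynamic(struct net *net)
	{
		return net_generic(net, udplite_net_id);  /* id assigned at runtime */
	}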
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4d60a5376aa..4b66df20928 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -35,6 +35,17 @@ static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
[UDPLITE_CT_REPLIED] = 180*HZ,
};
+static int udplite_net_id __read_mostly;
+struct udplite_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[UDPLITE_CT_MAX];
+};
+
+static inline struct udplite_net *udplite_pernet(struct net *net)
+{
+ return net_generic(net, udplite_net_id);
+}
+
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -70,7 +81,7 @@ static int udplite_print_tuple(struct seq_file *s,
static unsigned int *udplite_get_timeouts(struct net *net)
{
- return udplite_timeouts;
+ return udplite_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntrack type */
@@ -161,13 +172,15 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct udplite_net *un = udplite_pernet(net);
/* set default timeouts for UDPlite. */
- timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
- timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
+ timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
+ timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
timeouts[UDPLITE_CT_UNREPLIED] =
@@ -204,19 +217,15 @@ udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int udplite_sysctl_table_users;
-static struct ctl_table_header *udplite_sysctl_header;
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
- .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
- .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -225,6 +234,40 @@ static struct ctl_table udplite_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct udplite_net *un)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(udplite_sysctl_table,
+ sizeof(udplite_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
+ pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
+#endif
+ return 0;
+}
+
+static int udplite_init_net(struct net *net, u_int16_t proto)
+{
+ struct udplite_net *un = udplite_pernet(net);
+ struct nf_proto_net *pn = &un->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0 ; i < UDPLITE_CT_MAX; i++)
+ un->timeouts[i] = udplite_timeouts[i];
+ }
+
+ return udplite_kmemdup_sysctl_table(pn, un);
+}
+
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
{
.l3proto = PF_INET,
@@ -253,11 +296,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udplite_sysctl_table_users,
- .ctl_table_header = &udplite_sysctl_header,
- .ctl_table = udplite_sysctl_table,
-#endif
+ .net_id = &udplite_net_id,
+ .init_net = udplite_init_net,
};
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
@@ -288,34 +328,55 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udplite_sysctl_table_users,
- .ctl_table_header = &udplite_sysctl_header,
- .ctl_table = udplite_sysctl_table,
-#endif
+ .net_id = &udplite_net_id,
+ .init_net = udplite_init_net,
};
-static int __init nf_conntrack_proto_udplite_init(void)
+static int udplite_net_init(struct net *net)
{
- int err;
-
- err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite4);
- if (err < 0)
- goto err1;
- err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite6);
- if (err < 0)
- goto err2;
+ int ret = 0;
+
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udplite4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+ goto out;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udplite6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+ goto cleanup_udplite4;
+ }
return 0;
-err2:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
-err1:
- return err;
+
+cleanup_udplite4:
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+out:
+ return ret;
+}
+
+static void udplite_net_exit(struct net *net)
+{
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite6);
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+}
+
+static struct pernet_operations udplite_net_ops = {
+ .init = udplite_net_init,
+ .exit = udplite_net_exit,
+ .id = &udplite_net_id,
+ .size = sizeof(struct udplite_net),
+};
+
+static int __init nf_conntrack_proto_udplite_init(void)
+{
+ return register_pernet_subsys(&udplite_net_ops);
}
static void __exit nf_conntrack_proto_udplite_exit(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
+ unregister_pernet_subsys(&udplite_net_ops);
}
module_init(nf_conntrack_proto_udplite_init);
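With registration delegated to the pernet callbacks, every converted module's init/exit pair collapses to the same two lines. A sketch of the resulting boilerplate, with demo_* placeholders:

	/* Sketch only: module lifetime now just toggles the pernet
	 * subsystem; per-namespace setup and teardown live in
	 * demo_net_ops. */
	static int __init demo_module_init(void)
	{
		return register_pernet_subsys(&demo_net_ops);
	}

	static void __exit demo_module_exit(void)
	{
		unregister_pernet_subsys(&demo_net_ops);
	}

	module_init(demo_module_init);
	module_exit(demo_module_exit);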
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 8501823b3f9..295429f3908 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -69,13 +69,12 @@ static int help(struct sk_buff *skb,
void *sb_ptr;
int ret = NF_ACCEPT;
int dir = CTINFO2DIR(ctinfo);
- struct nf_ct_sane_master *ct_sane_info;
+ struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
struct sane_request *req;
struct sane_reply_net_start *reply;
- ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -163,7 +162,6 @@ out:
}
static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
-static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy sane_exp_policy = {
.max_expected = 1,
@@ -190,7 +188,6 @@ static void nf_conntrack_sane_fini(void)
static int __init nf_conntrack_sane_init(void)
{
int i, j = -1, ret = 0;
- char *tmpname;
sane_buffer = kmalloc(65536, GFP_KERNEL);
if (!sane_buffer)
@@ -205,17 +202,16 @@ static int __init nf_conntrack_sane_init(void)
sane[i][0].tuple.src.l3num = PF_INET;
sane[i][1].tuple.src.l3num = PF_INET6;
for (j = 0; j < 2; j++) {
+ sane[i][j].data_len = sizeof(struct nf_ct_sane_master);
sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
sane[i][j].expect_policy = &sane_exp_policy;
sane[i][j].me = THIS_MODULE;
sane[i][j].help = help;
- tmpname = &sane_names[i][j][0];
if (ports[i] == SANE_PORT)
- sprintf(tmpname, "sane");
+ sprintf(sane[i][j].name, "sane");
else
- sprintf(tmpname, "sane-%d", ports[i]);
- sane[i][j].name = tmpname;
+ sprintf(sane[i][j].name, "sane-%d", ports[i]);
pr_debug("nf_ct_sane: registering helper for pf: %d "
"port: %d\n",
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3a63..758a1bacc12 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1075,12 +1075,12 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1091,12 +1091,12 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1107,12 +1107,12 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1123,13 +1123,13 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
unsigned int ret;
flush_expectations(ct, true);
ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
if (ret == NF_ACCEPT)
- help->help.ct_sip_info.invite_cseq = cseq;
+ ct_sip_info->invite_cseq = cseq;
return ret;
}
@@ -1154,7 +1154,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned int matchoff, matchlen;
struct nf_conntrack_expect *exp;
@@ -1235,7 +1235,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
store_cseq:
if (ret == NF_ACCEPT)
- help->help.ct_sip_info.register_cseq = cseq;
+ ct_sip_info->register_cseq = cseq;
return ret;
}
@@ -1245,7 +1245,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
union nf_inet_addr addr;
__be16 port;
@@ -1262,7 +1262,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
* responses, so we store the sequence number of the last valid
* request and compare it here.
*/
- if (help->help.ct_sip_info.register_cseq != cseq)
+ if (ct_sip_info->register_cseq != cseq)
return NF_ACCEPT;
if (code >= 100 && code <= 199)
@@ -1556,7 +1556,6 @@ static void nf_conntrack_sip_fini(void)
static int __init nf_conntrack_sip_init(void)
{
int i, j, ret;
- char *tmpname;
if (ports_c == 0)
ports[ports_c++] = SIP_PORT;
@@ -1579,17 +1578,16 @@ static int __init nf_conntrack_sip_init(void)
sip[i][3].help = sip_help_tcp;
for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
+ sip[i][j].data_len = sizeof(struct nf_ct_sip_master);
sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
sip[i][j].expect_policy = sip_exp_policy;
sip[i][j].expect_class_max = SIP_EXPECT_MAX;
sip[i][j].me = THIS_MODULE;
- tmpname = &sip_names[i][j][0];
if (ports[i] == SIP_PORT)
- sprintf(tmpname, "sip");
+ sprintf(sip_names[i][j], "sip");
else
- sprintf(tmpname, "sip-%u", i);
- sip[i][j].name = tmpname;
+ sprintf(sip_names[i][j], "sip-%u", i);
pr_debug("port #%u: %u\n", i, ports[i]);
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 75466fd72f4..81fc61c0526 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -92,7 +92,6 @@ static int tftp_help(struct sk_buff *skb,
}
static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
-static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy tftp_exp_policy = {
.max_expected = 1,
@@ -112,7 +111,6 @@ static void nf_conntrack_tftp_fini(void)
static int __init nf_conntrack_tftp_init(void)
{
int i, j, ret;
- char *tmpname;
if (ports_c == 0)
ports[ports_c++] = TFTP_PORT;
@@ -129,12 +127,10 @@ static int __init nf_conntrack_tftp_init(void)
tftp[i][j].me = THIS_MODULE;
tftp[i][j].help = tftp_help;
- tmpname = &tftp_names[i][j][0];
if (ports[i] == TFTP_PORT)
- sprintf(tmpname, "tftp");
+ sprintf(tftp[i][j].name, "tftp");
else
- sprintf(tmpname, "tftp-%u", i);
- tftp[i][j].name = tmpname;
+ sprintf(tftp[i][j].name, "tftp-%u", i);
ret = nf_conntrack_helper_register(&tftp[i][j]);
if (ret) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 791d56bbd74..a26503342e7 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -39,6 +39,15 @@ static char __initdata nfversion[] = "0.30";
static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT];
static DEFINE_MUTEX(nfnl_mutex);
+static const int nfnl_group2type[NFNLGRP_MAX+1] = {
+ [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
+ [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
+ [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
+};
+
void nfnl_lock(void)
{
mutex_lock(&nfnl_mutex);
@@ -186,9 +195,11 @@ replay:
lockdep_is_held(&nfnl_mutex)) != ss ||
nfnetlink_find_client(type, ss) != nc)
err = -EAGAIN;
- else
+ else if (nc->call)
err = nc->call(net->nfnl, skb, nlh,
(const struct nlattr **)cda);
+ else
+ err = -EINVAL;
nfnl_unlock();
}
if (err == -EAGAIN)
@@ -202,12 +213,35 @@ static void nfnetlink_rcv(struct sk_buff *skb)
netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
+#ifdef CONFIG_MODULES
+static void nfnetlink_bind(int group)
+{
+ const struct nfnetlink_subsystem *ss;
+ int type = nfnl_group2type[group];
+
+ rcu_read_lock();
+ ss = nfnetlink_get_subsys(type);
+ if (!ss) {
+ rcu_read_unlock();
+ request_module("nfnetlink-subsys-%d", type);
+ return;
+ }
+ rcu_read_unlock();
+}
+#endif
+
static int __net_init nfnetlink_net_init(struct net *net)
{
struct sock *nfnl;
+ struct netlink_kernel_cfg cfg = {
+ .groups = NFNLGRP_MAX,
+ .input = nfnetlink_rcv,
+#ifdef CONFIG_MODULES
+ .bind = nfnetlink_bind,
+#endif
+ };
- nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
- nfnetlink_rcv, NULL, THIS_MODULE);
+ nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
new file mode 100644
index 00000000000..d6836193d47
--- /dev/null
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -0,0 +1,672 @@
+/*
+ * (C) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or, at your option, any later version).
+ *
+ * This software has been sponsored by Vyatta Inc. <http://www.vyatta.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/netfilter/nfnetlink_cthelper.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+
+static int
+nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ const struct nf_conn_help *help;
+ struct nf_conntrack_helper *helper;
+
+ help = nfct_help(ct);
+ if (help == NULL)
+ return NF_DROP;
+
+ /* rcu_read_lock()ed by nf_hook_slow */
+ helper = rcu_dereference(help->helper);
+ if (helper == NULL)
+ return NF_DROP;
+
+ /* This is a user-space helper not yet configured, skip. */
+ if ((helper->flags &
+ (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) ==
+ NF_CT_HELPER_F_USERSPACE)
+ return NF_ACCEPT;
+
+ /* If the user-space helper is not available, don't block traffic. */
+ return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS;
+}
+
+static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = {
+ [NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, },
+ [NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, },
+};
+
+static int
+nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_TUPLE_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol);
+
+ if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
+ return -EINVAL;
+
+ tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+ tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
+
+ return 0;
+}
+
+static int
+nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+{
+ const struct nf_conn_help *help = nfct_help(ct);
+
+ if (help->helper->data_len == 0)
+ return -EINVAL;
+
+ memcpy(&help->data, nla_data(attr), help->helper->data_len);
+ return 0;
+}
+
+static int
+nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
+{
+ const struct nf_conn_help *help = nfct_help(ct);
+
+ if (help->helper->data_len &&
+ nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = {
+ [NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN-1 },
+ [NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, },
+ [NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol);
+
+ if (!tb[NFCTH_POLICY_NAME] ||
+ !tb[NFCTH_POLICY_EXPECT_MAX] ||
+ !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+ return -EINVAL;
+
+ strncpy(expect_policy->name,
+ nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
+ expect_policy->max_expected =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+ expect_policy->timeout =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+ return 0;
+}
+
+static const struct nla_policy
+nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = {
+ [NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+ const struct nlattr *attr)
+{
+ int i, ret;
+ struct nf_conntrack_expect_policy *expect_policy;
+ struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+ nfnl_cthelper_expect_policy_set);
+
+ if (!tb[NFCTH_POLICY_SET_NUM])
+ return -EINVAL;
+
+ helper->expect_class_max =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+
+ if (helper->expect_class_max != 0 &&
+ helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ return -EOVERFLOW;
+
+ expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
+ helper->expect_class_max, GFP_KERNEL);
+ if (expect_policy == NULL)
+ return -ENOMEM;
+
+ for (i=0; i<helper->expect_class_max; i++) {
+ if (!tb[NFCTH_POLICY_SET+i])
+ goto err;
+
+ ret = nfnl_cthelper_expect_policy(&expect_policy[i],
+ tb[NFCTH_POLICY_SET+i]);
+ if (ret < 0)
+ goto err;
+ }
+ helper->expect_policy = expect_policy;
+ return 0;
+err:
+ kfree(expect_policy);
+ return -EINVAL;
+}
+
+static int
+nfnl_cthelper_create(const struct nlattr * const tb[],
+ struct nf_conntrack_tuple *tuple)
+{
+ struct nf_conntrack_helper *helper;
+ int ret;
+
+ if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
+ return -EINVAL;
+
+ helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
+ if (helper == NULL)
+ return -ENOMEM;
+
+ ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
+ if (ret < 0)
+ goto err;
+
+ strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+ helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+ helper->flags |= NF_CT_HELPER_F_USERSPACE;
+ memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
+
+ helper->me = THIS_MODULE;
+ helper->help = nfnl_userspace_cthelper;
+ helper->from_nlattr = nfnl_cthelper_from_nlattr;
+ helper->to_nlattr = nfnl_cthelper_to_nlattr;
+
+ /* Default to queue number zero, this can be updated at any time. */
+ if (tb[NFCTH_QUEUE_NUM])
+ helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+ if (tb[NFCTH_STATUS]) {
+ int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+ switch (status) {
+ case NFCT_HELPER_STATUS_ENABLED:
+ helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+ break;
+ case NFCT_HELPER_STATUS_DISABLED:
+ helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+ break;
+ }
+ }
+
+ ret = nf_conntrack_helper_register(helper);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(helper);
+ return ret;
+}
+
+static int
+nfnl_cthelper_update(const struct nlattr * const tb[],
+ struct nf_conntrack_helper *helper)
+{
+ int ret;
+
+ if (tb[NFCTH_PRIV_DATA_LEN])
+ return -EBUSY;
+
+ if (tb[NFCTH_POLICY]) {
+ ret = nfnl_cthelper_parse_expect_policy(helper,
+ tb[NFCTH_POLICY]);
+ if (ret < 0)
+ return ret;
+ }
+ if (tb[NFCTH_QUEUE_NUM])
+ helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+ if (tb[NFCTH_STATUS]) {
+ int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+ switch (status) {
+ case NFCT_HELPER_STATUS_ENABLED:
+ helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+ break;
+ case NFCT_HELPER_STATUS_DISABLED:
+ helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ const char *helper_name;
+ struct nf_conntrack_helper *cur, *helper = NULL;
+ struct nf_conntrack_tuple tuple;
+ struct hlist_node *n;
+ int ret = 0, i;
+
+ if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+ return -EINVAL;
+
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ rcu_read_lock();
+ for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
+ hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0)
+ continue;
+
+ if ((tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL) {
+ ret = -EEXIST;
+ goto err;
+ }
+ helper = cur;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (helper == NULL)
+ ret = nfnl_cthelper_create(tb, &tuple);
+ else
+ ret = nfnl_cthelper_update(tb, helper);
+
+ return ret;
+err:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int
+nfnl_cthelper_dump_tuple(struct sk_buff *skb,
+ struct nf_conntrack_helper *helper)
+{
+ struct nlattr *nest_parms;
+
+ nest_parms = nla_nest_start(skb, NFCTH_TUPLE | NLA_F_NESTED);
+ if (nest_parms == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
+ htons(helper->tuple.src.l3num)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int
+nfnl_cthelper_dump_policy(struct sk_buff *skb,
+ struct nf_conntrack_helper *helper)
+{
+ int i;
+ struct nlattr *nest_parms1, *nest_parms2;
+
+ nest_parms1 = nla_nest_start(skb, NFCTH_POLICY | NLA_F_NESTED);
+ if (nest_parms1 == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
+ htonl(helper->expect_class_max)))
+ goto nla_put_failure;
+
+ for (i = 0; i < helper->expect_class_max; i++) {
+ nest_parms2 = nla_nest_start(skb,
+ (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
+ if (nest_parms2 == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_string(skb, NFCTH_POLICY_NAME,
+ helper->expect_policy[i].name))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
+ htonl(helper->expect_policy[i].max_expected)))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
+ htonl(helper->expect_policy[i].timeout)))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms2);
+ }
+ nla_nest_end(skb, nest_parms1);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int
+nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ int event, struct nf_conntrack_helper *helper)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0;
+ int status;
+
+ event |= NFNL_SUBSYS_CTHELPER << 8;
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFCTH_NAME, helper->name))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
+ goto nla_put_failure;
+
+ if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
+ goto nla_put_failure;
+
+ if (nfnl_cthelper_dump_policy(skb, helper) < 0)
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
+ goto nla_put_failure;
+
+ if (helper->flags & NF_CT_HELPER_F_CONFIGURED)
+ status = NFCT_HELPER_STATUS_ENABLED;
+ else
+ status = NFCT_HELPER_STATUS_DISABLED;
+
+ if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nf_conntrack_helper *cur, *last;
+ struct hlist_node *n;
+
+ rcu_read_lock();
+ last = (struct nf_conntrack_helper *)cb->args[1];
+ for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
+restart:
+ hlist_for_each_entry_rcu(cur, n,
+ &nf_ct_helper_hash[cb->args[0]], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (cb->args[1]) {
+ if (cur != last)
+ continue;
+ cb->args[1] = 0;
+ }
+ if (nfnl_cthelper_fill_info(skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur) < 0) {
+ cb->args[1] = (unsigned long)cur;
+ goto out;
+ }
+ }
+ /* 'last' disappeared while the dump was paused: rescan this
+ * bucket without the last-entry filter. The check sits inside
+ * the loop so cb->args[0] still indexes a valid bucket when
+ * we jump back to restart.
+ */
+ if (cb->args[1]) {
+ cb->args[1] = 0;
+ goto restart;
+ }
+ }
+out:
+ rcu_read_unlock();
+ return skb->len;
+}
+
+static int
+nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ int ret = -ENOENT, i;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n;
+ struct sk_buff *skb2;
+ char *helper_name = NULL;
+ struct nf_conntrack_tuple tuple;
+ bool tuple_set = false;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nfnl_cthelper_dump_table,
+ };
+ return netlink_dump_start(nfnl, skb, nlh, &c);
+ }
+
+ if (tb[NFCTH_NAME])
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ if (tb[NFCTH_TUPLE]) {
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ tuple_set = true;
+ }
+
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (helper_name && strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0) {
+ continue;
+ }
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
+
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ }
+ return ret;
+}
+
+static int
+nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ char *helper_name = NULL;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n, *tmp;
+ struct nf_conntrack_tuple tuple;
+ bool tuple_set = false, found = false;
+ int i, j = 0, ret;
+
+ if (tb[NFCTH_NAME])
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ if (tb[NFCTH_TUPLE]) {
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ tuple_set = true;
+ }
+
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hnode) {
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ j++;
+
+ if (helper_name && strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0) {
+ continue;
+ }
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ found = true;
+ nf_conntrack_helper_unregister(cur);
+ }
+ }
+ /* Make sure we return success if we flush and there are no helpers */
+ return (found || j == 0) ? 0 : -ENOENT;
+}
+
+static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
+ [NFCTH_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN-1 },
+ [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+};
+
+static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
+ [NFNL_MSG_CTHELPER_NEW] = { .call = nfnl_cthelper_new,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+ [NFNL_MSG_CTHELPER_GET] = { .call = nfnl_cthelper_get,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+ [NFNL_MSG_CTHELPER_DEL] = { .call = nfnl_cthelper_del,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+};
+
+static const struct nfnetlink_subsystem nfnl_cthelper_subsys = {
+ .name = "cthelper",
+ .subsys_id = NFNL_SUBSYS_CTHELPER,
+ .cb_count = NFNL_MSG_CTHELPER_MAX,
+ .cb = nfnl_cthelper_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER);
+
+static int __init nfnl_cthelper_init(void)
+{
+ int ret;
+
+ ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys);
+ if (ret < 0) {
+ pr_err("nfnl_cthelper: cannot register with nfnetlink.\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ return ret;
+}
+
+static void __exit nfnl_cthelper_exit(void)
+{
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n, *tmp;
+ int i;
+
+ nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
+
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hnode) {
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ nf_conntrack_helper_unregister(cur);
+ }
+ }
+}
+
+module_init(nfnl_cthelper_init);
+module_exit(nfnl_cthelper_exit);
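For context, a minimal userspace sketch of driving the new NFNL_MSG_CTHELPER_NEW
command, assuming libmnl and the nfnetlink_cthelper.h uapi definitions this
series introduces. The attribute layout mirrors the parsers above; the helper
and policy names are made up and error handling is elided. This is not part of
the patch itself.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cthelper.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl = mnl_socket_open(NETLINK_NETFILTER);
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg;
	struct nlattr *tuple, *policy, *set;

	nlh->nlmsg_type = (NFNL_SUBSYS_CTHELPER << 8) | NFNL_MSG_CTHELPER_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_strz(nlh, NFCTH_NAME, "sip-user");		/* made-up name */
	mnl_attr_put_u32(nlh, NFCTH_QUEUE_NUM, htonl(0));
	mnl_attr_put_u32(nlh, NFCTH_PRIV_DATA_LEN, htonl(32));

	/* layout must match nfnl_cthelper_parse_tuple() above */
	tuple = mnl_attr_nest_start(nlh, NFCTH_TUPLE);
	mnl_attr_put_u16(nlh, NFCTH_TUPLE_L3PROTONUM, htons(AF_INET));
	mnl_attr_put_u8(nlh, NFCTH_TUPLE_L4PROTONUM, IPPROTO_UDP);
	mnl_attr_nest_end(nlh, tuple);

	/* one expectation class, mirroring nfnl_cthelper_dump_policy() */
	policy = mnl_attr_nest_start(nlh, NFCTH_POLICY);
	mnl_attr_put_u32(nlh, NFCTH_POLICY_SET_NUM, htonl(1));
	set = mnl_attr_nest_start(nlh, NFCTH_POLICY_SET);
	mnl_attr_put_strz(nlh, NFCTH_POLICY_NAME, "sip");
	mnl_attr_put_u32(nlh, NFCTH_POLICY_EXPECT_MAX, htonl(1));
	mnl_attr_put_u32(nlh, NFCTH_POLICY_EXPECT_TIMEOUT, htonl(300));
	mnl_attr_nest_end(nlh, set);
	mnl_attr_nest_end(nlh, policy);

	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	mnl_socket_close(nl);
	return 0;
}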
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 3e655288d1d..cdecbc8fe96 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,8 +49,9 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
static int
ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
- struct nf_conntrack_l4proto *l4proto,
- const struct nlattr *attr)
+ struct nf_conntrack_l4proto *l4proto,
+ struct net *net,
+ const struct nlattr *attr)
{
int ret = 0;
@@ -60,7 +61,8 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
attr, l4proto->ctnl_timeout.nla_policy);
- ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
+ &timeout->data);
}
return ret;
}
@@ -74,6 +76,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
__u8 l4num;
struct nf_conntrack_l4proto *l4proto;
struct ctnl_timeout *timeout, *matching = NULL;
+ struct net *net = sock_net(skb->sk);
char *name;
int ret;
@@ -117,7 +120,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(matching, l4proto,
+ ret = ctnl_timeout_parse_policy(matching, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
return ret;
}
@@ -132,7 +135,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(timeout, l4proto,
+ ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
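The extra "struct net *" threaded through ctnl_timeout_parse_policy() above
lands in each l4proto's nlattr_to_obj() handler, so timeout defaults can come
from per-netns state instead of globals. A hedged sketch of the new callback
shape; the protocol, attribute, and per-netns accessor names are hypothetical:

static int example_timeout_nlattr_to_obj(struct nlattr *tb[],
					 struct net *net, void *data)
{
	unsigned int *timeout = data;

	/* per-netns default instead of a compile-time constant;
	 * example_pernet() is a hypothetical accessor */
	*timeout = example_pernet(net)->timeout_default;

	if (tb[CTA_TIMEOUT_EXAMPLE])	/* hypothetical attribute */
		*timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_EXAMPLE])) * HZ;

	return 0;
}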
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c3cfc0cc9b..169ab59ed9d 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -326,18 +326,20 @@ __nfulnl_send(struct nfulnl_instance *inst)
{
int status = -1;
- if (inst->qlen > 1)
- NLMSG_PUT(inst->skb, 0, 0,
- NLMSG_DONE,
- sizeof(struct nfgenmsg));
-
+ if (inst->qlen > 1) {
+ struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
+ NLMSG_DONE,
+ sizeof(struct nfgenmsg),
+ 0);
+ if (!nlh)
+ goto out;
+ }
status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
MSG_DONTWAIT);
inst->qlen = 0;
inst->skb = NULL;
-
-nlmsg_failure:
+out:
return status;
}
@@ -380,10 +382,12 @@ __build_packet_message(struct nfulnl_instance *inst,
struct nfgenmsg *nfmsg;
sk_buff_data_t old_tail = inst->skb->tail;
- nlh = NLMSG_PUT(inst->skb, 0, 0,
+ nlh = nlmsg_put(inst->skb, 0, 0,
NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
- sizeof(struct nfgenmsg));
- nfmsg = NLMSG_DATA(nlh);
+ sizeof(struct nfgenmsg), 0);
+ if (!nlh)
+ return -1;
+ nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
@@ -526,7 +530,7 @@ __build_packet_message(struct nfulnl_instance *inst,
if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
- goto nlmsg_failure;
+ return -1;
}
nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
@@ -540,7 +544,6 @@ __build_packet_message(struct nfulnl_instance *inst,
nlh->nlmsg_len = inst->skb->tail - old_tail;
return 0;
-nlmsg_failure:
nla_put_failure:
PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
return -1;
@@ -745,7 +748,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfula[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t group_num = ntohs(nfmsg->res_id);
struct nfulnl_instance *inst;
struct nfulnl_msg_config_cmd *cmd = NULL;
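The conversions in this file all follow one pattern: NLMSG_PUT() hid a
"goto nlmsg_failure" inside a macro, while nlmsg_put() returns NULL and lets
the caller choose the failure path. A minimal sketch of the idiom; the
-EMSGSIZE return is a conventional choice, not something the API mandates:

static int example_fill(struct sk_buff *skb, int type)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(struct nfgenmsg), 0);
	if (nlh == NULL)		/* explicit check, no hidden label */
		return -EMSGSIZE;

	/* ... nla_put_*() attributes here ... */

	nlmsg_end(skb, nlh);
	return 0;
}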
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue_core.c
index 4162437b836..c0496a55ad0 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
+#include <net/netfilter/nfnetlink_queue.h>
#include <linux/atomic.h>
@@ -52,6 +53,7 @@ struct nfqnl_instance {
u_int16_t queue_num; /* number of this queue */
u_int8_t copy_mode;
+ u_int32_t flags; /* Set using NFQA_CFG_FLAGS */
/*
* Following fields are dirtied for each queued packet,
* keep them in same cache line if possible.
@@ -232,6 +234,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
+ struct nf_conn *ct = NULL;
+ enum ip_conntrack_info uninitialized_var(ctinfo);
size = NLMSG_SPACE(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -265,16 +269,22 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
break;
}
+ if (queue->flags & NFQA_CFG_F_CONNTRACK)
+ ct = nfqnl_ct_get(entskb, &size, &ctinfo);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- goto nlmsg_failure;
+ return NULL;
old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0,
+ nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
- sizeof(struct nfgenmsg));
- nfmsg = NLMSG_DATA(nlh);
+ sizeof(struct nfgenmsg), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
@@ -377,7 +387,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (skb_tailroom(skb) < nla_total_size(data_len)) {
printk(KERN_WARNING "nf_queue: no tailroom!\n");
- goto nlmsg_failure;
+ kfree_skb(skb);
+ return NULL;
}
nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
@@ -388,10 +399,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
BUG();
}
+ if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+ goto nla_put_failure;
+
nlh->nlmsg_len = skb->tail - old_tail;
return skb;
-nlmsg_failure:
nla_put_failure:
if (skb)
kfree_skb(skb);
@@ -406,6 +419,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
struct nfqnl_instance *queue;
int err = -ENOBUFS;
__be32 *packet_id_ptr;
+ int failopen = 0;
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(queuenum);
@@ -431,9 +445,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
goto err_out_free_nskb;
}
if (queue->queue_total >= queue->queue_maxlen) {
- queue->queue_dropped++;
- net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
- queue->queue_total);
+ if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+ failopen = 1;
+ err = 0;
+ } else {
+ queue->queue_dropped++;
+ net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
+ queue->queue_total);
+ }
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
@@ -455,17 +474,17 @@ err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
spin_unlock_bh(&queue->lock);
+ if (failopen)
+ nf_reinject(entry, NF_ACCEPT);
err_out:
return err;
}
static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
+nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
struct sk_buff *nskb;
- int diff;
- diff = data_len - e->skb->len;
if (diff < 0) {
if (pskb_trim(e->skb, data_len))
return -ENOMEM;
@@ -623,6 +642,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
+ [NFQA_CT] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -670,7 +690,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nf_queue_entry *entry, *tmp;
unsigned int verdict, maxid;
struct nfqnl_msg_verdict_hdr *vhdr;
@@ -716,13 +736,15 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
struct nfqnl_msg_verdict_hdr *vhdr;
struct nfqnl_instance *queue;
unsigned int verdict;
struct nf_queue_entry *entry;
+ enum ip_conntrack_info uninitialized_var(ctinfo);
+ struct nf_conn *ct = NULL;
queue = instance_lookup(queue_num);
if (!queue)
@@ -741,11 +763,22 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
+ rcu_read_lock();
+ if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+
if (nfqa[NFQA_PAYLOAD]) {
+ u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
+ int diff = payload_len - entry->skb->len;
+
if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
- nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
+ payload_len, entry, diff) < 0)
verdict = NF_DROP;
+
+ /* adjust the queued packet, not the netlink request skb */
+ if (ct)
+ nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
}
+ rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
@@ -777,7 +810,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
struct nfqnl_instance *queue;
struct nfqnl_msg_config_cmd *cmd = NULL;
@@ -858,6 +891,36 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
spin_unlock_bh(&queue->lock);
}
+ if (nfqa[NFQA_CFG_FLAGS]) {
+ __u32 flags, mask;
+
+ if (!queue) {
+ ret = -ENODEV;
+ goto err_out_unlock;
+ }
+
+ if (!nfqa[NFQA_CFG_MASK]) {
+ /* A mask is needed to specify which flags are being
+ * changed.
+ */
+ ret = -EINVAL;
+ goto err_out_unlock;
+ }
+
+ flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
+ mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
+
+ if (flags >= NFQA_CFG_F_MAX) {
+ ret = -EOPNOTSUPP;
+ goto err_out_unlock;
+ }
+
+ spin_lock_bh(&queue->lock);
+ queue->flags &= ~mask;
+ queue->flags |= flags & mask;
+ spin_unlock_bh(&queue->lock);
+ }
+
err_out_unlock:
rcu_read_unlock();
return ret;
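A hedged sketch of the userspace side of the NFQA_CFG_FLAGS handling above:
enabling fail-open on one queue, assuming libmnl with socket setup as in the
cthelper sketch further up and the NFQA_* definitions from
linux/netfilter/nfnetlink_queue.h. Per the kernel code, NFQA_CFG_MASK must
cover every bit being changed:

static void example_set_fail_open(struct mnl_socket *nl, uint16_t queue)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));

	nlh->nlmsg_type = (NFNL_SUBSYS_QUEUE << 8) | NFQNL_MSG_CONFIG;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = htons(queue);	/* queue number travels in res_id */

	mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(NFQA_CFG_F_FAIL_OPEN));
	mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(NFQA_CFG_F_FAIL_OPEN));

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
}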
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
new file mode 100644
index 00000000000..ab61d66bc0b
--- /dev/null
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -0,0 +1,98 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_queue.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nfnetlink_queue.h>
+
+struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nf_conn *ct;
+
+ /* rcu_read_lock()ed by __nf_queue already. */
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return NULL;
+
+ ct = nf_ct_get(entskb, ctinfo);
+ if (ct) {
+ if (!nf_ct_is_untracked(ct))
+ *size += nfq_ct->build_size(ct);
+ else
+ ct = NULL;
+ }
+ return ct;
+}
+
+struct nf_conn *
+nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nf_conn *ct;
+
+ /* rcu_read_lock()ed by __nf_queue already. */
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return NULL;
+
+ ct = nf_ct_get(skb, ctinfo);
+ if (ct && !nf_ct_is_untracked(ct))
+ nfq_ct->parse(attr, ct);
+
+ return ct;
+}
+
+int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nlattr *nest_parms;
+ u_int32_t tmp;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return 0;
+
+ nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ if (nfq_ct->build(skb, ct) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+
+ tmp = ctinfo;
+ if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff)
+{
+ struct nfq_ct_nat_hook *nfq_nat_ct;
+
+ nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
+ if (nfq_nat_ct == NULL)
+ return;
+
+ if ((ct->status & IPS_NAT_MASK) && diff)
+ nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
+}
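This file only consumes nfq_ct_hook; the provider (ctnetlink, later in this
series) registers a struct of callbacks behind the RCU pointer. A sketch of
that provider shape, with field names and signatures inferred from the call
sites above and everything else hypothetical:

static size_t example_build_size(const struct nf_conn *ct)
{
	return nla_total_size(0);	/* worst-case space for NFQA_CT */
}

static int example_build(struct sk_buff *skb, struct nf_conn *ct)
{
	return 0;			/* dump ct state as nested attributes */
}

static int example_parse(const struct nlattr *attr, struct nf_conn *ct)
{
	return 0;			/* apply NFQA_CT changes to ct */
}

static struct nfq_ct_hook example_ct_hook = {
	.build_size	= example_build_size,
	.build		= example_build,
	.parse		= example_parse,
};

static int __init example_init(void)
{
	rcu_assign_pointer(nfq_ct_hook, &example_ct_hook);
	return 0;
}

static void __exit example_exit(void)
{
	rcu_assign_pointer(nfq_ct_hook, NULL);
	synchronize_rcu();
}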
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index a51de9b052b..116018560c6 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -112,6 +112,8 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
goto err3;
if (info->helper[0]) {
+ struct nf_conntrack_helper *helper;
+
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
@@ -120,19 +122,21 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
goto err3;
}
- ret = -ENOMEM;
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (help == NULL)
- goto err3;
-
ret = -ENOENT;
- help->helper = nf_conntrack_helper_try_module_get(info->helper,
- par->family,
- proto);
- if (help->helper == NULL) {
+ helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (helper == NULL) {
pr_info("No such helper \"%s\"\n", info->helper);
goto err3;
}
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ help->helper = helper;
}
__set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -202,6 +206,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
goto err3;
if (info->helper[0]) {
+ struct nf_conntrack_helper *helper;
+
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
@@ -210,19 +216,21 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
goto err3;
}
- ret = -ENOMEM;
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (help == NULL)
- goto err3;
-
ret = -ENOENT;
- help->helper = nf_conntrack_helper_try_module_get(info->helper,
- par->family,
- proto);
- if (help->helper == NULL) {
+ helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (helper == NULL) {
pr_info("No such helper \"%s\"\n", info->helper);
goto err3;
}
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ help->helper = helper;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
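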
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 95237c89607..7babe7d6871 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -41,26 +41,36 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
static u32 hash_v4(const struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
- __be32 ipaddr;
/* packets in either direction go into same queue */
- ipaddr = iph->saddr ^ iph->daddr;
+ if (iph->saddr < iph->daddr)
+ return jhash_3words((__force u32)iph->saddr,
+ (__force u32)iph->daddr, iph->protocol, jhash_initval);
- return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
+ return jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr, iph->protocol, jhash_initval);
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 hash_v6(const struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- __be32 addr[4];
+ u32 a, b, c;
+
+ if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+ a = (__force u32) ip6h->saddr.s6_addr32[3];
+ b = (__force u32) ip6h->daddr.s6_addr32[3];
+ } else {
+ b = (__force u32) ip6h->saddr.s6_addr32[3];
+ a = (__force u32) ip6h->daddr.s6_addr32[3];
+ }
- addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
- addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
- addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
- addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
+ if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+ c = (__force u32) ip6h->saddr.s6_addr32[1];
+ else
+ c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
+ return jhash_3words(a, b, c, jhash_initval);
}
#endif
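The reordering in hash_v4()/hash_v6() makes the queue selection symmetric:
both directions of a flow now feed the hash the same (low address, high
address, protocol) triple, so replies land on the same queue as the original
direction. A standalone demo of the idea with a stand-in mixer (the kernel
uses jhash_3words(), and the v1 target scales the result roughly as shown):

#include <stdint.h>
#include <stdio.h>

static uint32_t pick_queue(uint32_t saddr, uint32_t daddr, uint8_t proto,
			   uint32_t initval, uint32_t nqueues)
{
	uint32_t lo = saddr < daddr ? saddr : daddr;
	uint32_t hi = saddr < daddr ? daddr : saddr;
	/* stand-in mixer; the kernel feeds (lo, hi, proto) to jhash_3words() */
	uint32_t h = (lo * 2654435761u) ^ (hi * 40503u) ^ proto ^ initval;

	/* scale the 32-bit hash into [0, nqueues) */
	return (uint32_t)(((uint64_t)h * nqueues) >> 32);
}

int main(void)
{
	/* both directions of the same flow pick the same queue */
	printf("%u %u\n",
	       pick_queue(0x0a000001, 0x0a000002, 6, 0x1234, 8),
	       pick_queue(0x0a000002, 0x0a000001, 6, 0x1234, 8));
	return 0;
}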
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 146033a86de..d7f195388f6 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -69,7 +69,7 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
}
/**
- * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
* @laddr: IPv4 address to redirect to or zero.
* @lport: TCP port to redirect to or zero.
@@ -220,7 +220,7 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
}
/**
- * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
* @tproto: Transport protocol.
* @thoff: Transport protocol header offset.
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index c6d5a83450c..70b5591a258 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -274,38 +274,25 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
kfree(info->data);
}
-static struct xt_match connlimit_mt_reg[] __read_mostly = {
- {
- .name = "connlimit",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
- },
- {
- .name = "connlimit",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
- },
+static struct xt_match connlimit_mt_reg __read_mostly = {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
- return xt_register_matches(connlimit_mt_reg,
- ARRAY_SIZE(connlimit_mt_reg));
+ return xt_register_match(&connlimit_mt_reg);
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
+ xt_unregister_match(&connlimit_mt_reg);
}
module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc0d6dbe5d1..ae2ad1eec8d 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -75,6 +75,7 @@ struct recent_entry {
struct recent_table {
struct list_head list;
char name[XT_RECENT_NAME_LEN];
+ union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
struct list_head lru_list;
@@ -228,10 +229,10 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
struct recent_net *recent_net = recent_pernet(net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
+ const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
struct recent_entry *e;
- union nf_inet_addr addr = {};
+ union nf_inet_addr addr = {}, addr_mask;
u_int8_t ttl;
bool ret = info->invert;
@@ -261,12 +262,15 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
spin_lock_bh(&recent_lock);
t = recent_table_lookup(recent_net, info->name);
- e = recent_entry_lookup(t, &addr, par->family,
+
+ nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
+
+ e = recent_entry_lookup(t, &addr_mask, par->family,
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
if (!(info->check_set & XT_RECENT_SET))
goto out;
- e = recent_entry_init(t, &addr, par->family, ttl);
+ e = recent_entry_init(t, &addr_mask, par->family, ttl);
if (e == NULL)
par->hotdrop = true;
ret = !ret;
@@ -306,10 +310,10 @@ out:
return ret;
}
-static int recent_mt_check(const struct xt_mtchk_param *par)
+static int recent_mt_check(const struct xt_mtchk_param *par,
+ const struct xt_recent_mtinfo_v1 *info)
{
struct recent_net *recent_net = recent_pernet(par->net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
struct recent_table *t;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde;
@@ -361,6 +365,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
goto out;
}
t->refcnt = 1;
+
+ memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
INIT_LIST_HEAD(&t->lru_list);
for (i = 0; i < ip_list_hash_size; i++)
@@ -385,10 +391,28 @@ out:
return ret;
}
+static int recent_mt_check_v0(const struct xt_mtchk_param *par)
+{
+ const struct xt_recent_mtinfo_v0 *info_v0 = par->matchinfo;
+ struct xt_recent_mtinfo_v1 info_v1;
+
+ /* Copy revision 0 structure to revision 1 */
+ memcpy(&info_v1, info_v0, sizeof(struct xt_recent_mtinfo));
+ /* Set default mask to ensure backward compatible behaviour */
+ memset(info_v1.mask.all, 0xFF, sizeof(info_v1.mask.all));
+
+ return recent_mt_check(par, &info_v1);
+}
+
+static int recent_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ return recent_mt_check(par, par->matchinfo);
+}
+
static void recent_mt_destroy(const struct xt_mtdtor_param *par)
{
struct recent_net *recent_net = recent_pernet(par->net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
+ const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
mutex_lock(&recent_mutex);
@@ -625,7 +649,7 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
- .checkentry = recent_mt_check,
+ .checkentry = recent_mt_check_v0,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
@@ -635,10 +659,30 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV6,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
- .checkentry = recent_mt_check,
+ .checkentry = recent_mt_check_v0,
+ .destroy = recent_mt_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "recent",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .match = recent_mt,
+ .matchsize = sizeof(struct xt_recent_mtinfo_v1),
+ .checkentry = recent_mt_check_v1,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
+ {
+ .name = "recent",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .match = recent_mt,
+ .matchsize = sizeof(struct xt_recent_mtinfo_v1),
+ .checkentry = recent_mt_check_v1,
+ .destroy = recent_mt_destroy,
+ .me = THIS_MODULE,
+ }
};
static int __init recent_mt_init(void)
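The new per-table mask is applied through nf_inet_addr_mask() before both
lookup and insert, so recent entries are keyed by network rather than by host,
and the v0 shim above preserves old behaviour with an all-ones mask. The
helper itself is just a word-wise AND over the address union; a sketch of
its shape:

static void example_addr_mask(const union nf_inet_addr *addr,
			      union nf_inet_addr *result,
			      const union nf_inet_addr *mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(addr->all); i++)
		result->all[i] = addr->all[i] & mask->all[i];
}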
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b3025a603d5..5463969da45 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -80,6 +80,7 @@ struct netlink_sock {
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
+ void (*netlink_bind)(int group);
struct module *module;
};
@@ -124,6 +125,7 @@ struct netlink_table {
unsigned int groups;
struct mutex *cb_mutex;
struct module *module;
+ void (*bind)(int group);
int registered;
};
@@ -444,6 +446,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
+ void (*bind)(int group);
int err = 0;
sock->state = SS_UNCONNECTED;
@@ -468,6 +471,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
+ bind = nl_table[protocol].bind;
netlink_unlock_table();
if (err < 0)
@@ -483,6 +487,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
nlk = nlk_sk(sock->sk);
nlk->module = module;
+ nlk->netlink_bind = bind;
out:
return err;
@@ -683,6 +688,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_update_listeners(sk);
netlink_table_ungrab();
+ if (nlk->netlink_bind && nlk->groups[0]) {
+ int i;
+
+ for (i = 0; i < nlk->ngroups; i++) {
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_bind(i);
+ }
+ }
+
return 0;
}
@@ -1239,6 +1253,10 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
netlink_update_socket_mc(nlk, val,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
+
+ if (nlk->netlink_bind)
+ nlk->netlink_bind(val);
+
err = 0;
break;
}
@@ -1503,14 +1521,16 @@ static void netlink_data_ready(struct sock *sk, int len)
*/
struct sock *
-netlink_kernel_create(struct net *net, int unit, unsigned int groups,
- void (*input)(struct sk_buff *skb),
- struct mutex *cb_mutex, struct module *module)
+netlink_kernel_create(struct net *net, int unit,
+ struct module *module,
+ struct netlink_kernel_cfg *cfg)
{
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
+ struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
+ unsigned int groups;
BUG_ON(!nl_table);
@@ -1532,16 +1552,18 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
sk = sock->sk;
sk_change_net(sk, net);
- if (groups < 32)
+ if (!cfg || cfg->groups < 32)
groups = 32;
+ else
+ groups = cfg->groups;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
sk->sk_data_ready = netlink_data_ready;
- if (input)
- nlk_sk(sk)->netlink_rcv = input;
+ if (cfg && cfg->input)
+ nlk_sk(sk)->netlink_rcv = cfg->input;
if (netlink_insert(sk, net, 0))
goto out_sock_release;
@@ -1555,6 +1577,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
+ nl_table[unit].bind = cfg ? cfg->bind : NULL;
nl_table[unit].registered = 1;
} else {
kfree(listeners);
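Callers now pass a struct netlink_kernel_cfg instead of loose arguments; the
genetlink hunk below shows a real conversion. A hedged sketch of a kernel
user that also registers the new per-group bind() notifier wired up above;
the handler names and group count are hypothetical:

static struct sock *example_sock;

static void example_input(struct sk_buff *skb)
{
	/* request handler; body hypothetical */
}

static void example_bind(int group)
{
	/* runs when a peer joins a group, from either bind() or
	 * NETLINK_ADD_MEMBERSHIP per the paths above */
	pr_debug("peer subscribed to group %d\n", group);
}

static int __init example_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 4,
		.input	= example_input,
		.bind	= example_bind,
	};

	example_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
					     THIS_MODULE, &cfg);
	return example_sock ? 0 : -ENOMEM;
}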
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2cc7c1ee769..fda497412fc 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,7 +33,7 @@ void genl_unlock(void)
}
EXPORT_SYMBOL(genl_unlock);
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
return lockdep_is_held(&genl_mutex);
@@ -504,7 +504,7 @@ EXPORT_SYMBOL(genl_unregister_family);
* @pid: netlink pid the message is addressed to
* @seq: sequence number (usually the one of the sender)
* @family: generic netlink family
- * @flags netlink message flags
+ * @flags: netlink message flags
* @cmd: generic netlink command
*
* Returns pointer to user specific header
@@ -915,10 +915,14 @@ static struct genl_multicast_group notify_grp = {
static int __net_init genl_pernet_init(struct net *net)
{
+ struct netlink_kernel_cfg cfg = {
+ .input = genl_rcv,
+ .cb_mutex = &genl_mutex,
+ };
+
/* we'll bump the group number right afterwards */
- net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
- genl_rcv, &genl_mutex,
- THIS_MODULE);
+ net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
+ THIS_MODULE, &cfg);
if (!net->genl_sock && net_eq(net, &init_net))
panic("GENL: Cannot initialize generic netlink\n");
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 9f6ce011d35..ff749794bc5 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/nfc.h>
+#include <net/genetlink.h>
+
#include "nfc.h"
#define VERSION "0.1"
@@ -121,14 +123,14 @@ error:
* The device remains polling for targets until a target is found or
* the nfc_stop_poll function is called.
*/
-int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
+int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
{
int rc;
- pr_debug("dev_name=%s protocols=0x%x\n",
- dev_name(&dev->dev), protocols);
+ pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n",
+ dev_name(&dev->dev), im_protocols, tm_protocols);
- if (!protocols)
+ if (!im_protocols && !tm_protocols)
return -EINVAL;
device_lock(&dev->dev);
@@ -143,9 +145,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
goto error;
}
- rc = dev->ops->start_poll(dev, protocols);
- if (!rc)
+ rc = dev->ops->start_poll(dev, im_protocols, tm_protocols);
+ if (!rc) {
dev->polling = true;
+ dev->rf_mode = NFC_RF_NONE;
+ }
error:
device_unlock(&dev->dev);
@@ -235,8 +239,10 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
}
rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
- if (!rc)
+ if (!rc) {
dev->active_target = target;
+ dev->rf_mode = NFC_RF_INITIATOR;
+ }
error:
device_unlock(&dev->dev);
@@ -264,11 +270,6 @@ int nfc_dep_link_down(struct nfc_dev *dev)
goto error;
}
- if (dev->dep_rf_mode == NFC_RF_TARGET) {
- rc = -EOPNOTSUPP;
- goto error;
- }
-
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
@@ -286,7 +287,6 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode)
{
dev->dep_link_up = true;
- dev->dep_rf_mode = rf_mode;
nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
@@ -330,6 +330,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
rc = dev->ops->activate_target(dev, target, protocol);
if (!rc) {
dev->active_target = target;
+ dev->rf_mode = NFC_RF_INITIATOR;
if (dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -409,27 +410,30 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}
- if (dev->active_target == NULL) {
- rc = -ENOTCONN;
- kfree_skb(skb);
- goto error;
- }
+ if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) {
+ if (dev->active_target->idx != target_idx) {
+ rc = -EADDRNOTAVAIL;
+ kfree_skb(skb);
+ goto error;
+ }
- if (dev->active_target->idx != target_idx) {
- rc = -EADDRNOTAVAIL;
+ if (dev->ops->check_presence)
+ del_timer_sync(&dev->check_pres_timer);
+
+ rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
+ cb_context);
+
+ if (!rc && dev->ops->check_presence)
+ mod_timer(&dev->check_pres_timer, jiffies +
+ msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+ } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
+ rc = dev->ops->tm_send(dev, skb);
+ } else {
+ rc = -ENOTCONN;
kfree_skb(skb);
goto error;
}
- if (dev->ops->check_presence)
- del_timer_sync(&dev->check_pres_timer);
-
- rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
- cb_context);
-
- if (!rc && dev->ops->check_presence)
- mod_timer(&dev->check_pres_timer, jiffies +
- msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
error:
device_unlock(&dev->dev);
@@ -447,6 +451,63 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
}
EXPORT_SYMBOL(nfc_set_remote_general_bytes);
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len)
+{
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+ return nfc_llcp_general_bytes(dev, gb_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+{
+ /* Only LLCP target mode for now */
+ if (dev->dep_link_up == false) {
+ kfree_skb(skb);
+ return -ENOLINK;
+ }
+
+ return nfc_llcp_data_received(dev, skb);
+}
+EXPORT_SYMBOL(nfc_tm_data_received);
+
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+ u8 *gb, size_t gb_len)
+{
+ int rc;
+
+ device_lock(&dev->dev);
+
+ dev->polling = false;
+
+ if (gb != NULL) {
+ rc = nfc_set_remote_general_bytes(dev, gb, gb_len);
+ if (rc < 0)
+ goto out;
+ }
+
+ dev->rf_mode = NFC_RF_TARGET;
+
+ if (protocol == NFC_PROTO_NFC_DEP_MASK)
+ nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET);
+
+ rc = nfc_genl_tm_activated(dev, protocol);
+
+out:
+ device_unlock(&dev->dev);
+
+ return rc;
+}
+EXPORT_SYMBOL(nfc_tm_activated);
+
+int nfc_tm_deactivated(struct nfc_dev *dev)
+{
+ dev->dep_link_up = false;
+
+ return nfc_genl_tm_deactivated(dev);
+}
+EXPORT_SYMBOL(nfc_tm_deactivated);
+
/**
* nfc_alloc_send_skb - allocate a skb for data exchange responses
*
@@ -501,6 +562,8 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
+ * NOTE: This function can be called with targets=NULL and n_targets=0 to
+ * notify a driver error, meaning that the polling operation cannot complete.
* IMPORTANT: this function must not be called from an atomic context.
* In addition, it must also not be called from a context that would prevent
* the NFC Core to call other nfc ops entry point concurrently.
@@ -512,23 +575,33 @@ int nfc_targets_found(struct nfc_dev *dev,
pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
- dev->polling = false;
-
for (i = 0; i < n_targets; i++)
targets[i].idx = dev->target_next_idx++;
device_lock(&dev->dev);
+ if (dev->polling == false) {
+ device_unlock(&dev->dev);
+ return 0;
+ }
+
+ dev->polling = false;
+
dev->targets_generation++;
kfree(dev->targets);
- dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
- GFP_ATOMIC);
+ dev->targets = NULL;
- if (!dev->targets) {
- dev->n_targets = 0;
- device_unlock(&dev->dev);
- return -ENOMEM;
+ if (targets) {
+ dev->targets = kmemdup(targets,
+ n_targets * sizeof(struct nfc_target),
+ GFP_ATOMIC);
+
+ if (!dev->targets) {
+ dev->n_targets = 0;
+ device_unlock(&dev->dev);
+ return -ENOMEM;
+ }
}
dev->n_targets = n_targets;
@@ -592,6 +665,12 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
}
EXPORT_SYMBOL(nfc_target_lost);
+inline void nfc_driver_failure(struct nfc_dev *dev, int err)
+{
+ nfc_targets_found(dev, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_driver_failure);
+
static void nfc_release(struct device *d)
{
struct nfc_dev *dev = to_nfc_dev(d);
@@ -678,7 +757,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
struct nfc_dev *dev;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
- !ops->deactivate_target || !ops->data_exchange)
+ !ops->deactivate_target || !ops->im_transceive)
return NULL;
if (!supported_protocols)
@@ -847,3 +926,5 @@ MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_DESCRIPTION("NFC Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_NFC);
+MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
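With the nfc_targets_found() changes above, a poll that cannot complete is
reported with a NULL/0 target list, wrapped by the new nfc_driver_failure()
helper. A driver-side sketch of the convention; struct example_chip and its
fields are hypothetical:

static void example_poll_complete(struct example_chip *chip, int err)
{
	if (err) {
		/* becomes nfc_targets_found(dev, NULL, 0) internally,
		 * which aborts the pending poll */
		nfc_driver_failure(chip->nfc_dev, err);
		return;
	}

	nfc_targets_found(chip->nfc_dev, chip->found, chip->n_found);
}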
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 8729abf5f18..46362ef979d 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -28,26 +28,14 @@
#include "hci.h"
-static int nfc_hci_result_to_errno(u8 result)
-{
- switch (result) {
- case NFC_HCI_ANY_OK:
- return 0;
- case NFC_HCI_ANY_E_TIMEOUT:
- return -ETIMEDOUT;
- default:
- return -1;
- }
-}
-
-static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
+static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err,
struct sk_buff *skb, void *cb_data)
{
struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
- pr_debug("HCI Cmd completed with HCI result=%d\n", result);
+ pr_debug("HCI Cmd completed with result=%d\n", err);
- hcp_ew->exec_result = nfc_hci_result_to_errno(result);
+ hcp_ew->exec_result = err;
if (hcp_ew->exec_result == 0)
hcp_ew->result_skb = skb;
else
@@ -311,9 +299,9 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
}
EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
-int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe)
{
- u8 pipe = NFC_HCI_INVALID_PIPE;
bool pipe_created = false;
int r;
@@ -322,6 +310,9 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
return -EADDRINUSE;
+ if (pipe != NFC_HCI_INVALID_PIPE)
+ goto pipe_is_open;
+
switch (dest_gate) {
case NFC_HCI_LINK_MGMT_GATE:
pipe = NFC_HCI_LINK_MGMT_PIPE;
@@ -347,6 +338,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
return r;
}
+pipe_is_open:
hdev->gate2pipe[dest_gate] = pipe;
return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index e1a640d2b58..1ac7b3fac6c 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -32,6 +32,18 @@
/* Largest headroom needed for outgoing HCI commands */
#define HCI_CMDS_HEADROOM 1
+static int nfc_hci_result_to_errno(u8 result)
+{
+ switch (result) {
+ case NFC_HCI_ANY_OK:
+ return 0;
+ case NFC_HCI_ANY_E_TIMEOUT:
+ return -ETIME;
+ default:
+ return -1;
+ }
+}
+
static void nfc_hci_msg_tx_work(struct work_struct *work)
{
struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
@@ -46,7 +58,7 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
if (timer_pending(&hdev->cmd_timer) == 0) {
if (hdev->cmd_pending_msg->cb)
hdev->cmd_pending_msg->cb(hdev,
- NFC_HCI_ANY_E_TIMEOUT,
+ -ETIME,
NULL,
hdev->
cmd_pending_msg->
@@ -71,8 +83,7 @@ next_msg:
kfree_skb(skb);
skb_queue_purge(&msg->msg_frags);
if (msg->cb)
- msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
- msg->cb_context);
+ msg->cb(hdev, r, NULL, msg->cb_context);
kfree(msg);
break;
}
@@ -116,20 +127,13 @@ static void nfc_hci_msg_rx_work(struct work_struct *work)
}
}
-void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
- struct sk_buff *skb)
+static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
+ struct sk_buff *skb)
{
- mutex_lock(&hdev->msg_tx_mutex);
-
- if (hdev->cmd_pending_msg == NULL) {
- kfree_skb(skb);
- goto exit;
- }
-
del_timer_sync(&hdev->cmd_timer);
if (hdev->cmd_pending_msg->cb)
- hdev->cmd_pending_msg->cb(hdev, result, skb,
+ hdev->cmd_pending_msg->cb(hdev, err, skb,
hdev->cmd_pending_msg->cb_context);
else
kfree_skb(skb);
@@ -138,6 +142,19 @@ void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
hdev->cmd_pending_msg = NULL;
queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
exit:
mutex_unlock(&hdev->msg_tx_mutex);
@@ -170,6 +187,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
struct nfc_target *targets;
struct sk_buff *atqa_skb = NULL;
struct sk_buff *sak_skb = NULL;
+ struct sk_buff *uid_skb = NULL;
int r;
pr_debug("from gate %d\n", gate);
@@ -205,6 +223,19 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
targets->sel_res = sak_skb->data[0];
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_UID, &uid_skb);
+ if (r < 0)
+ goto exit;
+
+ if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ memcpy(targets->nfcid1, uid_skb->data, uid_skb->len);
+ targets->nfcid1_len = uid_skb->len;
+
if (hdev->ops->complete_target_discovered) {
r = hdev->ops->complete_target_discovered(hdev, gate,
targets);
@@ -213,7 +244,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
}
break;
case NFC_HCI_RF_READER_B_GATE:
- targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
break;
default:
if (hdev->ops->target_from_gate)
@@ -240,6 +271,7 @@ exit:
kfree(targets);
kfree_skb(atqa_skb);
kfree_skb(sak_skb);
+ kfree_skb(uid_skb);
return r;
}
@@ -298,15 +330,15 @@ static void nfc_hci_cmd_timeout(unsigned long data)
}
static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
- u8 gates[])
+ struct nfc_hci_gate *gates)
{
int r;
- u8 *p = gates;
while (gate_count--) {
- r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ gates->gate, gates->pipe);
if (r < 0)
return r;
- p++;
+ gates++;
}
return 0;
@@ -316,14 +348,13 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
{
struct sk_buff *skb = NULL;
int r;
- u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */
- NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
- NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
- NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
- };
+
+ if (hdev->init_data.gates[0].gate != NFC_HCI_ADMIN_GATE)
+ return -EPROTO;
r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
- NFC_HCI_ADMIN_GATE);
+ hdev->init_data.gates[0].gate,
+ hdev->init_data.gates[0].pipe);
if (r < 0)
goto exit;
@@ -351,10 +382,6 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
if (r < 0)
goto exit;
- r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
- if (r < 0)
- goto disconnect_all;
-
r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
hdev->init_data.gates);
if (r < 0)
@@ -481,12 +508,13 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
return 0;
}
-static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+static int hci_start_poll(struct nfc_dev *nfc_dev,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
if (hdev->ops->start_poll)
- return hdev->ops->start_poll(hdev, protocols);
+ return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
else
return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
@@ -511,9 +539,9 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
{
}
-static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
- struct sk_buff *skb, data_exchange_cb_t cb,
- void *cb_context)
+static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;
@@ -579,7 +607,7 @@ static struct nfc_ops hci_nfc_ops = {
.stop_poll = hci_stop_poll,
.activate_target = hci_activate_target,
.deactivate_target = hci_deactivate_target,
- .data_exchange = hci_data_exchange,
+ .im_transceive = hci_transceive,
.check_presence = hci_check_presence,
};
@@ -682,13 +710,12 @@ EXPORT_SYMBOL(nfc_hci_register_device);
void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
{
- struct hci_msg *msg;
+ struct hci_msg *msg, *n;
skb_queue_purge(&hdev->rx_hcp_frags);
skb_queue_purge(&hdev->msg_rx_queue);
- while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
- msg_l)) != NULL) {
+ list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) {
list_del(&msg->msg_l);
skb_queue_purge(&msg->msg_frags);
kfree(msg);
@@ -716,6 +743,27 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
}
EXPORT_SYMBOL(nfc_hci_get_clientdata);
+static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ nfc_driver_failure(hdev->ndev, err);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, err, NULL);
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
+{
+ nfc_hci_failure(hdev, err);
+}
+EXPORT_SYMBOL(nfc_hci_driver_failure);
+
void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
{
struct hcp_packet *packet;
@@ -726,16 +774,6 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
struct sk_buff *frag_skb;
int msg_len;
- if (skb == NULL) {
- /* TODO ELa: lower layer had permanent failure, need to
- * propagate that up
- */
-
- skb_queue_purge(&hdev->rx_hcp_frags);
-
- return;
- }
-
packet = (struct hcp_packet *)skb->data;
if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
skb_queue_tail(&hdev->rx_hcp_frags, skb);
@@ -756,9 +794,8 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
msg_len, GFP_KERNEL);
if (hcp_skb == NULL) {
- /* TODO ELa: cannot deliver HCP message. How to
- * propagate error up?
- */
+ nfc_hci_failure(hdev, -ENOMEM);
+ return;
}
*skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
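hci_dev_session_init() no longer connects a built-in gate list; drivers now
supply init_data.gates as (gate, pipe) pairs, with NFC_HCI_ADMIN_GATE required
first as enforced above. A sketch of a driver table under those rules; the
field order follows the gates->gate/gates->pipe accesses in
hci_dev_connect_gates(), and the pipe constants are the ones this series moves
to the public header:

static struct nfc_hci_gate example_gates[] = {
	{ NFC_HCI_ADMIN_GATE,		NFC_HCI_ADMIN_PIPE },	/* must be first */
	{ NFC_HCI_LINK_MGMT_GATE,	NFC_HCI_LINK_MGMT_PIPE },
	{ NFC_HCI_RF_READER_A_GATE,	NFC_HCI_INVALID_PIPE },	/* created on connect */
};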
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
index 45f2fe4fd48..fa9a21e9223 100644
--- a/net/nfc/hci/hci.h
+++ b/net/nfc/hci/hci.h
@@ -37,10 +37,11 @@ struct hcp_packet {
/*
* HCI command execution completion callback.
- * result will be one of the HCI response codes.
- * skb contains the response data and must be disposed.
+ * result will be a standard Linux error (may be converted from an HCI
+ * response code).  skb contains the response data and must be disposed
+ * of; it may be NULL if an error occurred.
*/
-typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
+typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
struct sk_buff *skb, void *cb_data);
struct hcp_exec_waiter {
@@ -131,9 +132,4 @@ void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
-/* Pipes */
-#define NFC_HCI_INVALID_PIPE 0x80
-#define NFC_HCI_LINK_MGMT_PIPE 0x00
-#define NFC_HCI_ADMIN_PIPE 0x01
-
#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index 7212cf2c578..f4dad1a8974 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -105,7 +105,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
}
mutex_lock(&hdev->msg_tx_mutex);
- list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l);
+ list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
mutex_unlock(&hdev->msg_tx_mutex);
queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 5665dc6d893..6f840c18c89 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -340,15 +340,6 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
shdlc->state = SHDLC_CONNECTED;
} else {
shdlc->state = SHDLC_DISCONNECTED;
-
- /*
- * TODO: Could it be possible that there are pending
- * executing commands that are waiting for connect to complete
- * before they can be carried? As connect is a blocking
- * operation, it would require that the userspace process can
- * send commands on the same device from a second thread before
- * the device is up. I don't think that is possible, is it?
- */
}
shdlc->connect_result = r;
@@ -413,12 +404,12 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
r = nfc_shdlc_connect_send_ua(shdlc);
nfc_shdlc_connect_complete(shdlc, r);
}
- } else if (shdlc->state > SHDLC_NEGOCIATING) {
+ } else if (shdlc->state == SHDLC_CONNECTED) {
/*
- * TODO: Chip wants to reset link
- * send ua, empty skb lists, reset counters
- * propagate info to HCI layer
+ * Chip wants to reset link. This is unexpected and
+ * unsupported.
*/
+ shdlc->hard_fault = -ECONNRESET;
}
break;
case U_FRAME_UA:
@@ -523,10 +514,6 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
r = shdlc->ops->xmit(shdlc, skb);
if (r < 0) {
- /*
- * TODO: Cannot send, shdlc machine is dead, we
- * must propagate the information up to HCI.
- */
shdlc->hard_fault = r;
break;
}
@@ -590,6 +577,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
skb_queue_purge(&shdlc->ack_pending_q);
break;
case SHDLC_CONNECTING:
+ if (shdlc->hard_fault) {
+ nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
+
if (shdlc->connect_tries++ < 5)
r = nfc_shdlc_connect_initiate(shdlc);
else
@@ -610,6 +602,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
}
nfc_shdlc_handle_rcv_queue(shdlc);
+
+ if (shdlc->hard_fault) {
+ nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
break;
case SHDLC_CONNECTED:
nfc_shdlc_handle_rcv_queue(shdlc);
@@ -637,10 +634,7 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
}
if (shdlc->hard_fault) {
- /*
- * TODO: Handle hard_fault that occured during
- * this invocation of the shdlc worker
- */
+ nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault);
}
break;
default:
@@ -765,14 +759,16 @@ static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
+static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
pr_debug("\n");
if (shdlc->ops->start_poll)
- return shdlc->ops->start_poll(shdlc, protocols);
+ return shdlc->ops->start_poll(shdlc,
+ im_protocols, tm_protocols);
return 0;
}
@@ -921,8 +917,6 @@ void nfc_shdlc_free(struct nfc_shdlc *shdlc)
{
pr_debug("\n");
- /* TODO: Check that this cannot be called while still in use */
-
nfc_hci_unregister_device(shdlc->hdev);
nfc_hci_free_device(shdlc->hdev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index bf8ae4f0b90..b982b5b890d 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -51,7 +51,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type)
return tlv[2];
}
-static u8 llcp_tlv16(u8 *tlv, u8 type)
+static u16 llcp_tlv16(u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
@@ -67,7 +67,7 @@ static u8 llcp_tlv_version(u8 *tlv)
static u16 llcp_tlv_miux(u8 *tlv)
{
- return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f;
+ return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
}
static u16 llcp_tlv_wks(u8 *tlv)
@@ -117,8 +117,8 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
return tlv;
}
-int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len)
+int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len)
{
u8 *tlv = tlv_array, type, length, offset = 0;
@@ -149,8 +149,45 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
case LLCP_TLV_OPT:
local->remote_opt = llcp_tlv_opt(tlv);
break;
+ default:
+ pr_err("Invalid gt tlv value 0x%x\n", type);
+ break;
+ }
+
+ offset += length + 2;
+ tlv += length + 2;
+ }
+
+ pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n",
+ local->remote_version, local->remote_miu,
+ local->remote_lto, local->remote_opt,
+ local->remote_wks);
+
+ return 0;
+}
+
+int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
+ u8 *tlv_array, u16 tlv_array_len)
+{
+ u8 *tlv = tlv_array, type, length, offset = 0;
+
+ pr_debug("TLV array length %d\n", tlv_array_len);
+
+ if (sock == NULL)
+ return -ENOTCONN;
+
+ while (offset < tlv_array_len) {
+ type = tlv[0];
+ length = tlv[1];
+
+ pr_debug("type 0x%x length %d\n", type, length);
+
+ switch (type) {
+ case LLCP_TLV_MIUX:
+ sock->miu = llcp_tlv_miux(tlv) + 128;
+ break;
case LLCP_TLV_RW:
- local->remote_rw = llcp_tlv_rw(tlv);
+ sock->rw = llcp_tlv_rw(tlv);
break;
case LLCP_TLV_SN:
break;
@@ -163,10 +200,7 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
tlv += length + 2;
}
- pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
- local->remote_version, local->remote_miu,
- local->remote_lto, local->remote_opt,
- local->remote_wks, local->remote_rw);
+ pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu);
return 0;
}
@@ -474,7 +508,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
while (remaining_len > 0) {
- frag_len = min_t(size_t, local->remote_miu, remaining_len);
+ frag_len = min_t(size_t, sock->miu, remaining_len);
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 42994fac26d..82f0f7588b4 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -31,47 +31,41 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
static struct list_head llcp_devices;
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
+void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
{
- struct nfc_llcp_sock *parent, *s, *n;
- struct sock *sk, *parent_sk;
- int i;
-
- mutex_lock(&local->socket_lock);
-
- for (i = 0; i < LLCP_MAX_SAP; i++) {
- parent = local->sockets[i];
- if (parent == NULL)
- continue;
-
- /* Release all child sockets */
- list_for_each_entry_safe(s, n, &parent->list, list) {
- list_del_init(&s->list);
- sk = &s->sk;
-
- lock_sock(sk);
-
- if (sk->sk_state == LLCP_CONNECTED)
- nfc_put_device(s->dev);
+ write_lock(&l->lock);
+ sk_add_node(sk, &l->head);
+ write_unlock(&l->lock);
+}
- sk->sk_state = LLCP_CLOSED;
+void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
+{
+ write_lock(&l->lock);
+ sk_del_node_init(sk);
+ write_unlock(&l->lock);
+}
- release_sock(sk);
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+{
+ struct sock *sk;
+ struct hlist_node *node, *tmp;
+ struct nfc_llcp_sock *llcp_sock;
- sock_orphan(sk);
+ write_lock(&local->sockets.lock);
- s->local = NULL;
- }
+ sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
+ llcp_sock = nfc_llcp_sock(sk);
- parent_sk = &parent->sk;
+ lock_sock(sk);
- lock_sock(parent_sk);
+ if (sk->sk_state == LLCP_CONNECTED)
+ nfc_put_device(llcp_sock->dev);
- if (parent_sk->sk_state == LLCP_LISTEN) {
+ if (sk->sk_state == LLCP_LISTEN) {
struct nfc_llcp_sock *lsk, *n;
struct sock *accept_sk;
- list_for_each_entry_safe(lsk, n, &parent->accept_queue,
+ list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
accept_queue) {
accept_sk = &lsk->sk;
lock_sock(accept_sk);
@@ -83,35 +77,94 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
release_sock(accept_sk);
sock_orphan(accept_sk);
+ }
- lsk->local = NULL;
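+ /*
+ * On link down, keep listening sockets alive so they
+ * can accept new connections; close them only on
+ * device removal.
+ */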
+ if (listen) {
+ release_sock(sk);
+ continue;
}
}
- if (parent_sk->sk_state == LLCP_CONNECTED)
- nfc_put_device(parent->dev);
-
- parent_sk->sk_state = LLCP_CLOSED;
+ sk->sk_state = LLCP_CLOSED;
- release_sock(parent_sk);
+ release_sock(sk);
- sock_orphan(parent_sk);
+ sock_orphan(sk);
- parent->local = NULL;
+ sk_del_node_init(sk);
}
- mutex_unlock(&local->socket_lock);
+ write_unlock(&local->sockets.lock);
}
-static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
+struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
{
- mutex_lock(&local->sdp_lock);
+ kref_get(&local->ref);
- local->local_wks = 0;
- local->local_sdp = 0;
- local->local_sap = 0;
+ return local;
+}
- mutex_unlock(&local->sdp_lock);
+static void local_release(struct kref *ref)
+{
+ struct nfc_llcp_local *local;
+
+ local = container_of(ref, struct nfc_llcp_local, ref);
+
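+ /* Last user is gone: tear down timers, queues and workqueues. */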
+ list_del(&local->list);
+ nfc_llcp_socket_release(local, false);
+ del_timer_sync(&local->link_timer);
+ skb_queue_purge(&local->tx_queue);
+ destroy_workqueue(local->tx_wq);
+ destroy_workqueue(local->rx_wq);
+ destroy_workqueue(local->timeout_wq);
+ kfree_skb(local->rx_pending);
+ kfree(local);
+}
+
+int nfc_llcp_local_put(struct nfc_llcp_local *local)
+{
+ if (local == NULL)
+ return 0;
+
+ return kref_put(&local->ref, local_release);
+}
+
+static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+ u8 ssap, u8 dsap)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct nfc_llcp_sock *llcp_sock, *tmp_sock;
+
+ pr_debug("ssap dsap %d %d\n", ssap, dsap);
+
+ if (ssap == 0 && dsap == 0)
+ return NULL;
+
+ read_lock(&local->sockets.lock);
+
+ llcp_sock = NULL;
+
+ sk_for_each(sk, node, &local->sockets.head) {
+ tmp_sock = nfc_llcp_sock(sk);
+
+ if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
+ llcp_sock = tmp_sock;
+ break;
+ }
+ }
+
+ read_unlock(&local->sockets.lock);
+
+ if (llcp_sock == NULL)
+ return NULL;
+
+ sock_hold(&llcp_sock->sk);
+
+ return llcp_sock;
+}
+
+static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+{
+ sock_put(&sock->sk);
}
static void nfc_llcp_timeout_work(struct work_struct *work)
@@ -174,6 +227,51 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
return -EINVAL;
}
+static
+struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+ u8 *sn, size_t sn_len)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct nfc_llcp_sock *llcp_sock, *tmp_sock;
+
+ pr_debug("sn %zd %p\n", sn_len, sn);
+
+ if (sn == NULL || sn_len == 0)
+ return NULL;
+
+ read_lock(&local->sockets.lock);
+
+ llcp_sock = NULL;
+
+ sk_for_each(sk, node, &local->sockets.head) {
+ tmp_sock = nfc_llcp_sock(sk);
+
+ pr_debug("llcp sock %p\n", tmp_sock);
+
+ if (tmp_sock->sk.sk_state != LLCP_LISTEN)
+ continue;
+
+ if (tmp_sock->service_name == NULL ||
+ tmp_sock->service_name_len == 0)
+ continue;
+
+ if (tmp_sock->service_name_len != sn_len)
+ continue;
+
+ if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
+ llcp_sock = tmp_sock;
+ break;
+ }
+ }
+
+ read_unlock(&local->sockets.lock);
+
+ pr_debug("Found llcp sock %p\n", llcp_sock);
+
+ return llcp_sock;
+}
+
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock)
{
@@ -200,41 +298,26 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
}
/*
- * This is not a well known service,
- * we should try to find a local SDP free spot
+ * Check if there already is a non WKS socket bound
+ * to this service name.
*/
- ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
- if (ssap == LLCP_SDP_NUM_SAP) {
+ if (nfc_llcp_sock_from_sn(local, sock->service_name,
+ sock->service_name_len) != NULL) {
mutex_unlock(&local->sdp_lock);
return LLCP_SAP_MAX;
}
- pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
-
- set_bit(ssap, &local->local_sdp);
mutex_unlock(&local->sdp_lock);
- return LLCP_WKS_NUM_SAP + ssap;
+ return LLCP_SDP_UNBOUND;
- } else if (sock->ssap != 0) {
- if (sock->ssap < LLCP_WKS_NUM_SAP) {
- if (!test_bit(sock->ssap, &local->local_wks)) {
- set_bit(sock->ssap, &local->local_wks);
- mutex_unlock(&local->sdp_lock);
-
- return sock->ssap;
- }
-
- } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
- if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
- &local->local_sdp)) {
- set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
- &local->local_sdp);
- mutex_unlock(&local->sdp_lock);
+ } else if (sock->ssap != 0 && sock->ssap < LLCP_WKS_NUM_SAP) {
+ if (!test_bit(sock->ssap, &local->local_wks)) {
+ set_bit(sock->ssap, &local->local_wks);
+ mutex_unlock(&local->sdp_lock);
- return sock->ssap;
- }
+ return sock->ssap;
}
}
@@ -271,8 +354,34 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
local_ssap = ssap;
sdp = &local->local_wks;
} else if (ssap < LLCP_LOCAL_NUM_SAP) {
+ atomic_t *client_cnt;
+
local_ssap = ssap - LLCP_WKS_NUM_SAP;
sdp = &local->local_sdp;
+ client_cnt = &local->local_sdp_cnt[local_ssap];
+
+ pr_debug("%d clients\n", atomic_read(client_cnt));
+
+ mutex_lock(&local->sdp_lock);
+
+ if (atomic_dec_and_test(client_cnt)) {
+ struct nfc_llcp_sock *l_sock;
+
+ pr_debug("No more clients for SAP %d\n", ssap);
+
+ clear_bit(local_ssap, sdp);
+
+ /* Find the listening sock and set it back to UNBOUND */
+ l_sock = nfc_llcp_sock_get(local, ssap, LLCP_SAP_SDP);
+ if (l_sock) {
+ l_sock->ssap = LLCP_SDP_UNBOUND;
+ nfc_llcp_sock_put(l_sock);
+ }
+ }
+
+ mutex_unlock(&local->sdp_lock);
+
+ return;
} else if (ssap < LLCP_MAX_SAP) {
local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
sdp = &local->local_sap;
@@ -287,19 +396,26 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
mutex_unlock(&local->sdp_lock);
}
-u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
{
- struct nfc_llcp_local *local;
+ u8 ssap;
- local = nfc_llcp_find_local(dev);
- if (local == NULL) {
- *general_bytes_len = 0;
- return NULL;
+ mutex_lock(&local->sdp_lock);
+
+ ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
+ if (ssap == LLCP_SDP_NUM_SAP) {
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_SAP_MAX;
}
- *general_bytes_len = local->gb_len;
+ pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
- return local->gb;
+ set_bit(ssap, &local->local_sdp);
+
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_WKS_NUM_SAP + ssap;
}
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
@@ -363,6 +479,23 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
return 0;
}
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL) {
+ *general_bytes_len = 0;
+ return NULL;
+ }
+
+ nfc_llcp_build_gb(local);
+
+ *general_bytes_len = local->gb_len;
+
+ return local->gb;
+}
+
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
{
struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
@@ -384,31 +517,9 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
return -EINVAL;
}
- return nfc_llcp_parse_tlv(local,
- &local->remote_gb[3],
- local->remote_gb_len - 3);
-}
-
-static void nfc_llcp_tx_work(struct work_struct *work)
-{
- struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
- tx_work);
- struct sk_buff *skb;
-
- skb = skb_dequeue(&local->tx_queue);
- if (skb != NULL) {
- pr_debug("Sending pending skb\n");
- print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
- 16, 1, skb->data, skb->len, true);
-
- nfc_data_exchange(local->dev, local->target_idx,
- skb, nfc_llcp_recv, local);
- } else {
- nfc_llcp_send_symm(local->dev);
- }
-
- mod_timer(&local->link_timer,
- jiffies + msecs_to_jiffies(local->remote_lto));
+ return nfc_llcp_parse_gb_tlv(local,
+ &local->remote_gb[3],
+ local->remote_gb_len - 3);
}
static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -443,51 +554,84 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
sock->recv_ack_n = (sock->recv_n - 1) % 16;
}
-static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
- u8 ssap, u8 dsap)
+static void nfc_llcp_tx_work(struct work_struct *work)
{
- struct nfc_llcp_sock *sock, *llcp_sock, *n;
+ struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+ tx_work);
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct nfc_llcp_sock *llcp_sock;
- pr_debug("ssap dsap %d %d\n", ssap, dsap);
+ skb = skb_dequeue(&local->tx_queue);
+ if (skb != NULL) {
+ sk = skb->sk;
+ llcp_sock = nfc_llcp_sock(sk);
+ if (llcp_sock != NULL) {
+ int ret;
+
+ pr_debug("Sending pending skb\n");
+ print_hex_dump(KERN_DEBUG, "LLCP Tx: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+
+ ret = nfc_data_exchange(local->dev, local->target_idx,
+ skb, nfc_llcp_recv, local);
+
+ if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
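+ /*
+ * Keep a reference on sent I frames until the
+ * remote acks them (see nfc_llcp_recv_hdlc).
+ */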
+ skb = skb_get(skb);
+ skb_queue_tail(&llcp_sock->tx_pending_queue,
+ skb);
+ }
+ } else {
+ nfc_llcp_send_symm(local->dev);
+ }
+ } else {
+ nfc_llcp_send_symm(local->dev);
+ }
- if (ssap == 0 && dsap == 0)
- return NULL;
+ mod_timer(&local->link_timer,
+ jiffies + msecs_to_jiffies(2 * local->remote_lto));
+}
- mutex_lock(&local->socket_lock);
- sock = local->sockets[ssap];
- if (sock == NULL) {
- mutex_unlock(&local->socket_lock);
- return NULL;
- }
+static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local,
+ u8 ssap)
+{
+ struct sock *sk;
+ struct nfc_llcp_sock *llcp_sock;
+ struct hlist_node *node;
- pr_debug("root dsap %d (%d)\n", sock->dsap, dsap);
+ read_lock(&local->connecting_sockets.lock);
- if (sock->dsap == dsap) {
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
- return sock;
- }
+ sk_for_each(sk, node, &local->connecting_sockets.head) {
+ llcp_sock = nfc_llcp_sock(sk);
- list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
- pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
- &llcp_sock->sk, llcp_sock->dsap);
- if (llcp_sock->dsap == dsap) {
+ if (llcp_sock->ssap == ssap) {
sock_hold(&llcp_sock->sk);
- mutex_unlock(&local->socket_lock);
- return llcp_sock;
+ goto out;
}
}
- pr_err("Could not find socket for %d %d\n", ssap, dsap);
+ llcp_sock = NULL;
- mutex_unlock(&local->socket_lock);
+out:
+ read_unlock(&local->connecting_sockets.lock);
- return NULL;
+ return llcp_sock;
}
-static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
+ u8 *sn, size_t sn_len)
{
- sock_put(&sock->sk);
+ struct nfc_llcp_sock *llcp_sock;
+
+ llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
+
+ if (llcp_sock == NULL)
+ return NULL;
+
+ sock_hold(&llcp_sock->sk);
+
+ return llcp_sock;
}
static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
@@ -518,35 +662,19 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
{
struct sock *new_sk, *parent;
struct nfc_llcp_sock *sock, *new_sock;
- u8 dsap, ssap, bound_sap, reason;
+ u8 dsap, ssap, reason;
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
pr_debug("%d %d\n", dsap, ssap);
- nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
-
if (dsap != LLCP_SAP_SDP) {
- bound_sap = dsap;
-
- mutex_lock(&local->socket_lock);
- sock = local->sockets[dsap];
- if (sock == NULL) {
- mutex_unlock(&local->socket_lock);
+ sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+ if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) {
reason = LLCP_DM_NOBOUND;
goto fail;
}
-
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
-
- lock_sock(&sock->sk);
-
- if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN)
- goto enqueue;
} else {
u8 *sn;
size_t sn_len;
@@ -559,40 +687,15 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
pr_debug("Service name length %zu\n", sn_len);
- mutex_lock(&local->socket_lock);
- for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
- bound_sap++) {
- sock = local->sockets[bound_sap];
- if (sock == NULL)
- continue;
-
- if (sock->service_name == NULL ||
- sock->service_name_len == 0)
- continue;
-
- if (sock->service_name_len != sn_len)
- continue;
-
- if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN &&
- !memcmp(sn, sock->service_name, sn_len)) {
- pr_debug("Found service name at SAP %d\n",
- bound_sap);
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
-
- lock_sock(&sock->sk);
-
- goto enqueue;
- }
+ sock = nfc_llcp_sock_get_sn(local, sn, sn_len);
+ if (sock == NULL) {
+ reason = LLCP_DM_NOBOUND;
+ goto fail;
}
- mutex_unlock(&local->socket_lock);
}
- reason = LLCP_DM_NOBOUND;
- goto fail;
+ lock_sock(&sock->sk);
-enqueue:
parent = &sock->sk;
if (sk_acceptq_is_full(parent)) {
@@ -602,6 +705,21 @@ enqueue:
goto fail;
}
+ if (sock->ssap == LLCP_SDP_UNBOUND) {
+ u8 ssap = nfc_llcp_reserve_sdp_ssap(local);
+
+ pr_debug("First client, reserving %d\n", ssap);
+
+ if (ssap == LLCP_SAP_MAX) {
+ reason = LLCP_DM_REJ;
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ sock->ssap = ssap;
+ }
+
new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
if (new_sk == NULL) {
reason = LLCP_DM_REJ;
@@ -612,15 +730,31 @@ enqueue:
new_sock = nfc_llcp_sock(new_sk);
new_sock->dev = local->dev;
- new_sock->local = local;
+ new_sock->local = nfc_llcp_local_get(local);
+ new_sock->miu = local->remote_miu;
new_sock->nfc_protocol = sock->nfc_protocol;
- new_sock->ssap = bound_sap;
new_sock->dsap = ssap;
+ new_sock->target_idx = local->target_idx;
new_sock->parent = parent;
+ new_sock->ssap = sock->ssap;
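+
+ /*
+ * SDP SSAPs are shared by all clients of the same service:
+ * count this one so the SAP is released only when the last
+ * client goes away (see nfc_llcp_put_ssap).
+ */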
+ if (sock->ssap < LLCP_LOCAL_NUM_SAP && sock->ssap >= LLCP_WKS_NUM_SAP) {
+ atomic_t *client_count;
+
+ pr_debug("reserved_ssap %d for %p\n", sock->ssap, new_sock);
+
+ client_count =
+ &local->local_sdp_cnt[sock->ssap - LLCP_WKS_NUM_SAP];
+
+ atomic_inc(client_count);
+ new_sock->reserved_ssap = sock->ssap;
+ }
+
+ nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
- list_add_tail(&new_sock->list, &sock->list);
+ nfc_llcp_sock_link(&local->sockets, new_sk);
nfc_llcp_accept_enqueue(&sock->sk, new_sk);
@@ -654,12 +788,12 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
pr_debug("Remote ready %d tx queue len %d remote rw %d",
sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
- local->remote_rw);
+ sock->rw);
/* Try to queue some I frames for transmission */
while (sock->remote_ready &&
- skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) {
- struct sk_buff *pdu, *pending_pdu;
+ skb_queue_len(&sock->tx_pending_queue) < sock->rw) {
+ struct sk_buff *pdu;
pdu = skb_dequeue(&sock->tx_queue);
if (pdu == NULL)
@@ -668,10 +802,7 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
/* Update N(S)/N(R) */
nfc_llcp_set_nrns(sock, pdu);
- pending_pdu = skb_clone(pdu, GFP_KERNEL);
-
skb_queue_tail(&local->tx_queue, pdu);
- skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
nr_frames++;
}
@@ -728,11 +859,21 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
llcp_sock->send_ack_n = nr;
- skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp)
- if (nfc_llcp_ns(s) <= nr) {
- skb_unlink(s, &llcp_sock->tx_pending_queue);
- kfree_skb(s);
- }
+ /* Remove and free all skbs until ns == nr */
+ skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
+ u8 n_s = nfc_llcp_ns(s);
+
+ skb_unlink(s, &llcp_sock->tx_pending_queue);
+ kfree_skb(s);
+
+ if (n_s == nr)
+ break;
+ }
+
+ /* Re-queue the remaining skbs for transmission */
+ skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue,
+ s, tmp) {
+ skb_unlink(s, &llcp_sock->tx_pending_queue);
+ skb_queue_head(&local->tx_queue, s);
+ }
}
if (ptype == LLCP_PDU_RR)
@@ -740,7 +881,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
else if (ptype == LLCP_PDU_RNR)
llcp_sock->remote_ready = false;
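+ /*
+ * Ack with an explicit RR only when an I frame came in
+ * and no outgoing I frame could carry the N(R).
+ */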
- if (nfc_llcp_queue_i_frames(llcp_sock) == 0)
+ if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I)
nfc_llcp_send_rr(llcp_sock);
release_sock(sk);
@@ -791,11 +932,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
- llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
-
- if (llcp_sock == NULL)
- llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
-
+ llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
if (llcp_sock == NULL) {
pr_err("Invalid CC\n");
nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
@@ -803,11 +940,15 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
return;
}
- llcp_sock->dsap = ssap;
sk = &llcp_sock->sk;
- nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
+ /* Unlink from connecting and link to the client array */
+ nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+ nfc_llcp_sock_link(&local->sockets, sk);
+ llcp_sock->dsap = ssap;
+
+ nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
sk->sk_state = LLCP_CONNECTED;
sk->sk_state_change(sk);
@@ -815,6 +956,45 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
nfc_llcp_sock_put(llcp_sock);
}
+static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
+{
+ struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
+ u8 dsap, ssap, reason;
+
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+ reason = skb->data[2];
+
+ pr_debug("%d %d reason %d\n", ssap, dsap, reason);
+
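+ /*
+ * A rejected connect is still on the connecting list;
+ * any other DM targets an established socket.
+ */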
+ switch (reason) {
+ case LLCP_DM_NOBOUND:
+ case LLCP_DM_REJ:
+ llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
+ break;
+
+ default:
+ llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+ break;
+ }
+
+ if (llcp_sock == NULL) {
+ pr_err("Invalid DM\n");
+ return;
+ }
+
+ sk = &llcp_sock->sk;
+
+ sk->sk_err = ENXIO;
+ sk->sk_state = LLCP_CLOSED;
+ sk->sk_state_change(sk);
+
+ nfc_llcp_sock_put(llcp_sock);
+
+ return;
+}
+
static void nfc_llcp_rx_work(struct work_struct *work)
{
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -858,6 +1038,11 @@ static void nfc_llcp_rx_work(struct work_struct *work)
nfc_llcp_recv_cc(local, skb);
break;
+ case LLCP_PDU_DM:
+ pr_debug("DM\n");
+ nfc_llcp_recv_dm(local, skb);
+ break;
+
case LLCP_PDU_I:
case LLCP_PDU_RR:
case LLCP_PDU_RNR:
@@ -891,6 +1076,21 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
return;
}
+int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+ return -ENODEV;
+
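+ /* Hold a reference for the rx worker, which consumes rx_pending. */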
+ local->rx_pending = skb_get(skb);
+ del_timer(&local->link_timer);
+ queue_work(local->rx_wq, &local->rx_work);
+
+ return 0;
+}
+
void nfc_llcp_mac_is_down(struct nfc_dev *dev)
{
struct nfc_llcp_local *local;
@@ -899,10 +1099,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
if (local == NULL)
return;
- nfc_llcp_clear_sdp(local);
-
/* Close and purge all existing sockets */
- nfc_llcp_socket_release(local);
+ nfc_llcp_socket_release(local, true);
}
void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -943,8 +1141,8 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
local->dev = ndev;
INIT_LIST_HEAD(&local->list);
+ kref_init(&local->ref);
mutex_init(&local->sdp_lock);
- mutex_init(&local->socket_lock);
init_timer(&local->link_timer);
local->link_timer.data = (unsigned long) local;
local->link_timer.function = nfc_llcp_symm_timer;
@@ -984,11 +1182,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
goto err_rx_wq;
}
+ rwlock_init(&local->sockets.lock);
+ rwlock_init(&local->connecting_sockets.lock);
+
nfc_llcp_build_gb(local);
local->remote_miu = LLCP_DEFAULT_MIU;
local->remote_lto = LLCP_DEFAULT_LTO;
- local->remote_rw = LLCP_DEFAULT_RW;
list_add(&llcp_devices, &local->list);
@@ -1015,14 +1215,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
return;
}
- list_del(&local->list);
- nfc_llcp_socket_release(local);
- del_timer_sync(&local->link_timer);
- skb_queue_purge(&local->tx_queue);
- destroy_workqueue(local->tx_wq);
- destroy_workqueue(local->rx_wq);
- kfree_skb(local->rx_pending);
- kfree(local);
+ nfc_llcp_local_put(local);
}
int __init nfc_llcp_init(void)
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 50680ce5ae4..83b8bba5a28 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -37,15 +37,22 @@ enum llcp_state {
#define LLCP_LOCAL_NUM_SAP 32
#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
+#define LLCP_SDP_UNBOUND (LLCP_MAX_SAP + 1)
struct nfc_llcp_sock;
+struct llcp_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+};
+
struct nfc_llcp_local {
struct list_head list;
struct nfc_dev *dev;
+ struct kref ref;
+
struct mutex sdp_lock;
- struct mutex socket_lock;
struct timer_list link_timer;
struct sk_buff_head tx_queue;
@@ -63,6 +70,7 @@ struct nfc_llcp_local {
unsigned long local_wks; /* Well known services */
unsigned long local_sdp; /* Local services */
unsigned long local_sap; /* Local SAPs, not available for discovery */
+ atomic_t local_sdp_cnt[LLCP_SDP_NUM_SAP];
/* local */
u8 gb[NFC_MAX_GT_LEN];
@@ -77,24 +85,26 @@ struct nfc_llcp_local {
u16 remote_lto;
u8 remote_opt;
u16 remote_wks;
- u8 remote_rw;
/* sockets array */
- struct nfc_llcp_sock *sockets[LLCP_MAX_SAP];
+ struct llcp_sock_list sockets;
+ struct llcp_sock_list connecting_sockets;
};
struct nfc_llcp_sock {
struct sock sk;
- struct list_head list;
struct nfc_dev *dev;
struct nfc_llcp_local *local;
u32 target_idx;
u32 nfc_protocol;
+ /* Link parameters */
u8 ssap;
u8 dsap;
char *service_name;
size_t service_name_len;
+ u8 rw;
+ u16 miu;
/* Link variables */
u8 send_n;
@@ -105,6 +115,9 @@ struct nfc_llcp_sock {
/* Is the remote peer ready to receive */
u8 remote_ready;
+ /* Reserved source SAP */
+ u8 reserved_ssap;
+
struct sk_buff_head tx_queue;
struct sk_buff_head tx_pending_queue;
struct sk_buff_head tx_backlog_queue;
@@ -164,7 +177,11 @@ struct nfc_llcp_sock {
#define LLCP_DM_REJ 0x03
+void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
+void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
+int nfc_llcp_local_put(struct nfc_llcp_local *local);
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock);
u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
@@ -179,8 +196,10 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
/* TLV API */
-int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len);
+int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len);
+int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
+ u8 *tlv_array, u16 tlv_array_len);
/* Commands API */
void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index e06d458fc71..ddeb9aa398f 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -78,11 +78,11 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
-
if (!addr || addr->sa_family != AF_NFC)
return -EINVAL;
+ pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
memset(&llcp_addr, 0, sizeof(llcp_addr));
len = min_t(unsigned int, sizeof(llcp_addr), alen);
memcpy(&llcp_addr, addr, len);
@@ -111,7 +111,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
}
llcp_sock->dev = dev;
- llcp_sock->local = local;
+ llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
llcp_addr.service_name_len,
@@ -121,10 +121,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
GFP_KERNEL);
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
- if (llcp_sock->ssap == LLCP_MAX_SAP)
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ ret = -EADDRINUSE;
goto put_dev;
+ }
+
+ llcp_sock->reserved_ssap = llcp_sock->ssap;
- local->sockets[llcp_sock->ssap] = llcp_sock;
+ nfc_llcp_sock_link(&local->sockets, sk);
pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
@@ -283,22 +287,28 @@ error:
return ret;
}
-static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
+static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
int *len, int peer)
{
- struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr);
- pr_debug("%p\n", sk);
+ if (llcp_sock == NULL || llcp_sock->dev == NULL)
+ return -EBADFD;
+
+ pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
+ llcp_sock->dsap, llcp_sock->ssap);
- if (llcp_sock == NULL || llcp_sock->dev == NULL)
- return -EBADFD;
- addr->sa_family = AF_NFC;
+ uaddr->sa_family = AF_NFC;
+
*len = sizeof(struct sockaddr_nfc_llcp);
llcp_addr->dev_idx = llcp_sock->dev->idx;
+ llcp_addr->target_idx = llcp_sock->target_idx;
llcp_addr->dsap = llcp_sock->dsap;
llcp_addr->ssap = llcp_sock->ssap;
llcp_addr->service_name_len = llcp_sock->service_name_len;
@@ -382,15 +392,6 @@ static int llcp_sock_release(struct socket *sock)
goto out;
}
- mutex_lock(&local->socket_lock);
-
- if (llcp_sock == local->sockets[llcp_sock->ssap])
- local->sockets[llcp_sock->ssap] = NULL;
- else
- list_del_init(&llcp_sock->list);
-
- mutex_unlock(&local->socket_lock);
-
lock_sock(sk);
/* Send a DISC */
@@ -415,14 +416,13 @@ static int llcp_sock_release(struct socket *sock)
}
}
- /* Freeing the SAP */
- if ((sk->sk_state == LLCP_CONNECTED
- && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
- sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
+ if (llcp_sock->reserved_ssap < LLCP_SAP_MAX)
nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
release_sock(sk);
+ nfc_llcp_sock_unlink(&local->sockets, sk);
+
out:
sock_orphan(sk);
sock_put(sk);
@@ -490,12 +490,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
}
llcp_sock->dev = dev;
- llcp_sock->local = local;
+ llcp_sock->local = nfc_llcp_local_get(local);
+ llcp_sock->miu = llcp_sock->local->remote_miu;
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
ret = -ENOMEM;
goto put_dev;
}
+
+ llcp_sock->reserved_ssap = llcp_sock->ssap;
+
if (addr->service_name_len == 0)
llcp_sock->dsap = addr->dsap;
else
@@ -508,21 +512,26 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
llcp_sock->service_name_len,
GFP_KERNEL);
- local->sockets[llcp_sock->ssap] = llcp_sock;
+ nfc_llcp_sock_link(&local->connecting_sockets, sk);
ret = nfc_llcp_send_connect(llcp_sock);
if (ret)
- goto put_dev;
+ goto sock_unlink;
ret = sock_wait_state(sk, LLCP_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
if (ret)
- goto put_dev;
+ goto sock_unlink;
release_sock(sk);
return 0;
+sock_unlink:
+ nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
+ nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+
put_dev:
nfc_put_device(dev);
@@ -687,13 +696,15 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
llcp_sock->ssap = 0;
llcp_sock->dsap = LLCP_SAP_SDP;
+ llcp_sock->rw = LLCP_DEFAULT_RW;
+ llcp_sock->miu = LLCP_DEFAULT_MIU;
llcp_sock->send_n = llcp_sock->send_ack_n = 0;
llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
llcp_sock->remote_ready = 1;
+ llcp_sock->reserved_ssap = LLCP_SAP_MAX;
skb_queue_head_init(&llcp_sock->tx_queue);
skb_queue_head_init(&llcp_sock->tx_pending_queue);
skb_queue_head_init(&llcp_sock->tx_backlog_queue);
- INIT_LIST_HEAD(&llcp_sock->list);
INIT_LIST_HEAD(&llcp_sock->accept_queue);
if (sock != NULL)
@@ -704,8 +715,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
{
- struct nfc_llcp_local *local = sock->local;
-
kfree(sock->service_name);
skb_queue_purge(&sock->tx_queue);
@@ -714,12 +723,9 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
list_del_init(&sock->accept_queue);
- if (local != NULL && sock == local->sockets[sock->ssap])
- local->sockets[sock->ssap] = NULL;
- else
- list_del_init(&sock->list);
-
sock->parent = NULL;
+
+ nfc_llcp_local_put(sock->local);
}
static int llcp_sock_create(struct net *net, struct socket *sock,
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index d560e6f1307..f81efe13985 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -27,6 +27,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -194,7 +195,7 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_ISO14443_MASK)) {
+ (protocols & NFC_PROTO_ISO14443_B_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_B_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -387,7 +388,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
return nci_close_device(ndev);
}
-static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+static int nci_start_poll(struct nfc_dev *nfc_dev,
+ __u32 im_protocols, __u32 tm_protocols)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -413,11 +415,11 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
return -EBUSY;
}
- rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
if (!rc)
- ndev->poll_prots = protocols;
+ ndev->poll_prots = im_protocols;
return rc;
}
@@ -485,7 +487,8 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
param.rf_protocol = NCI_RF_PROTOCOL_T2T;
else if (protocol == NFC_PROTO_FELICA)
param.rf_protocol = NCI_RF_PROTOCOL_T3T;
- else if (protocol == NFC_PROTO_ISO14443)
+ else if (protocol == NFC_PROTO_ISO14443 ||
+ protocol == NFC_PROTO_ISO14443_B)
param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
else
param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
@@ -521,9 +524,9 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
}
}
-static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
- struct sk_buff *skb,
- data_exchange_cb_t cb, void *cb_context)
+static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -556,7 +559,7 @@ static struct nfc_ops nci_nfc_ops = {
.stop_poll = nci_stop_poll,
.activate_target = nci_activate_target,
.deactivate_target = nci_deactivate_target,
- .data_exchange = nci_data_exchange,
+ .im_transceive = nci_transceive,
};
/* ---- Interface to NCI drivers ---- */
@@ -878,3 +881,5 @@ static void nci_cmd_work(struct work_struct *work)
jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
}
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 2ab196a9f22..af7a93b0439 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -170,7 +170,10 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
if (rf_protocol == NCI_RF_PROTOCOL_T2T)
protocol = NFC_PROTO_MIFARE_MASK;
- else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
- protocol = NFC_PROTO_ISO14443_MASK;
+ else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) {
+ if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE)
+ protocol = NFC_PROTO_ISO14443_MASK;
+ else
+ protocol = NFC_PROTO_ISO14443_B_MASK;
+ }
else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
protocol = NFC_PROTO_FELICA_MASK;
else
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 581d419083a..4c51714ee74 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -49,6 +49,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
+ [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
+ [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
};
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -165,7 +167,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
dev->genl_data.poll_req_pid = 0;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -193,7 +195,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -219,12 +221,74 @@ free_msg:
return -EMSGSIZE;
}
+int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_TM_ACTIVATED);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+int nfc_genl_tm_deactivated(struct nfc_dev *dev)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_TM_DEACTIVATED);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
int nfc_genl_device_added(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -257,7 +321,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -370,7 +434,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
pr_debug("DEP link is up\n");
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -409,7 +473,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
pr_debug("DEP link is down\n");
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -450,7 +514,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
rc = -ENOMEM;
goto out_putdev;
@@ -519,16 +583,25 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
struct nfc_dev *dev;
int rc;
u32 idx;
- u32 protocols;
+ u32 im_protocols = 0, tm_protocols = 0;
pr_debug("Poll start\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_PROTOCOLS])
+ ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
+ !info->attrs[NFC_ATTR_PROTOCOLS]) &&
+ !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
- protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
+
+ if (info->attrs[NFC_ATTR_TM_PROTOCOLS])
+ tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]);
+
+ if (info->attrs[NFC_ATTR_IM_PROTOCOLS])
+ im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]);
+ else if (info->attrs[NFC_ATTR_PROTOCOLS])
+ im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
dev = nfc_get_device(idx);
if (!dev)
@@ -536,7 +609,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&dev->genl_data.genl_data_mutex);
- rc = nfc_start_poll(dev, protocols);
+ rc = nfc_start_poll(dev, im_protocols, tm_protocols);
if (!rc)
dev->genl_data.poll_req_pid = info->snd_pid;
@@ -561,6 +634,15 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
+ device_lock(&dev->dev);
+
+ if (!dev->polling) {
+ device_unlock(&dev->dev);
+ return -EINVAL;
+ }
+
+ device_unlock(&dev->dev);
+
mutex_lock(&dev->genl_data.genl_data_mutex);
if (dev->genl_data.poll_req_pid != info->snd_pid) {
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 3dd4232ae66..c5e42b79a41 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -55,6 +55,7 @@ int nfc_llcp_register_device(struct nfc_dev *dev);
void nfc_llcp_unregister_device(struct nfc_dev *dev);
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
+int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
int __init nfc_llcp_init(void);
void nfc_llcp_exit(void);
@@ -90,6 +91,12 @@ static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
return NULL;
}
+static inline int nfc_llcp_data_received(struct nfc_dev *dev,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int nfc_llcp_init(void)
{
return 0;
@@ -128,6 +135,9 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
+int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
+int nfc_genl_tm_deactivated(struct nfc_dev *dev);
+
struct nfc_dev *nfc_get_device(unsigned int idx);
static inline void nfc_put_device(struct nfc_dev *dev)
@@ -158,7 +168,7 @@ int nfc_dev_up(struct nfc_dev *dev);
int nfc_dev_down(struct nfc_dev *dev);
-int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
+int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols);
int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 48badffaafc..f3f96badf5a 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2c74daa5aca..d8277d29e71 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -263,14 +263,15 @@ err:
static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
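+ /* Read gso_type before segmenting: skb is reassigned to
+ * the segment list below.
+ */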
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
struct dp_upcall_info later_info;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
/* Queue all of the segments. */
skb = segs;
@@ -279,7 +280,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
if (err)
break;
- if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+ if (skb == segs && gso_type & SKB_GSO_UDP) {
/* The initial flow key extracted by ovs_flow_extract()
* in this case is for a first fragment, so we need to
* properly mark later fragments.
@@ -1649,7 +1650,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
- if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+ if (err)
+ goto exit_unlock;
+ if (a[OVS_VPORT_ATTR_UPCALL_PID])
vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c73370cc1f0..c1105c14753 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 46736518c45..36dcee8fc84 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 6d4d8097cf9..b7f38b16190 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -182,7 +182,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
- if (flow->key.eth.type == htons(ETH_P_IP) &&
+ if ((flow->key.eth.type == htons(ETH_P_IP) ||
+ flow->key.eth.type == htons(ETH_P_IPV6)) &&
flow->key.ip.proto == IPPROTO_TCP &&
likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2747dc2c4ac..9b75617ca4e 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index b6b1d7daa3c..4061b9ee07f 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -24,6 +24,9 @@
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <net/dst.h>
+#include <net/xfrm.h>
+
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -209,6 +212,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
int len;
len = skb->len;
+
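+ /*
+ * The skb enters a fresh stack context: drop the routing,
+ * conntrack and IPsec state inherited from the sender.
+ */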
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ secpath_reset(skb);
+
skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
index 3454447c5f1..9a7d30ecc6a 100644
--- a/net/openvswitch/vport-internal_dev.h
+++ b/net/openvswitch/vport-internal_dev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 3fd6c0d88e1..6ea3551cc78 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index fd9b008a0e6..f7072a25c60 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6c066ba25dc..6140336e79d 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 19609629dab..aac680ca2b0 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0f661745df0..ceaca7c134a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -531,6 +531,7 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
struct ethtool_cmd ecmd;
int err;
+ u32 speed;
rtnl_lock();
dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
@@ -539,25 +540,18 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
return DEFAULT_PRB_RETIRE_TOV;
}
err = __ethtool_get_settings(dev, &ecmd);
+ speed = ethtool_cmd_speed(&ecmd);
rtnl_unlock();
if (!err) {
- switch (ecmd.speed) {
- case SPEED_10000:
- msec = 1;
- div = 10000/1000;
- break;
- case SPEED_1000:
- msec = 1;
- div = 1000/1000;
- break;
/*
* If the link speed is so slow you don't really
* need to worry about perf anyways
*/
- case SPEED_100:
- case SPEED_10:
- default:
+ if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
return DEFAULT_PRB_RETIRE_TOV;
+ } else {
+ msec = 1;
+ div = speed / 1000;
}
}
@@ -592,7 +586,7 @@ static void init_prb_bdqc(struct packet_sock *po,
p1->knxt_seq_num = 1;
p1->pkbdq = pg_vec;
pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
- p1->pkblk_start = (char *)pg_vec[0].buffer;
+ p1->pkblk_start = pg_vec[0].buffer;
p1->kblk_size = req_u->req3.tp_block_size;
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
@@ -824,8 +818,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
h1->ts_first_pkt.ts_sec = ts.tv_sec;
h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
pkc1->pkblk_start = (char *)pbd1;
- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+ pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
pbd1->version = pkc1->version;
@@ -1018,7 +1011,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
struct tpacket_block_desc *pbd;
char *curr, *end;
- pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
+ pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* Queue is frozen when user space is lagging behind */
@@ -1044,7 +1037,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
smp_mb();
curr = pkc->nxt_offset;
pkc->skb = skb;
- end = (char *) ((char *)pbd + pkc->kblk_size);
+ end = (char *)pbd + pkc->kblk_size;
/* first try the current block */
if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
@@ -1476,7 +1469,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
* Find the device first to size check it
*/
- saddr->spkt_device[13] = 0;
+ saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
diff --git a/net/rds/page.c b/net/rds/page.c
index 2499cd10842..9005a2c920e 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -74,11 +74,12 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
-/*
- * Message allocation uses this to build up regions of a message.
+/**
+ * rds_page_remainder_alloc - build up regions of a message.
*
- * @bytes - the number of bytes needed.
- * @gfp - the waiting behaviour of the allocation
+ * @scat: Scatter list for message
+ * @bytes: the number of bytes needed.
+ * @gfp: the waiting behaviour of the allocation
*
* @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to
* kmap the pages, etc.
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 5c6e9f13202..9f0f17cf6bf 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+ msg->msg_namelen = 0;
+
if (msg_flags & MSG_OOB)
goto out;
@@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ msg->msg_namelen = sizeof(*sin);
}
break;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f974961754c..752b72360eb 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -325,7 +325,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
rfkill_global_states[type].cur = blocked;
list_for_each_entry(rfkill, &rfkill_list, node) {
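+ /* RFKILL_TYPE_ALL must match every switch type. */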
- if (rfkill->type != type)
+ if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
rfkill_set_block(rfkill, blocked);
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5d6b572a670..a9206087b4d 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -81,10 +81,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
_net("I/F MTU %u", mtu);
}
- /* ip_rt_frag_needed() may have eaten the info */
- if (mtu == 0)
- mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
-
if (mtu == 0) {
/* they didn't give us a size, estimate one */
mtu = peer->if_mtu;
if (mtu > 1500) {
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 16ae88762d0..e1ac183d50b 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -242,7 +242,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
EXPORT_SYMBOL(rxrpc_kernel_send_data);
-/*
+/**
* rxrpc_kernel_abort_call - Allow a kernel service to abort a call
* @call: The call to be aborted
* @abort_code: The abort code to stick into the ABORT packet
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e7a8976bf25..62fb51face8 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -507,6 +507,26 @@ config NET_EMATCH_TEXT
To compile this code as a module, choose M here: the
module will be called em_text.
+config NET_EMATCH_CANID
+ tristate "CAN Identifier"
+ depends on NET_EMATCH && CAN
+ ---help---
+ Say Y here if you want to be able to classify CAN frames based
+ on CAN Identifier.
+
+ To compile this code as a module, choose M here: the
+ module will be called em_canid.
+
+config NET_EMATCH_IPSET
+ tristate "IPset"
+ depends on NET_EMATCH && IP_SET
+ ---help---
+ Say Y here if you want to be able to classify packets based on
+ ipset membership.
+
+ To compile this code as a module, choose M here: the
+ module will be called em_ipset.
+
config NET_CLS_ACT
bool "Actions"
---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 5940a1992f0..978cbf004e8 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -55,3 +55,5 @@ obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o
obj-$(CONFIG_NET_EMATCH_META) += em_meta.o
obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o
+obj-$(CONFIG_NET_EMATCH_CANID) += em_canid.o
+obj-$(CONFIG_NET_EMATCH_IPSET) += em_ipset.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5cfb160df06..e3d2c78cb52 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -652,27 +652,27 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
-
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_nlmsg_trim;
if (tcf_action_dump(skb, a, bind, ref) < 0)
- goto nla_put_failure;
+ goto out_nlmsg_trim;
nla_nest_end(skb, nest);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nla_put_failure:
-nlmsg_failure:
+out_nlmsg_trim:
nlmsg_trim(skb, b);
return -1;
}
@@ -799,19 +799,21 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
if (a->ops == NULL)
goto err_out;
- nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
+ if (!nlh)
+ goto out_module_put;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_module_put;
err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
if (err < 0)
- goto nla_put_failure;
+ goto out_module_put;
if (err == 0)
goto noflush_out;
@@ -828,8 +830,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
return err;
-nla_put_failure:
-nlmsg_failure:
+out_module_put:
module_put(a->ops->owner);
err_out:
noflush_out:
@@ -919,18 +920,20 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+ if (!nlh)
+ goto out_kfree_skb;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_kfree_skb;
if (tcf_action_dump(skb, a, 0, 0) < 0)
- goto nla_put_failure;
+ goto out_kfree_skb;
nla_nest_end(skb, nest);
@@ -942,8 +945,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
err = 0;
return err;
-nla_put_failure:
-nlmsg_failure:
+out_kfree_skb:
kfree_skb(skb);
return -1;
}
@@ -1062,7 +1064,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
struct tc_action_ops *a_o;
struct tc_action a;
int ret = 0;
- struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
+ struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
struct nlattr *kind = find_dump_kind(cb->nlh);
if (kind == NULL) {
@@ -1080,23 +1082,25 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
if (a_o->walk == NULL) {
WARN(1, "tc_dump_action: %s !capable of dumping table\n",
a_o->kind);
- goto nla_put_failure;
+ goto out_module_put;
}
- nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, sizeof(*t));
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*t), 0);
+ if (!nlh)
+ goto out_module_put;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_module_put;
ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
if (ret < 0)
- goto nla_put_failure;
+ goto out_module_put;
if (ret > 0) {
nla_nest_end(skb, nest);
@@ -1110,8 +1114,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
module_put(a_o->owner);
return skb->len;
-nla_put_failure:
-nlmsg_failure:
+out_module_put:
module_put(a_o->owner);
nlmsg_trim(skb, b);
return skb->len;
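Every hunk in act_api.c is the same mechanical conversion: NLMSG_NEW()/NLMSG_PUT() hid a goto nlmsg_failure inside the macro, forcing each caller to define that label whether or not it needed one; nlmsg_put() returns NULL instead, and each site now jumps to a label named for its actual cleanup (out_nlmsg_trim, out_module_put, out_kfree_skb). The before/after shape:

/* Old style: error path hidden inside the macro */
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); /* may goto nlmsg_failure */
t = NLMSG_DATA(nlh);

/* New style: explicit NULL check, caller-chosen cleanup label */
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
if (!nlh)
	goto out_nlmsg_trim;
t = nlmsg_data(nlh);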
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f452f696b4b..6dd1131f2ec 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -140,7 +140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
int tp_created = 0;
replay:
- t = NLMSG_DATA(n);
+ t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
nprio = prio;
@@ -349,8 +349,10 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -368,7 +370,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -418,7 +420,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
struct Qdisc *q;
struct tcf_proto *tp, **chain;
- struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
unsigned long cl = 0;
const struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 36fec422740..44f405cb9aa 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (head == NULL)
goto old_method;
- iif = ((struct rtable *)dst)->rt_iif;
+ iif = inet_iif(skb);
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
new file mode 100644
index 00000000000..bfd34e4c1af
--- /dev/null
+++ b/net/sched/em_canid.c
@@ -0,0 +1,240 @@
+/*
+ * em_canid.c Ematch rule to match CAN frames according to their CAN IDs
+ *
+ * This program is free software; you can distribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Idea: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Copyright: (c) 2011 Czech Technical University in Prague
+ * (c) 2011 Volkswagen Group Research
+ * Authors: Michal Sojka <sojkam1@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Rostislav Lisovy <lisovy@gmail.cz>
+ * Funded by: Volkswagen Group Research
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/pkt_cls.h>
+#include <linux/can.h>
+
+#define EM_CAN_RULES_MAX 500
+
+struct canid_match {
+ /* For each SFF CAN ID (11 bit) there is one record in this bitfield */
+ DECLARE_BITMAP(match_sff, (1 << CAN_SFF_ID_BITS));
+
+ int rules_count;
+ int sff_rules_count;
+ int eff_rules_count;
+
+ /*
+ * Raw rules copied from netlink message; Used for sending
+ * information to userspace (when 'tc filter show' is invoked)
+ * AND when matching EFF frames
+ */
+ struct can_filter rules_raw[];
+};
+
+/**
+ * em_canid_get_id() - Extracts the CAN ID out of the sk_buff structure.
+ */
+static canid_t em_canid_get_id(struct sk_buff *skb)
+{
+ /* CAN ID is stored within the data field */
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ return cf->can_id;
+}
+
+static void em_canid_sff_match_add(struct canid_match *cm, u32 can_id,
+ u32 can_mask)
+{
+ int i;
+
+ /*
+ * Limit can_mask and can_id to SFF range to
+ * protect against write after end of array
+ */
+ can_mask &= CAN_SFF_MASK;
+ can_id &= can_mask;
+
+ /* Single frame */
+ if (can_mask == CAN_SFF_MASK) {
+ set_bit(can_id, cm->match_sff);
+ return;
+ }
+
+ /* All frames */
+ if (can_mask == 0) {
+ bitmap_fill(cm->match_sff, (1 << CAN_SFF_ID_BITS));
+ return;
+ }
+
+ /*
+ * Individual frame filter.
+ * Add a record (set bit to 1) for each ID that
+ * conforms to the particular rule
+ */
+ for (i = 0; i < (1 << CAN_SFF_ID_BITS); i++) {
+ if ((i & can_mask) == can_id)
+ set_bit(i, cm->match_sff);
+ }
+}
+
+static inline struct canid_match *em_canid_priv(struct tcf_ematch *m)
+{
+ return (struct canid_match *)m->data;
+}
+
+static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
+ struct tcf_pkt_info *info)
+{
+ struct canid_match *cm = em_canid_priv(m);
+ canid_t can_id;
+ int match = 0;
+ int i;
+ const struct can_filter *lp;
+
+ can_id = em_canid_get_id(skb);
+
+ if (can_id & CAN_EFF_FLAG) {
+ for (i = 0, lp = cm->rules_raw;
+ i < cm->eff_rules_count; i++, lp++) {
+ if (!(((lp->can_id ^ can_id) & lp->can_mask))) {
+ match = 1;
+ break;
+ }
+ }
+ } else { /* SFF */
+ can_id &= CAN_SFF_MASK;
+ match = (test_bit(can_id, cm->match_sff) ? 1 : 0);
+ }
+
+ return match;
+}
+
+static int em_canid_change(struct tcf_proto *tp, void *data, int len,
+ struct tcf_ematch *m)
+{
+ struct can_filter *conf = data; /* Array with rules */
+ struct canid_match *cm;
+ struct canid_match *cm_old = (struct canid_match *)m->data;
+ int i;
+
+ if (!len)
+ return -EINVAL;
+
+ if (len % sizeof(struct can_filter))
+ return -EINVAL;
+
+ if (len > sizeof(struct can_filter) * EM_CAN_RULES_MAX)
+ return -EINVAL;
+
+ cm = kzalloc(sizeof(struct canid_match) + len, GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
+
+ cm->rules_count = len / sizeof(struct can_filter);
+
+ /*
+ * We need two for() loops for copying rules into two contiguous
+ * areas in rules_raw to process all eff rules with a simple loop.
+ * NB: The configuration interface supports sff and eff rules.
+ * We do not support filters here that match for the same can_id
+ * provided in an SFF and EFF frame (e.g. 0x123 / 0x80000123).
+ * For this (unusual case) two filters have to be specified. The
+ * SFF/EFF separation is done with the CAN_EFF_FLAG in the can_id.
+ */
+
+ /* Fill rules_raw with EFF rules first */
+ for (i = 0; i < cm->rules_count; i++) {
+ if (conf[i].can_id & CAN_EFF_FLAG) {
+ memcpy(cm->rules_raw + cm->eff_rules_count,
+ &conf[i],
+ sizeof(struct can_filter));
+
+ cm->eff_rules_count++;
+ }
+ }
+
+ /* append SFF frame rules */
+ for (i = 0; i < cm->rules_count; i++) {
+ if (!(conf[i].can_id & CAN_EFF_FLAG)) {
+ memcpy(cm->rules_raw
+ + cm->eff_rules_count
+ + cm->sff_rules_count,
+ &conf[i], sizeof(struct can_filter));
+
+ cm->sff_rules_count++;
+
+ em_canid_sff_match_add(cm,
+ conf[i].can_id, conf[i].can_mask);
+ }
+ }
+
+ m->datalen = sizeof(struct canid_match) + len;
+ m->data = (unsigned long)cm;
+
+ if (cm_old != NULL) {
+ pr_err("canid: Configuring an existing ematch!\n");
+ kfree(cm_old);
+ }
+
+ return 0;
+}
+
+static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+{
+ struct canid_match *cm = em_canid_priv(m);
+
+ kfree(cm);
+}
+
+static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
+{
+ struct canid_match *cm = em_canid_priv(m);
+
+ /*
+ * When configuring this ematch 'rules_count' is set not to exceed
+ * 'rules_raw' array size
+ */
+ if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
+ &cm->rules_raw) < 0)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct tcf_ematch_ops em_canid_ops = {
+ .kind = TCF_EM_CANID,
+ .change = em_canid_change,
+ .match = em_canid_match,
+ .destroy = em_canid_destroy,
+ .dump = em_canid_dump,
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(em_canid_ops.link)
+};
+
+static int __init init_em_canid(void)
+{
+ return tcf_em_register(&em_canid_ops);
+}
+
+static void __exit exit_em_canid(void)
+{
+ tcf_em_unregister(&em_canid_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_em_canid);
+module_exit(exit_em_canid);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_CANID);
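The configuration blob for em_canid is just an array of struct can_filter from <linux/can.h>, with CAN_EFF_FLAG in can_id separating EFF rules from SFF ones, as the comment in em_canid_change() spells out. A hypothetical userspace rule array, for illustration only:

#include <linux/can.h>

/* Hypothetical rule set: one exact SFF match, one masked EFF match.
 * em_canid_change() files the first into the SFF bitmap and keeps
 * the second in rules_raw for the per-rule EFF loop. */
static const struct can_filter rules[] = {
	{ .can_id = 0x123,                     .can_mask = CAN_SFF_MASK },
	{ .can_id = 0x00001200 | CAN_EFF_FLAG,
	  .can_mask = 0x1FFFFF00 | CAN_EFF_FLAG },
};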
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
new file mode 100644
index 00000000000..3130320997e
--- /dev/null
+++ b/net/sched/em_ipset.c
@@ -0,0 +1,135 @@
+/*
+ * net/sched/em_ipset.c ipset ematch
+ *
+ * Copyright (c) 2012 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/xt_set.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/pkt_cls.h>
+
+static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
+ struct tcf_ematch *em)
+{
+ struct xt_set_info *set = data;
+ ip_set_id_t index;
+
+ if (data_len != sizeof(*set))
+ return -EINVAL;
+
+ index = ip_set_nfnl_get_byindex(set->index);
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ em->datalen = sizeof(*set);
+ em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
+ if (em->data)
+ return 0;
+
+ ip_set_nfnl_put(index);
+ return -ENOMEM;
+}
+
+static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
+{
+ const struct xt_set_info *set = (const void *) em->data;
+ if (set) {
+ ip_set_nfnl_put(set->index);
+ kfree((void *) em->data);
+ }
+}
+
+static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
+ struct tcf_pkt_info *info)
+{
+ struct ip_set_adt_opt opt;
+ struct xt_action_param acpar;
+ const struct xt_set_info *set = (const void *) em->data;
+ struct net_device *dev, *indev = NULL;
+ int ret, network_offset;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ acpar.family = NFPROTO_IPV4;
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+ acpar.thoff = ip_hdrlen(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ acpar.family = NFPROTO_IPV6;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ /* doesn't call ipv6_find_hdr() because ipset doesn't use thoff, yet */
+ acpar.thoff = sizeof(struct ipv6hdr);
+ break;
+ default:
+ return 0;
+ }
+
+ acpar.hooknum = 0;
+
+ opt.family = acpar.family;
+ opt.dim = set->dim;
+ opt.flags = set->flags;
+ opt.cmdflags = 0;
+ opt.timeout = ~0u;
+
+ network_offset = skb_network_offset(skb);
+ skb_pull(skb, network_offset);
+
+ dev = skb->dev;
+
+ rcu_read_lock();
+
+ if (dev && skb->skb_iif)
+ indev = dev_get_by_index_rcu(dev_net(dev), skb->skb_iif);
+
+ acpar.in = indev ? indev : dev;
+ acpar.out = dev;
+
+ ret = ip_set_test(set->index, skb, &acpar, &opt);
+
+ rcu_read_unlock();
+
+ skb_push(skb, network_offset);
+ return ret;
+}
+
+static struct tcf_ematch_ops em_ipset_ops = {
+ .kind = TCF_EM_IPSET,
+ .change = em_ipset_change,
+ .destroy = em_ipset_destroy,
+ .match = em_ipset_match,
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(em_ipset_ops.link)
+};
+
+static int __init init_em_ipset(void)
+{
+ return tcf_em_register(&em_ipset_ops);
+}
+
+static void __exit exit_em_ipset(void)
+{
+ tcf_em_unregister(&em_ipset_ops);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("TC extended match for IP sets");
+
+module_init(init_em_ipset);
+module_exit(exit_em_ipset);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_IPSET);
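em_ipset_match() must show ip_set_test() the packet the way netfilter would see it, with skb->data at the network header; it therefore pulls the link-layer header off for the duration of the test and pushes it back before returning, leaving the skb untouched for the rest of the classification chain. The invariant, condensed:

/* Invariant kept by em_ipset_match():
 *
 *   off = skb_network_offset(skb);
 *   skb_pull(skb, off);          data now points at the IP header
 *   ret = ip_set_test(set->index, skb, &acpar, &opt);
 *   skb_push(skb, off);          restore the original data pointer
 *
 * The pull/push pair must stay balanced on every path between them.
 */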
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 4790c696cbc..4ab6e332557 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif)
if (unlikely(skb_rtable(skb) == NULL))
*err = -1;
else
- dst->value = skb_rtable(skb)->rt_iif;
+ dst->value = inet_iif(skb);
}
/**************************************************************************
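cls_route and em_meta both stop dereferencing skb_rtable(skb)->rt_iif and call inet_iif(skb) instead; the helper is the one place that knows how the input interface is derived from the skb's cached route, which insulates these classifiers from the struct rtable reshuffling elsewhere in this series. Call sites stay one-liners:

/* Preferred accessor for the input interface of a routed skb */
iif = inet_iif(skb);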
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 085ce53d570..a08b4ab3e42 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -973,7 +973,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = NLMSG_DATA(n);
+ struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid = tcm->tcm_parent;
@@ -1046,7 +1046,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
replay:
/* Reinit, just in case something touches this. */
- tcm = NLMSG_DATA(n);
+ tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
q = p = NULL;
@@ -1193,8 +1193,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
struct gnet_dump d;
struct qdisc_size_table *stab;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -1230,7 +1232,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -1366,7 +1368,7 @@ done:
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = NLMSG_DATA(n);
+ struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct Qdisc *q = NULL;
@@ -1498,8 +1500,10 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
struct gnet_dump d;
const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -1525,7 +1529,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -1616,7 +1620,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c412ad0d030..298c0ddfb57 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -380,7 +380,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
- skb_orphan(skb);
+ /* If a delay is expected, orphan the skb. (orphaning usually takes
+ * place at TX completion time, so _before_ the link transit delay)
+ * Ideally, this orphaning should be done after the rate limiting
+ * module, because this breaks TCP Small Queue, and other mechanisms
+ * based on socket sk_wmem_alloc.
+ */
+ if (q->latency || q->jitter)
+ skb_orphan(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ca0c29695d5..47416716294 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -67,7 +67,6 @@ struct teql_master {
struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
- struct neighbour *ncache;
struct sk_buff_head q;
};
@@ -134,7 +133,6 @@ teql_reset(struct Qdisc *sch)
skb_queue_purge(&dat->q);
sch->q.qlen = 0;
- teql_neigh_release(xchg(&dat->ncache, NULL));
}
static void
@@ -166,7 +164,6 @@ teql_destroy(struct Qdisc *sch)
}
}
skb_queue_purge(&dat->q);
- teql_neigh_release(xchg(&dat->ncache, NULL));
break;
}
@@ -225,21 +222,25 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
struct net_device *dev, struct netdev_queue *txq,
- struct neighbour *mn)
+ struct dst_entry *dst)
{
- struct teql_sched_data *q = qdisc_priv(txq->qdisc);
- struct neighbour *n = q->ncache;
+ struct neighbour *n;
+ int err = 0;
- if (mn->tbl == NULL)
- return -EINVAL;
- if (n && n->tbl == mn->tbl &&
- memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
- atomic_inc(&n->refcnt);
- } else {
- n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ n = dst_neigh_lookup_skb(dst, skb);
+ if (!n)
+ return -ENOENT;
+
+ if (dst->dev != dev) {
+ struct neighbour *mn;
+
+ mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
+ neigh_release(n);
+ if (IS_ERR(mn))
+ return PTR_ERR(mn);
+ n = mn;
}
+
if (neigh_event_send(n, skb_res) == 0) {
int err;
char haddr[MAX_ADDR_LEN];
@@ -248,15 +249,13 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
NULL, skb->len);
- if (err < 0) {
- neigh_release(n);
- return -EINVAL;
- }
- teql_neigh_release(xchg(&q->ncache, n));
- return 0;
+ if (err < 0)
+ err = -EINVAL;
+ } else {
+ err = (skb_res == NULL) ? -EAGAIN : 1;
}
neigh_release(n);
- return (skb_res == NULL) ? -EAGAIN : 1;
+ return err;
}
static inline int teql_resolve(struct sk_buff *skb,
@@ -265,7 +264,6 @@ static inline int teql_resolve(struct sk_buff *skb,
struct netdev_queue *txq)
{
struct dst_entry *dst = skb_dst(skb);
- struct neighbour *mn;
int res;
if (txq->qdisc == &noop_qdisc)
@@ -275,8 +273,7 @@ static inline int teql_resolve(struct sk_buff *skb,
return 0;
rcu_read_lock();
- mn = dst_get_neighbour_noref(dst);
- res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ res = __teql_resolve(skb, skb_res, dev, txq, dst);
rcu_read_unlock();
return res;
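The teql rework deletes the per-qdisc neighbour cache (ncache) and resolves on demand through dst_neigh_lookup_skb(), re-looking the key up on the slave device when the route was resolved on a different device; every path now pairs the lookup with exactly one neigh_release(), which removes the stale-cache hazard the old xchg() dance papered over. The flow, condensed:

/* Neighbour resolution in __teql_resolve() after this patch:
 *
 *   n = dst_neigh_lookup_skb(dst, skb);         takes a reference
 *   if (!n) return -ENOENT;
 *   if (dst->dev != dev) {                      transmit is on a slave:
 *       mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
 *       neigh_release(n);                       drop the first reference
 *       n = mn;                                 (or return PTR_ERR(mn))
 *   }
 *   ... neigh_event_send() / dev_hard_header() ...
 *   neigh_release(n);                           one release on all paths
 */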
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index b16517ee1aa..ebaef3ed606 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -124,6 +124,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
+ asoc->pf_retrans = sctp_pf_retrans;
+
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
@@ -686,6 +688,9 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
+ /* And the partial failure retrans threshold */
+ peer->pf_retrans = asoc->pf_retrans;
+
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
*/
@@ -841,6 +846,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
struct sctp_ulpevent *event;
struct sockaddr_storage addr;
int spc_state = 0;
+ bool ulp_notify = true;
/* Record the transition on the transport. */
switch (command) {
@@ -854,6 +860,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
spc_state = SCTP_ADDR_CONFIRMED;
else
spc_state = SCTP_ADDR_AVAILABLE;
+ /* Don't inform ULP about the transition from PF to
+ * active state, and set cwnd to 1; see SCTP
+ * Quick failover draft section 5.1, point 5
+ */
+ if (transport->state == SCTP_PF) {
+ ulp_notify = false;
+ transport->cwnd = 1;
+ }
transport->state = SCTP_ACTIVE;
break;
@@ -872,6 +886,11 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
spc_state = SCTP_ADDR_UNREACHABLE;
break;
+ case SCTP_TRANSPORT_PF:
+ transport->state = SCTP_PF;
+ ulp_notify = false;
+ break;
+
default:
return;
}
@@ -879,12 +898,15 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
* user.
*/
- memset(&addr, 0, sizeof(struct sockaddr_storage));
- memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
- event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
- 0, spc_state, error, GFP_ATOMIC);
- if (event)
- sctp_ulpq_tail_event(&asoc->ulpq, event);
+ if (ulp_notify) {
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ memcpy(&addr, &transport->ipaddr,
+ transport->af_specific->sockaddr_len);
+ event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
+ 0, spc_state, error, GFP_ATOMIC);
+ if (event)
+ sctp_ulpq_tail_event(&asoc->ulpq, event);
+ }
/* Select new active and retran paths. */
@@ -900,7 +922,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
transports) {
if ((t->state == SCTP_INACTIVE) ||
- (t->state == SCTP_UNCONFIRMED))
+ (t->state == SCTP_UNCONFIRMED) ||
+ (t->state == SCTP_PF))
continue;
if (!first || t->last_time_heard > first->last_time_heard) {
second = first;
@@ -1360,7 +1383,7 @@ struct sctp_transport *sctp_assoc_choose_alter_transport(
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
-void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
struct sctp_transport *t;
__u32 pmtu = 0;
@@ -1372,7 +1395,7 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(t, dst_mtu(t->dst));
+ sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 8b9b6790a3d..e64d5210ed1 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -408,10 +408,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
if (t->param_flags & SPP_PMTUD_ENABLE) {
/* Update transports view of the MTU */
- sctp_transport_update_pmtu(t, pmtu);
+ sctp_transport_update_pmtu(sk, t, pmtu);
/* Update association pmtu. */
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
}
/* Retransmit with the new pmtu setting.
@@ -423,6 +423,18 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
+void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+
+ if (!t)
+ return;
+ dst = sctp_transport_dst_check(t);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* SCTP Implementer's Guide, 2.37 ICMP handling procedures
*
@@ -628,6 +640,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
err = EHOSTUNREACH;
break;
+ case ICMP_REDIRECT:
+ sctp_icmp_redirect(sk, transport, skb);
+ err = 0;
+ break;
default:
goto out_unlock;
}
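The IPv4 error handler (and its IPv6 counterpart in the next hunk) now funnels ICMP redirects into the new sctp_icmp_redirect(), which revalidates the transport's cached route and hands the redirect to the address family's dst operation:

/* Redirect plumbing added here:
 *   ICMP_REDIRECT / NDISC_REDIRECT
 *     -> sctp_icmp_redirect(sk, transport, skb)
 *          dst = sctp_transport_dst_check(t);   NULL if the route is stale
 *          dst->ops->redirect(dst, sk, skb);    family-specific handler
 */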
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 91f479121c5..ed7139ea797 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -185,6 +185,9 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out_unlock;
}
break;
+ case NDISC_REDIRECT:
+ sctp_icmp_redirect(sk, transport, skb);
+ break;
default:
break;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6ae47acaaec..838e18b4d7e 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -64,6 +64,8 @@
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
@@ -224,7 +226,10 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
if (!auth)
return retval;
- retval = sctp_packet_append_chunk(pkt, auth);
+ retval = __sctp_packet_append_chunk(pkt, auth);
+
+ if (retval != SCTP_XMIT_OK)
+ sctp_chunk_free(auth);
return retval;
}
@@ -256,48 +261,31 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
asoc->a_rwnd = asoc->rwnd;
sack = sctp_make_sack(asoc);
if (sack) {
- retval = sctp_packet_append_chunk(pkt, sack);
+ retval = __sctp_packet_append_chunk(pkt, sack);
+ if (retval != SCTP_XMIT_OK) {
+ sctp_chunk_free(sack);
+ goto out;
+ }
asoc->peer.sack_needed = 0;
if (del_timer(timer))
sctp_association_put(asoc);
}
}
}
+out:
return retval;
}
+
/* Append a chunk to the offered packet reporting back any inability to do
* so.
*/
-sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
- struct sctp_chunk *chunk)
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
{
sctp_xmit_t retval = SCTP_XMIT_OK;
__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
- SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
- chunk);
-
- /* Data chunks are special. Before seeing what else we can
- * bundle into this packet, check to see if we are allowed to
- * send this DATA.
- */
- if (sctp_chunk_is_data(chunk)) {
- retval = sctp_packet_can_append_data(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
- }
-
- /* Try to bundle AUTH chunk */
- retval = sctp_packet_bundle_auth(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
-
- /* Try to bundle SACK chunk */
- retval = sctp_packet_bundle_sack(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
-
/* Check to see if this chunk will fit into the packet */
retval = sctp_packet_will_fit(packet, chunk, chunk_len);
if (retval != SCTP_XMIT_OK)
@@ -339,6 +327,43 @@ finish:
return retval;
}
+/* Append a chunk to the offered packet reporting back any inability to do
+ * so.
+ */
+sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
+ chunk);
+
+ /* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ retval = sctp_packet_can_append_data(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+ }
+
+ /* Try to bundle AUTH chunk */
+ retval = sctp_packet_bundle_auth(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* Try to bundle SACK chunk */
+ retval = sctp_packet_bundle_sack(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = __sctp_packet_append_chunk(packet, chunk);
+
+finish:
+ return retval;
+}
+
/* All packets are sent to the network through this function from
* sctp_outq_tail().
*
@@ -385,7 +410,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
}
}
dst = dst_clone(tp->dst);
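The output.c refactor splits appending from bundling: sctp_packet_bundle_auth() and sctp_packet_bundle_sack() used to call back into the public sctp_packet_append_chunk(), re-entering the bundling logic from inside itself; they now call a double-underscore core that only checks fit and appends, and each helper frees the chunk it created if the append fails. The call structure after the split:

/* Call structure after the split:
 *
 *   sctp_packet_append_chunk()              public entry point
 *     -> sctp_packet_can_append_data()      DATA admission check
 *     -> sctp_packet_bundle_auth()   --+
 *     -> sctp_packet_bundle_sack()   --+--> __sctp_packet_append_chunk()
 *     -> __sctp_packet_append_chunk()       fit check + actual append
 */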
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a0fa19f5650..e7aa177c952 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -792,7 +792,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (!new_transport)
new_transport = asoc->peer.active_path;
} else if ((new_transport->state == SCTP_INACTIVE) ||
- (new_transport->state == SCTP_UNCONFIRMED)) {
+ (new_transport->state == SCTP_UNCONFIRMED) ||
+ (new_transport->state == SCTP_PF)) {
/* If the chunk is Heartbeat or Heartbeat Ack,
* send it to chunk->transport, even if it's
* inactive.
@@ -987,7 +988,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
new_transport = chunk->transport;
if (!new_transport ||
((new_transport->state == SCTP_INACTIVE) ||
- (new_transport->state == SCTP_UNCONFIRMED)))
+ (new_transport->state == SCTP_UNCONFIRMED) ||
+ (new_transport->state == SCTP_PF)))
new_transport = asoc->peer.active_path;
if (new_transport->state == SCTP_UNCONFIRMED)
continue;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 9c90811d113..1f89c4e6964 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -568,7 +568,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
- return skb_rtable(skb)->rt_iif;
+ return inet_iif(skb);
}
/* Was this packet marked by Explicit Congestion Notification? */
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b6de71efb14..479a70ef6ff 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -132,7 +132,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
* abort chunk. Differs from sctp_init_cause in that it won't oops
* if there isn't enough space in the op error chunk
*/
-int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
size_t paylen)
{
sctp_errhdr_t err;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 8716da1a859..fe99628e125 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -76,6 +76,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_cmd_seq_t *commands,
gfp_t gfp);
+static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
+ struct sctp_transport *t);
/********************************************************************
* Helper functions
********************************************************************/
@@ -470,7 +472,8 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
* notification SHOULD be sent to the upper layer.
*
*/
-static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
+static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc,
struct sctp_transport *transport,
int is_hb)
{
@@ -495,6 +498,23 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
transport->error_count++;
}
+ /* If the transport error count is greater than the pf_retrans
+ * threshold, and less than pathmaxrxt, then mark this transport
+ * as Partially Failed, see SCTP Quick Failover Draft, section 5.1,
+ * point 1
+ */
+ if ((transport->state != SCTP_PF) &&
+ (asoc->pf_retrans < transport->pathmaxrxt) &&
+ (transport->error_count > asoc->pf_retrans)) {
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_PF,
+ 0);
+
+ /* Update the hb timer to resend a heartbeat every rto */
+ sctp_cmd_hb_timer_update(commands, transport);
+ }
+
if (transport->state != SCTP_INACTIVE &&
(transport->error_count > transport->pathmaxrxt)) {
SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
@@ -699,6 +719,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
SCTP_HEARTBEAT_SUCCESS);
}
+ if (t->state == SCTP_PF)
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+
/* The receiver of the HEARTBEAT ACK should also perform an
* RTT measurement for that destination transport address
* using the time value carried in the HEARTBEAT ACK chunk.
@@ -1565,8 +1589,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_STRIKE:
/* Mark one strike against a transport. */
- sctp_do_8_2_transport_strike(asoc, cmd->obj.transport,
- 0);
+ sctp_do_8_2_transport_strike(commands, asoc,
+ cmd->obj.transport, 0);
break;
case SCTP_CMD_TRANSPORT_IDLE:
@@ -1576,7 +1600,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_TRANSPORT_HB_SENT:
t = cmd->obj.transport;
- sctp_do_8_2_transport_strike(asoc, t, 1);
+ sctp_do_8_2_transport_strike(commands, asoc,
+ t, 1);
t->hb_sent = 1;
break;
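The strike path now has two thresholds per transport: past pf_retrans (when that is below pathmaxrxt) the transport enters SCTP_PF and its heartbeat timer is re-armed at every RTO; only past pathmaxrxt does it go inactive. A condensed standalone model of the thresholds, with hypothetical types (transport_sketch):

/* Standalone model of the two strike thresholds; transport_sketch
 * is hypothetical, not a kernel type. Assumes pf_retrans < pathmaxrxt. */
enum tstate { ACTIVE, PF, INACTIVE };

struct transport_sketch {
	enum tstate state;
	int error_count, pf_retrans, pathmaxrxt;
};

static void strike(struct transport_sketch *t)
{
	t->error_count++;
	if (t->state != PF && t->pf_retrans < t->pathmaxrxt &&
	    t->error_count > t->pf_retrans)
		t->state = PF;		/* quick failover: prefer another path */
	if (t->state != INACTIVE && t->error_count > t->pathmaxrxt)
		t->state = INACTIVE;	/* classic path failure */
}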
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 31c7bfcd9b5..5e259817a7f 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1859,7 +1859,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (asoc->pmtu_pending)
- sctp_assoc_pending_pmtu(asoc);
+ sctp_assoc_pending_pmtu(sk, asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
@@ -2373,7 +2373,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
sctp_frag_point(asoc, params->spp_pathmtu);
@@ -2390,7 +2390,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
}
} else if (asoc) {
asoc->param_flags =
@@ -3478,6 +3478,56 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
}
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to alter the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (optlen < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
+ sizeof(struct sctp_paddrthlds)))
+ return -EFAULT;
+
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ if (val.spt_pathmaxrxt)
+ asoc->pathmaxrxt = val.spt_pathmaxrxt;
+ asoc->pf_retrans = val.spt_pathpfthld;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ return 0;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3627,6 +3677,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTO_ASCONF:
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -5498,6 +5551,51 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
return 0;
}
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to fetch the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ int len,
+ int __user *optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ len = sizeof(struct sctp_paddrthlds);
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
+ return -EFAULT;
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+
+ val.spt_pathpfthld = asoc->pf_retrans;
+ val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ val.spt_pathmaxrxt = trans->pathmaxrxt;
+ val.spt_pathpfthld = trans->pf_retrans;
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -5636,6 +5734,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTO_ASCONF:
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
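The new SCTP_PEER_ADDR_THLDS option is symmetric: setsockopt applies spt_pathmaxrxt and spt_pathpfthld to one transport, or to every transport of the association when spt_address is the wildcard; getsockopt reads them back, and a zero spt_pathmaxrxt leaves the existing maximum untouched. A hypothetical userspace call, assuming struct sctp_paddrthlds is exported with the fields used above:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed to carry sctp_paddrthlds */

/* Hypothetical sketch: lower the PF threshold for one peer address */
static int set_pf_threshold(int fd, const struct sockaddr_in *peer)
{
	struct sctp_paddrthlds thlds;

	memset(&thlds, 0, sizeof(thlds));
	memcpy(&thlds.spt_address, peer, sizeof(*peer));
	thlds.spt_pathmaxrxt = 5;	/* 0 would keep the current maximum */
	thlds.spt_pathpfthld = 2;	/* enter PF after two timeouts */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS,
			  &thlds, sizeof(thlds));
}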
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e5fe639c89e..2b2bfe933ff 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -141,6 +141,15 @@ static ctl_table sctp_table[] = {
.extra2 = &int_max
},
{
+ .procname = "pf_retrans",
+ .data = &sctp_pf_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &int_max
+ },
+ {
.procname = "max_init_retransmits",
.data = &sctp_max_retrans_init,
.maxlen = sizeof(int),
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 1dcceb6e0ce..c97472b248a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -87,6 +87,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
/* Initialize the default path max_retrans. */
peer->pathmaxrxt = sctp_max_retrans_path;
+ peer->pf_retrans = sctp_pf_retrans;
INIT_LIST_HEAD(&peer->transmitted);
INIT_LIST_HEAD(&peer->send_ready);
@@ -216,7 +217,7 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
/* If we don't have a fresh route, look one up */
- if (!transport->dst || transport->dst->obsolete > 1) {
+ if (!transport->dst || transport->dst->obsolete) {
dst_release(transport->dst);
transport->af_specific->get_dst(transport, &transport->saddr,
&transport->fl, sk);
@@ -228,7 +229,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
@@ -245,8 +246,16 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
}
dst = sctp_transport_dst_check(t);
- if (dst)
- dst->ops->update_pmtu(dst, pmtu);
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+
+ if (dst) {
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+
+ dst = sctp_transport_dst_check(t);
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+ }
}
/* Caches the dst entry and source address for a transport's destination
@@ -587,7 +596,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
unsigned long timeout;
timeout = t->rto + sctp_jitter(t->rto);
- if (t->state != SCTP_UNCONFIRMED)
+ if ((t->state != SCTP_UNCONFIRMED) &&
+ (t->state != SCTP_PF))
timeout += t->hbinterval;
timeout += jiffies;
return timeout;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 33d89477619..10c018a5b9f 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -702,7 +702,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
if (rx_count >= asoc->base.sk->sk_rcvbuf) {
if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) ||
- (!sk_rmem_schedule(asoc->base.sk, chunk->skb->truesize)))
+ (!sk_rmem_schedule(asoc->base.sk, chunk->skb,
+ chunk->skb->truesize)))
goto fail;
}
diff --git a/net/socket.c b/net/socket.c
index 6e0ccc09b31..dfe5b66c97e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -398,7 +398,7 @@ int sock_map_fd(struct socket *sock, int flags)
}
EXPORT_SYMBOL(sock_map_fd);
-static struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file, int *err)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
@@ -406,6 +406,7 @@ static struct socket *sock_from_file(struct file *file, int *err)
*err = -ENOTSOCK;
return NULL;
}
+EXPORT_SYMBOL(sock_from_file);
/**
* sockfd_lookup - Go from a file number to its socket slot
@@ -522,6 +523,9 @@ void sock_release(struct socket *sock)
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
+ if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
+ return;
+
this_cpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
@@ -551,8 +555,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
sock_update_classid(sock->sk);
- sock_update_netprioidx(sock->sk);
-
si->sock = sock;
si->scm = NULL;
si->msg = msg;
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 9fe8857d8d5..03d03e37a7d 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -21,6 +21,11 @@ config SUNRPC_XPRT_RDMA
If unsure, say N.
+config SUNRPC_SWAP
+ bool
+ depends on SUNRPC
+ select NETVM
+
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
depends on SUNRPC && CRYPTO
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 727e506cacd..b5c067bccc4 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/hash.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
#include <linux/spinlock.h>
#ifdef RPC_DEBUG
@@ -122,6 +123,59 @@ rpcauth_unregister(const struct rpc_authops *ops)
}
EXPORT_SYMBOL_GPL(rpcauth_unregister);
+/**
+ * rpcauth_list_flavors - discover registered flavors and pseudoflavors
+ * @array: array to fill in
+ * @size: size of "array"
+ *
+ * Returns the number of array items filled in, or a negative errno.
+ *
+ * The returned array is not sorted by any policy. Callers should not
+ * rely on the order of the items in the returned array.
+ */
+int
+rpcauth_list_flavors(rpc_authflavor_t *array, int size)
+{
+ rpc_authflavor_t flavor;
+ int result = 0;
+
+ spin_lock(&rpc_authflavor_lock);
+ for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
+ const struct rpc_authops *ops = auth_flavors[flavor];
+ rpc_authflavor_t pseudos[4];
+ int i, len;
+
+ if (result >= size) {
+ result = -ENOMEM;
+ break;
+ }
+
+ if (ops == NULL)
+ continue;
+ if (ops->list_pseudoflavors == NULL) {
+ array[result++] = ops->au_flavor;
+ continue;
+ }
+ len = ops->list_pseudoflavors(pseudos, ARRAY_SIZE(pseudos));
+ if (len < 0) {
+ result = len;
+ break;
+ }
+ for (i = 0; i < len; i++) {
+ if (result >= size) {
+ result = -ENOMEM;
+ break;
+ }
+ array[result++] = pseudos[i];
+ }
+ }
+ spin_unlock(&rpc_authflavor_lock);
+
+ dprintk("RPC: %s returns %d\n", __func__, result);
+ return result;
+}
+EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
+
struct rpc_auth *
rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
{
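rpcauth_list_flavors() folds plain flavors and GSS pseudoflavors into one enumeration: an auth without list_pseudoflavors contributes its own au_flavor, the rest expand through the callback, and -ENOMEM tells the caller its array was too small. A hypothetical kernel-side caller sketch:

/* Hypothetical caller: enumerate everything into a fixed-size array */
rpc_authflavor_t flavors[16];	/* size chosen for illustration */
int i, n;

n = rpcauth_list_flavors(flavors, ARRAY_SIZE(flavors));
if (n < 0)
	return n;		/* -ENOMEM: array was too small */
for (i = 0; i < n; i++)
	pr_debug("flavor %u\n", flavors[i]);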
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index d3ad81f8da5..34c52202100 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1619,6 +1619,7 @@ static const struct rpc_authops authgss_ops = {
.crcreate = gss_create_cred,
.pipes_create = gss_pipes_dentries_create,
.pipes_destroy = gss_pipes_dentries_destroy,
+ .list_pseudoflavors = gss_mech_list_pseudoflavors,
};
static const struct rpc_credops gss_credops = {
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 782bfe1b646..b174fcd9ff4 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -239,14 +239,28 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
-int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
+/**
+ * gss_mech_list_pseudoflavors - Discover registered GSS pseudoflavors
+ * @array: array to fill in
+ * @size: size of "array"
+ *
+ * Returns the number of array items filled in, or a negative errno.
+ *
+ * The returned array is not sorted by any policy. Callers should not
+ * rely on the order of the items in the returned array.
+ */
+int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
{
struct gss_api_mech *pos = NULL;
int j, i = 0;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
- for (j=0; j < pos->gm_pf_num; j++) {
+ for (j = 0; j < pos->gm_pf_num; j++) {
+ if (i >= size) {
+ spin_unlock(&registered_mechs_lock);
+ return -ENOMEM;
+ }
array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
}
}
@@ -254,8 +268,6 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
return i;
}
-EXPORT_SYMBOL_GPL(gss_mech_list_pseudoflavors);
-
u32
gss_svc_to_pseudoflavor(struct gss_api_mech *gm, u32 service)
{
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 31def68a0f6..5a3d675d2f2 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -176,13 +176,14 @@ out_free:
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
-/*
- * Destroys the backchannel preallocated structures.
+/**
+ * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
+ * @xprt: the transport holding the preallocated structures
+ * @max_reqs: the maximum number of preallocated structures to destroy
+ *
* Since these structures may have been allocated by multiple calls
* to xprt_setup_backchannel, we only destroy up to the maximum number
* of reqs specified by the caller.
- * @xprt: the transport holding the preallocated strucures
- * @max_reqs the maximum number of preallocated structures to destroy
*/
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 47ad2666fdf..2afd2a84dc3 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1349,8 +1349,11 @@ static int c_show(struct seq_file *m, void *p)
if (cache_check(cd, cp, NULL))
/* cache_check does a cache_put on failure */
seq_printf(m, "# ");
- else
+ else {
+ if (cache_is_expired(cd, cp))
+ seq_printf(m, "# ");
cache_put(cp, cd);
+ }
return cd->cache_show(m, cd, cp);
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f56f045778a..fa48c60aef2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -385,7 +385,7 @@ out_no_rpciod:
return ERR_PTR(err);
}
-/*
+/**
* rpc_create - create an RPC client and transport with one call
* @args: rpc_clnt create argument structure
*
@@ -717,6 +717,15 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
atomic_inc(&clnt->cl_count);
if (clnt->cl_softrtry)
task->tk_flags |= RPC_TASK_SOFT;
+ if (sk_memalloc_socks()) {
+ struct rpc_xprt *xprt;
+
+ rcu_read_lock();
+ xprt = rcu_dereference(clnt->cl_xprt);
+ if (xprt->swapper)
+ task->tk_flags |= RPC_TASK_SWAPPER;
+ rcu_read_unlock();
+ }
/* Add to the client's list of all tasks */
spin_lock(&clnt->cl_lock);
list_add_tail(&task->tk_task, &clnt->cl_tasks);
@@ -1844,12 +1853,13 @@ call_timeout(struct rpc_task *task)
return;
}
if (RPC_IS_SOFT(task)) {
- if (clnt->cl_chatty)
+ if (clnt->cl_chatty) {
rcu_read_lock();
printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
clnt->cl_protname,
rcu_dereference(clnt->cl_xprt)->servername);
rcu_read_unlock();
+ }
if (task->tk_flags & RPC_TASK_TIMEOUT)
rpc_exit(task, -ETIMEDOUT);
else
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 92509ffe15f..a70acae496e 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -251,7 +251,7 @@ static int rpcb_create_local_unix(struct net *net)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create AF_LOCAL rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
@@ -298,7 +298,7 @@ static int rpcb_create_local_net(struct net *net)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create local rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 994cfea2bad..128494ec9a6 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -300,8 +300,9 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
- * Note: If the task is ASYNC, this must be called with
- * the spinlock held to protect the wait queue operation.
+ * Note: If the task is ASYNC, and is being made runnable after sitting on an
+ * rpc_wait_queue, this must be called with the queue spinlock held to protect
+ * the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
@@ -790,7 +791,9 @@ void rpc_execute(struct rpc_task *task)
static void rpc_async_schedule(struct work_struct *work)
{
+ current->flags |= PF_FSTRANS;
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+ current->flags &= ~PF_FSTRANS;
}
/**
@@ -812,7 +815,10 @@ static void rpc_async_schedule(struct work_struct *work)
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
- gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
+ gfp_t gfp = GFP_NOWAIT;
+
+ if (RPC_IS_SWAPPER(task))
+ gfp |= __GFP_MEMALLOC;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
@@ -886,7 +892,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
static struct rpc_task *
rpc_alloc_task(void)
{
- return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
+ return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
}
/*
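The sched.c changes belong to the same swap-over-network plumbing as the SUNRPC_SWAP Kconfig symbol above: async RPC work runs under PF_FSTRANS so memory reclaim cannot recurse back into the transmit path, rpc_malloc() grants access to the emergency reserves (__GFP_MEMALLOC) only to RPC_TASK_SWAPPER tasks, and task allocation tightens from GFP_NOFS to GFP_NOIO. Condensed:

/* Reclaim-safety knobs in this patch:
 *   rpc_async_schedule():  set PF_FSTRANS around __rpc_execute()
 *   rpc_malloc():          gfp = GFP_NOWAIT;
 *                          if (RPC_IS_SWAPPER(task)) gfp |= __GFP_MEMALLOC;
 *   rpc_alloc_task():      mempool_alloc(..., GFP_NOIO)
 */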
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2777fa89664..4d012920373 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -104,23 +104,9 @@ static void ip_map_put(struct kref *kref)
kfree(im);
}
-#if IP_HASHBITS == 8
-/* hash_long on a 64 bit machine is currently REALLY BAD for
- * IP addresses in reverse-endian (i.e. on a little-endian machine).
- * So use a trivial but reliable hash instead
- */
-static inline int hash_ip(__be32 ip)
-{
- int hash = (__force u32)ip ^ ((__force u32)ip>>16);
- return (hash ^ (hash>>8)) & 0xff;
-}
-#endif
-static inline int hash_ip6(struct in6_addr ip)
+static inline int hash_ip6(const struct in6_addr *ip)
{
- return (hash_ip(ip.s6_addr32[0]) ^
- hash_ip(ip.s6_addr32[1]) ^
- hash_ip(ip.s6_addr32[2]) ^
- hash_ip(ip.s6_addr32[3]));
+ return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
@@ -301,7 +287,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
- hash_ip6(*addr));
+ hash_ip6(addr));
if (ch)
return container_of(ch, struct ip_map, h);
@@ -331,7 +317,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
ip.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
- hash_ip6(ipm->m_addr));
+ hash_ip6(&ipm->m_addr));
if (!ch)
return -ENOMEM;
cache_put(ch, cd);
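The open-coded hash_ip()/hash_ip6() pair, with its endian-sensitive special case for 8-bit tables, collapses into one line built from generic helpers: ipv6_addr_hash() folds the four address words (covering v4-mapped addresses too) and hash_32() spreads the result over IP_HASHBITS:

/* Address hashing for the ip_map cache after this patch */
hash = hash_str(class, IP_HASHBITS) ^ hash_ip6(addr);
/* where hash_ip6() is now: hash_32(ipv6_addr_hash(ip), IP_HASHBITS) */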
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6de09de5d2..18bc130255a 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -43,6 +43,7 @@
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
+#include <trace/events/skb.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
@@ -619,6 +620,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
if (!svc_udp_get_dest_address(rqstp, cmh)) {
net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
cmh->cmsg_level, cmh->cmsg_type);
+out_free:
+ trace_kfree_skb(skb, svc_udp_recvfrom);
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
@@ -630,8 +633,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
local_bh_enable();
/* checksum error */
- skb_free_datagram_locked(svsk->sk_sk, skb);
- return 0;
+ goto out_free;
}
local_bh_enable();
skb_free_datagram_locked(svsk->sk_sk, skb);
@@ -640,10 +642,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_arg.head[0].iov_base = skb->data +
sizeof(struct udphdr);
rqstp->rq_arg.head[0].iov_len = len;
- if (skb_checksum_complete(skb)) {
- skb_free_datagram_locked(svsk->sk_sk, skb);
- return 0;
- }
+ if (skb_checksum_complete(skb))
+ goto out_free;
rqstp->rq_xprt_ctxt = skb;
}
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fddcccfcdf7..0afba1b4b65 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -129,34 +129,6 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
-xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
- unsigned int len)
-{
- struct kvec *tail = xdr->tail;
- u32 *p;
-
- xdr->pages = pages;
- xdr->page_base = base;
- xdr->page_len = len;
-
- p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
- tail->iov_base = p;
- tail->iov_len = 0;
-
- if (len & 3) {
- unsigned int pad = 4 - (len & 3);
-
- *p = 0;
- tail->iov_base = (char *)p + (len & 3);
- tail->iov_len = pad;
- len += pad;
- }
- xdr->buflen += len;
- xdr->len += len;
-}
-EXPORT_SYMBOL_GPL(xdr_encode_pages);
-
-void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
{
@@ -180,7 +152,9 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
- *
+ */
+
+/**
* _shift_data_right_pages
* @pages: vector of pages containing both the source and dest memory area.
* @pgto_base: page vector address of destination
@@ -242,7 +216,7 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
} while ((len -= copy) != 0);
}
-/*
+/**
* _copy_to_pages
* @pages: array of pages
* @pgbase: page vector address of destination
@@ -286,7 +260,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
flush_dcache_page(*pgto);
}
-/*
+/**
* _copy_from_pages
* @p: pointer to destination
* @pages: array of pages
@@ -326,7 +300,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
-/*
+/**
* xdr_shrink_bufhead
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
@@ -399,7 +373,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
buf->len = buf->buflen;
}
-/*
+/**
* xdr_shrink_pagelen
* @buf: xdr_buf
* @len: bytes to remove from buf->pages
@@ -455,6 +429,16 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
+ * xdr_stream_pos - Return the current offset from the start of the xdr_stream
+ * @xdr: pointer to struct xdr_stream
+ */
+unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
+{
+ return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_pos);
+
+/**
* xdr_init_encode - Initialize a struct xdr_stream for sending data.
* @xdr: pointer to xdr_stream struct
* @buf: pointer to XDR buffer in which to encode data
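
The arithmetic in xdr_stream_pos() above follows from the new nwords field: the position is the total message length in 32-bit words minus the words still undecoded, converted back to bytes. A worked example of just the arithmetic (XDR_QUADLEN copied from the kernel's definition):

	#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* bytes -> 32-bit words */

	static unsigned int stream_pos(unsigned int buf_len, unsigned int nwords)
	{
		/* buf_len = 20 gives 5 words; with nwords = 2 still pending,
		 * the position is (5 - 2) << 2 = 12 bytes into the stream. */
		return (XDR_QUADLEN(buf_len) - nwords) << 2;
	}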
@@ -554,13 +538,11 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
- __be32 *p, unsigned int len)
+ unsigned int len)
{
if (len > iov->iov_len)
len = iov->iov_len;
- if (p == NULL)
- p = (__be32*)iov->iov_base;
- xdr->p = p;
+ xdr->p = (__be32*)iov->iov_base;
xdr->end = (__be32*)(iov->iov_base + len);
xdr->iov = iov;
xdr->page_ptr = NULL;
@@ -607,7 +589,7 @@ static void xdr_set_next_page(struct xdr_stream *xdr)
newbase -= xdr->buf->page_base;
if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
- xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
@@ -616,7 +598,7 @@ static bool xdr_set_next_buffer(struct xdr_stream *xdr)
xdr_set_next_page(xdr);
else if (xdr->iov == xdr->buf->head) {
if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
- xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+ xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
return xdr->p != xdr->end;
}
@@ -632,10 +614,15 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr->buf = buf;
xdr->scratch.iov_base = NULL;
xdr->scratch.iov_len = 0;
+ xdr->nwords = XDR_QUADLEN(buf->len);
if (buf->head[0].iov_len != 0)
- xdr_set_iov(xdr, buf->head, p, buf->len);
+ xdr_set_iov(xdr, buf->head, buf->len);
else if (buf->page_len != 0)
xdr_set_page_base(xdr, 0, buf->len);
+ if (p != NULL && p > xdr->p && xdr->end >= p) {
+ xdr->nwords -= p - xdr->p;
+ xdr->p = p;
+ }
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
@@ -660,12 +647,14 @@ EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
+ unsigned int nwords = XDR_QUADLEN(nbytes);
__be32 *p = xdr->p;
- __be32 *q = p + XDR_QUADLEN(nbytes);
+ __be32 *q = p + nwords;
- if (unlikely(q > xdr->end || q < p))
+ if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
return NULL;
xdr->p = q;
+ xdr->nwords -= nwords;
return p;
}
@@ -732,6 +721,31 @@ __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
+static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
+{
+ struct xdr_buf *buf = xdr->buf;
+ struct kvec *iov;
+ unsigned int nwords = XDR_QUADLEN(len);
+ unsigned int cur = xdr_stream_pos(xdr);
+
+ if (xdr->nwords == 0)
+ return 0;
+ if (nwords > xdr->nwords) {
+ nwords = xdr->nwords;
+ len = nwords << 2;
+ }
+ /* Realign pages to current pointer position */
+ iov = buf->head;
+ if (iov->iov_len > cur)
+ xdr_shrink_bufhead(buf, iov->iov_len - cur);
+
+ /* Truncate page data and move it into the tail */
+ if (buf->page_len > len)
+ xdr_shrink_pagelen(buf, buf->page_len - len);
+ xdr->nwords = XDR_QUADLEN(buf->len - cur);
+ return len;
+}
+
/**
* xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
* @xdr: pointer to xdr_stream struct
@@ -740,39 +754,37 @@ EXPORT_SYMBOL_GPL(xdr_inline_decode);
* Moves data beyond the current pointer position from the XDR head[] buffer
* into the page list. Any data that lies beyond current position + "len"
* bytes is moved into the XDR tail[].
+ *
+ * Returns the number of XDR-encoded bytes now contained in the pages.
*/
-void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
+unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
struct xdr_buf *buf = xdr->buf;
struct kvec *iov;
- ssize_t shift;
+ unsigned int nwords;
unsigned int end;
- int padding;
-
- /* Realign pages to current pointer position */
- iov = buf->head;
- shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
- if (shift > 0)
- xdr_shrink_bufhead(buf, shift);
+ unsigned int padding;
- /* Truncate page data and move it into the tail */
- if (buf->page_len > len)
- xdr_shrink_pagelen(buf, buf->page_len - len);
- padding = (XDR_QUADLEN(len) << 2) - len;
+ len = xdr_align_pages(xdr, len);
+ if (len == 0)
+ return 0;
+ nwords = XDR_QUADLEN(len);
+ padding = (nwords << 2) - len;
xdr->iov = iov = buf->tail;
/* Compute remaining message length. */
- end = iov->iov_len;
- shift = buf->buflen - buf->len;
- if (shift < end)
- end -= shift;
- else if (shift > 0)
- end = 0;
+ end = ((xdr->nwords - nwords) << 2) + padding;
+ if (end > iov->iov_len)
+ end = iov->iov_len;
+
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
xdr->p = (__be32 *)((char *)iov->iov_base + padding);
xdr->end = (__be32 *)((char *)iov->iov_base + end);
+ xdr->page_ptr = NULL;
+ xdr->nwords = XDR_QUADLEN(end - padding);
+ return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
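
With xdr_read_pages() now returning the aligned length, a caller can learn how many page bytes actually survived the truncation in xdr_align_pages() instead of trusting the length it asked for. A hypothetical decode helper showing the return value in use (the function name and error policy are invented):

	static int decode_opaque_body(struct xdr_stream *xdr, unsigned int *avail)
	{
		__be32 *p;

		p = xdr_inline_decode(xdr, 4);	/* opaque length word */
		if (p == NULL)
			return -EIO;		/* stopped by the nwords bound */
		*avail = xdr_read_pages(xdr, be32_to_cpup(p));
		return 0;
	}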
@@ -788,12 +800,13 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
*/
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
- xdr_read_pages(xdr, len);
+ len = xdr_align_pages(xdr, len);
/*
* Position current pointer at beginning of tail, and
* set remaining message length.
*/
- xdr_set_page_base(xdr, 0, len);
+ if (len != 0)
+ xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c83035cdaa..a5a402a7d21 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -531,7 +531,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
-/*
+/**
* xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
* @task: task whose timeout is to be set
*
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index b446e100286..06cdbff79e4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
int rc = 0;
if (!xprt->shutdown) {
+ current->flags |= PF_FSTRANS;
xprt_clear_connected(xprt);
dprintk("RPC: %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
out:
xprt_wake_pending_tasks(xprt, rc);
-
out_clear:
dprintk("RPC: %s: exit\n", __func__);
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
}
/*
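
The connect worker now brackets its work with PF_FSTRANS, presumably so that memory allocations made while (re)establishing the transport cannot recurse into filesystem writeback that would need this same transport to make progress. The bare pattern (note the hunk clears the flag unconditionally; a stricter variant would save and restore the caller's flags):

	#include <linux/sched.h>

	static void reconnect_worker_sketch(void)
	{
		current->flags |= PF_FSTRANS;	/* no fs reclaim recursion from here */

		/* ... tear down and re-establish the connection ... */

		current->flags &= ~PF_FSTRANS;
	}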
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 890b03f8d87..400567243f8 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1014,9 +1014,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
- /* Something worked... */
- dst_confirm(skb_dst(skb));
-
xprt_adjust_cwnd(task, copied);
xprt_complete_rqst(task, copied);
@@ -1895,6 +1892,8 @@ static void xs_local_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
status = __sock_create(xprt->xprt_net, AF_LOCAL,
SOCK_STREAM, 0, &sock, 1);
@@ -1928,8 +1927,48 @@ static void xs_local_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
+}
+
+#ifdef CONFIG_SUNRPC_SWAP
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+
+ if (xprt->swapper)
+ sk_set_memalloc(transport->inet);
}
+/**
+ * xs_swapper - Tag this transport as being used for swap.
+ * @xprt: transport to tag
+ * @enable: enable/disable
+ *
+ */
+int xs_swapper(struct rpc_xprt *xprt, int enable)
+{
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
+ xprt);
+ int err = 0;
+
+ if (enable) {
+ xprt->swapper++;
+ xs_set_memalloc(xprt);
+ } else if (xprt->swapper) {
+ xprt->swapper--;
+ sk_clear_memalloc(transport->inet);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xs_swapper);
+#else
+static void xs_set_memalloc(struct rpc_xprt *xprt)
+{
+}
+#endif
+
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
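
xs_swapper() counts swap users on the transport and tags the underlying socket with sk_set_memalloc(), letting its allocations dip into the PFMEMALLOC reserves so swap-out over the network can always make progress. Note that the hunk above calls sk_clear_memalloc() on every disable, even when other swap users remain; a reference-counted variant would clear only on the last user, as sketched here (helper names are invented):

	#include <net/sock.h>

	static void swap_enable(struct sock *sk, unsigned int *swappers)
	{
		if ((*swappers)++ == 0)
			sk_set_memalloc(sk);	/* may use PFMEMALLOC reserves */
	}

	static void swap_disable(struct sock *sk, unsigned int *swappers)
	{
		if (*swappers && --(*swappers) == 0)
			sk_clear_memalloc(sk);	/* only when no user remains */
	}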
@@ -1954,6 +1993,8 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
transport->sock = sock;
transport->inet = sk;
+ xs_set_memalloc(xprt);
+
write_unlock_bh(&sk->sk_callback_lock);
}
xs_udp_do_set_buffer_size(xprt);
@@ -1970,6 +2011,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
/* Start by resetting any existing state */
xs_reset_transport(transport);
sock = xs_create_sock(xprt, transport,
@@ -1988,6 +2031,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/*
@@ -2078,6 +2122,8 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
if (!xprt_bound(xprt))
goto out;
+ xs_set_memalloc(xprt);
+
/* Tell the socket layer to start connecting... */
xprt->stat.connect_count++;
xprt->stat.connect_start = jiffies;
@@ -2113,6 +2159,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
sock = xs_create_sock(xprt, transport,
@@ -2162,6 +2210,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EINPROGRESS:
case -EALREADY:
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
return;
case -EINVAL:
/* Happens, for instance, if the user specified a link
@@ -2174,6 +2223,7 @@ out_eagain:
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/**
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 2c5954b8593..585460180ff 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -41,29 +41,4 @@ config TIPC_PORTS
Setting this to a smaller value saves some memory,
setting it to a higher value allows for more ports.
-config TIPC_LOG
- int "Size of log buffer"
- depends on TIPC_ADVANCED
- range 0 32768
- default "0"
- help
- Size (in bytes) of TIPC's internal log buffer, which records the
- occurrence of significant events. Can range from 0 to 32768 bytes;
- default is 0.
-
- There is no need to enable the log buffer unless the node will be
- managed remotely via TIPC.
-
-config TIPC_DEBUG
- bool "Enable debugging support"
- default n
- help
- Saying Y here enables TIPC debugging capabilities used by developers.
- Most users do not need to bother; if unsure, just say N.
-
- Enabling debugging support causes TIPC to display data about its
- internal state when certain abnormal conditions occur. It also
- makes it easy for developers to capture additional information of
- interest using the dbg() or msg_dbg() macros.
-
endif # TIPC
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2625f5ebe3e..e4e6d8cd47e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -162,7 +162,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
}
-/*
+/**
* tipc_bclink_retransmit_to - get most recent node to request retransmission
*
* Called with bc_lock locked
@@ -270,7 +270,7 @@ exit:
spin_unlock_bh(&bc_lock);
}
-/*
+/**
* tipc_bclink_update_link_state - update broadcast link state
*
* tipc_net_lock and node lock set
@@ -330,7 +330,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
}
}
-/*
+/**
* bclink_peek_nack - monitor retransmission requests sent by other nodes
*
* Delay any upcoming NACK by this node if another node has already
@@ -381,7 +381,7 @@ exit:
return res;
}
-/*
+/**
* bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
*
* Called with both sending node's lock and bc_lock taken.
@@ -406,7 +406,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
}
}
-/*
+/**
* tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
*
* tipc_net_lock is read_locked, no other locks set
@@ -701,48 +701,43 @@ void tipc_bcbearer_sort(void)
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
- struct print_buf pb;
+ int ret;
+ struct tipc_stats *s;
if (!bcl)
return 0;
- tipc_printbuf_init(&pb, buf, buf_size);
-
spin_lock_bh(&bc_lock);
- tipc_printf(&pb, "Link <%s>\n"
- " Window:%u packets\n",
- bcl->name, bcl->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.recv_info,
- bcl->stats.recv_fragments,
- bcl->stats.recv_fragmented,
- bcl->stats.recv_bundles,
- bcl->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.sent_info,
- bcl->stats.sent_fragments,
- bcl->stats.sent_fragmented,
- bcl->stats.sent_bundles,
- bcl->stats.sent_bundled);
- tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
- bcl->stats.recv_nacks,
- bcl->stats.deferred_recv,
- bcl->stats.duplicates);
- tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
- bcl->stats.sent_nacks,
- bcl->stats.sent_acks,
- bcl->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- bcl->stats.bearer_congs,
- bcl->stats.link_congs,
- bcl->stats.max_queue_sz,
- bcl->stats.queue_sz_counts
- ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
- : 0);
+ s = &bcl->stats;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " Window:%u packets\n",
+ bcl->name, bcl->queue_limit[0]);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX naks:%u defs:%u dups:%u\n",
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX naks:%u acks:%u dups:%u\n",
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
+ s->bearer_congs, s->link_congs, s->max_queue_sz,
+ s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
spin_unlock_bh(&bc_lock);
- return tipc_printbuf_validate(&pb);
+ return ret;
}
int tipc_bclink_reset_stats(void)
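
The conversion replaces the bespoke print_buf machinery with plain offset/remaining accumulation: each call appends at buf + ret and is offered only the space still free. The idiom in isolation (contents invented; this works only because tipc_snprintf(), like scnprintf(), returns the number of characters actually stored rather than the would-be length):

	static int fill_stats(char *buf, int buf_size)
	{
		int ret = 0;

		ret += tipc_snprintf(buf + ret, buf_size - ret,
				     "Link <%s>\n", "broadcast-link");
		ret += tipc_snprintf(buf + ret, buf_size - ret,
				     " RX packets:%u\n", 7u);
		return ret;	/* total length handed back to the caller */
	}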
@@ -880,7 +875,7 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
if (!item->next) {
item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
if (!item->next) {
- warn("Incomplete multicast delivery, no memory\n");
+ pr_warn("Incomplete multicast delivery, no memory\n");
return;
}
item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a297e3a2e3e..09e71241265 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -123,28 +123,30 @@ int tipc_register_media(struct tipc_media *m_ptr)
exit:
write_unlock_bh(&tipc_net_lock);
if (res)
- warn("Media <%s> registration error\n", m_ptr->name);
+ pr_warn("Media <%s> registration error\n", m_ptr->name);
return res;
}
/**
* tipc_media_addr_printf - record media address in print buffer
*/
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
char addr_str[MAX_ADDR_STR];
struct tipc_media *m_ptr;
+ int ret;
m_ptr = media_find_id(a->media_id);
if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
- tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str);
+ ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
else {
u32 i;
- tipc_printf(pb, "UNKNOWN(%u)", a->media_id);
+ ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id);
for (i = 0; i < sizeof(a->value); i++)
- tipc_printf(pb, "-%02x", a->value[i]);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "-%02x", a->value[i]);
}
}
@@ -172,8 +174,8 @@ struct sk_buff *tipc_media_get_names(void)
/**
* bearer_name_validate - validate & (optionally) deconstruct bearer name
- * @name - ptr to bearer name string
- * @name_parts - ptr to area for bearer name components (or NULL if not needed)
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
@@ -418,12 +420,12 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
int res = -EINVAL;
if (!tipc_own_addr) {
- warn("Bearer <%s> rejected, not supported in standalone mode\n",
- name);
+ pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
+ name);
return -ENOPROTOOPT;
}
if (!bearer_name_validate(name, &b_names)) {
- warn("Bearer <%s> rejected, illegal name\n", name);
+ pr_warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
if (tipc_addr_domain_valid(disc_domain) &&
@@ -435,12 +437,13 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
res = 0; /* accept specified node in own cluster */
}
if (res) {
- warn("Bearer <%s> rejected, illegal discovery domain\n", name);
+ pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
+ name);
return -EINVAL;
}
if ((priority > TIPC_MAX_LINK_PRI) &&
(priority != TIPC_MEDIA_LINK_PRI)) {
- warn("Bearer <%s> rejected, illegal priority\n", name);
+ pr_warn("Bearer <%s> rejected, illegal priority\n", name);
return -EINVAL;
}
@@ -448,8 +451,8 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
m_ptr = tipc_media_find(b_names.media_name);
if (!m_ptr) {
- warn("Bearer <%s> rejected, media <%s> not registered\n", name,
- b_names.media_name);
+ pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
+ name, b_names.media_name);
goto exit;
}
@@ -465,24 +468,25 @@ restart:
continue;
}
if (!strcmp(name, tipc_bearers[i].name)) {
- warn("Bearer <%s> rejected, already enabled\n", name);
+ pr_warn("Bearer <%s> rejected, already enabled\n",
+ name);
goto exit;
}
if ((tipc_bearers[i].priority == priority) &&
(++with_this_prio > 2)) {
if (priority-- == 0) {
- warn("Bearer <%s> rejected, duplicate priority\n",
- name);
+ pr_warn("Bearer <%s> rejected, duplicate priority\n",
+ name);
goto exit;
}
- warn("Bearer <%s> priority adjustment required %u->%u\n",
- name, priority + 1, priority);
+ pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
+ name, priority + 1, priority);
goto restart;
}
}
if (bearer_id >= MAX_BEARERS) {
- warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
- name, MAX_BEARERS);
+ pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
+ name, MAX_BEARERS);
goto exit;
}
@@ -490,7 +494,8 @@ restart:
strcpy(b_ptr->name, name);
res = m_ptr->enable_bearer(b_ptr);
if (res) {
- warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
+ pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
+ name, -res);
goto exit;
}
@@ -508,20 +513,20 @@ restart:
res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
if (res) {
bearer_disable(b_ptr);
- warn("Bearer <%s> rejected, discovery object creation failed\n",
- name);
+ pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
+ name);
goto exit;
}
- info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
- name, tipc_addr_string_fill(addr_string, disc_domain), priority);
+ pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
+ name,
+ tipc_addr_string_fill(addr_string, disc_domain), priority);
exit:
write_unlock_bh(&tipc_net_lock);
return res;
}
/**
- * tipc_block_bearer(): Block the bearer with the given name,
- * and reset all its links
+ * tipc_block_bearer - Block the bearer with the given name, and reset all its links
*/
int tipc_block_bearer(const char *name)
{
@@ -532,12 +537,12 @@ int tipc_block_bearer(const char *name)
read_lock_bh(&tipc_net_lock);
b_ptr = tipc_bearer_find(name);
if (!b_ptr) {
- warn("Attempt to block unknown bearer <%s>\n", name);
+ pr_warn("Attempt to block unknown bearer <%s>\n", name);
read_unlock_bh(&tipc_net_lock);
return -EINVAL;
}
- info("Blocking bearer <%s>\n", name);
+ pr_info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
list_splice_init(&b_ptr->cong_links, &b_ptr->links);
@@ -563,7 +568,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
- info("Disabling bearer <%s>\n", b_ptr->name);
+ pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
b_ptr->media->disable_bearer(b_ptr);
@@ -585,7 +590,7 @@ int tipc_disable_bearer(const char *name)
write_lock_bh(&tipc_net_lock);
b_ptr = tipc_bearer_find(name);
if (b_ptr == NULL) {
- warn("Attempt to disable unknown bearer <%s>\n", name);
+ pr_warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
} else {
bearer_disable(b_ptr);
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e3b2be37fb3..dd4c2abf08e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -57,7 +57,7 @@
*/
#define TIPC_MEDIA_TYPE_ETH 1
-/*
+/**
* struct tipc_media_addr - destination address used by TIPC bearers
* @value: address info (format defined by media)
* @media_id: TIPC media type identifier
@@ -179,7 +179,7 @@ void tipc_eth_media_stop(void);
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
struct sk_buff *tipc_bearer_get_names(void);
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c5712a34381..a056a3852f7 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -39,6 +39,8 @@
#include "name_table.h"
#include "config.h"
+#define REPLY_TRUNCATED "<truncated>\n"
+
static u32 config_port_ref;
static DEFINE_SPINLOCK(config_lock);
@@ -104,13 +106,12 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
return buf;
}
-#define MAX_STATS_INFO 2000
-
static struct sk_buff *tipc_show_stats(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
+ char *pb;
+ int pb_len;
int str_len;
u32 value;
@@ -121,17 +122,16 @@ static struct sk_buff *tipc_show_stats(void)
if (value != 0)
return tipc_cfg_reply_error_string("unsupported argument");
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (buf == NULL)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO);
-
- tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- /* Use additional tipc_printf()'s to return more info ... */
- str_len = tipc_printbuf_validate(&pb);
+ str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -334,12 +334,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_SHOW_PORTS:
rep_tlv_buf = tipc_port_get_ports();
break;
- case TIPC_CMD_SET_LOG_SIZE:
- rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_DUMP_LOG:
- rep_tlv_buf = tipc_log_dump();
- break;
case TIPC_CMD_SHOW_STATS:
rep_tlv_buf = tipc_show_stats();
break;
@@ -399,6 +393,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_GET_MAX_CLUSTERS:
case TIPC_CMD_SET_MAX_NODES:
case TIPC_CMD_GET_MAX_NODES:
+ case TIPC_CMD_SET_LOG_SIZE:
+ case TIPC_CMD_DUMP_LOG:
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (obsolete command)");
break;
@@ -408,6 +404,15 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
break;
}
+ WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
+
+ /* Append an error message if we cannot return all requested data */
+ if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
+ if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
+ sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
+ sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
+ }
+
/* Return reply buffer */
exit:
spin_unlock_bh(&config_lock);
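
Replies are now always allocated at ULTRA_STRING_MAX_LEN, and truncation is detected by probing the byte just past the string area: it can only be non-NUL if the writer ran out of room, in which case the tail of the reply is stamped with the marker. The sentinel technique in isolation (a sketch mirroring the hunk above, not the exact TLV layout):

	#define REPLY_TRUNCATED "<truncated>\n"

	static void mark_if_truncated(char *data, int total_len, int max_len)
	{
		if (data[max_len] != '\0')	/* overflow probe tripped */
			sprintf(data + total_len - sizeof(REPLY_TRUNCATED) - 1,
				REPLY_TRUNCATED);
	}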
@@ -432,7 +437,7 @@ static void cfg_named_msg_event(void *userdata,
if ((size < sizeof(*req_hdr)) ||
(size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
(ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
- warn("Invalid configuration message discarded\n");
+ pr_warn("Invalid configuration message discarded\n");
return;
}
@@ -478,7 +483,7 @@ int tipc_cfg_init(void)
return 0;
failed:
- err("Unable to create configuration service\n");
+ pr_err("Unable to create configuration service\n");
return res;
}
@@ -494,7 +499,7 @@ void tipc_cfg_reinit(void)
seq.lower = seq.upper = tipc_own_addr;
res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
if (res)
- err("Unable to reinitialize configuration service\n");
+ pr_err("Unable to reinitialize configuration service\n");
}
void tipc_cfg_stop(void)
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f7b95239ebd..6586eac6a50 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,22 +34,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-
#include "core.h"
#include "ref.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
+#include <linux/module.h>
#ifndef CONFIG_TIPC_PORTS
#define CONFIG_TIPC_PORTS 8191
#endif
-#ifndef CONFIG_TIPC_LOG
-#define CONFIG_TIPC_LOG 0
-#endif
/* global variables used by multiple sub-systems within TIPC */
int tipc_random;
@@ -125,7 +121,6 @@ static void tipc_core_stop(void)
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
- tipc_log_resize(0);
}
/**
@@ -161,10 +156,7 @@ static int __init tipc_init(void)
{
int res;
- if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
- warn("Unable to create log buffer\n");
-
- info("Activated (version " TIPC_MOD_VER ")\n");
+ pr_info("Activated (version " TIPC_MOD_VER ")\n");
tipc_own_addr = 0;
tipc_remote_management = 1;
@@ -175,9 +167,9 @@ static int __init tipc_init(void)
res = tipc_core_start();
if (res)
- err("Unable to start in single node mode\n");
+ pr_err("Unable to start in single node mode\n");
else
- info("Started in single node mode\n");
+ pr_info("Started in single node mode\n");
return res;
}
@@ -185,7 +177,7 @@ static void __exit tipc_exit(void)
{
tipc_core_stop_net();
tipc_core_stop();
- info("Deactivated\n");
+ pr_info("Deactivated\n");
}
module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 2a9bb99537b..fd42e106c18 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,6 +37,8 @@
#ifndef _TIPC_CORE_H
#define _TIPC_CORE_H
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/tipc.h>
#include <linux/tipc_config.h>
#include <linux/types.h>
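
Defining pr_fmt before the first include that drags in <linux/printk.h> makes every pr_err()/pr_warn()/pr_info() in the module carry the module prefix automatically, which is what renders the hand-rolled err()/warn()/info() macros removed below redundant. A minimal demonstration:

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/printk.h>

	static void pr_fmt_demo(void)
	{
		pr_warn("bearer rejected\n");	/* logged as "tipc: bearer rejected" */
	}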
@@ -58,68 +60,11 @@
#define TIPC_MOD_VER "2.0.0"
-struct tipc_msg; /* msg.h */
-struct print_buf; /* log.h */
-
-/*
- * TIPC system monitoring code
- */
-
-/*
- * TIPC's print buffer subsystem supports the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- * &buf : user-defined buffer (struct print_buf *)
- *
- * Note: TIPC_LOG is configured to echo its output to the system console;
- * user-defined buffers can be configured to do the same thing.
- */
-extern struct print_buf *const TIPC_NULL;
-extern struct print_buf *const TIPC_CONS;
-extern struct print_buf *const TIPC_LOG;
-
-void tipc_printf(struct print_buf *, const char *fmt, ...);
-
-/*
- * TIPC_OUTPUT is the destination print buffer for system messages.
- */
-#ifndef TIPC_OUTPUT
-#define TIPC_OUTPUT TIPC_LOG
-#endif
+#define ULTRA_STRING_MAX_LEN 32768
-#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_ERR "TIPC: " fmt, ## arg)
-#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_WARNING "TIPC: " fmt, ## arg)
-#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_NOTICE "TIPC: " fmt, ## arg)
-
-#ifdef CONFIG_TIPC_DEBUG
-
-/*
- * DBG_OUTPUT is the destination print buffer for debug messages.
- */
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT TIPC_LOG
-#endif
-
-#define dbg(fmt, arg...) tipc_printf(DBG_OUTPUT, KERN_DEBUG fmt, ## arg);
-
-#define msg_dbg(msg, txt) tipc_msg_dbg(DBG_OUTPUT, msg, txt);
-
-void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
-
-#else
-
-#define dbg(fmt, arg...) do {} while (0)
-#define msg_dbg(msg, txt) do {} while (0)
-
-#define tipc_msg_dbg(buf, msg, txt) do {} while (0)
-
-#endif
+struct tipc_msg; /* msg.h */
+int tipc_snprintf(char *buf, int len, const char *fmt, ...);
/*
* TIPC-specific error codes
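
Judging by the call sites (offset accumulation plus the later "+1 for NUL" adjustments), the new helper presumably wraps vscnprintf(), which returns the number of characters actually stored. A plausible implementation under that assumption:

	#include <linux/kernel.h>

	int tipc_snprintf(char *buf, int len, const char *fmt, ...)
	{
		int i;
		va_list args;

		va_start(args, fmt);
		i = vscnprintf(buf, len, fmt, args);
		va_end(args);
		return i;
	}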
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ae054cfe179..50eaa403eb6 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -100,14 +100,12 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
{
char node_addr_str[16];
char media_addr_str[64];
- struct print_buf pb;
tipc_addr_string_fill(node_addr_str, node_addr);
- tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
- tipc_media_addr_printf(&pb, media_addr);
- tipc_printbuf_validate(&pb);
- warn("Duplicate %s using %s seen on <%s>\n",
- node_addr_str, media_addr_str, b_ptr->name);
+ tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
+ media_addr);
+ pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
+ media_addr_str, b_ptr->name);
}
/**
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 9c6f22ff1c6..7a52d3922f3 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -57,14 +57,14 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
struct queue_item *item;
if (!handler_enabled) {
- err("Signal request ignored by handler\n");
+ pr_err("Signal request ignored by handler\n");
return -ENOPROTOOPT;
}
spin_lock_bh(&qitem_lock);
item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
if (!item) {
- err("Signal queue out of memory\n");
+ pr_err("Signal queue out of memory\n");
spin_unlock_bh(&qitem_lock);
return -ENOMEM;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7a614f43549..1c1e6151875 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -41,6 +41,12 @@
#include "discover.h"
#include "config.h"
+/*
+ * Error message prefixes
+ */
+static const char *link_co_err = "Link changeover error, ";
+static const char *link_rst_msg = "Resetting link ";
+static const char *link_unk_evt = "Unknown link event ";
/*
* Out-of-range value for link session numbers
@@ -153,8 +159,8 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
/**
* link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name - ptr to link name string
- * @name_parts - ptr to area for link name components (or NULL if not needed)
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
*
* Returns 1 if link name is valid, otherwise 0.
*/
@@ -300,20 +306,20 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
if (n_ptr->link_cnt >= 2) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- err("Attempt to establish third link to %s\n", addr_string);
+ pr_err("Attempt to establish third link to %s\n", addr_string);
return NULL;
}
if (n_ptr->links[b_ptr->identity]) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- err("Attempt to establish second link on <%s> to %s\n",
- b_ptr->name, addr_string);
+ pr_err("Attempt to establish second link on <%s> to %s\n",
+ b_ptr->name, addr_string);
return NULL;
}
l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
if (!l_ptr) {
- warn("Link creation failed, no memory\n");
+ pr_warn("Link creation failed, no memory\n");
return NULL;
}
@@ -371,7 +377,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
void tipc_link_delete(struct tipc_link *l_ptr)
{
if (!l_ptr) {
- err("Attempt to delete non-existent link\n");
+ pr_err("Attempt to delete non-existent link\n");
return;
}
@@ -632,8 +638,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
- info("Resetting link <%s>, requested by peer\n",
- l_ptr->name);
+ pr_info("%s<%s>, requested by peer\n", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -642,7 +648,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in WW state\n", event);
+ pr_err("%s%u in WW state\n", link_unk_evt, event);
}
break;
case WORKING_UNKNOWN:
@@ -654,8 +660,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- info("Resetting link <%s>, requested by peer "
- "while probing\n", l_ptr->name);
+ pr_info("%s<%s>, requested by peer while probing\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -680,8 +686,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
- warn("Resetting link <%s>, peer not responding\n",
- l_ptr->name);
+ pr_warn("%s<%s>, peer not responding\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
@@ -692,7 +698,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
break;
default:
- err("Unknown link event %u in WU state\n", event);
+ pr_err("%s%u in WU state\n", link_unk_evt, event);
}
break;
case RESET_UNKNOWN:
@@ -726,7 +732,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in RU state\n", event);
+ pr_err("%s%u in RU state\n", link_unk_evt, event);
}
break;
case RESET_RESET:
@@ -751,11 +757,11 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in RR state\n", event);
+ pr_err("%s%u in RR state\n", link_unk_evt, event);
}
break;
default:
- err("Unknown link state %u/%u\n", l_ptr->state, event);
+ pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
}
}
@@ -856,7 +862,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
kfree_skb(buf);
if (imp > CONN_MANAGER) {
- warn("Resetting link <%s>, send queue full", l_ptr->name);
+ pr_warn("%s<%s>, send queue full", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
}
return dsz;
@@ -944,7 +951,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
return res;
}
-/*
+/**
* tipc_link_send_names - send name table entries to new neighbor
*
* Send routine for bulk delivery of name table messages when contact
@@ -1409,8 +1416,8 @@ static void link_reset_all(unsigned long addr)
tipc_node_lock(n_ptr);
- warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
for (i = 0; i < MAX_BEARERS; i++) {
if (n_ptr->links[i]) {
@@ -1428,7 +1435,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
{
struct tipc_msg *msg = buf_msg(buf);
- warn("Retransmission failure on link <%s>\n", l_ptr->name);
+ pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
if (l_ptr->addr) {
/* Handle failure on standard link */
@@ -1440,21 +1447,23 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
struct tipc_node *n_ptr;
char addr_string[16];
- info("Msg seq number: %u, ", msg_seqno(msg));
- info("Outstanding acks: %lu\n",
- (unsigned long) TIPC_SKB_CB(buf)->handle);
+ pr_info("Msg seq number: %u, ", msg_seqno(msg));
+ pr_cont("Outstanding acks: %lu\n",
+ (unsigned long) TIPC_SKB_CB(buf)->handle);
n_ptr = tipc_bclink_retransmit_to();
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
- info("Broadcast link info for %s\n", addr_string);
- info("Supportable: %d, ", n_ptr->bclink.supportable);
- info("Supported: %d, ", n_ptr->bclink.supported);
- info("Acked: %u\n", n_ptr->bclink.acked);
- info("Last in: %u, ", n_ptr->bclink.last_in);
- info("Oos state: %u, ", n_ptr->bclink.oos_state);
- info("Last sent: %u\n", n_ptr->bclink.last_sent);
+ pr_info("Broadcast link info for %s\n", addr_string);
+ pr_info("Supportable: %d, Supported: %d, Acked: %u\n",
+ n_ptr->bclink.supportable,
+ n_ptr->bclink.supported,
+ n_ptr->bclink.acked);
+ pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
+ n_ptr->bclink.last_in,
+ n_ptr->bclink.oos_state,
+ n_ptr->bclink.last_sent);
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
@@ -1479,8 +1488,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
} else {
- err("Unexpected retransmit on link %s (qsize=%d)\n",
- l_ptr->name, l_ptr->retransm_queue_size);
+ pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
+ l_ptr->name, l_ptr->retransm_queue_size);
}
return;
} else {
@@ -1787,7 +1796,7 @@ cont:
read_unlock_bh(&tipc_net_lock);
}
-/*
+/**
* tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
*
* Returns increase in queue length (i.e. 0 or 1)
@@ -2074,8 +2083,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
- warn("Resetting link <%s>, priority change %u->%u\n",
- l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+ pr_warn("%s<%s>, priority change %u->%u\n",
+ link_rst_msg, l_ptr->name, l_ptr->priority,
+ msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
break;
@@ -2139,15 +2149,13 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
tunnel = l_ptr->owner->active_links[selector & 1];
if (!tipc_link_is_up(tunnel)) {
- warn("Link changeover error, "
- "tunnel link no longer available\n");
+ pr_warn("%stunnel link no longer available\n", link_co_err);
return;
}
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = tipc_buf_acquire(length + INT_H_SIZE);
if (!buf) {
- warn("Link changeover error, "
- "unable to send tunnel msg\n");
+ pr_warn("%sunable to send tunnel msg\n", link_co_err);
return;
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
@@ -2173,8 +2181,7 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
return;
if (!l_ptr->owner->permit_changeover) {
- warn("Link changeover error, "
- "peer did not permit changeover\n");
+ pr_warn("%speer did not permit changeover\n", link_co_err);
return;
}
@@ -2192,8 +2199,8 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
msg_set_size(&tunnel_hdr, INT_H_SIZE);
tipc_link_send_buf(tunnel, buf);
} else {
- warn("Link changeover error, "
- "unable to send changeover msg\n");
+ pr_warn("%sunable to send changeover msg\n",
+ link_co_err);
}
return;
}
@@ -2246,8 +2253,8 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
outbuf = tipc_buf_acquire(length + INT_H_SIZE);
if (outbuf == NULL) {
- warn("Link changeover error, "
- "unable to send duplicate msg\n");
+ pr_warn("%sunable to send duplicate msg\n",
+ link_co_err);
return;
}
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
@@ -2298,8 +2305,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
if (!dest_link)
goto exit;
if (dest_link == *l_ptr) {
- err("Unexpected changeover message on link <%s>\n",
- (*l_ptr)->name);
+ pr_err("Unexpected changeover message on link <%s>\n",
+ (*l_ptr)->name);
goto exit;
}
*l_ptr = dest_link;
@@ -2310,7 +2317,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
goto exit;
*buf = buf_extract(tunnel_buf, INT_H_SIZE);
if (*buf == NULL) {
- warn("Link changeover error, duplicate msg dropped\n");
+ pr_warn("%sduplicate msg dropped\n", link_co_err);
goto exit;
}
kfree_skb(tunnel_buf);
@@ -2319,8 +2326,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
/* First original message ?: */
if (tipc_link_is_up(dest_link)) {
- info("Resetting link <%s>, changeover initiated by peer\n",
- dest_link->name);
+ pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
+ dest_link->name);
tipc_link_reset(dest_link);
dest_link->exp_msg_count = msg_count;
if (!msg_count)
@@ -2333,8 +2340,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
/* Receive original message */
if (dest_link->exp_msg_count == 0) {
- warn("Link switchover error, "
- "got too many tunnelled messages\n");
+ pr_warn("%sgot too many tunnelled messages\n", link_co_err);
goto exit;
}
dest_link->exp_msg_count--;
@@ -2346,7 +2352,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
kfree_skb(tunnel_buf);
return 1;
} else {
- warn("Link changeover error, original msg dropped\n");
+ pr_warn("%soriginal msg dropped\n", link_co_err);
}
}
exit:
@@ -2367,7 +2373,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
- warn("Link unable to unbundle message(s)\n");
+ pr_warn("Link unable to unbundle message(s)\n");
break;
}
pos += align(msg_size(buf_msg(obuf)));
@@ -2538,7 +2544,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
set_fragm_size(pbuf, fragm_sz);
set_expected_frags(pbuf, exp_fragm_cnt - 1);
} else {
- dbg("Link unable to reassemble fragmented message\n");
+ pr_debug("Link unable to reassemble fragmented message\n");
kfree_skb(fbuf);
return -1;
}
@@ -2635,8 +2641,8 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
/**
* link_find_link - locate link by name
- * @name - ptr to link name string
- * @node - ptr to area to be filled with ptr to associated node
+ * @name: ptr to link name string
+ * @node: ptr to area to be filled with ptr to associated node
*
* Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
* this also prevents link deletion.
@@ -2671,8 +2677,8 @@ static struct tipc_link *link_find_link(const char *name,
/**
* link_value_is_valid -- validate proposed link tolerance/priority/window
*
- * @cmd - value type (TIPC_CMD_SET_LINK_*)
- * @new_value - the new value
+ * @cmd: value type (TIPC_CMD_SET_LINK_*)
+ * @new_value: the new value
*
* Returns 1 if value is within range, 0 if not.
*/
@@ -2693,9 +2699,9 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
/**
* link_cmd_set_value - change priority/tolerance/window for link/bearer/media
- * @name - ptr to link, bearer, or media name
- * @new_value - new value of link, bearer, or media setting
- * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ * @name: ptr to link, bearer, or media name
+ * @new_value: new value of link, bearer, or media setting
+ * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
*
* Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
*
@@ -2860,112 +2866,114 @@ static u32 percent(u32 count, u32 total)
*/
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
- struct print_buf pb;
- struct tipc_link *l_ptr;
+ struct tipc_link *l;
+ struct tipc_stats *s;
struct tipc_node *node;
char *status;
u32 profile_total = 0;
+ int ret;
if (!strcmp(name, tipc_bclink_name))
return tipc_bclink_stats(buf, buf_size);
- tipc_printbuf_init(&pb, buf, buf_size);
-
read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(name, &node);
- if (!l_ptr) {
+ l = link_find_link(name, &node);
+ if (!l) {
read_unlock_bh(&tipc_net_lock);
return 0;
}
tipc_node_lock(node);
+ s = &l->stats;
- if (tipc_link_is_active(l_ptr))
+ if (tipc_link_is_active(l))
status = "ACTIVE";
- else if (tipc_link_is_up(l_ptr))
+ else if (tipc_link_is_up(l))
status = "STANDBY";
else
status = "DEFUNCT";
- tipc_printf(&pb, "Link <%s>\n"
- " %s MTU:%u Priority:%u Tolerance:%u ms"
- " Window:%u packets\n",
- l_ptr->name, status, l_ptr->max_pkt,
- l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_in_no - l_ptr->stats.recv_info,
- l_ptr->stats.recv_fragments,
- l_ptr->stats.recv_fragmented,
- l_ptr->stats.recv_bundles,
- l_ptr->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_out_no - l_ptr->stats.sent_info,
- l_ptr->stats.sent_fragments,
- l_ptr->stats.sent_fragmented,
- l_ptr->stats.sent_bundles,
- l_ptr->stats.sent_bundled);
- profile_total = l_ptr->stats.msg_length_counts;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " %s MTU:%u Priority:%u Tolerance:%u ms"
+ " Window:%u packets\n",
+ l->name, status, l->max_pkt, l->priority,
+ l->tolerance, l->queue_limit[0]);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_in_no - s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_out_no - s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+
+ profile_total = s->msg_length_counts;
if (!profile_total)
profile_total = 1;
- tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
- " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
- "-16384:%u%% -32768:%u%% -66000:%u%%\n",
- l_ptr->stats.msg_length_counts,
- l_ptr->stats.msg_lengths_total / profile_total,
- percent(l_ptr->stats.msg_length_profile[0], profile_total),
- percent(l_ptr->stats.msg_length_profile[1], profile_total),
- percent(l_ptr->stats.msg_length_profile[2], profile_total),
- percent(l_ptr->stats.msg_length_profile[3], profile_total),
- percent(l_ptr->stats.msg_length_profile[4], profile_total),
- percent(l_ptr->stats.msg_length_profile[5], profile_total),
- percent(l_ptr->stats.msg_length_profile[6], profile_total));
- tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
- l_ptr->stats.recv_states,
- l_ptr->stats.recv_probes,
- l_ptr->stats.recv_nacks,
- l_ptr->stats.deferred_recv,
- l_ptr->stats.duplicates);
- tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
- l_ptr->stats.sent_states,
- l_ptr->stats.sent_probes,
- l_ptr->stats.sent_nacks,
- l_ptr->stats.sent_acks,
- l_ptr->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- l_ptr->stats.bearer_congs,
- l_ptr->stats.link_congs,
- l_ptr->stats.max_queue_sz,
- l_ptr->stats.queue_sz_counts
- ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
- : 0);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX profile sample:%u packets average:%u octets\n"
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+ "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+ s->msg_length_counts,
+ s->msg_lengths_total / profile_total,
+ percent(s->msg_length_profile[0], profile_total),
+ percent(s->msg_length_profile[1], profile_total),
+ percent(s->msg_length_profile[2], profile_total),
+ percent(s->msg_length_profile[3], profile_total),
+ percent(s->msg_length_profile[4], profile_total),
+ percent(s->msg_length_profile[5], profile_total),
+ percent(s->msg_length_profile[6], profile_total));
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX states:%u probes:%u naks:%u defs:%u"
+ " dups:%u\n", s->recv_states, s->recv_probes,
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX states:%u probes:%u naks:%u acks:%u"
+ " dups:%u\n", s->sent_states, s->sent_probes,
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion bearer:%u link:%u Send queue"
+ " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
+ s->max_queue_sz, s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
- return tipc_printbuf_validate(&pb);
+ return ret;
}
-#define MAX_LINK_STATS_INFO 2000
-
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
int str_len;
+ int pb_len;
+ char *pb;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
-
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
- (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+ pb, pb_len);
if (!str_len) {
kfree_skb(buf);
return tipc_cfg_reply_error_string("link not found");
}
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -3003,62 +3011,16 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
static void link_print(struct tipc_link *l_ptr, const char *str)
{
- char print_area[256];
- struct print_buf pb;
- struct print_buf *buf = &pb;
-
- tipc_printbuf_init(buf, print_area, sizeof(print_area));
-
- tipc_printf(buf, str);
- tipc_printf(buf, "Link %x<%s>:",
- l_ptr->addr, l_ptr->b_ptr->name);
-
-#ifdef CONFIG_TIPC_DEBUG
- if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
- goto print_state;
-
- tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
- tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
- tipc_printf(buf, "SQUE");
- if (l_ptr->first_out) {
- tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
- if (l_ptr->next_out)
- tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
- tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
- if ((mod(buf_seqno(l_ptr->last_out) -
- buf_seqno(l_ptr->first_out))
- != (l_ptr->out_queue_size - 1)) ||
- (l_ptr->last_out->next != NULL)) {
- tipc_printf(buf, "\nSend queue inconsistency\n");
- tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
- tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
- tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
- }
- } else
- tipc_printf(buf, "[]");
- tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
- if (l_ptr->oldest_deferred_in) {
- u32 o = buf_seqno(l_ptr->oldest_deferred_in);
- u32 n = buf_seqno(l_ptr->newest_deferred_in);
- tipc_printf(buf, ":RQUE[%u..%u]", o, n);
- if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
- tipc_printf(buf, ":RQSIZ(%u)",
- l_ptr->deferred_inqueue_sz);
- }
- }
-print_state:
-#endif
+ pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
if (link_working_unknown(l_ptr))
- tipc_printf(buf, ":WU");
+ pr_cont(":WU\n");
else if (link_reset_reset(l_ptr))
- tipc_printf(buf, ":RR");
+ pr_cont(":RR\n");
else if (link_reset_unknown(l_ptr))
- tipc_printf(buf, ":RU");
+ pr_cont(":RU\n");
else if (link_working_working(l_ptr))
- tipc_printf(buf, ":WW");
- tipc_printf(buf, "\n");
-
- tipc_printbuf_validate(buf);
- info("%s", print_area);
+ pr_cont(":WW\n");
+ else
+ pr_cont("\n");
}
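
link_print() now opens the record with pr_info() and appends the state tag with pr_cont(), which continues the same line instead of starting a new prefixed one; this replaces assembling the line in a scratch print_buf. The mechanism in two lines (link name invented):

	static void print_link_state(int up)
	{
		pr_info("Link <1.1.1:eth0-1.1.2:eth0>:");	/* opens the line */
		pr_cont(up ? ":WW\n" : ":RU\n");		/* appends, no new prefix */
	}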
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d6a60a963ce..6e921121be0 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -37,7 +37,6 @@
#ifndef _TIPC_LINK_H
#define _TIPC_LINK_H
-#include "log.h"
#include "msg.h"
#include "node.h"
@@ -63,6 +62,37 @@
*/
#define MAX_PKT_DEFAULT 1500
+struct tipc_stats {
+ u32 sent_info; /* used in counting # sent packets */
+ u32 recv_info; /* used in counting # recv'd packets */
+ u32 sent_states;
+ u32 recv_states;
+ u32 sent_probes;
+ u32 recv_probes;
+ u32 sent_nacks;
+ u32 recv_nacks;
+ u32 sent_acks;
+ u32 sent_bundled;
+ u32 sent_bundles;
+ u32 recv_bundled;
+ u32 recv_bundles;
+ u32 retransmitted;
+ u32 sent_fragmented;
+ u32 sent_fragments;
+ u32 recv_fragmented;
+ u32 recv_fragments;
+ u32 link_congs; /* # port sends blocked by congestion */
+ u32 bearer_congs;
+ u32 deferred_recv;
+ u32 duplicates;
+ u32 max_queue_sz; /* send queue size high water mark */
+ u32 accu_queue_sz; /* used for send queue size profiling */
+ u32 queue_sz_counts; /* used for send queue size profiling */
+ u32 msg_length_counts; /* used for message length profiling */
+ u32 msg_lengths_total; /* used for message length profiling */
+ u32 msg_length_profile[7]; /* used for msg. length profiling */
+};
+
/**
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
@@ -175,36 +205,7 @@ struct tipc_link {
struct sk_buff *defragm_buf;
/* Statistics */
- struct {
- u32 sent_info; /* used in counting # sent packets */
- u32 recv_info; /* used in counting # recv'd packets */
- u32 sent_states;
- u32 recv_states;
- u32 sent_probes;
- u32 recv_probes;
- u32 sent_nacks;
- u32 recv_nacks;
- u32 sent_acks;
- u32 sent_bundled;
- u32 sent_bundles;
- u32 recv_bundled;
- u32 recv_bundles;
- u32 retransmitted;
- u32 sent_fragmented;
- u32 sent_fragments;
- u32 recv_fragmented;
- u32 recv_fragments;
- u32 link_congs; /* # port sends blocked by congestion */
- u32 bearer_congs;
- u32 deferred_recv;
- u32 duplicates;
- u32 max_queue_sz; /* send queue size high water mark */
- u32 accu_queue_sz; /* used for send queue size profiling */
- u32 queue_sz_counts; /* used for send queue size profiling */
- u32 msg_length_counts; /* used for message length profiling */
- u32 msg_lengths_total; /* used for message length profiling */
- u32 msg_length_profile[7]; /* used for msg. length profiling */
- } stats;
+ struct tipc_stats stats;
};
struct tipc_port;
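
Hoisting the formerly anonymous statistics struct into the named tipc_stats type is what allows tipc_link_stats() and tipc_bclink_stats() above to take a short alias instead of repeating l_ptr->stats.xxx in every format argument:

	static void show_rx_tx(struct tipc_link *l)
	{
		struct tipc_stats *s = &l->stats;	/* possible only with a named type */

		pr_info("RX:%u TX:%u retransmitted:%u\n",
			s->recv_info, s->sent_info, s->retransmitted);
	}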
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 026733f2491..abef644f27d 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -36,302 +36,20 @@
#include "core.h"
#include "config.h"
-#include "log.h"
-
-/*
- * TIPC pre-defines the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- *
- * Additional user-defined print buffers are also permitted.
- */
-static struct print_buf null_buf = { NULL, 0, NULL, 0 };
-struct print_buf *const TIPC_NULL = &null_buf;
-
-static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_CONS = &cons_buf;
-
-static struct print_buf log_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_LOG = &log_buf;
-
-/*
- * Locking policy when using print buffers.
- *
- * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
- * 'print_string' when writing to a print buffer. This also protects against
- * concurrent writes to the print buffer being written to.
- *
- * 2) tipc_log_XXX() leverages the aforementioned use of 'print_lock' to
- * protect against all types of concurrent operations on their associated
- * print buffer (not just write operations).
- *
- * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
- * on the caller to prevent simultaneous use of the print buffer(s) being
- * manipulated.
- */
-static char print_string[TIPC_PB_MAX_STR];
-static DEFINE_SPINLOCK(print_lock);
-
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from);
-
-#define FORMAT(PTR, LEN, FMT) \
-{\
- va_list args;\
- va_start(args, FMT);\
- LEN = vsprintf(PTR, FMT, args);\
- va_end(args);\
- *(PTR + LEN) = '\0';\
-}
-
-/**
- * tipc_printbuf_init - initialize print buffer to empty
- * @pb: pointer to print buffer structure
- * @raw: pointer to character array used by print buffer
- * @size: size of character array
- *
- * Note: If the character array is too small (or absent), the print buffer
- * becomes a null device that discards anything written to it.
- */
-void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
-{
- pb->buf = raw;
- pb->crs = raw;
- pb->size = size;
- pb->echo = 0;
-
- if (size < TIPC_PB_MIN_SIZE) {
- pb->buf = NULL;
- } else if (raw) {
- pb->buf[0] = 0;
- pb->buf[size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_reset - reinitialize print buffer to empty state
- * @pb: pointer to print buffer structure
- */
-static void tipc_printbuf_reset(struct print_buf *pb)
-{
- if (pb->buf) {
- pb->crs = pb->buf;
- pb->buf[0] = 0;
- pb->buf[pb->size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_empty - test if print buffer is in empty state
- * @pb: pointer to print buffer structure
- *
- * Returns non-zero if print buffer is empty.
- */
-static int tipc_printbuf_empty(struct print_buf *pb)
-{
- return !pb->buf || (pb->crs == pb->buf);
-}
-
-/**
- * tipc_printbuf_validate - check for print buffer overflow
- * @pb: pointer to print buffer structure
- *
- * Verifies that a print buffer has captured all data written to it.
- * If data has been lost, linearize buffer and prepend an error message
- *
- * Returns length of print buffer data string (including trailing NUL)
- */
-int tipc_printbuf_validate(struct print_buf *pb)
-{
- char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
- char *cp_buf;
- struct print_buf cb;
-
- if (!pb->buf)
- return 0;
-
- if (pb->buf[pb->size - 1] == 0) {
- cp_buf = kmalloc(pb->size, GFP_ATOMIC);
- if (cp_buf) {
- tipc_printbuf_init(&cb, cp_buf, pb->size);
- tipc_printbuf_move(&cb, pb);
- tipc_printbuf_move(pb, &cb);
- kfree(cp_buf);
- memcpy(pb->buf, err, strlen(err));
- } else {
- tipc_printbuf_reset(pb);
- tipc_printf(pb, err);
- }
- }
- return pb->crs - pb->buf + 1;
-}
-
-/**
- * tipc_printbuf_move - move print buffer contents to another print buffer
- * @pb_to: pointer to destination print buffer structure
- * @pb_from: pointer to source print buffer structure
- *
- * Current contents of destination print buffer (if any) are discarded.
- * Source print buffer becomes empty if a successful move occurs.
- */
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from)
-{
- int len;
-
- /* Handle the cases where contents can't be moved */
- if (!pb_to->buf)
- return;
-
- if (!pb_from->buf) {
- tipc_printbuf_reset(pb_to);
- return;
- }
-
- if (pb_to->size < pb_from->size) {
- strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
- pb_to->buf[pb_to->size - 1] = ~0;
- pb_to->crs = strchr(pb_to->buf, 0);
- return;
- }
-
- /* Copy data from char after cursor to end (if used) */
- len = pb_from->buf + pb_from->size - pb_from->crs - 2;
- if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
- strcpy(pb_to->buf, pb_from->crs + 1);
- pb_to->crs = pb_to->buf + len;
- } else
- pb_to->crs = pb_to->buf;
-
- /* Copy data from start to cursor (always) */
- len = pb_from->crs - pb_from->buf;
- strcpy(pb_to->crs, pb_from->buf);
- pb_to->crs += len;
-
- tipc_printbuf_reset(pb_from);
-}
/**
- * tipc_printf - append formatted output to print buffer
- * @pb: pointer to print buffer
+ * tipc_snprintf - append formatted output to print buffer
+ * @buf: pointer to print buffer
+ * @len: buffer length
* @fmt: formatted info to be printed
*/
-void tipc_printf(struct print_buf *pb, const char *fmt, ...)
-{
- int chars_to_add;
- int chars_left;
- char save_char;
-
- spin_lock_bh(&print_lock);
-
- FORMAT(print_string, chars_to_add, fmt);
- if (chars_to_add >= TIPC_PB_MAX_STR)
- strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
-
- if (pb->buf) {
- chars_left = pb->buf + pb->size - pb->crs - 1;
- if (chars_to_add <= chars_left) {
- strcpy(pb->crs, print_string);
- pb->crs += chars_to_add;
- } else if (chars_to_add >= (pb->size - 1)) {
- strcpy(pb->buf, print_string + chars_to_add + 1
- - pb->size);
- pb->crs = pb->buf + pb->size - 1;
- } else {
- strcpy(pb->buf, print_string + chars_left);
- save_char = print_string[chars_left];
- print_string[chars_left] = 0;
- strcpy(pb->crs, print_string);
- print_string[chars_left] = save_char;
- pb->crs = pb->buf + chars_to_add - chars_left;
- }
- }
-
- if (pb->echo)
- printk("%s", print_string);
-
- spin_unlock_bh(&print_lock);
-}
-
-/**
- * tipc_log_resize - change the size of the TIPC log buffer
- * @log_size: print buffer size to use
- */
-int tipc_log_resize(int log_size)
-{
- int res = 0;
-
- spin_lock_bh(&print_lock);
- kfree(TIPC_LOG->buf);
- TIPC_LOG->buf = NULL;
- if (log_size) {
- if (log_size < TIPC_PB_MIN_SIZE)
- log_size = TIPC_PB_MIN_SIZE;
- res = TIPC_LOG->echo;
- tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
- log_size);
- TIPC_LOG->echo = res;
- res = !TIPC_LOG->buf;
- }
- spin_unlock_bh(&print_lock);
-
- return res;
-}
-
-/**
- * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
- */
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value > 32768)
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (log size must be 0-32768)");
- if (tipc_log_resize(value))
- return tipc_cfg_reply_error_string(
- "unable to create specified log (log size is now 0)");
- return tipc_cfg_reply_none();
-}
-
-/**
- * tipc_log_dump - capture TIPC log buffer contents in configuration message
- */
-struct sk_buff *tipc_log_dump(void)
+int tipc_snprintf(char *buf, int len, const char *fmt, ...)
{
- struct sk_buff *reply;
-
- spin_lock_bh(&print_lock);
- if (!TIPC_LOG->buf) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log not activated\n");
- } else if (tipc_printbuf_empty(TIPC_LOG)) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log is empty\n");
- } else {
- struct tlv_desc *rep_tlv;
- struct print_buf pb;
- int str_len;
+ int i;
+ va_list args;
- str_len = min(TIPC_LOG->size, 32768u);
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
- if (reply) {
- rep_tlv = (struct tlv_desc *)reply->data;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
- spin_lock_bh(&print_lock);
- tipc_printbuf_move(&pb, TIPC_LOG);
- spin_unlock_bh(&print_lock);
- str_len = strlen(TLV_DATA(rep_tlv)) + 1;
- skb_put(reply, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
- }
- }
- return reply;
+ va_start(args, fmt);
+ i = vscnprintf(buf, len, fmt, args);
+ va_end(args);
+ return i;
}
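
tipc_snprintf() wraps vscnprintf(), which returns the number of characters actually written (the trailing NUL is written but not counted) and never writes past len. That return contract is what lets every converted call site in this patch chain output safely. A minimal sketch with illustrative values:

/* Sketch of the accumulate-and-append idiom used throughout this patch */
static void demo_append(void)
{
	char buf[128];
	int len = sizeof(buf);
	int n = 0;

	n += tipc_snprintf(buf + n, len - n, "lower=%u ", 10);
	n += tipc_snprintf(buf + n, len - n, "upper=%u\n", 20);
	/* n == total string length, excluding the terminating NUL */
}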
diff --git a/net/tipc/log.h b/net/tipc/log.h
deleted file mode 100644
index d1f5eb967fd..00000000000
--- a/net/tipc/log.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * net/tipc/log.h: Include file for TIPC print buffer routines
- *
- * Copyright (c) 1997-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_LOG_H
-#define _TIPC_LOG_H
-
-/**
- * struct print_buf - TIPC print buffer structure
- * @buf: pointer to character array containing print buffer contents
- * @size: size of character array
- * @crs: pointer to first unused space in character array (i.e. final NUL)
- * @echo: echo output to system console if non-zero
- */
-struct print_buf {
- char *buf;
- u32 size;
- char *crs;
- int echo;
-};
-
-#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
-#define TIPC_PB_MAX_STR 512 /* max printable string (with trailing NUL) */
-
-void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
-int tipc_printbuf_validate(struct print_buf *pb);
-
-int tipc_log_resize(int log_size);
-
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
- int req_tlv_space);
-struct sk_buff *tipc_log_dump(void);
-
-#endif
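
For contrast, the retired API worked roughly as below, reconstructed from the declarations deleted above: init armed the buffer's overflow detection, tipc_printf() appended (wrapping on overflow), and validate linearized the contents and reported the final length.

/* Sketch of the old print_buf usage, now removed */
static void old_printbuf_demo(void)
{
	char raw[TIPC_PB_MIN_SIZE];
	struct print_buf pb;
	int str_len;

	tipc_printbuf_init(&pb, raw, sizeof(raw));
	tipc_printf(&pb, "sent=%u recv=%u\n", 1, 2);
	str_len = tipc_printbuf_validate(&pb);	/* incl. trailing NUL */
}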
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index deea0d232dc..f2db8a87d9c 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -109,245 +109,3 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
*buf = NULL;
return -EFAULT;
}
-
-#ifdef CONFIG_TIPC_DEBUG
-void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
-{
- u32 usr = msg_user(msg);
- tipc_printf(buf, KERN_DEBUG);
- tipc_printf(buf, str);
-
- switch (usr) {
- case MSG_BUNDLER:
- tipc_printf(buf, "BNDL::");
- tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
- break;
- case BCAST_PROTOCOL:
- tipc_printf(buf, "BCASTP::");
- break;
- case MSG_FRAGMENTER:
- tipc_printf(buf, "FRAGM::");
- switch (msg_type(msg)) {
- case FIRST_FRAGMENT:
- tipc_printf(buf, "FIRST:");
- break;
- case FRAGMENT:
- tipc_printf(buf, "BODY:");
- break;
- case LAST_FRAGMENT:
- tipc_printf(buf, "LAST:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
-
- }
- tipc_printf(buf, "NO(%u/%u):", msg_long_msgno(msg),
- msg_fragm_no(msg));
- break;
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- tipc_printf(buf, "DAT%u:", msg_user(msg));
- if (msg_short(msg)) {
- tipc_printf(buf, "CON:");
- break;
- }
- switch (msg_type(msg)) {
- case TIPC_CONN_MSG:
- tipc_printf(buf, "CON:");
- break;
- case TIPC_MCAST_MSG:
- tipc_printf(buf, "MCST:");
- break;
- case TIPC_NAMED_MSG:
- tipc_printf(buf, "NAM:");
- break;
- case TIPC_DIRECT_MSG:
- tipc_printf(buf, "DIR:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case NAME_DISTRIBUTOR:
- tipc_printf(buf, "NMD::");
- switch (msg_type(msg)) {
- case PUBLICATION:
- tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
- break;
- case WITHDRAWAL:
- tipc_printf(buf, "WDRW:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case CONN_MANAGER:
- tipc_printf(buf, "CONN_MNG:");
- switch (msg_type(msg)) {
- case CONN_PROBE:
- tipc_printf(buf, "PROBE:");
- break;
- case CONN_PROBE_REPLY:
- tipc_printf(buf, "PROBE_REPLY:");
- break;
- case CONN_ACK:
- tipc_printf(buf, "CONN_ACK:");
- tipc_printf(buf, "ACK(%u):", msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg));
- break;
- case LINK_PROTOCOL:
- switch (msg_type(msg)) {
- case STATE_MSG:
- tipc_printf(buf, "STATE:");
- tipc_printf(buf, "%s:", msg_probe(msg) ? "PRB" : "");
- tipc_printf(buf, "NXS(%u):", msg_next_sent(msg));
- tipc_printf(buf, "GAP(%u):", msg_seq_gap(msg));
- tipc_printf(buf, "LSTBC(%u):", msg_last_bcast(msg));
- break;
- case RESET_MSG:
- tipc_printf(buf, "RESET:");
- if (msg_size(msg) != msg_hdr_sz(msg))
- tipc_printf(buf, "BEAR:%s:", msg_data(msg));
- break;
- case ACTIVATE_MSG:
- tipc_printf(buf, "ACTIVATE:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- tipc_printf(buf, "PLANE(%c):", msg_net_plane(msg));
- tipc_printf(buf, "SESS(%u):", msg_session(msg));
- break;
- case CHANGEOVER_PROTOCOL:
- tipc_printf(buf, "TUNL:");
- switch (msg_type(msg)) {
- case DUPLICATE_MSG:
- tipc_printf(buf, "DUPL:");
- break;
- case ORIGINAL_MSG:
- tipc_printf(buf, "ORIG:");
- tipc_printf(buf, "EXP(%u)", msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- break;
- case LINK_CONFIG:
- tipc_printf(buf, "CFG:");
- switch (msg_type(msg)) {
- case DSC_REQ_MSG:
- tipc_printf(buf, "DSC_REQ:");
- break;
- case DSC_RESP_MSG:
- tipc_printf(buf, "DSC_RESP:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x:", msg_type(msg));
- break;
- }
- break;
- default:
- tipc_printf(buf, "UNKNOWN USER:");
- }
-
- switch (usr) {
- case CONN_MANAGER:
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- switch (msg_errcode(msg)) {
- case TIPC_OK:
- break;
- case TIPC_ERR_NO_NAME:
- tipc_printf(buf, "NO_NAME:");
- break;
- case TIPC_ERR_NO_PORT:
- tipc_printf(buf, "NO_PORT:");
- break;
- case TIPC_ERR_NO_NODE:
- tipc_printf(buf, "NO_PROC:");
- break;
- case TIPC_ERR_OVERLOAD:
- tipc_printf(buf, "OVERLOAD:");
- break;
- case TIPC_CONN_SHUTDOWN:
- tipc_printf(buf, "SHUTDOWN:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN ERROR(%x):",
- msg_errcode(msg));
- }
- default:
- break;
- }
-
- tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
- tipc_printf(buf, "SZ(%u):", msg_size(msg));
- tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
-
- if (msg_non_seq(msg))
- tipc_printf(buf, "NOSEQ:");
- else
- tipc_printf(buf, "ACK(%u):", msg_ack(msg));
- tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
- tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
-
- if (msg_isdata(msg)) {
- if (msg_named(msg)) {
- tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
- tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
- }
- }
-
- if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
- (usr != MSG_BUNDLER)) {
- if (!msg_short(msg)) {
- tipc_printf(buf, ":ORIG(%x:%u):",
- msg_orignode(msg), msg_origport(msg));
- tipc_printf(buf, ":DEST(%x:%u):",
- msg_destnode(msg), msg_destport(msg));
- } else {
- tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
- tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
- }
- }
- if (msg_user(msg) == NAME_DISTRIBUTOR) {
- tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
- tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
- }
-
- if (msg_user(msg) == LINK_CONFIG) {
- struct tipc_media_addr orig;
-
- tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
- tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
- memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
- orig.media_id = 0;
- orig.broadcast = 0;
- tipc_media_addr_printf(buf, &orig);
- }
- if (msg_user(msg) == BCAST_PROTOCOL) {
- tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
- tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
- }
- tipc_printf(buf, "\n");
- if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg)))
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
- if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
-}
-#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 158318e67b0..55d3928dfd6 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -161,7 +161,7 @@ void tipc_named_publish(struct publication *publ)
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
- warn("Publication distribution failure\n");
+ pr_warn("Publication distribution failure\n");
return;
}
@@ -186,7 +186,7 @@ void tipc_named_withdraw(struct publication *publ)
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
- warn("Withdrawal distribution failure\n");
+ pr_warn("Withdrawal distribution failure\n");
return;
}
@@ -213,7 +213,7 @@ static void named_distribute(struct list_head *message_list, u32 node,
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
if (!buf) {
- warn("Bulk publication failure\n");
+ pr_warn("Bulk publication failure\n");
return;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -283,9 +283,10 @@ static void named_purge_publ(struct publication *publ)
write_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
- err("Unable to remove publication from failed node\n"
- "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node, publ->ref, publ->key);
+ pr_err("Unable to remove publication from failed node\n"
+ " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node, publ->ref,
+ publ->key);
}
kfree(p);
@@ -329,14 +330,14 @@ void tipc_named_recv(struct sk_buff *buf)
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
} else {
- err("Unable to remove publication by node 0x%x\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- msg_orignode(msg),
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->ref), ntohl(item->key));
+ pr_err("Unable to remove publication by node 0x%x\n"
+ " (type=%u, lower=%u, ref=%u, key=%u)\n",
+ msg_orignode(msg), ntohl(item->type),
+ ntohl(item->lower), ntohl(item->ref),
+ ntohl(item->key));
}
} else {
- warn("Unrecognized name table message received\n");
+ pr_warn("Unrecognized name table message received\n");
}
item++;
}
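
The pr_warn()/pr_err() shortcuts expand through pr_fmt(), so every message gains a module prefix for free, provided the subsystem defines pr_fmt() before its includes. This hunk does not show that definition; the conventional one, assumed here, lives in a shared header such as core.h:

/* Assumed companion define (conventionally in core.h, before includes): */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* pr_warn("Publication distribution failure\n") then prints:
 *   tipc: Publication distribution failure
 */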
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 010f24a59da..360c478b0b5 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -126,7 +126,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
{
struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
- warn("Publication creation failure, no memory\n");
+ pr_warn("Publication creation failure, no memory\n");
return NULL;
}
@@ -163,7 +163,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
- warn("Name sequence creation failed, no memory\n");
+ pr_warn("Name sequence creation failed, no memory\n");
kfree(nseq);
kfree(sseq);
return NULL;
@@ -191,7 +191,7 @@ static void nameseq_delete_empty(struct name_seq *seq)
}
}
-/*
+/**
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
* Very time-critical, so binary searches through sub-sequence array.
@@ -263,8 +263,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Lower end overlaps existing entry => need an exact match */
if ((sseq->lower != lower) || (sseq->upper != upper)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
@@ -286,8 +286,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Fail if upper end overlaps into an existing entry */
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
@@ -296,8 +296,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
if (!sseqs) {
- warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
memcpy(sseqs, nseq->sseqs,
@@ -309,8 +309,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
info = kzalloc(sizeof(*info), GFP_ATOMIC);
if (!info) {
- warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
@@ -435,7 +435,7 @@ found:
}
/**
- * tipc_nameseq_subscribe: attach a subscription, and issue
+ * tipc_nameseq_subscribe - attach a subscription, and issue
* the prescribed number of events if there is any sub-
* sequence overlapping with the requested sequence
*/
@@ -492,8 +492,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
(lower > upper)) {
- dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
- type, lower, upper, scope);
+ pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
+ type, lower, upper, scope);
return NULL;
}
@@ -520,7 +520,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
return publ;
}
-/*
+/**
* tipc_nametbl_translate - perform name translation
*
* On entry, 'destnode' is the search domain used during translation.
@@ -668,8 +668,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *publ;
if (table.local_publ_count >= tipc_max_publications) {
- warn("Publication failed, local publication limit reached (%u)\n",
- tipc_max_publications);
+ pr_warn("Publication failed, local publication limit reached (%u)\n",
+ tipc_max_publications);
return NULL;
}
@@ -702,9 +702,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
return 1;
}
write_unlock_bh(&tipc_nametbl_lock);
- err("Unable to remove local publication\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- type, lower, ref, key);
+ pr_err("Unable to remove local publication\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ type, lower, ref, key);
return 0;
}
@@ -725,8 +725,8 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
- warn("Failed to create subscription for {%u,%u,%u}\n",
- s->seq.type, s->seq.lower, s->seq.upper);
+ pr_warn("Failed to create subscription for {%u,%u,%u}\n",
+ s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@@ -751,21 +751,22 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
/**
- * subseq_list: print specified sub-sequence contents into the given buffer
+ * subseq_list - print specified sub-sequence contents into the given buffer
*/
-static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
+static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
u32 index)
{
char portIdStr[27];
const char *scope_str[] = {"", " zone", " cluster", " node"};
struct publication *publ;
struct name_info *info;
+ int ret;
- tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+ ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
if (depth == 2) {
- tipc_printf(buf, "\n");
- return;
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
info = sseq->info;
@@ -774,52 +775,58 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
sprintf(portIdStr, "<%u.%u.%u:%u>",
tipc_zone(publ->node), tipc_cluster(publ->node),
tipc_node(publ->node), publ->ref);
- tipc_printf(buf, "%-26s ", portIdStr);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
if (depth > 3) {
- tipc_printf(buf, "%-10u %s", publ->key,
- scope_str[publ->scope]);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
+ publ->key, scope_str[publ->scope]);
}
if (!list_is_last(&publ->zone_list, &info->zone_list))
- tipc_printf(buf, "\n%33s", " ");
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "\n%33s", " ");
};
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
- * nameseq_list: print specified name sequence contents into the given buffer
+ * nameseq_list - print specified name sequence contents into the given buffer
*/
-static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
+static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
u32 type, u32 lowbound, u32 upbound, u32 index)
{
struct sub_seq *sseq;
char typearea[11];
+ int ret = 0;
if (seq->first_free == 0)
- return;
+ return 0;
sprintf(typearea, "%-10u", seq->type);
if (depth == 1) {
- tipc_printf(buf, "%s\n", typearea);
- return;
+ ret += tipc_snprintf(buf, len, "%s\n", typearea);
+ return ret;
}
for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
- tipc_printf(buf, "%s ", typearea);
+ ret += tipc_snprintf(buf + ret, len - ret, "%s ",
+ typearea);
spin_lock_bh(&seq->lock);
- subseq_list(sseq, buf, depth, index);
+ ret += subseq_list(sseq, buf + ret, len - ret,
+ depth, index);
spin_unlock_bh(&seq->lock);
sprintf(typearea, "%10s", " ");
}
}
+ return ret;
}
/**
* nametbl_header - print name table header into the given buffer
*/
-static void nametbl_header(struct print_buf *buf, u32 depth)
+static int nametbl_header(char *buf, int len, u32 depth)
{
const char *header[] = {
"Type ",
@@ -829,24 +836,27 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
};
int i;
+ int ret = 0;
if (depth > 4)
depth = 4;
for (i = 0; i < depth; i++)
- tipc_printf(buf, header[i]);
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, header[i]);
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
* nametbl_list - print specified name table contents into the given buffer
*/
-static void nametbl_list(struct print_buf *buf, u32 depth_info,
+static int nametbl_list(char *buf, int len, u32 depth_info,
u32 type, u32 lowbound, u32 upbound)
{
struct hlist_head *seq_head;
struct hlist_node *seq_node;
struct name_seq *seq;
int all_types;
+ int ret = 0;
u32 depth;
u32 i;
@@ -854,65 +864,69 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
if (depth == 0)
- return;
+ return 0;
if (all_types) {
/* display all entries in name table to specified depth */
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf, len, depth);
lowbound = 0;
upbound = ~0;
for (i = 0; i < tipc_nametbl_size; i++) {
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
- nameseq_list(seq, buf, depth, seq->type,
- lowbound, upbound, i);
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, seq->type,
+ lowbound, upbound, i);
}
}
} else {
/* display only the sequence that matches the specified type */
if (upbound < lowbound) {
- tipc_printf(buf, "invalid name sequence specified\n");
- return;
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "invalid name sequence specified\n");
+ return ret;
}
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf + ret, len - ret, depth);
i = hash(type);
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
if (seq->type == type) {
- nameseq_list(seq, buf, depth, type,
- lowbound, upbound, i);
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, type,
+ lowbound, upbound, i);
break;
}
}
}
+ return ret;
}
-#define MAX_NAME_TBL_QUERY 32768
-
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tipc_name_table_query *argv;
struct tlv_desc *rep_tlv;
- struct print_buf b;
+ char *pb;
+ int pb_len;
int str_len;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
read_lock_bh(&tipc_nametbl_lock);
- nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
- ntohl(argv->lowbound), ntohl(argv->upbound));
+ str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
+ ntohl(argv->type),
+ ntohl(argv->lowbound), ntohl(argv->upbound));
read_unlock_bh(&tipc_nametbl_lock);
- str_len = tipc_printbuf_validate(&b);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -940,8 +954,10 @@ void tipc_nametbl_stop(void)
/* Verify name table is empty, then release it */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < tipc_nametbl_size; i++) {
- if (!hlist_empty(&table.types[i]))
- err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
+ if (hlist_empty(&table.types[i]))
+ continue;
+ pr_err("nametbl_stop(): orphaned hash chain detected\n");
+ break;
}
kfree(table.types);
table.types = NULL;
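
Note the change in sizing contract: tipc_printbuf_validate() used to return the string length including the trailing NUL (and flagged overflow), whereas the snprintf-style return values exclude it, hence the explicit "+ 1" before sizing the TLV. ULTRA_STRING_MAX_LEN is not defined in this hunk; it is assumed to carry the old 32768-byte cap. Schematically, with names taken from the hunk above:

/* str_len = bytes written, excluding the NUL; str_len + 1 bytes are
 * valid TLV payload (vscnprintf() wrote the NUL but did not count it)
 */
str_len = nametbl_list(pb, pb_len, depth, type, lowbound, upbound);
skb_put(buf, TLV_SPACE(str_len + 1));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len + 1);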
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7c236c89cf5..5b5cea259ca 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -184,9 +184,9 @@ int tipc_net_start(u32 addr)
tipc_cfg_reinit();
- info("Started in network mode\n");
- info("Own node address %s, network identity %u\n",
- tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ pr_info("Started in network mode\n");
+ pr_info("Own node address %s, network identity %u\n",
+ tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
return 0;
}
@@ -202,5 +202,5 @@ void tipc_net_stop(void)
list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
tipc_node_delete(node);
write_unlock_bh(&tipc_net_lock);
- info("Left network mode\n");
+ pr_info("Left network mode\n");
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7bda8e3d139..47a839df27d 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -90,7 +90,7 @@ int tipc_netlink_start(void)
res = genl_register_family_with_ops(&tipc_genl_family,
&tipc_genl_ops, 1);
if (res) {
- err("Failed to register netlink interface\n");
+ pr_err("Failed to register netlink interface\n");
return res;
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index d4fd341e6e0..d21db204e25 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -105,7 +105,7 @@ struct tipc_node *tipc_node_create(u32 addr)
n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
if (!n_ptr) {
spin_unlock_bh(&node_create_lock);
- warn("Node creation failed, no memory\n");
+ pr_warn("Node creation failed, no memory\n");
return NULL;
}
@@ -151,8 +151,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->working_links++;
- info("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Established link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
if (!active[0]) {
active[0] = active[1] = l_ptr;
@@ -160,7 +160,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
return;
}
if (l_ptr->priority < active[0]->priority) {
- info("New link <%s> becomes standby\n", l_ptr->name);
+ pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
tipc_link_send_duplicate(active[0], l_ptr);
@@ -168,9 +168,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
active[0] = l_ptr;
return;
}
- info("Old link <%s> becomes standby\n", active[0]->name);
+ pr_info("Old link <%s> becomes standby\n", active[0]->name);
if (active[1] != active[0])
- info("Old link <%s> becomes standby\n", active[1]->name);
+ pr_info("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
}
@@ -211,11 +211,11 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->working_links--;
if (!tipc_link_is_active(l_ptr)) {
- info("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Lost standby link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
return;
}
- info("Lost link <%s> on network plane %c\n",
+ pr_info("Lost link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
active = &n_ptr->active_links[0];
@@ -290,8 +290,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
char addr_string[16];
u32 i;
- info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_info("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.supported) {
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 7a27344108f..5e34b015da4 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -51,7 +51,8 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
node_sub->node = tipc_node_find(addr);
if (!node_sub->node) {
- warn("Node subscription rejected, unknown node 0x%x\n", addr);
+ pr_warn("Node subscription rejected, unknown node 0x%x\n",
+ addr);
return;
}
node_sub->handle_node_down = handle_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2ad37a4db37..07c42fba672 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,7 +69,7 @@ static u32 port_peerport(struct tipc_port *p_ptr)
return msg_destport(&p_ptr->phdr);
}
-/*
+/**
* tipc_port_peer_msg - verify message was sent by connected port's peer
*
* Handles cases where the node's network address has changed from
@@ -191,7 +191,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
if (b == NULL) {
- warn("Unable to deliver multicast message(s)\n");
+ pr_warn("Unable to deliver multicast message(s)\n");
goto exit;
}
if ((index == 0) && (cnt != 0))
@@ -221,12 +221,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
if (!p_ptr) {
- warn("Port creation failed, no memory\n");
+ pr_warn("Port creation failed, no memory\n");
return NULL;
}
ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
if (!ref) {
- warn("Port creation failed, reference table exhausted\n");
+ pr_warn("Port creation failed, ref. table exhausted\n");
kfree(p_ptr);
return NULL;
}
@@ -581,67 +581,73 @@ exit:
kfree_skb(buf);
}
-static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
+static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
{
struct publication *publ;
+ int ret;
if (full_id)
- tipc_printf(buf, "<%u.%u.%u:%u>:",
- tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), p_ptr->ref);
+ ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
+ tipc_zone(tipc_own_addr),
+ tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr), p_ptr->ref);
else
- tipc_printf(buf, "%-10u:", p_ptr->ref);
+ ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
if (p_ptr->connected) {
u32 dport = port_peerport(p_ptr);
u32 destnode = port_peernode(p_ptr);
- tipc_printf(buf, " connected to <%u.%u.%u:%u>",
- tipc_zone(destnode), tipc_cluster(destnode),
- tipc_node(destnode), dport);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " connected to <%u.%u.%u:%u>",
+ tipc_zone(destnode),
+ tipc_cluster(destnode),
+ tipc_node(destnode), dport);
if (p_ptr->conn_type != 0)
- tipc_printf(buf, " via {%u,%u}",
- p_ptr->conn_type,
- p_ptr->conn_instance);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " via {%u,%u}", p_ptr->conn_type,
+ p_ptr->conn_instance);
} else if (p_ptr->published) {
- tipc_printf(buf, " bound to");
+ ret += tipc_snprintf(buf + ret, len - ret, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
- tipc_printf(buf, " {%u,%u}", publ->type,
- publ->lower);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u}", publ->type,
+ publ->lower);
else
- tipc_printf(buf, " {%u,%u,%u}", publ->type,
- publ->lower, publ->upper);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u,%u}", publ->type,
+ publ->lower, publ->upper);
}
}
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
-#define MAX_PORT_QUERY 32768
-
struct sk_buff *tipc_port_get_ports(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
+ char *pb;
+ int pb_len;
struct tipc_port *p_ptr;
- int str_len;
+ int str_len = 0;
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
spin_lock_bh(p_ptr->lock);
- port_print(p_ptr, &pb, 0);
+ str_len += port_print(p_ptr, pb, pb_len, 0);
spin_unlock_bh(p_ptr->lock);
}
spin_unlock_bh(&tipc_port_list_lock);
- str_len = tipc_printbuf_validate(&pb);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -906,11 +912,11 @@ int tipc_createport(void *usr_handle,
up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
if (!up_ptr) {
- warn("Port creation failed, no memory\n");
+ pr_warn("Port creation failed, no memory\n");
return -ENOMEM;
}
- p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher,
- port_wakeup, importance);
+ p_ptr = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
+ importance);
if (!p_ptr) {
kfree(up_ptr);
return -ENOMEM;
@@ -1078,8 +1084,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
if (tp_ptr->connected) {
tp_ptr->connected = 0;
/* let timer expire on its own to avoid deadlock! */
- tipc_nodesub_unsubscribe(
- &((struct tipc_port *)tp_ptr)->subscription);
+ tipc_nodesub_unsubscribe(&tp_ptr->subscription);
res = 0;
} else {
res = -ENOTCONN;
@@ -1099,7 +1104,7 @@ int tipc_disconnect(u32 ref)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = tipc_disconnect_port((struct tipc_port *)p_ptr);
+ res = tipc_disconnect_port(p_ptr);
tipc_port_unlock(p_ptr);
return res;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 98cbec9c453..4660e306579 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -79,6 +79,7 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
* struct user_port - TIPC user port (used with native API)
* @usr_handle: user-specified field
* @ref: object reference to associated TIPC port
+ *
* <various callback routines>
*/
struct user_port {
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 5cada0e38e0..2a2a938dc22 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
struct reference *entry = NULL;
if (!object) {
- err("Attempt to acquire reference to non-existent object\n");
+ pr_err("Attempt to acquire ref. to non-existent obj\n");
return 0;
}
if (!tipc_ref_table.entries) {
- err("Reference table not found during acquisition attempt\n");
+ pr_err("Ref. table not found in acquisition attempt\n");
return 0;
}
@@ -211,7 +211,7 @@ void tipc_ref_discard(u32 ref)
u32 index_mask;
if (!tipc_ref_table.entries) {
- err("Reference table not found during discard attempt\n");
+ pr_err("Ref. table not found during discard attempt\n");
return;
}
@@ -222,11 +222,11 @@ void tipc_ref_discard(u32 ref)
write_lock_bh(&ref_table_lock);
if (!entry->object) {
- err("Attempt to discard reference to non-existent object\n");
+ pr_err("Attempt to discard ref. to non-existent obj\n");
goto exit;
}
if (entry->ref != ref) {
- err("Attempt to discard non-existent reference\n");
+ pr_err("Attempt to discard non-existent reference\n");
goto exit;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5577a447f53..09dc5b97e07 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,12 +34,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/export.h>
-#include <net/sock.h>
-
#include "core.h"
#include "port.h"
+#include <linux/export.h>
+#include <net/sock.h>
+
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
@@ -54,7 +54,7 @@ struct tipc_sock {
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
-#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
+#define tipc_sk_port(sk) (tipc_sk(sk)->p)
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
(sock->state == SS_DISCONNECTING))
@@ -1699,9 +1699,8 @@ static int getsockopt(struct socket *sock,
return put_user(sizeof(value), ol);
}
-/**
- * Protocol switches for the various types of TIPC sockets
- */
+/* Protocol switches for the various types of TIPC sockets */
+
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
@@ -1788,13 +1787,13 @@ int tipc_socket_init(void)
res = proto_register(&tipc_proto, 1);
if (res) {
- err("Failed to register TIPC protocol type\n");
+ pr_err("Failed to register TIPC protocol type\n");
goto out;
}
res = sock_register(&tipc_family_ops);
if (res) {
- err("Failed to register TIPC socket type\n");
+ pr_err("Failed to register TIPC socket type\n");
proto_unregister(&tipc_proto);
goto out;
}
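
Dropping the cast in tipc_sk_port() works because tipc_sock's p member is already a struct tipc_port pointer (the field name is taken from the macros above), so the compiler can type-check users again. A sketch:

/* Sketch: only the container cast is needed; ->p is already typed */
static void demo_sock_access(struct sock *sk)
{
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_port *port = tipc_sk_port(sk);	/* == tsk->p */
}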
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index f976e9cd6a7..5ed5965eb0b 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -305,8 +305,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
/* Refuse subscription if global limit exceeded */
if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
- warn("Subscription rejected, subscription limit reached (%u)\n",
- tipc_max_subscriptions);
+ pr_warn("Subscription rejected, limit reached (%u)\n",
+ tipc_max_subscriptions);
subscr_terminate(subscriber);
return NULL;
}
@@ -314,7 +314,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
- warn("Subscription rejected, no memory\n");
+ pr_warn("Subscription rejected, no memory\n");
subscr_terminate(subscriber);
return NULL;
}
@@ -328,7 +328,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
(sub->seq.lower > sub->seq.upper)) {
- warn("Subscription rejected, illegal request\n");
+ pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
subscr_terminate(subscriber);
return NULL;
@@ -440,7 +440,7 @@ static void subscr_named_msg_event(void *usr_handle,
/* Create subscriber object */
subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
- warn("Subscriber rejected, no memory\n");
+ pr_warn("Subscriber rejected, no memory\n");
return;
}
INIT_LIST_HEAD(&subscriber->subscription_list);
@@ -458,7 +458,7 @@ static void subscr_named_msg_event(void *usr_handle,
NULL,
&subscriber->port_ref);
if (subscriber->port_ref == 0) {
- warn("Subscriber rejected, unable to create port\n");
+ pr_warn("Subscriber rejected, unable to create port\n");
kfree(subscriber);
return;
}
@@ -517,7 +517,7 @@ int tipc_subscr_start(void)
return 0;
failed:
- err("Failed to create subscription service\n");
+ pr_err("Failed to create subscription service\n");
return res;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 641f2e47f16..e4768c180da 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,15 +115,24 @@
#include <net/checksum.h>
#include <linux/security.h>
-struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
-#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
-#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+static struct hlist_head *unix_sockets_unbound(void *addr)
+{
+ unsigned long hash = (unsigned long)addr;
+
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+ hash %= UNIX_HASH_SIZE;
+ return &unix_socket_table[UNIX_HASH_SIZE + hash];
+}
+
+#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
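
The table doubles to 2 * UNIX_HASH_SIZE chains: the lower half still holds bound sockets (hashed by name or inode), while unbound sockets, formerly all on one shared chain, now spread across the upper half keyed by the socket pointer. The fold assumes UNIX_HASH_SIZE is a power of two (256 in kernels of this era):

/* Sketch: how a socket pointer picks an upper-half bucket
 *   hash ^= hash >> 16;     fold high bits down into the low word
 *   hash ^= hash >> 8;      mix once more at byte granularity
 *   hash %= UNIX_HASH_SIZE;                     -> 0 .. UNIX_HASH_SIZE-1
 *   &unix_socket_table[UNIX_HASH_SIZE + hash]   -> upper-half chain
 */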
@@ -645,7 +654,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
- unix_insert_socket(unix_sockets_unbound, sk);
+ unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
atomic_long_dec(&unix_nr_socks);
@@ -814,6 +823,34 @@ fail:
return NULL;
}
+static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
+{
+ struct dentry *dentry;
+ struct path path;
+ int err = 0;
+ /*
+ * Get the parent directory, calculate the hash for last
+ * component.
+ */
+ dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+ err = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
+ return err;
+
+ /*
+ * All right, let's create it.
+ */
+ err = security_path_mknod(&path, dentry, mode, 0);
+ if (!err) {
+ err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
+ if (!err) {
+ res->mnt = mntget(path.mnt);
+ res->dentry = dget(dentry);
+ }
+ }
+ done_path_create(&path, dentry);
+ return err;
+}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
@@ -822,8 +859,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
- struct dentry *dentry = NULL;
- struct path path;
int err;
unsigned int hash;
struct unix_address *addr;
@@ -860,43 +895,23 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- umode_t mode;
- err = 0;
- /*
- * Get the parent directory, calculate the hash for last
- * component.
- */
- dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
- err = PTR_ERR(dentry);
- if (IS_ERR(dentry))
- goto out_mknod_parent;
-
- /*
- * All right, let's create it.
- */
- mode = S_IFSOCK |
+ struct path path;
+ umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
- err = mnt_want_write(path.mnt);
- if (err)
- goto out_mknod_dput;
- err = security_path_mknod(&path, dentry, mode, 0);
- if (err)
- goto out_mknod_drop_write;
- err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
-out_mknod_drop_write:
- mnt_drop_write(path.mnt);
- if (err)
- goto out_mknod_dput;
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- dput(path.dentry);
- path.dentry = dentry;
-
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ unix_release_addr(addr);
+ goto out_up;
+ }
addr->hash = UNIX_HASH_SIZE;
- }
-
- spin_lock(&unix_table_lock);
-
- if (!sun_path[0]) {
+ hash = path.dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1);
+ spin_lock(&unix_table_lock);
+ u->path = path;
+ list = &unix_socket_table[hash];
+ } else {
+ spin_lock(&unix_table_lock);
err = -EADDRINUSE;
if (__unix_find_socket_byname(net, sunaddr, addr_len,
sk->sk_type, hash)) {
@@ -905,9 +920,6 @@ out_mknod_drop_write:
}
list = &unix_socket_table[addr->hash];
- } else {
- list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
- u->path = path;
}
err = 0;
@@ -921,16 +933,6 @@ out_up:
mutex_unlock(&u->readlock);
out:
return err;
-
-out_mknod_dput:
- dput(dentry);
- mutex_unlock(&path.dentry->d_inode->i_mutex);
- path_put(&path);
-out_mknod_parent:
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
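
With the mknod path factored out, the -EEXIST to -EADDRINUSE mapping happens in exactly one place in unix_bind()'s error branch. From userspace the behavior is unchanged and looks like this (illustrative, self-contained):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	strcpy(sun.sun_path, "/tmp/demo.sock");	/* path already exists */
	if (bind(fd, (struct sockaddr *)&sun, sizeof(sun)) < 0)
		perror("bind");	/* "Address already in use" (EADDRINUSE) */
	return 0;
}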
@@ -2239,47 +2241,54 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
}
#ifdef CONFIG_PROC_FS
-static struct sock *first_unix_socket(int *i)
+
+#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
- for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
- if (!hlist_empty(&unix_socket_table[*i]))
- return __sk_head(&unix_socket_table[*i]);
+ unsigned long offset = get_offset(*pos);
+ unsigned long bucket = get_bucket(*pos);
+ struct sock *sk;
+ unsigned long count = 0;
+
+ for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+ if (sock_net(sk) != seq_file_net(seq))
+ continue;
+ if (++count == offset)
+ break;
}
- return NULL;
+
+ return sk;
}
-static struct sock *next_unix_socket(int *i, struct sock *s)
+static struct sock *unix_next_socket(struct seq_file *seq,
+ struct sock *sk,
+ loff_t *pos)
{
- struct sock *next = sk_next(s);
- /* More in this chain? */
- if (next)
- return next;
- /* Look for next non-empty chain. */
- for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
- if (!hlist_empty(&unix_socket_table[*i]))
- return __sk_head(&unix_socket_table[*i]);
+ unsigned long bucket;
+
+ while (sk > (struct sock *)SEQ_START_TOKEN) {
+ sk = sk_next(sk);
+ if (!sk)
+ goto next_bucket;
+ if (sock_net(sk) == seq_file_net(seq))
+ return sk;
}
- return NULL;
-}
-struct unix_iter_state {
- struct seq_net_private p;
- int i;
-};
+ do {
+ sk = unix_from_bucket(seq, pos);
+ if (sk)
+ return sk;
-static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
-{
- struct unix_iter_state *iter = seq->private;
- loff_t off = 0;
- struct sock *s;
+next_bucket:
+ bucket = get_bucket(*pos) + 1;
+ *pos = set_bucket_offset(bucket, 1);
+ } while (bucket < ARRAY_SIZE(unix_socket_table));
- for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
- if (sock_net(s) != seq_file_net(seq))
- continue;
- if (off == pos)
- return s;
- ++off;
- }
return NULL;
}
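
The iterator state that used to live in a private struct (one int bucket index) is now packed into *pos itself: the top bits hold the bucket, the low BUCKET_SPACE bits a 1-based offset within that bucket, so the position is self-describing across seq_file restarts. A worked sketch using the macros above, with illustrative values:

/* Sketch: pack/unpack an iterator position */
static void demo_pos_encoding(void)
{
	unsigned long bucket = 3, offset = 7;
	loff_t pos = set_bucket_offset(bucket, offset);

	/* get_bucket(pos) == 3, get_offset(pos) == 7 */

	/* advancing to the next bucket restarts its offset at 1: */
	pos = set_bucket_offset(get_bucket(pos) + 1, 1);
}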
@@ -2287,22 +2296,20 @@ static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(unix_table_lock)
{
spin_lock(&unix_table_lock);
- return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+ if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
+ return NULL;
+
+ return unix_next_socket(seq, NULL, pos);
}
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct unix_iter_state *iter = seq->private;
- struct sock *sk = v;
++*pos;
-
- if (v == SEQ_START_TOKEN)
- sk = first_unix_socket(&iter->i);
- else
- sk = next_unix_socket(&iter->i, sk);
- while (sk && (sock_net(sk) != seq_file_net(seq)))
- sk = next_unix_socket(&iter->i, sk);
- return sk;
+ return unix_next_socket(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
@@ -2365,7 +2372,7 @@ static const struct seq_operations unix_seq_ops = {
static int unix_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &unix_seq_ops,
- sizeof(struct unix_iter_state));
+ sizeof(struct seq_net_private));
}
static const struct file_operations unix_seq_fops = {
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f..750b1340844 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
#include <net/af_unix.h>
#include <net/tcp_states.h>
-#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
- RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_address *addr = unix_sk(sk)->addr;
- char *s;
-
- if (addr) {
- s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
- memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
- }
- return 0;
+ if (!addr)
+ return 0;
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+ addr->name->sun_path);
}
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
struct dentry *dentry = unix_sk(sk)->path.dentry;
- struct unix_diag_vfs *uv;
if (dentry) {
- uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
- uv->udiag_vfs_ino = dentry->d_inode->i_ino;
- uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+ struct unix_diag_vfs uv = {
+ .udiag_vfs_ino = dentry->d_inode->i_ino,
+ .udiag_vfs_dev = dentry->d_sb->s_dev,
+ };
+
+ return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
}
return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
}
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
unix_state_unlock(peer);
sock_put(peer);
- RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+ return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
}
return 0;
-rtattr_failure:
- return -EMSGSIZE;
}
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
struct sk_buff *skb;
+ struct nlattr *attr;
u32 *buf;
int i;
if (sk->sk_state == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
- buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
- sk->sk_receive_queue.qlen * sizeof(u32));
+
+ attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+ sk->sk_receive_queue.qlen * sizeof(u32));
+ if (!attr)
+ goto errout;
+
+ buf = nla_data(attr);
i = 0;
skb_queue_walk(&sk->sk_receive_queue, skb) {
struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
return 0;
-rtattr_failure:
+errout:
spin_unlock(&sk->sk_receive_queue.lock);
return -EMSGSIZE;
}
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_diag_rqlen *rql;
-
- rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+ struct unix_diag_rqlen rql;
if (sk->sk_state == TCP_LISTEN) {
- rql->udiag_rqueue = sk->sk_receive_queue.qlen;
- rql->udiag_wqueue = sk->sk_max_ack_backlog;
+ rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
- rql->udiag_rqueue = (__u32)unix_inq_len(sk);
- rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+ rql.udiag_rqueue = (u32) unix_inq_len(sk);
+ rql.udiag_wqueue = (u32) unix_outq_len(sk);
}
- return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 pid, u32 seq, u32 flags, int sk_ino)
{
- unsigned char *b = skb_tail_pointer(skb);
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
- nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
- nlh->nlmsg_flags = flags;
-
- rep = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+ flags);
+ if (!nlh)
+ return -EMSGSIZE;
+ rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if ((req->udiag_show & UDIAG_SHOW_NAME) &&
sk_diag_dump_name(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_VFS) &&
sk_diag_dump_vfs(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_PEER) &&
sk_diag_dump_peer(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
sk_diag_dump_icons(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
sk_diag_show_rqlen(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ return nlmsg_end(skb, nlh);
-nlmsg_failure:
- nlmsg_trim(skb, b);
+out_nlmsg_trim:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
@@ -188,19 +177,24 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct unix_diag_req *req;
int num, s_num, slot, s_slot;
+ struct net *net = sock_net(skb->sk);
- req = NLMSG_DATA(cb->nlh);
+ req = nlmsg_data(cb->nlh);
s_slot = cb->args[0];
num = s_num = cb->args[1];
spin_lock(&unix_table_lock);
- for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+ for (slot = s_slot;
+ slot < ARRAY_SIZE(unix_socket_table);
+ s_num = 0, slot++) {
struct sock *sk;
struct hlist_node *node;
num = 0;
sk_for_each(sk, node, &unix_socket_table[slot]) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -228,7 +222,7 @@ static struct sock *unix_lookup_by_ino(int ino)
struct sock *sk;
spin_lock(&unix_table_lock);
- for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+ for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
struct hlist_node *node;
sk_for_each(sk, node, &unix_socket_table[i])
@@ -252,6 +246,7 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
struct sock *sk;
struct sk_buff *rep;
unsigned int extra_len;
+ struct net *net = sock_net(in_skb->sk);
if (req->udiag_ino == 0)
goto out_nosk;
@@ -268,22 +263,21 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
extra_len = 256;
again:
err = -ENOMEM;
- rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
- GFP_KERNEL);
+ rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
if (!rep)
goto out;
err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
- kfree_skb(rep);
+ nlmsg_free(rep);
extra_len += 256;
if (extra_len >= PAGE_SIZE)
goto out;
goto again;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
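
The again: loop above is a grow-and-retry allocation: each failed sk_diag_fill() frees the reply, bumps the allocation by 256 bytes and tries again until a page is reached. The same idiom sketched generically (try_fill() is a hypothetical stand-in for the fill routine, not from the patch):

static int try_fill(struct sk_buff *rep);	/* hypothetical helper */

/* Sketch of the retry idiom used above; illustrative only. */
static struct sk_buff *alloc_reply_retry(size_t base_len, gfp_t gfp)
{
	unsigned int extra = 256;
	struct sk_buff *rep;

	while (extra < PAGE_SIZE) {
		rep = nlmsg_new(base_len + extra, gfp);
		if (!rep)
			return NULL;
		if (try_fill(rep) >= 0)
			return rep;
		nlmsg_free(rep);	/* too small, grow and retry */
		extra += 256;
	}
	return NULL;
}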
@@ -297,6 +291,7 @@ out_nosk:
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct unix_diag_req);
+ struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -305,9 +300,9 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
struct netlink_dump_control c = {
.dump = unix_diag_dump,
};
- return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
} else
- return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+ return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler unix_diag_handler = {
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 788a12c1eb5..2ab785064b7 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
* successfully, add it to the interface list.
*/
- if (dev->name == NULL) {
- err = -EINVAL;
- } else {
+#ifdef WANDEBUG
+ printk(KERN_INFO "%s: registering interface %s...\n",
+ wanrouter_modname, dev->name);
+#endif
- #ifdef WANDEBUG
- printk(KERN_INFO "%s: registering interface %s...\n",
- wanrouter_modname, dev->name);
- #endif
-
- err = register_netdev(dev);
- if (!err) {
- struct net_device *slave = NULL;
- unsigned long smp_flags=0;
-
- lock_adapter_irq(&wandev->lock, &smp_flags);
-
- if (wandev->dev == NULL) {
- wandev->dev = dev;
- } else {
- for (slave=wandev->dev;
- DEV_TO_SLAVE(slave);
- slave = DEV_TO_SLAVE(slave))
- DEV_TO_SLAVE(slave) = dev;
- }
- ++wandev->ndev;
-
- unlock_adapter_irq(&wandev->lock, &smp_flags);
- err = 0; /* done !!! */
- goto out;
+ err = register_netdev(dev);
+ if (!err) {
+ struct net_device *slave = NULL;
+ unsigned long smp_flags=0;
+
+ lock_adapter_irq(&wandev->lock, &smp_flags);
+
+ if (wandev->dev == NULL) {
+ wandev->dev = dev;
+ } else {
+ for (slave=wandev->dev;
+ DEV_TO_SLAVE(slave);
+ slave = DEV_TO_SLAVE(slave))
+ DEV_TO_SLAVE(slave) = dev;
}
+ ++wandev->ndev;
+
+ unlock_adapter_irq(&wandev->lock, &smp_flags);
+ err = 0; /* done !!! */
+ goto out;
}
if (wandev->del_if)
wandev->del_if(wandev, dev);
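
The slave loop kept above is terse; for a well-formed chain, the tail append it performs can be written in the more conventional walk-then-link form (sketch only; DEV_TO_SLAVE() is assumed, as in the code above, to expand to an lvalue for the per-device next pointer):

/* Sketch: link dev at the end of wandev's slave chain. */
static void wan_append_slave(struct wan_device *wandev,
			     struct net_device *dev)
{
	struct net_device *slave;

	if (!wandev->dev) {
		wandev->dev = dev;
		return;
	}
	for (slave = wandev->dev; DEV_TO_SLAVE(slave); )
		slave = DEV_TO_SLAVE(slave);	/* walk to the tail */
	DEV_TO_SLAVE(slave) = dev;		/* link the new device */
}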
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 2e4444fedbe..fe4adb12b3e 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -74,6 +74,27 @@ config CFG80211_REG_DEBUG
If unsure, say N.
+config CFG80211_CERTIFICATION_ONUS
+ bool "cfg80211 certification onus"
+ depends on CFG80211 && EXPERT
+ default n
+ ---help---
+ You should disable this option unless you are both capable
+ and willing to ensure your system will remain regulatory
+ compliant with the features available under this option.
+ Some options may still be under heavy development and
+	  Some options may still be under heavy development and
+	  for whatever reason regulatory compliance has not been or
+	  cannot yet be verified. Regulatory verification may at
+	  times only be possible once you have the final system
+	  in place.
+
+	  This option should only be enabled by system integrators
+	  or distributions that have done the work necessary to ensure
+	  regulatory certification on the system with the enabled
+	  features. Alternatively you can enable this option if
+	  you are a wireless researcher and are working in a
+	  controlled environment approved by your local regulatory agency.
+
config CFG80211_DEFAULT_PS
bool "enable powersave by default"
depends on CFG80211
@@ -114,24 +135,10 @@ config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility"
depends on CFG80211
select WEXT_CORE
- default y
help
Enable this option if you need old userspace for wireless
extensions with cfg80211-based drivers.
-config WIRELESS_EXT_SYSFS
- bool "Wireless extensions sysfs files"
- depends on WEXT_CORE && SYSFS
- help
- This option enables the deprecated wireless statistics
- files in /sys/class/net/*/wireless/. The same information
- is available via the ioctls as well.
-
- Say N. If you know you have ancient tools requiring it,
- like very old versions of hal (prior to 0.5.12 release),
- say Y and update the tools as soon as possible as this
- option will be removed soon.
-
config LIB80211
tristate "Common routines for IEEE802.11 drivers"
default n
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 55a28ab21db..0f7e0d621ab 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
new file mode 100644
index 00000000000..fcc60d8dbef
--- /dev/null
+++ b/net/wireless/ap.c
@@ -0,0 +1,46 @@
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/cfg80211.h>
+#include "nl80211.h"
+#include "core.h"
+
+
+static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!rdev->ops->stop_ap)
+ return -EOPNOTSUPP;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ if (!wdev->beacon_interval)
+ return -ENOENT;
+
+ err = rdev->ops->stop_ap(&rdev->wiphy, dev);
+ if (!err) {
+ wdev->beacon_interval = 0;
+ wdev->channel = NULL;
+ }
+
+ return err;
+}
+
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_stop_ap(rdev, dev);
+ wdev_unlock(wdev);
+
+ return err;
+}
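
The new ap.c uses the usual cfg80211 locking split: the double-underscore function states its contract with ASSERT_WDEV_LOCK() and the public wrapper satisfies it. The idiom in isolation (names below are illustrative):

/* Sketch of the __locked/wrapper pairing used in ap.c. */
static int __do_wdev_op(struct wireless_dev *wdev)
{
	ASSERT_WDEV_LOCK(wdev);		/* caller must hold wdev->mtx */
	/* ... mutate wdev state safely ... */
	return 0;
}

static int do_wdev_op(struct wireless_dev *wdev)
{
	int err;

	wdev_lock(wdev);
	err = __do_wdev_op(wdev);
	wdev_unlock(wdev);

	return err;
}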
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 884801ac4dd..d355f67d0cd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -60,7 +60,7 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
diff = -20;
break;
default:
- return false;
+ return true;
}
sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
@@ -78,60 +78,75 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, int freq,
- enum nl80211_channel_type channel_type)
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type chantype)
{
struct ieee80211_channel *chan;
- int result;
-
- if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
- wdev = NULL;
- if (wdev) {
- ASSERT_WDEV_LOCK(wdev);
-
- if (!netif_running(wdev->netdev))
- return -ENETDOWN;
- }
-
- if (!rdev->ops->set_channel)
+ if (!rdev->ops->set_monitor_channel)
return -EOPNOTSUPP;
+ if (!cfg80211_has_monitors_only(rdev))
+ return -EBUSY;
- chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ chan = rdev_freq_to_chan(rdev, freq, chantype);
if (!chan)
return -EINVAL;
- /* Both channels should be able to initiate communication */
- if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
- wdev->iftype == NL80211_IFTYPE_AP ||
- wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
- wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
- wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
- switch (channel_type) {
- case NL80211_CHAN_HT40PLUS:
- case NL80211_CHAN_HT40MINUS:
- if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
- channel_type)) {
- printk(KERN_DEBUG
- "cfg80211: Secondary channel not "
- "allowed to initiate communication\n");
- return -EINVAL;
- }
- break;
- default:
- break;
+ return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+ struct ieee80211_channel **chan,
+ enum cfg80211_chan_mode *chanmode)
+{
+ *chan = NULL;
+ *chanmode = CHAN_MODE_UNDEFINED;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ if (wdev->current_bss) {
+ *chan = wdev->current_bss->pub.channel;
+ *chanmode = wdev->ibss_fixed
+ ? CHAN_MODE_SHARED
+ : CHAN_MODE_EXCLUSIVE;
+ return;
+		}
+		break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (wdev->current_bss) {
+ *chan = wdev->current_bss->pub.channel;
+ *chanmode = CHAN_MODE_SHARED;
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->beacon_interval) {
+ *chan = wdev->channel;
+ *chanmode = CHAN_MODE_SHARED;
}
+ return;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (wdev->mesh_id_len) {
+ *chan = wdev->channel;
+ *chanmode = CHAN_MODE_SHARED;
+ }
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ /* these interface types don't really have a channel */
+ return;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ WARN_ON(1);
}
- result = rdev->ops->set_channel(&rdev->wiphy,
- wdev ? wdev->netdev : NULL,
- chan, channel_type);
- if (result)
- return result;
-
- if (wdev)
- wdev->channel = chan;
-
- return 0;
+ return;
}
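
cfg80211_get_chan_state() reports both the channel and how it is held, which is what the new combination checks consume. A hypothetical consumer, sketched under the assumption that it runs with the wdev lock held:

/* Hypothetical conflict check built on cfg80211_get_chan_state();
 * illustrative only, not part of the patch. */
static int chan_conflicts(struct wireless_dev *other,
			  struct ieee80211_channel *want)
{
	struct ieee80211_channel *used;
	enum cfg80211_chan_mode mode;

	cfg80211_get_chan_state(other, &used, &mode);

	if (mode == CHAN_MODE_UNDEFINED)
		return 0;			/* no channel held */
	if (mode == CHAN_MODE_EXCLUSIVE)
		return -EBUSY;			/* e.g. fixed-channel IBSS */
	return used == want ? 0 : -EBUSY;	/* shared must match */
}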
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a87d4355297..31b40cc4a9c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -96,69 +96,6 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
return &rdev->wiphy;
}
-/* requires cfg80211_mutex to be held! */
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info)
-{
- int ifindex;
- struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL;
- struct net_device *dev;
- int err = -EINVAL;
-
- assert_cfg80211_lock();
-
- if (info->attrs[NL80211_ATTR_WIPHY]) {
- bywiphyidx = cfg80211_rdev_by_wiphy_idx(
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY]));
- err = -ENODEV;
- }
-
- if (info->attrs[NL80211_ATTR_IFINDEX]) {
- ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
- dev = dev_get_by_index(genl_info_net(info), ifindex);
- if (dev) {
- if (dev->ieee80211_ptr)
- byifidx =
- wiphy_to_dev(dev->ieee80211_ptr->wiphy);
- dev_put(dev);
- }
- err = -ENODEV;
- }
-
- if (bywiphyidx && byifidx) {
- if (bywiphyidx != byifidx)
- return ERR_PTR(-EINVAL);
- else
- return bywiphyidx; /* == byifidx */
- }
- if (bywiphyidx)
- return bywiphyidx;
-
- if (byifidx)
- return byifidx;
-
- return ERR_PTR(err);
-}
-
-struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info)
-{
- struct cfg80211_registered_device *rdev;
-
- mutex_lock(&cfg80211_mutex);
- rdev = __cfg80211_rdev_from_info(info);
-
- /* if it is not an error we grab the lock on
- * it to assure it won't be going away while
- * we operate on it */
- if (!IS_ERR(rdev))
- mutex_lock(&rdev->mtx);
-
- mutex_unlock(&cfg80211_mutex);
-
- return rdev;
-}
-
struct cfg80211_registered_device *
cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
{
@@ -239,7 +176,9 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
return -EOPNOTSUPP;
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wdev->netdev, net, "wlan%d");
if (err)
@@ -251,8 +190,10 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
/* failed -- clean up to old netns */
net = wiphy_net(&rdev->wiphy);
- list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list,
+ list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
list) {
+ if (!wdev->netdev)
+ continue;
wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wdev->netdev, net,
"wlan%d");
@@ -289,8 +230,9 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
rtnl_lock();
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list)
- dev_close(wdev->netdev);
+ list_for_each_entry(wdev, &rdev->wdev_list, list)
+ if (wdev->netdev)
+ dev_close(wdev->netdev);
mutex_unlock(&rdev->devlist_mtx);
rtnl_unlock();
@@ -367,7 +309,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
mutex_init(&rdev->mtx);
mutex_init(&rdev->devlist_mtx);
mutex_init(&rdev->sched_scan_mtx);
- INIT_LIST_HEAD(&rdev->netdev_list);
+ INIT_LIST_HEAD(&rdev->wdev_list);
spin_lock_init(&rdev->bss_lock);
INIT_LIST_HEAD(&rdev->bss_list);
INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
@@ -436,6 +378,14 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(!c->num_different_channels))
return -EINVAL;
+ /*
+ * Put a sane limit on maximum number of different
+ * channels to simplify channel accounting code.
+ */
+ if (WARN_ON(c->num_different_channels >
+ CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
+ return -EINVAL;
+
if (WARN_ON(!c->n_limits))
return -EINVAL;
@@ -484,9 +434,11 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
+#ifdef CONFIG_PM
if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
!(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
return -EINVAL;
+#endif
if (WARN_ON(wiphy->ap_sme_capa &&
!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
@@ -521,8 +473,14 @@ int wiphy_register(struct wiphy *wiphy)
continue;
sband->band = band;
-
- if (WARN_ON(!sband->n_channels || !sband->n_bitrates))
+ if (WARN_ON(!sband->n_channels))
+ return -EINVAL;
+ /*
+		 * on the 60 GHz band there are no legacy rates, so
+ * n_bitrates is 0
+ */
+ if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
+ !sband->n_bitrates))
return -EINVAL;
/*
@@ -563,12 +521,14 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
}
+#ifdef CONFIG_PM
if (rdev->wiphy.wowlan.n_patterns) {
if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
rdev->wiphy.wowlan.pattern_min_len >
rdev->wiphy.wowlan.pattern_max_len))
return -EINVAL;
}
+#endif
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
@@ -582,7 +542,7 @@ int wiphy_register(struct wiphy *wiphy)
}
/* set up regulatory info */
- regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ wiphy_regulatory_register(wiphy);
list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
@@ -667,7 +627,7 @@ void wiphy_unregister(struct wiphy *wiphy)
__count == 0; }));
mutex_lock(&rdev->devlist_mtx);
- BUG_ON(!list_empty(&rdev->netdev_list));
+ BUG_ON(!list_empty(&rdev->wdev_list));
mutex_unlock(&rdev->devlist_mtx);
/*
@@ -692,9 +652,11 @@ void wiphy_unregister(struct wiphy *wiphy)
/* nothing */
cfg80211_unlock_rdev(rdev);
- /* If this device got a regulatory hint tell core its
- * free to listen now to a new shiny device regulatory hint */
- reg_device_remove(wiphy);
+ /*
+	 * If this device got a regulatory hint, tell the core it's
+	 * free to listen now to a new shiny device regulatory hint
+ */
+ wiphy_regulatory_deregister(wiphy);
cfg80211_rdev_list_generation++;
device_del(&rdev->wiphy.dev);
@@ -748,7 +710,7 @@ static void wdev_cleanup_work(struct work_struct *work)
cfg80211_lock_rdev(rdev);
- if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) {
+ if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
rdev->scan_req->aborted = true;
___cfg80211_scan_done(rdev, true);
}
@@ -776,6 +738,16 @@ static struct device_type wiphy_type = {
.name = "wlan",
};
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, int num)
+{
+ ASSERT_RTNL();
+
+ rdev->num_running_ifaces += num;
+ if (iftype == NL80211_IFTYPE_MONITOR)
+ rdev->num_running_monitor_ifaces += num;
+}
+
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
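
cfg80211_update_iface_num() keeps two counters in step so that "monitors only" reduces to a comparison (see cfg80211_has_monitors_only() in core.h below). The pairing the notifier establishes, reduced to a sketch:

/* Sketch: every NETDEV_UP increment is matched by a NETDEV_DOWN
 * decrement, so the two counters stay consistent under RTNL. */
static void account_iface(struct cfg80211_registered_device *rdev,
			  struct wireless_dev *wdev, unsigned long state)
{
	switch (state) {
	case NETDEV_UP:
		cfg80211_update_iface_num(rdev, wdev->iftype, 1);
		break;
	case NETDEV_DOWN:
		cfg80211_update_iface_num(rdev, wdev->iftype, -1);
		break;
	}
}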
@@ -810,7 +782,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
spin_lock_init(&wdev->mgmt_registrations_lock);
mutex_lock(&rdev->devlist_mtx);
- list_add_rcu(&wdev->list, &rdev->netdev_list);
+ wdev->identifier = ++rdev->wdev_id;
+ list_add_rcu(&wdev->list, &rdev->wdev_list);
rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -869,12 +842,16 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
case NL80211_IFTYPE_MESH_POINT:
cfg80211_leave_mesh(rdev, dev);
break;
+ case NL80211_IFTYPE_AP:
+ cfg80211_stop_ap(rdev, dev);
+ break;
default:
break;
}
wdev->beacon_interval = 0;
break;
case NETDEV_DOWN:
+ cfg80211_update_iface_num(rdev, wdev->iftype, -1);
dev_hold(dev);
queue_work(cfg80211_wq, &wdev->cleanup_work);
break;
@@ -891,6 +868,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
mutex_unlock(&rdev->devlist_mtx);
dev_put(dev);
}
+ cfg80211_update_iface_num(rdev, wdev->iftype, 1);
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
@@ -980,7 +958,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
return notifier_from_errno(-EOPNOTSUPP);
if (rfkill_blocked(rdev->rfkill))
return notifier_from_errno(-ERFKILL);
+ mutex_lock(&rdev->devlist_mtx);
ret = cfg80211_can_add_interface(rdev, wdev->iftype);
+ mutex_unlock(&rdev->devlist_mtx);
if (ret)
return notifier_from_errno(ret);
break;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8523f387867..5206c6844fd 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <linux/rfkill.h>
#include <linux/workqueue.h>
+#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
#include "reg.h"
@@ -46,16 +47,20 @@ struct cfg80211_registered_device {
/* wiphy index, internal only */
int wiphy_idx;
- /* associate netdev list */
+ /* associated wireless interfaces */
struct mutex devlist_mtx;
/* protected by devlist_mtx or RCU */
- struct list_head netdev_list;
- int devlist_generation;
+ struct list_head wdev_list;
+ int devlist_generation, wdev_id;
int opencount; /* also protected by devlist_mtx */
wait_queue_head_t dev_wait;
u32 ap_beacons_nlpid;
+ /* protected by RTNL only */
+ int num_running_ifaces;
+ int num_running_monitor_ifaces;
+
/* BSSes/scanning */
spinlock_t bss_lock;
struct list_head bss_list;
@@ -159,32 +164,6 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
int get_wiphy_idx(struct wiphy *wiphy);
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info);
-
-/*
- * This function returns a pointer to the driver
- * that the genl_info item that is passed refers to.
- * If successful, it returns non-NULL and also locks
- * the driver's mutex!
- *
- * This means that you need to call cfg80211_unlock_rdev()
- * before being allowed to acquire &cfg80211_mutex!
- *
- * This is necessary because we need to lock the global
- * mutex to get an item off the list safely, and then
- * we lock the rdev mutex so it doesn't go away under us.
- *
- * We don't want to keep cfg80211_mutex locked
- * for all the time in order to allow requests on
- * other interfaces to go through at the same time.
- *
- * The result of this can be a PTR_ERR and hence must
- * be checked with IS_ERR() for errors.
- */
-extern struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info);
-
/* requires cfg80211_rdev_mutex to be held! */
struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
@@ -223,6 +202,14 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
+static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
+{
+ ASSERT_RTNL();
+
+ return rdev->num_running_ifaces == rdev->num_running_monitor_ifaces &&
+ rdev->num_running_ifaces > 0;
+}
+
enum cfg80211_event_type {
EVENT_CONNECT_RESULT,
EVENT_ROAMED,
@@ -267,6 +254,12 @@ struct cfg80211_cached_keys {
int def, defmgmt;
};
+enum cfg80211_chan_mode {
+ CHAN_MODE_UNDEFINED,
+ CHAN_MODE_SHARED,
+ CHAN_MODE_EXCLUSIVE,
+};
+
/* free object */
extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
@@ -303,14 +296,21 @@ extern const struct mesh_config default_mesh_config;
extern const struct mesh_setup default_mesh_setup;
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev);
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type);
+
+/* AP */
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
/* MLME */
int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
@@ -369,7 +369,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
@@ -427,9 +427,20 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
-int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype);
+int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode);
+
+static inline int
+cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype)
+{
+ return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
+ CHAN_MODE_UNDEFINED);
+}
static inline int
cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
@@ -438,12 +449,26 @@ cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
return cfg80211_can_change_interface(rdev, NULL, iftype);
}
+static inline int
+cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode)
+{
+ return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+ chan, chanmode);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+ struct ieee80211_channel **chan,
+ enum cfg80211_chan_mode *chanmode);
+
struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type);
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, int freq,
- enum nl80211_channel_type channel_type);
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type chantype);
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
@@ -452,6 +477,11 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
u32 beacon_int);
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, int num);
+
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 89baa332841..ca5672f6ee2 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -113,10 +113,21 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
kfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
+ wdev->ibss_fixed = params->channel_fixed;
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.channel = params->channel;
#endif
wdev->sme_state = CFG80211_SME_CONNECTING;
+
+ err = cfg80211_can_use_chan(rdev, wdev, params->channel,
+ params->channel_fixed
+ ? CHAN_MODE_SHARED
+ : CHAN_MODE_EXCLUSIVE);
+ if (err) {
+ wdev->connect_keys = NULL;
+ return err;
+ }
+
err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
if (err) {
wdev->connect_keys = NULL;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 2749cb86b46..c384e77ff77 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -14,6 +14,9 @@
#define MESH_PATH_TIMEOUT 5000
#define MESH_RANN_INTERVAL 5000
+#define MESH_PATH_TO_ROOT_TIMEOUT 6000
+#define MESH_ROOT_INTERVAL 5000
+#define MESH_ROOT_CONFIRMATION_INTERVAL 2000
/*
* Minimum interval between two consecutive PREQs originated by the same
@@ -62,9 +65,15 @@ const struct mesh_config default_mesh_config = {
.dot11MeshForwarding = true,
.rssi_threshold = MESH_RSSI_THRESHOLD,
.ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
+ .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT,
+ .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL,
+ .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
};
const struct mesh_setup default_mesh_setup = {
+ /* cfg80211_join_mesh() will pick a channel if needed */
+ .channel = NULL,
+ .channel_type = NL80211_CHAN_NO_HT,
.sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -75,7 +84,7 @@ const struct mesh_setup default_mesh_setup = {
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -101,10 +110,61 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
+ if (!setup->channel) {
+ /* if no channel explicitly given, use preset channel */
+ setup->channel = wdev->preset_chan;
+ setup->channel_type = wdev->preset_chantype;
+ }
+
+ if (!setup->channel) {
+ /* if we don't have that either, use the first usable channel */
+ enum ieee80211_band band;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ int i;
+
+ sband = rdev->wiphy.bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & (IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN |
+ IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_RADAR))
+ continue;
+ setup->channel = chan;
+ break;
+ }
+
+ if (setup->channel)
+ break;
+ }
+
+ /* no usable channel ... */
+ if (!setup->channel)
+ return -EINVAL;
+
+ setup->channel_type = NL80211_CHAN_NO_HT;
+ }
+
+ if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel,
+ setup->channel_type))
+ return -EINVAL;
+
+ err = cfg80211_can_use_chan(rdev, wdev, setup->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ return err;
+
err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
if (!err) {
memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
wdev->mesh_id_len = setup->mesh_id_len;
+ wdev->channel = setup->channel;
}
return err;
@@ -112,19 +172,71 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = __cfg80211_join_mesh(rdev, dev, setup, conf);
wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_channel *channel;
+ int err;
+
+ channel = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+ channel,
+ channel_type)) {
+ return -EINVAL;
+ }
+
+ /*
+	 * Workaround for libertas (only!): it puts the interface
+ * into mesh mode but doesn't implement join_mesh. Instead,
+ * it is configured via sysfs and then joins the mesh when
+ * you set the channel. Note that the libertas mesh isn't
+ * compatible with 802.11 mesh.
+ */
+ if (rdev->ops->libertas_set_mesh_channel) {
+ if (channel_type != NL80211_CHAN_NO_HT)
+ return -EINVAL;
+
+ if (!netif_running(wdev->netdev))
+ return -ENETDOWN;
+
+ err = cfg80211_can_use_chan(rdev, wdev, channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ return err;
+
+ err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy,
+ wdev->netdev,
+ channel);
+ if (!err)
+ wdev->channel = channel;
+
+ return err;
+ }
+
+ if (wdev->mesh_id_len)
+ return -EBUSY;
+
+ wdev->preset_chan = channel;
+ wdev->preset_chantype = channel_type;
+ return 0;
+}
+
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
{
@@ -156,8 +268,11 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
return -ENOTCONN;
err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
- if (!err)
+ if (!err) {
wdev->mesh_id_len = 0;
+ wdev->channel = NULL;
+ }
+
return err;
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index eb90988bbd3..1cdb1d5e6b0 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -302,8 +302,14 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
if (!req.bss)
return -ENOENT;
+ err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ goto out;
+
err = rdev->ops->auth(&rdev->wiphy, dev, &req);
+out:
cfg80211_put_bss(req.bss);
return err;
}
@@ -317,11 +323,13 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
{
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
key, key_len, key_idx);
wdev_unlock(dev->ieee80211_ptr);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
@@ -397,8 +405,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
return -ENOENT;
}
+ err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ goto out;
+
err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
+out:
if (err) {
if (was_connected)
wdev->sme_state = CFG80211_SME_CONNECTED;
@@ -421,11 +435,13 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
ssid, ssid_len, ie, ie_len, use_mfp, crypt,
assoc_flags, ht_capa, ht_capa_mask);
wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
@@ -551,29 +567,28 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
}
}
-void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
+ nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type,
duration, gfp);
}
EXPORT_SYMBOL(cfg80211_ready_on_channel);
-void cfg80211_remain_on_channel_expired(struct net_device *dev,
- u64 cookie,
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
gfp_t gfp)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
+ nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan,
channel_type, gfp);
}
EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
@@ -662,8 +677,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
list_add(&nreg->list, &wdev->mgmt_registrations);
if (rdev->ops->mgmt_frame_register)
- rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
- frame_type, true);
+ rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true);
out:
spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -686,7 +700,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
if (rdev->ops->mgmt_frame_register) {
u16 frame_type = le16_to_cpu(reg->frame_type);
- rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
+ rdev->ops->mgmt_frame_register(wiphy, wdev,
frame_type, false);
}
@@ -715,14 +729,14 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
}
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct net_device *dev = wdev->netdev;
const struct ieee80211_mgmt *mgmt;
u16 stype;
@@ -809,16 +823,15 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return -EINVAL;
/* Transmit the Action frame as requested by user space */
- return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
+ return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
channel_type, channel_type_valid,
wait, buf, len, no_cck, dont_wait_for_ack,
cookie);
}
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
const u8 *buf, size_t len, gfp_t gfp)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct cfg80211_mgmt_registration *reg;
@@ -855,7 +868,7 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
/* found match! */
/* Indicate the received Action frame to user space */
- if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
+ if (nl80211_send_mgmt(rdev, wdev, reg->nlpid,
freq, sig_mbm,
buf, len, gfp))
continue;
@@ -870,15 +883,14 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
}
EXPORT_SYMBOL(cfg80211_rx_mgmt);
-void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack, gfp_t gfp)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
/* Indicate TX status of the Action frame to user space */
- nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+ nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
}
EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
@@ -907,6 +919,19 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
}
EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
+void cfg80211_cqm_txe_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets,
+ u32 rate, u32 intvl, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_cqm_txe_notify(rdev, dev, peer, num_packets,
+ rate, intvl, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
+
void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp)
{
@@ -948,7 +973,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
goto out;
wdev->channel = chan;
-
nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
out:
wdev_unlock(wdev);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 206465dc0ca..97026f3b215 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -46,28 +46,175 @@ static struct genl_family nl80211_fam = {
.post_doit = nl80211_post_doit,
};
-/* internal helper: get rdev and dev */
-static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
- struct cfg80211_registered_device **rdev,
- struct net_device **dev)
+/* returns ERR_PTR values */
+static struct wireless_dev *
+__cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
{
- int ifindex;
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *result = NULL;
+ bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
+ bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
+ u64 wdev_id;
+ int wiphy_idx = -1;
+ int ifidx = -1;
- if (!attrs[NL80211_ATTR_IFINDEX])
- return -EINVAL;
+ assert_cfg80211_lock();
- ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
- *dev = dev_get_by_index(netns, ifindex);
- if (!*dev)
- return -ENODEV;
+ if (!have_ifidx && !have_wdev_id)
+ return ERR_PTR(-EINVAL);
- *rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
- if (IS_ERR(*rdev)) {
- dev_put(*dev);
- return PTR_ERR(*rdev);
+ if (have_ifidx)
+ ifidx = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+ if (have_wdev_id) {
+ wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+ wiphy_idx = wdev_id >> 32;
}
- return 0;
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ struct wireless_dev *wdev;
+
+ if (wiphy_net(&rdev->wiphy) != netns)
+ continue;
+
+ if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
+ continue;
+
+ mutex_lock(&rdev->devlist_mtx);
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (have_ifidx && wdev->netdev &&
+ wdev->netdev->ifindex == ifidx) {
+ result = wdev;
+ break;
+ }
+ if (have_wdev_id && wdev->identifier == (u32)wdev_id) {
+ result = wdev;
+ break;
+ }
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
+ if (result)
+ break;
+ }
+
+ if (result)
+ return result;
+ return ERR_PTR(-ENODEV);
+}
+
+static struct cfg80211_registered_device *
+__cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
+{
+ struct cfg80211_registered_device *rdev = NULL, *tmp;
+ struct net_device *netdev;
+
+ assert_cfg80211_lock();
+
+ if (!attrs[NL80211_ATTR_WIPHY] &&
+ !attrs[NL80211_ATTR_IFINDEX] &&
+ !attrs[NL80211_ATTR_WDEV])
+ return ERR_PTR(-EINVAL);
+
+ if (attrs[NL80211_ATTR_WIPHY])
+ rdev = cfg80211_rdev_by_wiphy_idx(
+ nla_get_u32(attrs[NL80211_ATTR_WIPHY]));
+
+ if (attrs[NL80211_ATTR_WDEV]) {
+ u64 wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+ struct wireless_dev *wdev;
+ bool found = false;
+
+ tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
+ if (tmp) {
+ /* make sure wdev exists */
+ mutex_lock(&tmp->devlist_mtx);
+ list_for_each_entry(wdev, &tmp->wdev_list, list) {
+ if (wdev->identifier != (u32)wdev_id)
+ continue;
+ found = true;
+ break;
+ }
+ mutex_unlock(&tmp->devlist_mtx);
+
+ if (!found)
+ tmp = NULL;
+
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+ rdev = tmp;
+ }
+ }
+
+ if (attrs[NL80211_ATTR_IFINDEX]) {
+ int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+ netdev = dev_get_by_index(netns, ifindex);
+ if (netdev) {
+ if (netdev->ieee80211_ptr)
+ tmp = wiphy_to_dev(
+ netdev->ieee80211_ptr->wiphy);
+ else
+ tmp = NULL;
+
+ dev_put(netdev);
+
+ /* not wireless device -- return error */
+ if (!tmp)
+ return ERR_PTR(-EINVAL);
+
+ /* mismatch -- return error */
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+
+ rdev = tmp;
+ }
+ }
+
+ if (!rdev)
+ return ERR_PTR(-ENODEV);
+
+ if (netns != wiphy_net(&rdev->wiphy))
+ return ERR_PTR(-ENODEV);
+
+ return rdev;
+}
+
+/*
+ * This function returns a pointer to the driver
+ * that the genl_info item that is passed refers to.
+ * If successful, it returns non-NULL and also locks
+ * the driver's mutex!
+ *
+ * This means that you need to call cfg80211_unlock_rdev()
+ * before being allowed to acquire &cfg80211_mutex!
+ *
+ * This is necessary because we need to lock the global
+ * mutex to get an item off the list safely, and then
+ * we lock the rdev mutex so it doesn't go away under us.
+ *
+ * We don't want to keep cfg80211_mutex locked the whole
+ * time, in order to allow requests on other interfaces
+ * to go through at the same time.
+ *
+ * The result of this can be a PTR_ERR and hence must
+ * be checked with IS_ERR() for errors.
+ */
+static struct cfg80211_registered_device *
+cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+
+ mutex_lock(&cfg80211_mutex);
+ rdev = __cfg80211_rdev_from_attrs(netns, info->attrs);
+
+ /* if it is not an error we grab the lock on
+ * it to assure it won't be going away while
+ * we operate on it */
+ if (!IS_ERR(rdev))
+ mutex_lock(&rdev->mtx);
+
+ mutex_unlock(&cfg80211_mutex);
+
+ return rdev;
}
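
As the comment above says, the return value may be a PTR_ERR() and the rdev mutex is held on success. The expected calling convention, sketched:

/* Sketch of a caller honoring the locking contract above. */
static int with_rdev(struct net *netns, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev;

	rdev = cfg80211_get_dev_from_info(netns, info);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	/* ... operate while rdev->mtx is held ... */

	cfg80211_unlock_rdev(rdev);	/* drops rdev->mtx */
	return 0;
}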
/* policy for the attributes */
@@ -115,7 +262,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_MESH_ID_LEN },
+ .len = IEEE80211_MAX_MESH_ID_LEN },
[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
@@ -206,6 +353,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
[NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
+ [NL80211_ATTR_WDEV] = { .type = NLA_U64 },
+ [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -250,8 +399,9 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
- [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+ [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
+ [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
};
/* ifidx get helper */
@@ -832,6 +982,15 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.bands[band]->ht_cap.ampdu_density)))
goto nla_put_failure;
+ /* add VHT info */
+ if (dev->wiphy.bands[band]->vht_cap.vht_supported &&
+ (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
+ sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs),
+ &dev->wiphy.bands[band]->vht_cap.vht_mcs) ||
+ nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
+ dev->wiphy.bands[band]->vht_cap.cap)))
+ goto nla_put_failure;
+
/* add frequencies */
nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
if (!nl_freqs)
@@ -921,7 +1080,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
goto nla_put_failure;
}
- CMD(set_channel, SET_CHANNEL);
+ if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
+ dev->ops->join_mesh) {
+ i++;
+ if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
+ goto nla_put_failure;
+ }
CMD(set_wds_peer, SET_WDS_PEER);
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
CMD(tdls_mgmt, TDLS_MGMT);
@@ -1018,6 +1182,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_ifs);
}
+#ifdef CONFIG_PM
if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) {
struct nlattr *nl_wowlan;
@@ -1058,6 +1223,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_wowlan);
}
+#endif
if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
dev->wiphy.software_iftypes))
@@ -1162,18 +1328,22 @@ static int parse_txq_params(struct nlattr *tb[],
static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
{
/*
- * You can only set the channel explicitly for AP, mesh
- * and WDS type interfaces; all others have their channel
- * managed via their respective "establish a connection"
- * command (connect, join, ...)
+ * You can only set the channel explicitly for WDS interfaces,
+ * all others have their channel managed via their respective
+ * "establish a connection" command (connect, join, ...)
+ *
+ * For AP/GO and mesh mode, the channel can be set with the
+ * channel userspace API, but is only stored and passed to the
+ * low-level driver when the AP starts or the mesh is joined.
+ * This is for backward compatibility, userspace can also give
+ * the channel in the start-ap or join-mesh commands instead.
*
* Monitors are special as they are normally slaved to
- * whatever else is going on, so they behave as though
- * you tried setting the wiphy channel itself.
+ * whatever else is going on, so they have their own special
+ * operation to set the monitor channel if possible.
*/
return !wdev ||
wdev->iftype == NL80211_IFTYPE_AP ||
- wdev->iftype == NL80211_IFTYPE_WDS ||
wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
wdev->iftype == NL80211_IFTYPE_MONITOR ||
wdev->iftype == NL80211_IFTYPE_P2P_GO;
@@ -1204,9 +1374,14 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)
{
+ struct ieee80211_channel *channel;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
u32 freq;
int result;
+ enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+
+ if (wdev)
+ iftype = wdev->iftype;
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
@@ -1221,12 +1396,32 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
mutex_lock(&rdev->devlist_mtx);
- if (wdev) {
- wdev_lock(wdev);
- result = cfg80211_set_freq(rdev, wdev, freq, channel_type);
- wdev_unlock(wdev);
- } else {
- result = cfg80211_set_freq(rdev, NULL, freq, channel_type);
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->beacon_interval) {
+ result = -EBUSY;
+ break;
+ }
+ channel = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+ channel,
+ channel_type)) {
+ result = -EINVAL;
+ break;
+ }
+ wdev->preset_chan = channel;
+ wdev->preset_chantype = channel_type;
+ result = 0;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ result = cfg80211_set_monitor_channel(rdev, freq, channel_type);
+ break;
+ default:
+ result = -EINVAL;
}
mutex_unlock(&rdev->devlist_mtx);
@@ -1300,7 +1495,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (!netdev) {
- rdev = __cfg80211_rdev_from_info(info);
+ rdev = __cfg80211_rdev_from_attrs(genl_info_net(info),
+ info->attrs);
if (IS_ERR(rdev)) {
mutex_unlock(&cfg80211_mutex);
return PTR_ERR(rdev);
@@ -1310,8 +1506,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
result = 0;
mutex_lock(&rdev->mtx);
- } else if (netif_running(netdev) &&
- nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+ } else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
wdev = netdev->ieee80211_ptr;
else
wdev = NULL;
@@ -1534,22 +1729,32 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
return result;
}
+static inline u64 wdev_id(struct wireless_dev *wdev)
+{
+ return (u64)wdev->identifier |
+ ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+}
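
wdev_id() packs the wiphy index into the upper 32 bits and the per-device identifier into the lower 32; __cfg80211_wdev_from_attrs() earlier in this patch unpacks NL80211_ATTR_WDEV the same way. The decomposition, spelled out:

/* Sketch: split a 64-bit NL80211_ATTR_WDEV value back into its
 * halves, mirroring the parsers above. */
static void split_wdev_id(u64 id, int *wiphy_idx, u32 *identifier)
{
	*wiphy_idx = id >> 32;		/* upper half: wiphy index */
	*identifier = (u32)id;		/* lower half: wdev identifier */
}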
static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct wireless_dev *wdev)
{
+ struct net_device *dev = wdev->netdev;
void *hdr;
hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE);
if (!hdr)
return -1;
- if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
- nla_put_u32(msg, NL80211_ATTR_IFTYPE,
- dev->ieee80211_ptr->iftype) ||
+ if (dev &&
+ (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg80211_rdev_list_generation << 2)))
@@ -1559,12 +1764,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type;
- chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
+ chan = rdev->ops->get_channel(&rdev->wiphy, wdev,
+ &channel_type);
if (chan &&
(nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
- chan->center_freq) ||
+ chan->center_freq) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
- channel_type)))
+ channel_type)))
goto nla_put_failure;
}
@@ -1595,14 +1801,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
if_idx = 0;
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
}
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- rdev, wdev->netdev) < 0) {
+ rdev, wdev) < 0) {
mutex_unlock(&rdev->devlist_mtx);
goto out;
}
@@ -1625,14 +1831,14 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct cfg80211_registered_device *dev = info->user_ptr[0];
- struct net_device *netdev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
- dev, netdev) < 0) {
+ dev, wdev) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -1772,7 +1978,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct vif_params params;
- struct net_device *dev;
+ struct wireless_dev *wdev;
+ struct sk_buff *msg;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
u32 flags;
@@ -1799,19 +2006,23 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
- dev = rdev->ops->add_virtual_intf(&rdev->wiphy,
+ wdev = rdev->ops->add_virtual_intf(&rdev->wiphy,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (IS_ERR(wdev)) {
+ nlmsg_free(msg);
+ return PTR_ERR(wdev);
+ }
if (type == NL80211_IFTYPE_MESH_POINT &&
info->attrs[NL80211_ATTR_MESH_ID]) {
- struct wireless_dev *wdev = dev->ieee80211_ptr;
-
wdev_lock(wdev);
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
IEEE80211_MAX_MESH_ID_LEN);
@@ -1822,18 +2033,34 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev_unlock(wdev);
}
- return 0;
+ if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+ rdev, wdev) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
}
static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
if (!rdev->ops->del_virtual_intf)
return -EOPNOTSUPP;
- return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
+ /*
+ * If we remove a wireless device without a netdev then clear
+ * user_ptr[1] so that nl80211_post_doit won't dereference it
+ * to check if it needs to do dev_put(). Otherwise it crashes
+ * since the wdev has been freed, unlike with a netdev where
+ * we need the dev_put() for the netdev to really be freed.
+ */
+ if (!wdev->netdev)
+ info->user_ptr[1] = NULL;
+
+ return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
}
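
Clearing user_ptr[1] protects the generic post-doit hook from touching a wdev that del_virtual_intf() may have freed. nl80211_post_doit itself is not part of this hunk, so the guard below is a hypothetical reconstruction of what the comment implies:

/* Hypothetical sketch, not from the patch: only drop the netdev
 * reference when user_ptr[1] still points at a live wdev. */
static void post_doit_sketch(struct genl_info *info)
{
	struct wireless_dev *wdev = info->user_ptr[1];

	if (wdev && wdev->netdev)
		dev_put(wdev->netdev);
}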
static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -2213,6 +2440,33 @@ static int nl80211_parse_beacon(struct genl_info *info,
return 0;
}
+static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
+ struct cfg80211_ap_settings *params)
+{
+ struct wireless_dev *wdev;
+ bool ret = false;
+
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ continue;
+
+ if (!wdev->preset_chan)
+ continue;
+
+ params->channel = wdev->preset_chan;
+ params->channel_type = wdev->preset_chantype;
+ ret = true;
+ break;
+ }
+
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return ret;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2299,9 +2553,44 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
}
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
+
+ params.channel = rdev_freq_to_chan(rdev,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+ channel_type);
+ if (!params.channel)
+ return -EINVAL;
+ params.channel_type = channel_type;
+ } else if (wdev->preset_chan) {
+ params.channel = wdev->preset_chan;
+ params.channel_type = wdev->preset_chantype;
+ } else if (!nl80211_get_ap_channel(rdev, &params))
+ return -EINVAL;
+
+ if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel,
+ params.channel_type))
+ return -EINVAL;
+
+ mutex_lock(&rdev->devlist_mtx);
+ err = cfg80211_can_use_chan(rdev, wdev, params.channel,
+ CHAN_MODE_SHARED);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ if (err)
+ return err;
+
err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
- if (!err)
+ if (!err) {
+ wdev->preset_chan = params.channel;
+ wdev->preset_chantype = params.channel_type;
wdev->beacon_interval = params.beacon_interval;
+ wdev->channel = params.channel;
+ }
return err;
}
@@ -2334,23 +2623,8 @@ static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
- if (!rdev->ops->stop_ap)
- return -EOPNOTSUPP;
-
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
- return -EOPNOTSUPP;
-
- if (!wdev->beacon_interval)
- return -ENOENT;
-
- err = rdev->ops->stop_ap(&rdev->wiphy, dev);
- if (!err)
- wdev->beacon_interval = 0;
- return err;
+ return cfg80211_stop_ap(rdev, dev);
}
static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -2442,7 +2716,8 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
int attr)
{
struct nlattr *rate;
- u16 bitrate;
+ u32 bitrate;
+ u16 bitrate_compat;
rate = nla_nest_start(msg, attr);
if (!rate)
@@ -2450,8 +2725,12 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
bitrate = cfg80211_calculate_bitrate(info);
+ /* report 16-bit bitrate only if we can */
+ bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
if ((bitrate > 0 &&
- nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+ nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) ||
+ (bitrate_compat > 0 &&
+ nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) ||
((info->flags & RATE_INFO_FLAGS_MCS) &&
nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
@@ -3304,6 +3583,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
int r;
char *data = NULL;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
/*
* You should only get this when cfg80211 hasn't yet initialized
@@ -3323,7 +3603,21 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
- r = regulatory_hint_user(data);
+ if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
+ user_reg_hint_type =
+ nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
+ else
+ user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+ switch (user_reg_hint_type) {
+ case NL80211_USER_REG_HINT_USER:
+ case NL80211_USER_REG_HINT_CELL_BASE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ r = regulatory_hint_user(data, user_reg_hint_type);
return r;
}
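
Only the two known hint types are accepted, and omitting the attribute preserves the old behaviour. A hedged userspace sketch of issuing a cell-base hint, with the message setup (sk, msg, nl80211 family id) assumed as in the START_AP sketch above:

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_REQ_SET_REG, 0);
	nla_put_string(msg, NL80211_ATTR_REG_ALPHA2, "US");
	nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
		    NL80211_USER_REG_HINT_CELL_BASE);
	/* without the type attribute the kernel defaults to
	 * NL80211_USER_REG_HINT_USER */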
@@ -3413,7 +3707,13 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
cur_params.rssi_threshold) ||
nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
- cur_params.ht_opmode))
+ cur_params.ht_opmode) ||
+ nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ cur_params.dot11MeshHWMPactivePathToRootTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ cur_params.dot11MeshHWMProotInterval) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+ cur_params.dot11MeshHWMPconfirmationInterval))
goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
@@ -3436,7 +3736,6 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
[NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
-
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
@@ -3448,8 +3747,11 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
- [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
- [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16},
+ [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
};
static const struct nla_policy
@@ -3459,7 +3761,7 @@ static const struct nla_policy
[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
[NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ .len = IEEE80211_MAX_DATA_LEN },
[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
};
@@ -3492,63 +3794,82 @@ do {\
/* Fill in the params struct */
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
- mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_RETRY_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
- mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
- mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
- mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16);
+ mask, NL80211_MESHCONF_MAX_PEER_LINKS,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
- mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
+ mask, NL80211_MESHCONF_MAX_RETRIES,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
- mask, NL80211_MESHCONF_TTL, nla_get_u8);
+ mask, NL80211_MESHCONF_TTL, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
- mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
+ mask, NL80211_MESHCONF_ELEMENT_TTL,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
- mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
- mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
- nla_get_u32);
+ mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask,
+ NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
- mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
- nla_get_u8);
+ mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
- mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32);
+ mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
- mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
- nla_get_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
- mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
- nla_get_u32);
+ mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask,
+ NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
- mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
- nla_get_u16);
+ mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
- mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
- nla_get_u16);
+ mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPnetDiameterTraversalTime,
- mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
- nla_get_u16);
+ dot11MeshHWMPnetDiameterTraversalTime, mask,
+ NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
+ NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPRootMode, mask,
- NL80211_MESHCONF_HWMP_ROOTMODE,
- nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPRannInterval, mask,
- NL80211_MESHCONF_HWMP_RANN_INTERVAL,
- nla_get_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshGateAnnouncementProtocol, mask,
- NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
- nla_get_u8);
+ dot11MeshGateAnnouncementProtocol, mask,
+ NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
- mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
+ mask, NL80211_MESHCONF_FORWARDING,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
- mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
+ mask, NL80211_MESHCONF_RSSI_THRESHOLD,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
- mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
+ mask, NL80211_MESHCONF_HT_OPMODE,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
+ mask,
+ NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval,
+ mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshHWMPconfirmationInterval, mask,
+ NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+ nla_get_u16);
if (mask_out)
*mask_out = mask;
@@ -3666,6 +3987,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
cfg80211_regdomain->dfs_region)))
goto nla_put_failure;
+ if (reg_last_request_cell_base() &&
+ nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
+ NL80211_USER_REG_HINT_CELL_BASE))
+ goto nla_put_failure;
+
nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
if (!nl_reg_rules)
goto nla_put_failure;
@@ -3831,7 +4157,7 @@ static int validate_scan_freqs(struct nlattr *freqs)
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct cfg80211_scan_request *request;
struct nlattr *attr;
struct wiphy *wiphy;
@@ -3991,15 +4317,16 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
- request->dev = dev;
+ request->wdev = wdev;
request->wiphy = &rdev->wiphy;
rdev->scan_req = request;
- err = rdev->ops->scan(&rdev->wiphy, dev, request);
+ err = rdev->ops->scan(&rdev->wiphy, request);
if (!err) {
- nl80211_send_scan_start(rdev, dev);
- dev_hold(dev);
+ nl80211_send_scan_start(rdev, wdev);
+ if (wdev->netdev)
+ dev_hold(wdev->netdev);
} else {
out_free:
rdev->scan_req = NULL;
@@ -4185,12 +4512,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
- struct nlattr *ssid;
+ struct nlattr *ssid, *rssi;
nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
nla_data(attr), nla_len(attr),
nl80211_match_policy);
- ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+ ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
if (ssid) {
if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -4201,6 +4528,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->match_sets[i].ssid.ssid_len =
nla_len(ssid);
}
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ request->rssi_thold = nla_get_u32(rssi);
+ else
+ request->rssi_thold =
+ NL80211_SCAN_RSSI_THOLD_OFF;
i++;
}
}
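
Each sched-scan match set can now carry its own RSSI floor; when the attribute is absent, the threshold is parked at NL80211_SCAN_RSSI_THOLD_OFF so filtering stays disabled. A sketch of building one match set from userspace; msg, ssid and ssid_len are assumed, and the nest index and -70 dBm value are illustrative:

	struct nlattr *match = nla_nest_start(msg, 1);

	nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID, ssid_len, ssid);
	/* a signed dBm value carried in a u32 attribute, since the
	 * kernel side reads it back with nla_get_u32() */
	nla_put_u32(msg, NL80211_SCHED_SCAN_MATCH_ATTR_RSSI, (uint32_t)-70);
	nla_nest_end(msg, match);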
@@ -5058,21 +5391,18 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
nl80211_policy);
if (err)
return err;
- if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
- phy_idx = nla_get_u32(
- nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
- } else {
- struct net_device *netdev;
- err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
- nl80211_fam.attrbuf,
- &rdev, &netdev);
- if (err)
- return err;
- dev_put(netdev);
- phy_idx = rdev->wiphy_idx;
- cfg80211_unlock_rdev(rdev);
+ mutex_lock(&cfg80211_mutex);
+ rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+ nl80211_fam.attrbuf);
+ if (IS_ERR(rdev)) {
+ mutex_unlock(&cfg80211_mutex);
+ return PTR_ERR(rdev);
}
+ phy_idx = rdev->wiphy_idx;
+ rdev = NULL;
+ mutex_unlock(&cfg80211_mutex);
+
if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
cb->args[1] =
(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -5474,7 +5804,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct ieee80211_channel *chan;
struct sk_buff *msg;
void *hdr;
@@ -5489,18 +5819,18 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+ if (!rdev->ops->remain_on_channel ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
+ return -EOPNOTSUPP;
+
/*
- * We should be on that channel for at least one jiffie,
- * and more than 5 seconds seems excessive.
+ * We should be on that channel for at least a minimum amount of
+ * time (10ms) but no longer than the driver supports.
*/
- if (!duration || !msecs_to_jiffies(duration) ||
+ if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
duration > rdev->wiphy.max_remain_on_channel_duration)
return -EINVAL;
- if (!rdev->ops->remain_on_channel ||
- !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
- return -EOPNOTSUPP;
-
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
@@ -5522,7 +5852,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
goto free_msg;
}
- err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+ err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
channel_type, duration, &cookie);
if (err)
@@ -5546,7 +5876,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u64 cookie;
if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5557,7 +5887,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
- return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+ return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
}
static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
@@ -5706,7 +6036,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
@@ -5715,21 +6045,24 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_FRAME_TYPE])
frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
/* not much point in registering if we can't reply */
if (!rdev->ops->mgmt_tx)
return -EOPNOTSUPP;
- return cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid,
- frame_type,
+ return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type,
nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
}
@@ -5737,7 +6070,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
bool channel_type_valid = false;
@@ -5758,19 +6091,32 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->mgmt_tx)
return -EOPNOTSUPP;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
if (info->attrs[NL80211_ATTR_DURATION]) {
if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
return -EINVAL;
wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+
+ /*
+ * We should wait on the channel for at least a minimum amount
+ * of time (10ms) but no longer than the driver supports.
+ */
+ if (wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
+ wait > rdev->wiphy.max_remain_on_channel_duration)
+ return -EINVAL;
+
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5805,7 +6151,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}
}
- err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
+ err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type,
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
@@ -5833,7 +6179,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u64 cookie;
if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5842,17 +6188,21 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
if (!rdev->ops->mgmt_tx_cancel_wait)
return -EOPNOTSUPP;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
- return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, dev, cookie);
+ return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
}
static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
@@ -5938,8 +6288,35 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 },
};
+static int nl80211_set_cqm_txe(struct genl_info *info,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev;
+ struct net_device *dev = info->user_ptr[1];
+
+ /* rate and intvl are u32, so negative-value checks would be dead code */
+ if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL)
+ return -EINVAL;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_cqm_txe_config)
+ return -EOPNOTSUPP;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev,
+ rate, pkts, intvl);
+}
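
The three TXE attributes form one trigger, roughly: notify when more than rate percent of at least pkts transmitted packets fail within an intvl-second window. A userspace sketch of the nested CQM attribute, with msg assumed and 30/100/5 as illustrative values; the nl80211_set_cqm hunk below only accepts the triple when all three attributes are present:

	struct nlattr *cqm = nla_nest_start(msg, NL80211_ATTR_CQM);

	nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, 30);   /* % failed   */
	nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, 100);  /* sample size */
	nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, 5);   /* seconds     */
	nla_nest_end(msg, cqm);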
+
static int nl80211_set_cqm_rssi(struct genl_info *info,
s32 threshold, u32 hysteresis)
{
@@ -5987,6 +6364,14 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
+ attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
+ attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
+ u32 rate, pkts, intvl;
+ rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
+ pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
+ intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
+ err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
} else
err = -EINVAL;
@@ -6032,6 +6417,24 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
+
+ setup.channel = rdev_freq_to_chan(rdev,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+ channel_type);
+ if (!setup.channel)
+ return -EINVAL;
+ setup.channel_type = channel_type;
+ } else {
+ /* cfg80211_join_mesh() will sort it out */
+ setup.channel = NULL;
+ }
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
@@ -6043,6 +6446,7 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
return cfg80211_leave_mesh(rdev, dev);
}
+#ifdef CONFIG_PM
static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6124,8 +6528,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
- struct cfg80211_wowlan no_triggers = {};
struct cfg80211_wowlan new_triggers = {};
+ struct cfg80211_wowlan *ntrig;
struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
int err, i;
bool prev_enabled = rdev->wowlan;
@@ -6133,8 +6537,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
return -EOPNOTSUPP;
- if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS])
- goto no_triggers;
+ if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
+ cfg80211_rdev_free_wowlan(rdev);
+ rdev->wowlan = NULL;
+ goto set_wakeup;
+ }
err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
@@ -6245,22 +6652,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
}
- if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) {
- struct cfg80211_wowlan *ntrig;
- ntrig = kmemdup(&new_triggers, sizeof(new_triggers),
- GFP_KERNEL);
- if (!ntrig) {
- err = -ENOMEM;
- goto error;
- }
- cfg80211_rdev_free_wowlan(rdev);
- rdev->wowlan = ntrig;
- } else {
- no_triggers:
- cfg80211_rdev_free_wowlan(rdev);
- rdev->wowlan = NULL;
+ ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
+ if (!ntrig) {
+ err = -ENOMEM;
+ goto error;
}
+ cfg80211_rdev_free_wowlan(rdev);
+ rdev->wowlan = ntrig;
+ set_wakeup:
if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
@@ -6271,6 +6671,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
kfree(new_triggers.patterns);
return err;
}
+#endif
static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
{
@@ -6415,44 +6816,75 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
#define NL80211_FLAG_CHECK_NETDEV_UP 0x08
#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
+#define NL80211_FLAG_NEED_WDEV 0x10
+/* If a netdev is associated, it must be UP */
+#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
+ NL80211_FLAG_CHECK_NETDEV_UP)
static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
struct net_device *dev;
- int err;
bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL;
if (rtnl)
rtnl_lock();
if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) {
- rdev = cfg80211_get_dev_from_info(info);
+ rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
if (IS_ERR(rdev)) {
if (rtnl)
rtnl_unlock();
return PTR_ERR(rdev);
}
info->user_ptr[0] = rdev;
- } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
- err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs,
- &rdev, &dev);
- if (err) {
+ } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV ||
+ ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ mutex_lock(&cfg80211_mutex);
+ wdev = __cfg80211_wdev_from_attrs(genl_info_net(info),
+ info->attrs);
+ if (IS_ERR(wdev)) {
+ mutex_unlock(&cfg80211_mutex);
if (rtnl)
rtnl_unlock();
- return err;
+ return PTR_ERR(wdev);
}
- if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
- !netif_running(dev)) {
- cfg80211_unlock_rdev(rdev);
- dev_put(dev);
- if (rtnl)
- rtnl_unlock();
- return -ENETDOWN;
+
+ dev = wdev->netdev;
+ rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
+ if (!dev) {
+ mutex_unlock(&cfg80211_mutex);
+ if (rtnl)
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ info->user_ptr[1] = dev;
+ } else {
+ info->user_ptr[1] = wdev;
}
+
+ if (dev) {
+ if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
+ !netif_running(dev)) {
+ mutex_unlock(&cfg80211_mutex);
+ if (rtnl)
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+
+ dev_hold(dev);
+ }
+
+ cfg80211_lock_rdev(rdev);
+
+ mutex_unlock(&cfg80211_mutex);
+
info->user_ptr[0] = rdev;
- info->user_ptr[1] = dev;
}
return 0;
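
With NL80211_FLAG_NEED_WDEV, the object resolved into user_ptr[1] is a wireless_dev that may have no netdev attached (e.g. a P2P device), so converted handlers must NULL-check wdev->netdev, as nl80211_trigger_scan does above. A minimal sketch of the new handler shape; the function name is hypothetical:

	static int nl80211_foo(struct sk_buff *skb, struct genl_info *info)
	{
		struct cfg80211_registered_device *rdev = info->user_ptr[0];
		struct wireless_dev *wdev = info->user_ptr[1];

		if (!wdev->netdev)
			return -EOPNOTSUPP;	/* netdev-only operation */

		return 0;	/* body elided in this sketch */
	}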
@@ -6463,8 +6895,16 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
{
if (info->user_ptr[0])
cfg80211_unlock_rdev(info->user_ptr[0]);
- if (info->user_ptr[1])
- dev_put(info->user_ptr[1]);
+ if (info->user_ptr[1]) {
+ if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (wdev->netdev)
+ dev_put(wdev->netdev);
+ } else {
+ dev_put(info->user_ptr[1]);
+ }
+ }
if (ops->internal_flags & NL80211_FLAG_NEED_RTNL)
rtnl_unlock();
}
@@ -6491,7 +6931,7 @@ static struct genl_ops nl80211_ops[] = {
.dumpit = nl80211_dump_interface,
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = NL80211_FLAG_NEED_WDEV,
},
{
.cmd = NL80211_CMD_SET_INTERFACE,
@@ -6514,7 +6954,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_del_interface,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6685,7 +7125,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_trigger_scan,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6826,7 +7266,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_remain_on_channel,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6834,7 +7274,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_cancel_remain_on_channel,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6850,7 +7290,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_register_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6858,7 +7298,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_tx_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6866,7 +7306,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_tx_mgmt_cancel_wait,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6925,6 +7365,7 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+#ifdef CONFIG_PM
{
.cmd = NL80211_CMD_GET_WOWLAN,
.doit = nl80211_get_wowlan,
@@ -6941,6 +7382,7 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_WIPHY |
NL80211_FLAG_NEED_RTNL,
},
+#endif
{
.cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
.doit = nl80211_set_rekey_data,
@@ -7075,7 +7517,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
static int nl80211_send_scan_msg(struct sk_buff *msg,
struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
+ struct wireless_dev *wdev,
u32 pid, u32 seq, int flags,
u32 cmd)
{
@@ -7086,7 +7528,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
return -1;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
goto nla_put_failure;
/* ignore errors and send incomplete event anyway */
@@ -7123,15 +7567,15 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
}
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_TRIGGER_SCAN) < 0) {
nlmsg_free(msg);
return;
@@ -7142,7 +7586,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
}
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
@@ -7150,7 +7594,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
return;
@@ -7161,7 +7605,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
}
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
@@ -7169,7 +7613,7 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_SCAN_ABORTED) < 0) {
nlmsg_free(msg);
return;
@@ -7203,7 +7647,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -7419,7 +7863,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7459,7 +7903,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7497,7 +7941,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -7692,7 +8136,7 @@ nla_put_failure:
static void nl80211_send_remain_on_chan_event(
int cmd, struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
@@ -7711,7 +8155,9 @@ static void nl80211_send_remain_on_chan_event(
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
@@ -7733,23 +8179,24 @@ static void nl80211_send_remain_on_chan_event(
}
void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
{
nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
- rdev, netdev, cookie, chan,
+ rdev, wdev, cookie, chan,
channel_type, duration, gfp);
}
void nl80211_send_remain_on_channel_cancel(
- struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
u64 cookie, struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type, gfp_t gfp)
{
nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
- rdev, netdev, cookie, chan,
+ rdev, wdev, cookie, chan,
channel_type, 0, gfp);
}
@@ -7759,7 +8206,7 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7780,7 +8227,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7863,10 +8310,11 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
}
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid,
+ struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp)
{
+ struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
void *hdr;
@@ -7881,7 +8329,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ netdev->ifindex)) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -7899,10 +8348,11 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
}
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack,
gfp_t gfp)
{
+ struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
void *hdr;
@@ -7917,7 +8367,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ netdev->ifindex)) ||
nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
(ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
@@ -7943,7 +8394,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
struct nlattr *pinfoattr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7986,7 +8437,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct nlattr *rekey_attr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8030,7 +8481,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
struct nlattr *attr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8074,7 +8525,7 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8101,6 +8552,56 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
}
void
+nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *pinfoattr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+ goto nla_put_failure;
+
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!pinfoattr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, pinfoattr);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
u32 num_packets, gfp_t gfp)
@@ -8109,7 +8610,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct nlattr *pinfoattr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8153,7 +8654,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
void *hdr;
int err;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8241,7 +8742,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
rcu_read_lock();
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
- list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
+ list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
cfg80211_mlme_unregister_socket(wdev, notify->pid);
if (rdev->ap_beacons_nlpid == notify->pid)
rdev->ap_beacons_nlpid = 0;
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 01a1122c3b3..9f2616fffb4 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -7,11 +7,11 @@ int nl80211_init(void);
void nl80211_exit(void);
void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
@@ -74,13 +74,13 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
gfp_t gfp);
void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp);
void nl80211_send_remain_on_channel_cancel(
- struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
u64 cookie, struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type, gfp_t gfp);
@@ -92,11 +92,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
gfp_t gfp);
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid,
+ struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp);
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack,
gfp_t gfp);
@@ -110,6 +110,11 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
u32 num_packets, gfp_t gfp);
+void
+nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
+
void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index baf5704740e..2303ee73b50 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -97,9 +97,16 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
* - cfg80211_world_regdom
* - cfg80211_regdom
* - last_request
+ * - reg_num_devs_support_basehint
*/
static DEFINE_MUTEX(reg_mutex);
+/*
+ * Number of devices registered with the core that support
+ * cellular base station regulatory hints
+ */
+static int reg_num_devs_support_basehint;
+
static inline void assert_reg_lock(void)
{
lockdep_assert_held(&reg_mutex);
@@ -129,7 +136,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
- .n_reg_rules = 5,
+ .n_reg_rules = 6,
.alpha2 = "00",
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
@@ -156,6 +163,9 @@ static const struct ieee80211_regdomain world_regdom = {
REG_RULE(5745-10, 5825+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
+
+ /* IEEE 802.11ad (60 GHz), channels 1..3 */
+ REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
}
};
@@ -908,6 +918,61 @@ static void handle_band(struct wiphy *wiphy,
handle_channel(wiphy, initiator, band, i);
}
+static bool reg_request_cell_base(struct regulatory_request *request)
+{
+ if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+ return false;
+ if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
+ return false;
+ return true;
+}
+
+bool reg_last_request_cell_base(void)
+{
+ bool val;
+ assert_cfg80211_lock();
+
+ mutex_lock(&reg_mutex);
+ val = reg_request_cell_base(last_request);
+ mutex_unlock(&reg_mutex);
+ return val;
+}
+
+#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+
+/* Core specific check */
+static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+{
+ if (!reg_num_devs_support_basehint)
+ return -EOPNOTSUPP;
+
+ if (reg_request_cell_base(last_request)) {
+ if (!regdom_changes(pending_request->alpha2))
+ return -EALREADY;
+ return 0;
+ }
+ return 0;
+}
+
+/* Device specific check */
+static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
+{
+ if (!(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS))
+ return true;
+ return false;
+}
+#else
+static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+{
+ return -EOPNOTSUPP;
+}
+
+static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
+{
+ return true;
+}
+#endif
+
static bool ignore_reg_update(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
@@ -941,6 +1006,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
return true;
}
+ if (reg_request_cell_base(last_request))
+ return reg_dev_ignore_cell_hint(wiphy);
+
return false;
}
@@ -1166,14 +1234,6 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
wiphy->reg_notifier(wiphy, last_request);
}
-void regulatory_update(struct wiphy *wiphy,
- enum nl80211_reg_initiator setby)
-{
- mutex_lock(&reg_mutex);
- wiphy_update_regulatory(wiphy, setby);
- mutex_unlock(&reg_mutex);
-}
-
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
struct cfg80211_registered_device *rdev;
@@ -1304,6 +1364,13 @@ static int ignore_request(struct wiphy *wiphy,
return 0;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (reg_request_cell_base(last_request)) {
+ /* Trust a Cell base station over the AP's country IE */
+ if (regdom_changes(pending_request->alpha2))
+ return -EOPNOTSUPP;
+ return -EALREADY;
+ }
+
last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (unlikely(!is_an_alpha2(pending_request->alpha2)))
@@ -1348,6 +1415,12 @@ static int ignore_request(struct wiphy *wiphy,
return REG_INTERSECT;
case NL80211_REGDOM_SET_BY_USER:
+ if (reg_request_cell_base(pending_request))
+ return reg_ignore_cell_hint(pending_request);
+
+ if (reg_request_cell_base(last_request))
+ return -EOPNOTSUPP;
+
if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
return REG_INTERSECT;
/*
@@ -1637,7 +1710,8 @@ static int regulatory_hint_core(const char *alpha2)
}
/* User hints */
-int regulatory_hint_user(const char *alpha2)
+int regulatory_hint_user(const char *alpha2,
+ enum nl80211_user_reg_hint_type user_reg_hint_type)
{
struct regulatory_request *request;
@@ -1651,6 +1725,7 @@ int regulatory_hint_user(const char *alpha2)
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_USER;
+ request->user_reg_hint_type = user_reg_hint_type;
queue_regulatory_request(request);
@@ -1903,7 +1978,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
- regulatory_hint_user(user_alpha2);
+ regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
if (list_empty(&tmp_reg_req_list))
return;
@@ -2078,9 +2153,16 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
else {
if (is_unknown_alpha2(rd->alpha2))
pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
- else
- pr_info("Regulatory domain changed to country: %c%c\n",
- rd->alpha2[0], rd->alpha2[1]);
+ else {
+ if (reg_request_cell_base(last_request))
+ pr_info("Regulatory domain changed "
+ "to country: %c%c by Cell Station\n",
+ rd->alpha2[0], rd->alpha2[1]);
+ else
+ pr_info("Regulatory domain changed "
+ "to country: %c%c\n",
+ rd->alpha2[0], rd->alpha2[1]);
+ }
}
print_dfs_region(rd->dfs_region);
print_rd_rules(rd);
@@ -2125,7 +2207,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
* checking if the alpha2 changes if CRDA was already called
*/
if (!regdom_changes(rd->alpha2))
- return -EINVAL;
+ return -EALREADY;
}
/*
@@ -2245,6 +2327,9 @@ int set_regdom(const struct ieee80211_regdomain *rd)
/* Note that this doesn't update the wiphys, this is done below */
r = __set_regdom(rd);
if (r) {
+ if (r == -EALREADY)
+ reg_set_request_processed();
+
kfree(rd);
mutex_unlock(&reg_mutex);
return r;
@@ -2287,8 +2372,22 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif /* CONFIG_HOTPLUG */
+void wiphy_regulatory_register(struct wiphy *wiphy)
+{
+ assert_cfg80211_lock();
+
+ mutex_lock(&reg_mutex);
+
+ if (!reg_dev_ignore_cell_hint(wiphy))
+ reg_num_devs_support_basehint++;
+
+ wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+
+ mutex_unlock(&reg_mutex);
+}
+
/* Caller must hold cfg80211_mutex */
-void reg_device_remove(struct wiphy *wiphy)
+void wiphy_regulatory_deregister(struct wiphy *wiphy)
{
struct wiphy *request_wiphy = NULL;
@@ -2296,6 +2395,9 @@ void reg_device_remove(struct wiphy *wiphy)
mutex_lock(&reg_mutex);
+ if (!reg_dev_ignore_cell_hint(wiphy))
+ reg_num_devs_support_basehint--;
+
kfree(wiphy->regd);
if (last_request)
@@ -2361,7 +2463,8 @@ int __init regulatory_init(void)
* as a user hint.
*/
if (!is_world_regdom(ieee80211_regdom))
- regulatory_hint_user(ieee80211_regdom);
+ regulatory_hint_user(ieee80211_regdom,
+ NL80211_USER_REG_HINT_USER);
return 0;
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index e2aaaf525a2..f023c8a31c6 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -22,17 +22,19 @@ bool is_world_regdom(const char *alpha2);
bool reg_is_valid_request(const char *alpha2);
bool reg_supported_dfs_region(u8 dfs_region);
-int regulatory_hint_user(const char *alpha2);
+int regulatory_hint_user(const char *alpha2,
+ enum nl80211_user_reg_hint_type user_reg_hint_type);
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
-void reg_device_remove(struct wiphy *wiphy);
+void wiphy_regulatory_register(struct wiphy *wiphy);
+void wiphy_regulatory_deregister(struct wiphy *wiphy);
int __init regulatory_init(void);
void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
-void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+bool reg_last_request_cell_base(void);
/**
* regulatory_hint_found_beacon - hints a beacon was found on a channel
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index af2b1caa37f..848523a2b22 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -23,7 +23,7 @@
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
{
struct cfg80211_scan_request *request;
- struct net_device *dev;
+ struct wireless_dev *wdev;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
@@ -35,29 +35,31 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
if (!request)
return;
- dev = request->dev;
+ wdev = request->wdev;
/*
* This must be before sending the other events!
* Otherwise, wpa_supplicant gets completely confused with
* wext events.
*/
- cfg80211_sme_scan_done(dev);
+ if (wdev->netdev)
+ cfg80211_sme_scan_done(wdev->netdev);
if (request->aborted)
- nl80211_send_scan_aborted(rdev, dev);
+ nl80211_send_scan_aborted(rdev, wdev);
else
- nl80211_send_scan_done(rdev, dev);
+ nl80211_send_scan_done(rdev, wdev);
#ifdef CONFIG_CFG80211_WEXT
- if (!request->aborted) {
+ if (wdev->netdev && !request->aborted) {
memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+ wireless_send_event(wdev->netdev, SIOCGIWSCAN, &wrqu, NULL);
}
#endif
- dev_put(dev);
+ if (wdev->netdev)
+ dev_put(wdev->netdev);
rdev->scan_req = NULL;
@@ -955,7 +957,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
}
creq->wiphy = wiphy;
- creq->dev = dev;
+ creq->wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
creq->ssids = (void *)&creq->channels[n_channels];
creq->n_channels = n_channels;
@@ -1024,12 +1026,12 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
rdev->scan_req = creq;
- err = rdev->ops->scan(wiphy, dev, creq);
+ err = rdev->ops->scan(wiphy, creq);
if (err) {
rdev->scan_req = NULL;
/* creq will be freed below */
} else {
- nl80211_send_scan_start(rdev, dev);
+ nl80211_send_scan_start(rdev, dev->ieee80211_ptr);
/* creq now owned by driver */
creq = NULL;
dev_hold(dev);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f7e937ff897..6f39cb80830 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -51,7 +51,7 @@ static bool cfg80211_is_all_idle(void)
*/
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
cfg80211_lock_rdev(rdev);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
if (wdev->sme_state != CFG80211_SME_IDLE)
is_all_idle = false;
@@ -136,15 +136,15 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
wdev->conn->params.ssid_len);
request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
- request->dev = wdev->netdev;
+ request->wdev = wdev;
request->wiphy = &rdev->wiphy;
rdev->scan_req = request;
- err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request);
+ err = rdev->ops->scan(wdev->wiphy, request);
if (!err) {
wdev->conn->state = CFG80211_CONN_SCANNING;
- nl80211_send_scan_start(rdev, wdev->netdev);
+ nl80211_send_scan_start(rdev, wdev);
dev_hold(wdev->netdev);
} else {
rdev->scan_req = NULL;
@@ -221,7 +221,7 @@ void cfg80211_conn_work(struct work_struct *work)
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
if (!netif_running(wdev->netdev)) {
wdev_unlock(wdev);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 316cfd00914..26f8cd30f71 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -35,19 +35,29 @@ int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
{
/* see 802.11 17.3.8.3.2 and Annex J
* there are overlapping channel numbers in 5GHz and 2GHz bands */
- if (band == IEEE80211_BAND_5GHZ) {
- if (chan >= 182 && chan <= 196)
- return 4000 + chan * 5;
- else
- return 5000 + chan * 5;
- } else { /* IEEE80211_BAND_2GHZ */
+ if (chan <= 0)
+ return 0; /* not supported */
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
if (chan == 14)
return 2484;
else if (chan < 14)
return 2407 + chan * 5;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ if (chan >= 182 && chan <= 196)
+ return 4000 + chan * 5;
else
- return 0; /* not supported */
+ return 5000 + chan * 5;
+ break;
+ case IEEE80211_BAND_60GHZ:
+ if (chan < 5)
+ return 56160 + chan * 2160;
+ break;
+ default:
+ break;
}
+ return 0; /* not supported */
}
EXPORT_SYMBOL(ieee80211_channel_to_frequency);
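
The 60 GHz arm follows 802.11ad channelization: channel n sits at 56160 + n * 2160 MHz, with only channels 1..4 accepted here (chan < 5). A worked example, values derived from the arithmetic above:

	int freq = ieee80211_channel_to_frequency(2, IEEE80211_BAND_60GHZ);
	/* freq == 56160 + 2 * 2160 == 60480 (MHz); channel 5 and above
	 * fall through to the "not supported" return of 0 */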
@@ -60,8 +70,12 @@ int ieee80211_frequency_to_channel(int freq)
return (freq - 2407) / 5;
else if (freq >= 4910 && freq <= 4980)
return (freq - 4000) / 5;
- else
+ else if (freq <= 45000) /* DMG band lower limit */
return (freq - 5000) / 5;
+ else if (freq >= 58320 && freq <= 64800)
+ return (freq - 56160) / 2160;
+ else
+ return 0;
}
EXPORT_SYMBOL(ieee80211_frequency_to_channel);
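
The reverse mapping gains a matching DMG window, and the previously unconditional 5 GHz fallback is now capped at 45 GHz so out-of-range frequencies return 0 instead of a bogus channel number:

	int chan = ieee80211_frequency_to_channel(60480);
	/* chan == (60480 - 56160) / 2160 == 2; a frequency such as
	 * 50000 MHz, between the 45 GHz cap and the 58320 MHz DMG floor,
	 * now yields 0 */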
@@ -137,6 +151,11 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
}
WARN_ON(want != 0 && want != 3 && want != 6);
break;
+ case IEEE80211_BAND_60GHZ:
+ /* check for mandatory HT MCS 1..4 */
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
+ break;
case IEEE80211_NUM_BANDS:
WARN_ON(1);
break;
@@ -774,7 +793,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list)
+ list_for_each_entry(wdev, &rdev->wdev_list, list)
cfg80211_process_wdev_events(wdev);
mutex_unlock(&rdev->devlist_mtx);
@@ -805,8 +824,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EBUSY;
if (ntype != otype && netif_running(dev)) {
+ mutex_lock(&rdev->devlist_mtx);
err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
ntype);
+ mutex_unlock(&rdev->devlist_mtx);
if (err)
return err;
@@ -814,6 +835,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
dev->ieee80211_ptr->mesh_id_up_len = 0;
switch (otype) {
+ case NL80211_IFTYPE_AP:
+ cfg80211_stop_ap(rdev, dev);
+ break;
case NL80211_IFTYPE_ADHOC:
cfg80211_leave_ibss(rdev, dev, false);
break;
@@ -868,15 +892,69 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
}
}
+ if (!err && ntype != otype && netif_running(dev)) {
+ cfg80211_update_iface_num(rdev, ntype, 1);
+ cfg80211_update_iface_num(rdev, otype, -1);
+ }
+
return err;
}
-u16 cfg80211_calculate_bitrate(struct rate_info *rate)
+static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
+{
+ static const u32 __mcs2bitrate[] = {
+ /* control PHY */
+ [0] = 275,
+ /* SC PHY */
+ [1] = 3850,
+ [2] = 7700,
+ [3] = 9625,
+ [4] = 11550,
+ [5] = 12512, /* 1251.25 Mbps */
+ [6] = 15400,
+ [7] = 19250,
+ [8] = 23100,
+ [9] = 25025,
+ [10] = 30800,
+ [11] = 38500,
+ [12] = 46200,
+ /* OFDM PHY */
+ [13] = 6930,
+ [14] = 8662, /* 866.25 Mbps */
+ [15] = 13860,
+ [16] = 17325,
+ [17] = 20790,
+ [18] = 27720,
+ [19] = 34650,
+ [20] = 41580,
+ [21] = 45045,
+ [22] = 51975,
+ [23] = 62370,
+ [24] = 67568, /* 6756.75 Mbps */
+ /* LP-SC PHY */
+ [25] = 6260,
+ [26] = 8340,
+ [27] = 11120,
+ [28] = 12510,
+ [29] = 16680,
+ [30] = 22240,
+ [31] = 25030,
+ };
+
+ if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
+ return 0;
+
+ return __mcs2bitrate[rate->mcs];
+}
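
As elsewhere in cfg80211, the table stores rates in units of 100 kbps, which is what forces the return type change from u16 to u32 below: the faster 60 GHz MCS indices overflow 16 bits. An illustrative kernel-side call, assuming a driver fills rate_info with both the MCS and 60G flags set so the code reaches this helper:

	struct rate_info ri = {
		.flags = RATE_INFO_FLAGS_MCS | RATE_INFO_FLAGS_60G,
		.mcs = 24,
	};
	u32 rate = cfg80211_calculate_bitrate(&ri);
	/* rate == 67568 (~6.76 Gbps), larger than U16_MAX */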
+
+u32 cfg80211_calculate_bitrate(struct rate_info *rate)
{
int modulation, streams, bitrate;
if (!(rate->flags & RATE_INFO_FLAGS_MCS))
return rate->legacy;
+ if (rate->flags & RATE_INFO_FLAGS_60G)
+ return cfg80211_calculate_bitrate_60g(rate);
/* the formula below does only work for MCS values smaller than 32 */
if (WARN_ON_ONCE(rate->mcs >= 32))
@@ -916,7 +994,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
if (!wdev->beacon_interval)
continue;
if (wdev->beacon_interval != beacon_int) {
@@ -930,28 +1008,49 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
return res;
}
-int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype)
+int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode)
{
struct wireless_dev *wdev_iter;
u32 used_iftypes = BIT(iftype);
int num[NUM_NL80211_IFTYPES];
+ struct ieee80211_channel
+ *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
+ struct ieee80211_channel *ch;
+ enum cfg80211_chan_mode chmode;
+ int num_different_channels = 0;
int total = 1;
int i, j;
ASSERT_RTNL();
+ lockdep_assert_held(&rdev->devlist_mtx);
/* Always allow software iftypes */
if (rdev->wiphy.software_iftypes & BIT(iftype))
return 0;
memset(num, 0, sizeof(num));
+ memset(used_channels, 0, sizeof(used_channels));
num[iftype] = 1;
- mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev_iter, &rdev->netdev_list, list) {
+ switch (chanmode) {
+ case CHAN_MODE_UNDEFINED:
+ break;
+ case CHAN_MODE_SHARED:
+ WARN_ON(!chan);
+ used_channels[0] = chan;
+ num_different_channels++;
+ break;
+ case CHAN_MODE_EXCLUSIVE:
+ num_different_channels++;
+ break;
+ }
+
+ list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
if (wdev_iter == wdev)
continue;
if (!netif_running(wdev_iter->netdev))
@@ -960,11 +1059,42 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
continue;
+ /*
+ * We may be holding the "wdev" mutex, but now need to lock
+ * wdev_iter. This is OK because once we get here wdev_iter
+ * is not wdev (tested above), but we need to use the nested
+ * locking for lockdep.
+ */
+ mutex_lock_nested(&wdev_iter->mtx, 1);
+ __acquire(wdev_iter->mtx);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
+ wdev_unlock(wdev_iter);
+
+ switch (chmode) {
+ case CHAN_MODE_UNDEFINED:
+ break;
+ case CHAN_MODE_SHARED:
+ for (i = 0; i < CFG80211_MAX_NUM_DIFFERENT_CHANNELS; i++)
+ if (!used_channels[i] || used_channels[i] == ch)
+ break;
+
+ if (i == CFG80211_MAX_NUM_DIFFERENT_CHANNELS)
+ return -EBUSY;
+
+ if (used_channels[i] == NULL) {
+ used_channels[i] = ch;
+ num_different_channels++;
+ }
+ break;
+ case CHAN_MODE_EXCLUSIVE:
+ num_different_channels++;
+ break;
+ }
+
num[wdev_iter->iftype]++;
total++;
used_iftypes |= BIT(wdev_iter->iftype);
}
- mutex_unlock(&rdev->devlist_mtx);
if (total == 1)
return 0;
@@ -976,12 +1106,15 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
c = &rdev->wiphy.iface_combinations[i];
+ if (total > c->max_interfaces)
+ continue;
+ if (num_different_channels > c->num_different_channels)
+ continue;
+
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
GFP_KERNEL);
if (!limits)
return -ENOMEM;
- if (total > c->max_interfaces)
- goto cont;
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
if (rdev->wiphy.software_iftypes & BIT(iftype))
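
The channel accounting above de-duplicates CHAN_MODE_SHARED users, while CHAN_MODE_EXCLUSIVE always claims a fresh channel slot. A reduced model of the shared-channel loop (a sketch with stand-in types, not the kernel structures; -1 plays the role of -EBUSY):

	#include <stdio.h>

	#define MAX_CHANNELS 4	/* stands in for CFG80211_MAX_NUM_DIFFERENT_CHANNELS */

	struct channel { int id; };	/* stand-in for struct ieee80211_channel */

	/* shared channels are de-duplicated; a new one consumes a slot */
	static int note_shared_channel(const struct channel **used, int *n,
				       const struct channel *ch)
	{
		int i;

		for (i = 0; i < MAX_CHANNELS; i++)
			if (!used[i] || used[i] == ch)
				break;
		if (i == MAX_CHANNELS)
			return -1;	/* -EBUSY in the kernel code */
		if (!used[i]) {
			used[i] = ch;
			(*n)++;
		}
		return 0;
	}

	int main(void)
	{
		const struct channel *used[MAX_CHANNELS] = { 0 };
		struct channel a, b;
		int n = 0;

		note_shared_channel(used, &n, &a);
		note_shared_channel(used, &n, &a);	/* same channel: no new slot */
		note_shared_channel(used, &n, &b);
		printf("%d\n", n);			/* 2 */
		return 0;
	}
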
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6a6181a673c..494379eb464 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -796,7 +796,15 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
+ freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+ if (freq < 0)
+ return freq;
+ if (freq == 0)
+ return -EINVAL;
+ mutex_lock(&rdev->devlist_mtx);
+ err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
+ mutex_unlock(&rdev->devlist_mtx);
+ return err;
case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
@@ -804,9 +812,8 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
if (freq == 0)
return -EINVAL;
mutex_lock(&rdev->devlist_mtx);
- wdev_lock(wdev);
- err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
- wdev_unlock(wdev);
+ err = cfg80211_set_mesh_freq(rdev, wdev, freq,
+ NL80211_CHAN_NO_HT);
mutex_unlock(&rdev->devlist_mtx);
return err;
default:
@@ -832,18 +839,14 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
if (!rdev->ops->get_channel)
return -EINVAL;
- chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
+ chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type);
if (!chan)
return -EINVAL;
freq->m = chan->center_freq;
freq->e = 6;
return 0;
default:
- if (!wdev->channel)
- return -EINVAL;
- freq->m = wdev->channel->center_freq;
- freq->e = 6;
- return 0;
+ return -EINVAL;
}
}
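
For context, wireless extensions encode frequencies as m * 10^e Hz, so the unchanged code above reports a channel's center_freq (in MHz) with e = 6. A small decode sketch (iw_freq_like is a stand-in for struct iw_freq):

	#include <stdio.h>

	struct iw_freq_like { int m; short e; };	/* stand-in for struct iw_freq */

	int main(void)
	{
		struct iw_freq_like f = { 2412, 6 };	/* MHz with e = 6, as above */
		double hz = f.m;
		int i;

		for (i = 0; i < f.e; i++)
			hz *= 10.0;
		printf("%.3f GHz\n", hz / 1e9);	/* 2.412 GHz */
		return 0;
	}
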
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7decbd357d5..1f773f668d1 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -111,9 +111,15 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
wdev->wext.connect.channel = chan;
- /* SSID is not set, we just want to switch channel */
+ /*
+ * SSID is not set; we just want to switch the monitor channel.
+ * This is really just backward compatibility: if the SSID is
+ * set, we use the channel to select the BSS to connect to
+ * instead. If we were connected on another channel, we
+ * disconnected above and reconnect below.
+ */
if (chan && !wdev->wext.connect.ssid_len) {
- err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
goto out;
}
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index cf636627005..277c8d2448d 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -66,7 +66,7 @@ out:
/**
* __x25_remove_route - remove route from x25_route_list
- * @rt - route to remove
+ * @rt: route to remove
*
 * Remove route from x25_route_list, if it was there.
* Caller must hold x25_route_list_lock.
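
The x25 hunk is a kernel-doc syntax fix: parameters use "@name: description", not "@name - description". The corrected comment, in full, would read:

	/**
	 * __x25_remove_route - remove route from x25_route_list
	 * @rt: route to remove
	 *
	 * Remove route from x25_route_list, if it was there.
	 * Caller must hold x25_route_list_lock.
	 */
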
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ccfbd328a69..c5a5165a592 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1350,11 +1350,12 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
+ xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
if (likely(xdst)) {
- memset(&xdst->u.rt6.rt6i_table, 0,
- sizeof(*xdst) - sizeof(struct dst_entry));
+ struct dst_entry *dst = &xdst->u.dst;
+
+ memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
xdst->flo.ops = &xfrm_bundle_fc_ops;
} else
xdst = ERR_PTR(-ENOBUFS);
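
The rewritten memset zeroes everything in the xfrm_dst that follows the embedded dst_entry: "dst + 1" points just past the header, so the size is sizeof(*xdst) - sizeof(*dst) regardless of which union member used to come first. A generic illustration of the idiom (a sketch with toy structs):

	#include <stdio.h>
	#include <string.h>

	struct header { int refcnt; };
	struct object { struct header hdr; int a, b; };

	int main(void)
	{
		struct object o = { { 1 }, 42, 43 };
		struct header *h = &o.hdr;

		/* zero the tail of the object, preserving the header */
		memset(h + 1, 0, sizeof(o) - sizeof(*h));
		printf("%d %d %d\n", o.hdr.refcnt, o.a, o.b);	/* 1 0 0 */
		return 0;
	}
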
@@ -1476,7 +1477,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->xfrm = xfrm[i];
xdst->xfrm_genid = xfrm[i]->genid;
- dst1->obsolete = -1;
+ dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
dst1->flags |= DST_HOST;
dst1->lastuse = now;
@@ -1500,9 +1501,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (!dev)
goto free_dst;
- /* Copy neighbour for reachability confirmation */
- dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
-
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
xfrm_init_pmtu(dst_prev);
@@ -2221,12 +2219,13 @@ EXPORT_SYMBOL(__xfrm_route_forward);
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
- * to "-1" to force all XFRM destinations to get validated by
- * dst_ops->check on every use. We do this because when a
- * normal route referenced by an XFRM dst is obsoleted we do
- * not go looking around for all parent referencing XFRM dsts
- * so that we can invalidate them. It is just too much work.
- * Instead we make the checks here on every use. For example:
+ * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
+ * get validated by dst_ops->check on every use. We do this
+ * because when a normal route referenced by an XFRM dst is
+ * obsoleted we do not go looking around for all parent
+ * referencing XFRM dsts so that we can invalidate them. It
+ * is just too much work. Instead we make the checks here on
+ * every use. For example:
*
* XFRM dst A --> IPv4 dst X
*
@@ -2236,9 +2235,9 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
* stale_bundle() check.
*
* When a policy's bundle is pruned, we dst_free() the XFRM
- * dst which causes it's ->obsolete field to be set to a
- * positive non-zero integer. If an XFRM dst has been pruned
- * like this, we want to force a new route lookup.
+ * dst which causes its ->obsolete field to be set to
+ * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
+ * this, we want to force a new route lookup.
*/
if (dst->obsolete < 0 && !stale_bundle(dst))
return dst;
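
For the "obsolete < 0" test to make sense, note the DST_OBSOLETE_* values this series introduces (quoted from memory of include/net/dst.h; treat the exact numbers as an assumption):

	/*
	 *   DST_OBSOLETE_NONE       0   entry is valid as-is
	 *   DST_OBSOLETE_DEAD       2   entry was pruned; force a relookup
	 *   DST_OBSOLETE_FORCE_CHK -1   run dst_ops->check on every use
	 *
	 * so "dst->obsolete < 0" above selects exactly the force-check
	 * bundles, while pruned (DEAD) entries fall through to relookup.
	 */
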
@@ -2404,9 +2403,11 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst)
return mtu ? : dst_mtu(dst->path);
}
-static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
- return dst_neigh_lookup(dst->path, daddr);
+ return dst->path->ops->neigh_lookup(dst, skb, daddr);
}
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 44293b3fd6a..e75d8e47f35 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -754,58 +754,67 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
struct sk_buff *skb)
{
- copy_to_user_state(x, p);
-
- if (x->coaddr &&
- nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
- goto nla_put_failure;
-
- if (x->lastused &&
- nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
- goto nla_put_failure;
-
- if (x->aead &&
- nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
- goto nla_put_failure;
-
- if (x->aalg &&
- (copy_to_user_auth(x->aalg, skb) ||
- nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
- xfrm_alg_auth_len(x->aalg), x->aalg)))
- goto nla_put_failure;
-
- if (x->ealg &&
- nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
- goto nla_put_failure;
-
- if (x->calg &&
- nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
- goto nla_put_failure;
-
- if (x->encap &&
- nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
- goto nla_put_failure;
+ int ret = 0;
- if (x->tfcpad &&
- nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
- goto nla_put_failure;
-
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
-
- if (x->replay_esn &&
- nla_put(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn),
- x->replay_esn))
- goto nla_put_failure;
-
- if (x->security && copy_sec_ctx(x->security, skb))
- goto nla_put_failure;
-
- return 0;
+ copy_to_user_state(x, p);
-nla_put_failure:
- return -EMSGSIZE;
+ if (x->coaddr) {
+ ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+ if (ret)
+ goto out;
+ }
+ if (x->lastused) {
+ ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
+ if (ret)
+ goto out;
+ }
+ if (x->aead) {
+ ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
+ if (ret)
+ goto out;
+ }
+ if (x->aalg) {
+ ret = copy_to_user_auth(x->aalg, skb);
+ if (!ret)
+ ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+ xfrm_alg_auth_len(x->aalg), x->aalg);
+ if (ret)
+ goto out;
+ }
+ if (x->ealg) {
+ ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
+ if (ret)
+ goto out;
+ }
+ if (x->calg) {
+ ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+ if (ret)
+ goto out;
+ }
+ if (x->encap) {
+ ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+ if (ret)
+ goto out;
+ }
+ if (x->tfcpad) {
+ ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
+ if (ret)
+ goto out;
+ }
+ ret = xfrm_mark_put(skb, &x->mark);
+ if (ret)
+ goto out;
+ if (x->replay_esn) {
+ ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn);
+ if (ret)
+ goto out;
+ }
+ if (x->security)
+ ret = copy_sec_ctx(x->security, skb);
+out:
+ return ret;
}
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
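
The conversion pattern throughout this file is the same: replace a boolean goto-on-failure with an err variable so the helper's own error code (today always -EMSGSIZE from the nla_put family) reaches the caller intact. Reduced to essentials (a runnable sketch; put_attr_a/put_attr_b are hypothetical stand-ins):

	#include <stdio.h>

	#define EMSGSIZE 90

	/* hypothetical stand-ins for nla_put-style helpers: 0 or -EMSGSIZE */
	static int put_attr_a(int room) { return room >= 1 ? 0 : -EMSGSIZE; }
	static int put_attr_b(int room) { return room >= 2 ? 0 : -EMSGSIZE; }

	static int emit_attrs(int room)
	{
		int ret;

		ret = put_attr_a(room);
		if (ret)
			goto out;
		ret = put_attr_b(room);	/* the helper's error code propagates as-is */
	out:
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", emit_attrs(2), emit_attrs(0));	/* 0 -90 */
		return 0;
	}
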
@@ -825,15 +834,12 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
p = nlmsg_data(nlh);
err = copy_to_user_state_extra(x, p, skb);
- if (err)
- goto nla_put_failure;
-
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
nlmsg_end(skb, nlh);
return 0;
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return err;
}
static int xfrm_dump_sa_done(struct netlink_callback *cb)
@@ -904,6 +910,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
struct nlmsghdr *nlh;
+ int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
@@ -922,15 +929,15 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
- if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
- nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
+ if (!err)
+ err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -965,6 +972,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
+ int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
@@ -978,15 +986,15 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
- if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
- nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
- goto nla_put_failure;
+ err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
+ if (!err)
+ err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1439,9 +1447,8 @@ static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buf
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
- if (xp->security) {
+ if (xp->security)
return copy_sec_ctx(xp->security, skb);
- }
return 0;
}
static inline size_t userpolicy_type_attrsize(void)
@@ -1477,6 +1484,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
@@ -1485,22 +1493,19 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
p = nlmsg_data(nlh);
copy_to_user_policy(xp, p, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_sec_ctx(xp, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
-
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_sec_ctx(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
nlmsg_end(skb, nlh);
return 0;
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_dump_policy_done(struct netlink_callback *cb)
@@ -1688,6 +1693,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
if (nlh == NULL)
@@ -1703,35 +1709,39 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
id->flags = c->data.aevent;
if (x->replay_esn) {
- if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn),
- x->replay_esn))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn);
} else {
- if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
- &x->replay))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+ &x->replay);
}
- if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
- goto nla_put_failure;
-
- if ((id->flags & XFRM_AE_RTHR) &&
- nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
- goto nla_put_failure;
-
- if ((id->flags & XFRM_AE_ETHR) &&
- nla_put_u32(skb, XFRMA_ETIMER_THRESH,
- x->replay_maxage * 10 / HZ))
- goto nla_put_failure;
+ if (err)
+ goto out_cancel;
+ err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+ if (err)
+ goto out_cancel;
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
+ if (id->flags & XFRM_AE_RTHR) {
+ err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
+ if (err)
+ goto out_cancel;
+ }
+ if (id->flags & XFRM_AE_ETHR) {
+ err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+ x->replay_maxage * 10 / HZ);
+ if (err)
+ goto out_cancel;
+ }
+ err = xfrm_mark_put(skb, &x->mark);
+ if (err)
+ goto out_cancel;
return nlmsg_end(skb, nlh);
-nla_put_failure:
+out_cancel:
nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ return err;
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2155,7 +2165,7 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
- int i;
+ int i, err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
if (nlh == NULL)
@@ -2167,21 +2177,25 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
pol_id->dir = dir;
- if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
- goto nlmsg_failure;
-
- if (copy_to_user_policy_type(type, skb) < 0)
- goto nlmsg_failure;
-
+ if (k != NULL) {
+ err = copy_to_user_kmaddress(k, skb);
+ if (err)
+ goto out_cancel;
+ }
+ err = copy_to_user_policy_type(type, skb);
+ if (err)
+ goto out_cancel;
for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
- if (copy_to_user_migrate(mp, skb) < 0)
- goto nlmsg_failure;
+ err = copy_to_user_migrate(mp, skb);
+ if (err)
+ goto out_cancel;
}
return nlmsg_end(skb, nlh);
-nlmsg_failure:
+
+out_cancel:
nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ return err;
}
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2354,6 +2368,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
if (nlh == NULL)
@@ -2363,13 +2378,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
+ err = xfrm_mark_put(skb, &x->mark);
+ if (err)
+ return err;
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- return -EMSGSIZE;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2470,7 +2483,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = xfrm_sa_len(x);
- int headlen;
+ int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
@@ -2485,8 +2498,9 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nla_put_failure;
+ goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELSA) {
@@ -2499,24 +2513,23 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
id->proto = x->id.proto;
attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
+ err = -EMSGSIZE;
if (attr == NULL)
- goto nla_put_failure;
+ goto out_free_skb;
p = nla_data(attr);
}
-
- if (copy_to_user_state_extra(x, p, skb))
- goto nla_put_failure;
+ err = copy_to_user_state_extra(x, p, skb);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
-nla_put_failure:
- /* Somebody screwed up with xfrm_sa_len! */
- WARN_ON(1);
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2557,9 +2570,10 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp,
int dir)
{
+ __u32 seq = xfrm_get_acqseq();
struct xfrm_user_acquire *ua;
struct nlmsghdr *nlh;
- __u32 seq = xfrm_get_acqseq();
+ int err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
if (nlh == NULL)
@@ -2575,21 +2589,19 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
ua->calgos = xt->calgos;
ua->seq = x->km.seq = seq;
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_state_sec_ctx(x, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_state_sec_ctx(x, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2681,8 +2693,9 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
- struct nlmsghdr *nlh;
int hard = c->data.hard;
+ struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
if (nlh == NULL)
@@ -2690,22 +2703,20 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
upe = nlmsg_data(nlh);
copy_to_user_policy(xp, &upe->pol, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_sec_ctx(xp, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_sec_ctx(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
upe->hard = !!hard;
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2725,13 +2736,13 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
+ int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
struct xfrm_userpolicy_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
- int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
- int headlen;
+ int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2747,8 +2758,9 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nlmsg_failure;
+ goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2763,29 +2775,29 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
memcpy(&id->sel, &xp->selector, sizeof(id->sel));
attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
+ err = -EMSGSIZE;
if (attr == NULL)
- goto nlmsg_failure;
+ goto out_free_skb;
p = nla_data(attr);
}
copy_to_user_policy(xp, p, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
-
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
-nla_put_failure:
-nlmsg_failure:
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_notify_policy_flush(const struct km_event *c)
@@ -2793,24 +2805,27 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
struct net *net = c->net;
struct nlmsghdr *nlh;
struct sk_buff *skb;
+ int err;
skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nlmsg_failure;
- if (copy_to_user_policy_type(c->data.type, skb) < 0)
- goto nlmsg_failure;
+ goto out_free_skb;
+ err = copy_to_user_policy_type(c->data.type, skb);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
-nlmsg_failure:
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2853,15 +2868,14 @@ static int build_report(struct sk_buff *skb, u8 proto,
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
- if (addr &&
- nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
- goto nla_put_failure;
-
+ if (addr) {
+ int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_send_report(struct net *net, u8 proto,
@@ -2945,9 +2959,12 @@ static struct xfrm_mgr netlink_mgr = {
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
+ struct netlink_kernel_cfg cfg = {
+ .groups = XFRMNLGRP_MAX,
+ .input = xfrm_netlink_rcv,
+ };
- nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
- xfrm_netlink_rcv, NULL, THIS_MODULE);
+ nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
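
The same conversion applies to any netlink_kernel_create() caller: the positional groups/input/cb_mutex arguments move into struct netlink_kernel_cfg, and fields left out of the initializer stay zero, matching the old NULL. A before/after sketch for a hypothetical subsystem (NETLINK_FOO, FOO_GRP_MAX and foo_rcv are placeholders):

	/* before: groups, input and cb_mutex were positional arguments */
	nlsk = netlink_kernel_create(net, NETLINK_FOO, FOO_GRP_MAX,
				     foo_rcv, NULL, THIS_MODULE);

	/* after: they move into the cfg struct; omitted fields stay zero/NULL */
	struct netlink_kernel_cfg cfg = {
		.groups	= FOO_GRP_MAX,
		.input	= foo_rcv,
	};
	nlsk = netlink_kernel_create(net, NETLINK_FOO, THIS_MODULE, &cfg);
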