author  Jiri Kosina <jkosina@suse.cz>  2011-11-13 20:55:35 +0100
committer  Jiri Kosina <jkosina@suse.cz>  2011-11-13 20:55:53 +0100
commit  2290c0d06d82faee87b1ab2d9d4f7bf81ef64379 (patch)
tree  e075e4d5534193f28e6059904f61e5ca03958d3c /net
parent  4da669a2e3e5bc70b30a0465f3641528681b5f77 (diff)
parent  52e4c2a05256cb83cda12f3c2137ab1533344edb (diff)
Merge branch 'master' into for-next
Sync with Linus' tree to have 157550ff ("mtd: add GPMI-NAND driver in the config and Makefile") as I have a patch depending on that one.
Diffstat (limited to 'net')
-rw-r--r--  net/802/fc.c  1
-rw-r--r--  net/802/garp.c  5
-rw-r--r--  net/802/stp.c  5
-rw-r--r--  net/8021q/vlan.c  2
-rw-r--r--  net/8021q/vlan_core.c  8
-rw-r--r--  net/8021q/vlan_dev.c  14
-rw-r--r--  net/8021q/vlan_netlink.c  1
-rw-r--r--  net/9p/client.c  469
-rw-r--r--  net/9p/protocol.c  99
-rw-r--r--  net/9p/protocol.h  4
-rw-r--r--  net/9p/trans_common.c  53
-rw-r--r--  net/9p/trans_common.h  21
-rw-r--r--  net/9p/trans_virtio.c  319
-rw-r--r--  net/appletalk/aarp.c  1
-rw-r--r--  net/appletalk/atalk_proc.c  1
-rw-r--r--  net/appletalk/ddp.c  5
-rw-r--r--  net/atm/lec.c  2
-rw-r--r--  net/atm/pvc.c  1
-rw-r--r--  net/atm/svc.c  1
-rw-r--r--  net/ax25/ax25_route.c  1
-rw-r--r--  net/ax25/ax25_uid.c  1
-rw-r--r--  net/batman-adv/Makefile  2
-rw-r--r--  net/batman-adv/aggregation.c  293
-rw-r--r--  net/batman-adv/aggregation.h  46
-rw-r--r--  net/batman-adv/bat_iv_ogm.c  1170
-rw-r--r--  net/batman-adv/bat_ogm.h  35
-rw-r--r--  net/batman-adv/bat_sysfs.c  2
-rw-r--r--  net/batman-adv/bitarray.c  6
-rw-r--r--  net/batman-adv/gateway_client.c  10
-rw-r--r--  net/batman-adv/hard-interface.c  88
-rw-r--r--  net/batman-adv/hard-interface.h  1
-rw-r--r--  net/batman-adv/hash.h  25
-rw-r--r--  net/batman-adv/main.c  4
-rw-r--r--  net/batman-adv/main.h  8
-rw-r--r--  net/batman-adv/originator.c  21
-rw-r--r--  net/batman-adv/packet.h  19
-rw-r--r--  net/batman-adv/routing.c  669
-rw-r--r--  net/batman-adv/routing.h  17
-rw-r--r--  net/batman-adv/send.c  313
-rw-r--r--  net/batman-adv/send.h  9
-rw-r--r--  net/batman-adv/soft-interface.c  36
-rw-r--r--  net/batman-adv/translation-table.c  223
-rw-r--r--  net/batman-adv/translation-table.h  21
-rw-r--r--  net/batman-adv/types.h  9
-rw-r--r--  net/batman-adv/unicast.c  6
-rw-r--r--  net/batman-adv/unicast.h  2
-rw-r--r--  net/batman-adv/vis.c  10
-rw-r--r--  net/bluetooth/af_bluetooth.c  30
-rw-r--r--  net/bluetooth/bnep/core.c  5
-rw-r--r--  net/bluetooth/bnep/netdev.c  2
-rw-r--r--  net/bluetooth/cmtp/core.c  5
-rw-r--r--  net/bluetooth/hci_conn.c  16
-rw-r--r--  net/bluetooth/hci_core.c  59
-rw-r--r--  net/bluetooth/hci_event.c  36
-rw-r--r--  net/bluetooth/hci_sock.c  18
-rw-r--r--  net/bluetooth/hci_sysfs.c  3
-rw-r--r--  net/bluetooth/hidp/core.c  13
-rw-r--r--  net/bluetooth/l2cap_core.c  273
-rw-r--r--  net/bluetooth/l2cap_sock.c  1
-rw-r--r--  net/bluetooth/mgmt.c  215
-rw-r--r--  net/bluetooth/rfcomm/core.c  14
-rw-r--r--  net/bluetooth/smp.c  421
-rw-r--r--  net/bridge/br_device.c  4
-rw-r--r--  net/bridge/br_fdb.c  23
-rw-r--r--  net/bridge/br_if.c  41
-rw-r--r--  net/bridge/br_input.c  34
-rw-r--r--  net/bridge/br_private.h  7
-rw-r--r--  net/bridge/br_stp_if.c  1
-rw-r--r--  net/bridge/br_sysfs_br.c  34
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c  7
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c  4
-rw-r--r--  net/caif/caif_dev.c  6
-rw-r--r--  net/caif/cfcnfg.c  38
-rw-r--r--  net/caif/cfctrl.c  23
-rw-r--r--  net/caif/cfdbgl.c  7
-rw-r--r--  net/caif/cfdgml.c  7
-rw-r--r--  net/caif/cffrml.c  7
-rw-r--r--  net/caif/cfmuxl.c  6
-rw-r--r--  net/caif/cfpkt_skbuff.c  1
-rw-r--r--  net/caif/cfrfml.c  7
-rw-r--r--  net/caif/cfserl.c  7
-rw-r--r--  net/caif/cfsrvl.c  8
-rw-r--r--  net/caif/cfutill.c  7
-rw-r--r--  net/caif/cfveil.c  7
-rw-r--r--  net/caif/cfvidl.c  7
-rw-r--r--  net/can/Kconfig  11
-rw-r--r--  net/can/Makefile  3
-rw-r--r--  net/can/af_can.c  6
-rw-r--r--  net/can/af_can.h  2
-rw-r--r--  net/can/bcm.c  2
-rw-r--r--  net/can/gw.c  957
-rw-r--r--  net/can/proc.c  2
-rw-r--r--  net/can/raw.c  2
-rw-r--r--  net/ceph/Kconfig  14
-rw-r--r--  net/ceph/ceph_common.c  47
-rw-r--r--  net/ceph/messenger.c  131
-rw-r--r--  net/ceph/mon_client.c  79
-rw-r--r--  net/ceph/msgpool.c  4
-rw-r--r--  net/ceph/osd_client.c  34
-rw-r--r--  net/compat.c  1
-rw-r--r--  net/core/datagram.c  24
-rw-r--r--  net/core/dev.c  339
-rw-r--r--  net/core/dev_addr_lists.c  5
-rw-r--r--  net/core/dst.c  15
-rw-r--r--  net/core/ethtool.c  20
-rw-r--r--  net/core/fib_rules.c  5
-rw-r--r--  net/core/filter.c  4
-rw-r--r--  net/core/flow.c  14
-rw-r--r--  net/core/kmap_skb.h  2
-rw-r--r--  net/core/link_watch.c  9
-rw-r--r--  net/core/neighbour.c  50
-rw-r--r--  net/core/net-sysfs.c  13
-rw-r--r--  net/core/net-traces.c  1
-rw-r--r--  net/core/net_namespace.c  1
-rw-r--r--  net/core/netevent.c  1
-rw-r--r--  net/core/netpoll.c  5
-rw-r--r--  net/core/pktgen.c  25
-rw-r--r--  net/core/rtnetlink.c  34
-rw-r--r--  net/core/scm.c  10
-rw-r--r--  net/core/secure_seq.c  2
-rw-r--r--  net/core/skbuff.c  173
-rw-r--r--  net/core/sock.c  24
-rw-r--r--  net/core/timestamping.c  13
-rw-r--r--  net/core/user_dma.c  7
-rw-r--r--  net/dcb/dcbevent.c  1
-rw-r--r--  net/dcb/dcbnl.c  31
-rw-r--r--  net/dccp/ackvec.c  1
-rw-r--r--  net/dccp/ccids/ccid2.c  84
-rw-r--r--  net/dccp/ccids/ccid2.h  6
-rw-r--r--  net/dccp/ccids/lib/tfrc.c  1
-rw-r--r--  net/dccp/dccp.h  1
-rw-r--r--  net/dccp/feat.c  202
-rw-r--r--  net/dccp/feat.h  1
-rw-r--r--  net/dccp/ipv4.c  1
-rw-r--r--  net/dccp/ipv6.c  4
-rw-r--r--  net/dccp/proto.c  1
-rw-r--r--  net/dccp/timer.c  1
-rw-r--r--  net/decnet/dn_dev.c  6
-rw-r--r--  net/decnet/dn_route.c  1
-rw-r--r--  net/decnet/dn_rules.c  1
-rw-r--r--  net/dsa/dsa.c  1
-rw-r--r--  net/dsa/slave.c  3
-rw-r--r--  net/ieee802154/6lowpan.c  891
-rw-r--r--  net/ieee802154/6lowpan.h  212
-rw-r--r--  net/ieee802154/Kconfig  6
-rw-r--r--  net/ieee802154/Makefile  8
-rw-r--r--  net/ieee802154/nl-mac.c  1
-rw-r--r--  net/ipv4/af_inet.c  2
-rw-r--r--  net/ipv4/cipso_ipv4.c  2
-rw-r--r--  net/ipv4/devinet.c  6
-rw-r--r--  net/ipv4/fib_rules.c  1
-rw-r--r--  net/ipv4/fib_trie.c  13
-rw-r--r--  net/ipv4/gre.c  4
-rw-r--r--  net/ipv4/icmp.c  5
-rw-r--r--  net/ipv4/igmp.c  12
-rw-r--r--  net/ipv4/inet_diag.c  5
-rw-r--r--  net/ipv4/inet_lro.c  10
-rw-r--r--  net/ipv4/inet_timewait_sock.c  2
-rw-r--r--  net/ipv4/ip_fragment.c  40
-rw-r--r--  net/ipv4/ip_gre.c  2
-rw-r--r--  net/ipv4/ip_output.c  17
-rw-r--r--  net/ipv4/ip_sockglue.c  7
-rw-r--r--  net/ipv4/ipconfig.c  1
-rw-r--r--  net/ipv4/ipip.c  10
-rw-r--r--  net/ipv4/ipmr.c  9
-rw-r--r--  net/ipv4/netfilter.c  1
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c  1
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c  4
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_amanda.c  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c  24
-rw-r--r--  net/ipv4/netfilter/nf_nat_ftp.c  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c  36
-rw-r--r--  net/ipv4/netfilter/nf_nat_irc.c  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c  16
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_icmp.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_sctp.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udplite.c  1
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c  28
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c  26
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c  6
-rw-r--r--  net/ipv4/netfilter/nf_nat_tftp.c  4
-rw-r--r--  net/ipv4/ping.c  1
-rw-r--r--  net/ipv4/proc.c  1
-rw-r--r--  net/ipv4/raw.c  1
-rw-r--r--  net/ipv4/route.c  53
-rw-r--r--  net/ipv4/syncookies.c  3
-rw-r--r--  net/ipv4/tcp.c  97
-rw-r--r--  net/ipv4/tcp_input.c  244
-rw-r--r--  net/ipv4/tcp_ipv4.c  80
-rw-r--r--  net/ipv4/tcp_minisocks.c  5
-rw-r--r--  net/ipv4/tcp_output.c  152
-rw-r--r--  net/ipv4/tcp_timer.c  2
-rw-r--r--  net/ipv4/udp.c  41
-rw-r--r--  net/ipv4/udplite.c  14
-rw-r--r--  net/ipv4/xfrm4_policy.c  14
-rw-r--r--  net/ipv4/xfrm4_state.c  1
-rw-r--r--  net/ipv6/addrconf.c  125
-rw-r--r--  net/ipv6/addrconf_core.c  1
-rw-r--r--  net/ipv6/datagram.c  4
-rw-r--r--  net/ipv6/exthdrs.c  8
-rw-r--r--  net/ipv6/exthdrs_core.c  1
-rw-r--r--  net/ipv6/fib6_rules.c  1
-rw-r--r--  net/ipv6/icmp.c  28
-rw-r--r--  net/ipv6/inet6_connection_sock.c  9
-rw-r--r--  net/ipv6/ip6_fib.c  4
-rw-r--r--  net/ipv6/ip6_flowlabel.c  1
-rw-r--r--  net/ipv6/ip6_output.c  37
-rw-r--r--  net/ipv6/ip6_tunnel.c  54
-rw-r--r--  net/ipv6/ip6mr.c  1
-rw-r--r--  net/ipv6/ipv6_sockglue.c  2
-rw-r--r--  net/ipv6/ndisc.c  42
-rw-r--r--  net/ipv6/netfilter.c  14
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c  11
-rw-r--r--  net/ipv6/proc.c  1
-rw-r--r--  net/ipv6/raw.c  12
-rw-r--r--  net/ipv6/reassembly.c  5
-rw-r--r--  net/ipv6/route.c  4
-rw-r--r--  net/ipv6/sit.c  12
-rw-r--r--  net/ipv6/syncookies.c  6
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c  1
-rw-r--r--  net/ipv6/tcp_ipv6.c  77
-rw-r--r--  net/ipv6/udp.c  16
-rw-r--r--  net/ipv6/udplite.c  14
-rw-r--r--  net/ipv6/xfrm6_output.c  56
-rw-r--r--  net/ipv6/xfrm6_state.c  1
-rw-r--r--  net/ipx/ipx_proc.c  1
-rw-r--r--  net/irda/discovery.c  1
-rw-r--r--  net/irda/ircomm/ircomm_tty.c  2
-rw-r--r--  net/irda/irda_device.c  1
-rw-r--r--  net/irda/irlan/irlan_eth.c  2
-rw-r--r--  net/irda/irttp.c  1
-rw-r--r--  net/irda/qos.c  2
-rw-r--r--  net/iucv/Kconfig  14
-rw-r--r--  net/iucv/af_iucv.c  870
-rw-r--r--  net/iucv/iucv.c  23
-rw-r--r--  net/l2tp/l2tp_core.c  3
-rw-r--r--  net/l2tp/l2tp_ppp.c  9
-rw-r--r--  net/lapb/lapb_iface.c  29
-rw-r--r--  net/llc/llc_input.c  1
-rw-r--r--  net/llc/llc_output.c  1
-rw-r--r--  net/llc/llc_proc.c  1
-rw-r--r--  net/mac80211/Kconfig  25
-rw-r--r--  net/mac80211/agg-rx.c  26
-rw-r--r--  net/mac80211/agg-tx.c  65
-rw-r--r--  net/mac80211/cfg.c  497
-rw-r--r--  net/mac80211/debugfs.c  71
-rw-r--r--  net/mac80211/debugfs_netdev.c  59
-rw-r--r--  net/mac80211/debugfs_sta.c  37
-rw-r--r--  net/mac80211/driver-ops.h  91
-rw-r--r--  net/mac80211/driver-trace.h  117
-rw-r--r--  net/mac80211/ht.c  9
-rw-r--r--  net/mac80211/ibss.c  16
-rw-r--r--  net/mac80211/ieee80211_i.h  105
-rw-r--r--  net/mac80211/iface.c  26
-rw-r--r--  net/mac80211/key.c  5
-rw-r--r--  net/mac80211/led.c  1
-rw-r--r--  net/mac80211/main.c  27
-rw-r--r--  net/mac80211/mesh.c  213
-rw-r--r--  net/mac80211/mesh.h  38
-rw-r--r--  net/mac80211/mesh_hwmp.c  177
-rw-r--r--  net/mac80211/mesh_pathtbl.c  481
-rw-r--r--  net/mac80211/mesh_plink.c  257
-rw-r--r--  net/mac80211/mlme.c  160
-rw-r--r--  net/mac80211/offchannel.c  1
-rw-r--r--  net/mac80211/pm.c  2
-rw-r--r--  net/mac80211/rate.c  38
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c  1
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  13
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c  1
-rw-r--r--  net/mac80211/rc80211_pid_debugfs.c  1
-rw-r--r--  net/mac80211/rx.c  200
-rw-r--r--  net/mac80211/scan.c  7
-rw-r--r--  net/mac80211/spectmgmt.c  6
-rw-r--r--  net/mac80211/sta_info.c  977
-rw-r--r--  net/mac80211/sta_info.h  171
-rw-r--r--  net/mac80211/status.c  252
-rw-r--r--  net/mac80211/tkip.c  1
-rw-r--r--  net/mac80211/tx.c  558
-rw-r--r--  net/mac80211/util.c  281
-rw-r--r--  net/mac80211/wme.c  20
-rw-r--r--  net/mac80211/wme.h  3
-rw-r--r--  net/mac80211/work.c  17
-rw-r--r--  net/mac80211/wpa.c  3
-rw-r--r--  net/netfilter/core.c  15
-rw-r--r--  net/netfilter/ipset/ip_set_core.c  4
-rw-r--r--  net/netfilter/ipset/ip_set_getport.c  1
-rw-r--r--  net/netfilter/ipset/pfxlen.c  1
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c  20
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  22
-rw-r--r--  net/netfilter/ipvs/ip_vs_dh.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c  9
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c  13
-rw-r--r--  net/netfilter/ipvs/ip_vs_nfct.c  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c  14
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c  6
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c  5
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c  2
-rw-r--r--  net/netfilter/nf_conntrack_acct.c  1
-rw-r--r--  net/netfilter/nf_conntrack_core.c  17
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c  9
-rw-r--r--  net/netfilter/nf_conntrack_expect.c  2
-rw-r--r--  net/netfilter/nf_conntrack_extend.c  4
-rw-r--r--  net/netfilter/nf_conntrack_helper.c  6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c  6
-rw-r--r--  net/netfilter/nf_log.c  10
-rw-r--r--  net/netfilter/nf_queue.c  6
-rw-r--r--  net/netfilter/nfnetlink.c  6
-rw-r--r--  net/netfilter/nfnetlink_log.c  7
-rw-r--r--  net/netfilter/x_tables.c  1
-rw-r--r--  net/netfilter/xt_IDLETIMER.c  2
-rw-r--r--  net/netfilter/xt_hashlimit.c  5
-rw-r--r--  net/netfilter/xt_quota.c  1
-rw-r--r--  net/netfilter/xt_statistic.c  1
-rw-r--r--  net/netlabel/netlabel_domainhash.c  6
-rw-r--r--  net/netlabel/netlabel_unlabeled.c  6
-rw-r--r--  net/netlink/af_netlink.c  7
-rw-r--r--  net/netrom/nr_route.c  1
-rw-r--r--  net/nfc/Kconfig  2
-rw-r--r--  net/nfc/Makefile  1
-rw-r--r--  net/nfc/af_nfc.c  1
-rw-r--r--  net/nfc/core.c  83
-rw-r--r--  net/nfc/nci/Kconfig  10
-rw-r--r--  net/nfc/nci/Makefile  7
-rw-r--r--  net/nfc/nci/core.c  798
-rw-r--r--  net/nfc/nci/data.c  247
-rw-r--r--  net/nfc/nci/lib.c  94
-rw-r--r--  net/nfc/nci/ntf.c  258
-rw-r--r--  net/nfc/nci/rsp.c  226
-rw-r--r--  net/nfc/netlink.c  56
-rw-r--r--  net/nfc/nfc.h  8
-rw-r--r--  net/nfc/rawsock.c  14
-rw-r--r--  net/packet/af_packet.c  1007
-rw-r--r--  net/phonet/af_phonet.c  4
-rw-r--r--  net/phonet/datagram.c  1
-rw-r--r--  net/phonet/pep.c  1
-rw-r--r--  net/phonet/pn_dev.c  6
-rw-r--r--  net/phonet/socket.c  7
-rw-r--r--  net/rds/Kconfig  1
-rw-r--r--  net/rds/cong.c  1
-rw-r--r--  net/rds/connection.c  1
-rw-r--r--  net/rds/ib.c  1
-rw-r--r--  net/rds/ib_rdma.c  112
-rw-r--r--  net/rds/info.c  1
-rw-r--r--  net/rds/iw.c  1
-rw-r--r--  net/rds/message.c  1
-rw-r--r--  net/rds/page.c  1
-rw-r--r--  net/rds/rdma_transport.c  1
-rw-r--r--  net/rds/rds.h  8
-rw-r--r--  net/rds/recv.c  1
-rw-r--r--  net/rds/send.c  2
-rw-r--r--  net/rds/stats.c  1
-rw-r--r--  net/rds/tcp.c  1
-rw-r--r--  net/rds/threads.c  1
-rw-r--r--  net/rds/xlist.h  80
-rw-r--r--  net/rfkill/core.c  2
-rw-r--r--  net/rfkill/input.c  1
-rw-r--r--  net/rfkill/rfkill-gpio.c  11
-rw-r--r--  net/rfkill/rfkill-regulator.c  1
-rw-r--r--  net/rose/rose_route.c  1
-rw-r--r--  net/rxrpc/ar-output.c  1
-rw-r--r--  net/rxrpc/ar-recvmsg.c  1
-rw-r--r--  net/sched/act_api.c  1
-rw-r--r--  net/sched/cls_flow.c  189
-rw-r--r--  net/sched/sch_mq.c  1
-rw-r--r--  net/sched/sch_mqprio.c  1
-rw-r--r--  net/sched/sch_sfb.c  13
-rw-r--r--  net/sctp/associola.c  1
-rw-r--r--  net/sctp/ipv6.c  2
-rw-r--r--  net/sctp/outqueue.c  4
-rw-r--r--  net/sctp/proc.c  1
-rw-r--r--  net/sctp/protocol.c  2
-rw-r--r--  net/sctp/sm_make_chunk.c  1
-rw-r--r--  net/sctp/sm_statefuns.c  5
-rw-r--r--  net/sctp/socket.c  1
-rw-r--r--  net/socket.c  4
-rw-r--r--  net/sunrpc/addr.c  7
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c  28
-rw-r--r--  net/sunrpc/auth_unix.c  3
-rw-r--r--  net/sunrpc/backchannel_rqst.c  1
-rw-r--r--  net/sunrpc/clnt.c  4
-rw-r--r--  net/sunrpc/rpc_pipe.c  23
-rw-r--r--  net/sunrpc/rpcb_clnt.c  94
-rw-r--r--  net/sunrpc/socklib.c  1
-rw-r--r--  net/sunrpc/sunrpc_syms.c  3
-rw-r--r--  net/sunrpc/svc.c  86
-rw-r--r--  net/sunrpc/svc_xprt.c  14
-rw-r--r--  net/sunrpc/svcsock.c  24
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  1
-rw-r--r--  net/sysctl_net.c  1
-rw-r--r--  net/tipc/bcast.c  111
-rw-r--r--  net/tipc/bcast.h  1
-rw-r--r--  net/tipc/bearer.c  8
-rw-r--r--  net/tipc/bearer.h  4
-rw-r--r--  net/tipc/config.h  1
-rw-r--r--  net/tipc/core.c  2
-rw-r--r--  net/tipc/discover.c  6
-rw-r--r--  net/tipc/eth_media.c  32
-rw-r--r--  net/tipc/link.c  111
-rw-r--r--  net/tipc/link.h  1
-rw-r--r--  net/tipc/name_distr.c  35
-rw-r--r--  net/tipc/net.c  11
-rw-r--r--  net/tipc/node.c  45
-rw-r--r--  net/tipc/node.h  10
-rw-r--r--  net/tipc/socket.c  52
-rw-r--r--  net/tipc/subscr.c  3
-rw-r--r--  net/tipc/subscr.h  6
-rw-r--r--  net/unix/af_unix.c  24
-rw-r--r--  net/wanrouter/wanproc.c  2
-rw-r--r--  net/wimax/op-msg.c  1
-rw-r--r--  net/wimax/op-reset.c  1
-rw-r--r--  net/wimax/op-rfkill.c  1
-rw-r--r--  net/wimax/stack.c  1
-rw-r--r--  net/wireless/core.c  2
-rw-r--r--  net/wireless/core.h  6
-rw-r--r--  net/wireless/ibss.c  1
-rw-r--r--  net/wireless/lib80211.c  15
-rw-r--r--  net/wireless/lib80211_crypt_ccmp.c  2
-rw-r--r--  net/wireless/lib80211_crypt_tkip.c  4
-rw-r--r--  net/wireless/lib80211_crypt_wep.c  4
-rw-r--r--  net/wireless/mesh.c  4
-rw-r--r--  net/wireless/mlme.c  16
-rw-r--r--  net/wireless/nl80211.c  405
-rw-r--r--  net/wireless/nl80211.h  4
-rw-r--r--  net/wireless/radiotap.c  1
-rw-r--r--  net/wireless/reg.c  49
-rw-r--r--  net/wireless/reg.h  2
-rw-r--r--  net/wireless/scan.c  28
-rw-r--r--  net/wireless/sme.c  20
-rw-r--r--  net/wireless/util.c  195
-rw-r--r--  net/wireless/wext-compat.c  138
-rw-r--r--  net/wireless/wext-compat.h  8
-rw-r--r--  net/wireless/wext-core.c  1
-rw-r--r--  net/wireless/wext-sme.c  4
-rw-r--r--  net/wireless/wext-spy.c  1
-rw-r--r--  net/x25/af_x25.c  11
-rw-r--r--  net/x25/x25_proc.c  1
-rw-r--r--  net/xfrm/xfrm_ipcomp.c  13
-rw-r--r--  net/xfrm/xfrm_proc.c  1
-rw-r--r--  net/xfrm/xfrm_replay.c  99
-rw-r--r--  net/xfrm/xfrm_user.c  4
448 files changed, 15558 insertions, 6379 deletions
diff --git a/net/802/fc.c b/net/802/fc.c
index 1e49f2d..bd345f3 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -27,6 +27,7 @@
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <net/arp.h>
/*
diff --git a/net/802/garp.c b/net/802/garp.c
index 1610295..8e21b6d 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/llc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/garp.h>
@@ -553,7 +554,7 @@ static void garp_release_port(struct net_device *dev)
if (rtnl_dereference(port->applicants[i]))
return;
}
- rcu_assign_pointer(dev->garp_port, NULL);
+ RCU_INIT_POINTER(dev->garp_port, NULL);
kfree_rcu(port, rcu);
}
@@ -605,7 +606,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
ASSERT_RTNL();
- rcu_assign_pointer(port->applicants[appl->type], NULL);
+ RCU_INIT_POINTER(port->applicants[appl->type], NULL);
/* Delete timer and generate a final TRANSMIT_PDU event to flush out
* all pending messages before the applicant is gone. */
diff --git a/net/802/stp.c b/net/802/stp.c
index 978c30b..15540b7 100644
--- a/net/802/stp.c
+++ b/net/802/stp.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <linux/llc.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/stp.h>
@@ -88,9 +89,9 @@ void stp_proto_unregister(const struct stp_proto *proto)
{
mutex_lock(&stp_proto_mutex);
if (is_zero_ether_addr(proto->group_address))
- rcu_assign_pointer(stp_proto, NULL);
+ RCU_INIT_POINTER(stp_proto, NULL);
else
- rcu_assign_pointer(garp_protos[proto->group_address[5] -
+ RCU_INIT_POINTER(garp_protos[proto->group_address[5] -
GARP_ADDR_MIN], NULL);
synchronize_rcu();
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 8970ba1..5471628 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -133,7 +133,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
if (grp->nr_vlans == 0) {
vlan_gvrp_uninit_applicant(real_dev);
- rcu_assign_pointer(real_dev->vlgrp, NULL);
+ RCU_INIT_POINTER(real_dev->vlgrp, NULL);
/* Free the group, after all cpu's are done. */
call_rcu(&grp->rcu, vlan_rcu_free);
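The rcu_assign_pointer() to RCU_INIT_POINTER() conversions in garp.c above (and in stp.c and vlan.c) all share one property: the value being stored is NULL. Publishing a live object needs the write barrier that rcu_assign_pointer() implies, so readers never see a half-initialized structure; storing NULL publishes nothing a reader could dereference, so the barrier-free RCU_INIT_POINTER() is sufficient and cheaper. A minimal sketch of the pattern (struct my_obj and the dev->my_ptr field are hypothetical stand-ins):

        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        obj->val = 42;
        /* Publish: the barrier orders the init above before the store. */
        rcu_assign_pointer(dev->my_ptr, obj);

        /* Retract: NULL carries no payload, so no barrier is needed. */
        RCU_INIT_POINTER(dev->my_ptr, NULL);
        /* Free only after all pre-existing RCU readers have finished. */
        kfree_rcu(obj, rcu);    /* or synchronize_rcu(); kfree(obj); */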
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index f1f2f7b..f5ffc02 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -2,9 +2,10 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
+#include <linux/export.h>
#include "vlan.h"
-bool vlan_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
struct sk_buff *skb = *skbp;
u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -13,7 +14,10 @@ bool vlan_do_receive(struct sk_buff **skbp)
vlan_dev = vlan_find_dev(skb->dev, vlan_id);
if (!vlan_dev) {
- if (vlan_id)
+ /* Only the last call to vlan_do_receive() should change
+ * pkt_type to PACKET_OTHERHOST
+ */
+ if (vlan_id && last_handler)
skb->pkt_type = PACKET_OTHERHOST;
return false;
}
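The new last_handler argument moves the PACKET_OTHERHOST decision to the caller: only the final attempt to deliver a tagged frame may declare it not-for-us, since an earlier vlan_do_receive() call would otherwise mark a packet that a later handler could still accept. A hedged sketch of the calling convention (the loop shape is illustrative, not the exact __netif_receive_skb() code):

        /* more_rounds is true while further handlers may still claim
         * the skb; only the last call may flip pkt_type. */
        if (vlan_tx_tag_present(skb) &&
            vlan_do_receive(&skb, !more_rounds))
                goto deliver_vlan;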
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9d40a07..bc25286 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,10 +470,12 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
+ }
}
static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
@@ -610,7 +612,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
- return dev_ethtool_get_settings(vlan->real_dev, cmd);
+
+ return __ethtool_get_settings(vlan->real_dev, cmd);
}
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
@@ -674,7 +677,6 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = vlan_dev_set_mac_address,
.ndo_set_rx_mode = vlan_dev_set_rx_mode,
- .ndo_set_multicast_list = vlan_dev_set_rx_mode,
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index be9a5c1..235c219 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
diff --git a/net/9p/client.c b/net/9p/client.c
index 0505a03..854ca7a 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -38,6 +38,9 @@
#include <net/9p/transport.h>
#include "protocol.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
/*
* Client Option Parsing (code inspired by NFS code)
* - a little lazy - parse all client options
@@ -123,21 +126,19 @@ static int parse_opts(char *opts, struct p9_client *clnt)
options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
- int token;
+ int token, r;
if (!*p)
continue;
token = match_token(p, tokens, args);
- if (token < Opt_trans) {
- int r = match_int(&args[0], &option);
+ switch (token) {
+ case Opt_msize:
+ r = match_int(&args[0], &option);
if (r < 0) {
P9_DPRINTK(P9_DEBUG_ERROR,
- "integer field, but no integer?\n");
+ "integer field, but no integer?\n");
ret = r;
continue;
}
- }
- switch (token) {
- case Opt_msize:
clnt->msize = option;
break;
case Opt_trans:
@@ -203,11 +204,13 @@ free_and_return:
*
*/
-static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
+static struct p9_req_t *
+p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size)
{
unsigned long flags;
int row, col;
struct p9_req_t *req;
+ int alloc_msize = min(c->msize, max_size);
/* This looks up the original request by tag so we know which
* buffer to read the data into */
@@ -245,23 +248,10 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
return ERR_PTR(-ENOMEM);
}
init_waitqueue_head(req->wq);
- if ((c->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- int alloc_msize = min(c->msize, 4096);
- req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->tc->capacity = alloc_msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_NOFS);
- req->rc->capacity = alloc_msize;
- } else {
- req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->tc->capacity = c->msize;
- req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_NOFS);
- req->rc->capacity = c->msize;
- }
+ req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
+ req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize,
+ GFP_NOFS);
if ((!req->tc) || (!req->rc)) {
printk(KERN_ERR "Couldn't grow tag array\n");
kfree(req->tc);
@@ -271,6 +261,8 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
req->wq = NULL;
return ERR_PTR(-ENOMEM);
}
+ req->tc->capacity = alloc_msize;
+ req->rc->capacity = alloc_msize;
req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall);
req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall);
}
@@ -475,37 +467,22 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
int ecode;
err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after p9_parse_header, which populates pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
if (err) {
P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
return err;
}
-
if (type != P9_RERROR && type != P9_RLERROR)
return 0;
if (!p9_is_proto_dotl(c)) {
char *ename;
-
- if (req->tc->pbuf_size) {
- /* Handle user buffers */
- size_t len = req->rc->size - req->rc->offset;
- if (req->tc->pubuf) {
- /* User Buffer */
- err = copy_from_user(
- &req->rc->sdata[req->rc->offset],
- req->tc->pubuf, len);
- if (err) {
- err = -EFAULT;
- goto out_err;
- }
- } else {
- /* Kernel Buffer */
- memmove(&req->rc->sdata[req->rc->offset],
- req->tc->pkbuf, len);
- }
- }
err = p9pdu_readf(req->rc, c->proto_version, "s?d",
- &ename, &ecode);
+ &ename, &ecode);
if (err)
goto out_err;
@@ -515,11 +492,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (!err || !IS_ERR_VALUE(err)) {
err = p9_errstr2errno(ename, strlen(ename));
- P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode,
- ename);
-
- kfree(ename);
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
}
+ kfree(ename);
} else {
err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
err = -ecode;
@@ -527,7 +503,6 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
}
-
return err;
out_err:
@@ -536,6 +511,115 @@ out_err:
return err;
}
+/**
+ * p9_check_zc_errors - check 9p packet for error return and process it
+ * @c: current client instance
+ * @req: request to parse and check for error conditions
+ * @in_hdrlen: Size of response protocol buffer.
+ *
+ * returns error code if one is discovered, otherwise returns 0
+ *
+ * this will have to be more complicated if we have multiple
+ * error packet types
+ */
+
+static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
+ char *uidata, int in_hdrlen, int kern_buf)
+{
+ int err;
+ int ecode;
+ int8_t type;
+ char *ename = NULL;
+
+ err = p9_parse_header(req->rc, NULL, &type, NULL, 0);
+ /*
+ * dump the response from server
+ * This should be after parse_header, which populates pdu_fcall.
+ */
+ trace_9p_protocol_dump(c, req->rc);
+ if (err) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse header %d\n", err);
+ return err;
+ }
+
+ if (type != P9_RERROR && type != P9_RLERROR)
+ return 0;
+
+ if (!p9_is_proto_dotl(c)) {
+ /* Error is reported in string format */
+ uint16_t len;
+ /* 7 = header size for RERROR, 2 is the size of string len; */
+ int inline_len = in_hdrlen - (7 + 2);
+
+ /* Read the size of error string */
+ err = p9pdu_readf(req->rc, c->proto_version, "w", &len);
+ if (err)
+ goto out_err;
+
+ ename = kmalloc(len + 1, GFP_NOFS);
+ if (!ename) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ if (len <= inline_len) {
+ /* We have error in protocol buffer itself */
+ if (pdu_read(req->rc, ename, len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ } else {
+ /*
+ * Part of the data is in user space buffer.
+ */
+ if (pdu_read(req->rc, ename, inline_len)) {
+ err = -EFAULT;
+ goto out_free;
+
+ }
+ if (kern_buf) {
+ memcpy(ename + inline_len, uidata,
+ len - inline_len);
+ } else {
+ err = copy_from_user(ename + inline_len,
+ uidata, len - inline_len);
+ if (err) {
+ err = -EFAULT;
+ goto out_free;
+ }
+ }
+ }
+ ename[len] = 0;
+ if (p9_is_proto_dotu(c)) {
+ /* For dotu we also have error code */
+ err = p9pdu_readf(req->rc,
+ c->proto_version, "d", &ecode);
+ if (err)
+ goto out_free;
+ err = -ecode;
+ }
+ if (!err || !IS_ERR_VALUE(err)) {
+ err = p9_errstr2errno(ename, strlen(ename));
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
+ -ecode, ename);
+ }
+ kfree(ename);
+ } else {
+ err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode);
+ err = -ecode;
+
+ P9_DPRINTK(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode);
+ }
+ return err;
+
+out_free:
+ kfree(ename);
+out_err:
+ P9_DPRINTK(P9_DEBUG_ERROR, "couldn't parse error %d\n", err);
+ return err;
+}
+
static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
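The inline/overflow split in p9_check_zc_errors() above is easiest to see with concrete (hypothetical) numbers. RERROR is laid out as size[4] type[1] tag[2] len[2] ename[len], which is where the "7 + 2" comes from: with in_hdrlen = 24 the inline region holds inline_len = 24 - (7 + 2) = 15 bytes of ename, so for len = 40 the first 15 bytes are read out of the protocol buffer via pdu_read() and the remaining 40 - 15 = 25 bytes are fetched from the zero-copy buffer uidata (memcpy() for a kernel buffer, copy_from_user() otherwise).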
@@ -579,23 +663,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
return 0;
}
-/**
- * p9_client_rpc - issue a request and wait for a response
- * @c: client session
- * @type: type of request
- * @fmt: protocol format string (see protocol.c)
- *
- * Returns request structure (which client must free using p9_free_req)
- */
-
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
+ int8_t type, int req_size,
+ const char *fmt, va_list ap)
{
- va_list ap;
int tag, err;
struct p9_req_t *req;
- unsigned long flags;
- int sigpending;
P9_DPRINTK(P9_DEBUG_MUX, "client %p op %d\n", c, type);
@@ -607,12 +680,6 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if ((c->status == BeginDisconnect) && (type != P9_TCLUNK))
return ERR_PTR(-EIO);
- if (signal_pending(current)) {
- sigpending = 1;
- clear_thread_flag(TIF_SIGPENDING);
- } else
- sigpending = 0;
-
tag = P9_NOTAG;
if (type != P9_TVERSION) {
tag = p9_idpool_get(c->tagpool);
@@ -620,18 +687,51 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
return ERR_PTR(-ENOMEM);
}
- req = p9_tag_alloc(c, tag);
+ req = p9_tag_alloc(c, tag, req_size);
if (IS_ERR(req))
return req;
/* marshall the data */
p9pdu_prepare(req->tc, tag, type);
- va_start(ap, fmt);
err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap);
- va_end(ap);
if (err)
goto reterr;
- p9pdu_finalize(req->tc);
+ p9pdu_finalize(c, req->tc);
+ trace_9p_client_req(c, type, tag);
+ return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
+ return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
err = c->trans_mod->request(c, req);
if (err < 0) {
@@ -639,18 +739,14 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
c->status = Disconnected;
goto reterr;
}
-
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d\n", req->wq, tag);
+ /* Wait for the response */
err = wait_event_interruptible(*req->wq,
- req->status >= REQ_STATUS_RCVD);
- P9_DPRINTK(P9_DEBUG_MUX, "wait %p tag: %d returned %d\n",
- req->wq, tag, err);
+ req->status >= REQ_STATUS_RCVD);
if (req->status == REQ_STATUS_ERROR) {
P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
err = req->t_err;
}
-
if ((err == -ERESTARTSYS) && (c->status == Connected)) {
P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
sigpending = 1;
@@ -663,25 +759,102 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
if (req->status == REQ_STATUS_RCVD)
err = 0;
}
-
if (sigpending) {
spin_lock_irqsave(&current->sighand->siglock, flags);
recalc_sigpending();
spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
-
if (err < 0)
goto reterr;
err = p9_check_errors(c, req);
- if (!err) {
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d\n", c, type);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
+ return req;
+reterr:
+ p9_free_req(c, req);
+ return ERR_PTR(err);
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @in_hdrlen: read header size; this is the size of the response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ char *uidata, char *uodata,
+ int inlen, int olen, int in_hdrlen,
+ int kern_buf, const char *fmt, ...)
+{
+ va_list ap;
+ int sigpending, err;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ va_start(ap, fmt);
+ /*
+ * We allocate an inline protocol buffer of only 4k bytes.
+ * The actual content is passed in zero-copy fashion.
+ */
+ req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+ va_end(ap);
+ if (IS_ERR(req))
return req;
+
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ } else
+ sigpending = 0;
+
+ /* If we are called with KERNEL_DS force kern_buf */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ kern_buf = 1;
+
+ err = c->trans_mod->zc_request(c, req, uidata, uodata,
+ inlen, olen, in_hdrlen, kern_buf);
+ if (err < 0) {
+ if (err == -EIO)
+ c->status = Disconnected;
+ goto reterr;
+ }
+ if (req->status == REQ_STATUS_ERROR) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+ err = req->t_err;
+ }
+ if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "flushing\n");
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+
+ if (c->trans_mod->cancel(c, req))
+ p9_client_flush(c, req);
+
+ /* if we received the response anyway, don't signal error */
+ if (req->status == REQ_STATUS_RCVD)
+ err = 0;
+ }
+ if (sigpending) {
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
+ if (err < 0)
+ goto reterr;
+ err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf);
+ trace_9p_client_res(c, type, req->rc->tag, err);
+ if (!err)
+ return req;
reterr:
- P9_DPRINTK(P9_DEBUG_MUX, "exit: client %p op %d error: %d\n", c, type,
- err);
p9_free_req(c, req);
return ERR_PTR(err);
}
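Both p9_client_rpc() and the new p9_client_zc_rpc() bracket the transport call with the same signal dance; the likely rationale is that a signal arriving mid-request must not abort the transport halfway through a send (which would desynchronize the 9p stream), yet it must not be lost either. The idiom in isolation:

        if (signal_pending(current)) {
                sigpending = 1;
                /* keep blocking transport ops from bailing out early */
                clear_thread_flag(TIF_SIGPENDING);
        } else
                sigpending = 0;

        /* ... issue the request and wait for the response ... */

        if (sigpending) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();    /* re-arm the deferred signal */
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }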
@@ -769,7 +942,7 @@ static int p9_client_version(struct p9_client *c)
err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
if (err) {
P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(c, req->rc);
goto error;
}
@@ -906,15 +1079,14 @@ EXPORT_SYMBOL(p9_client_begin_disconnect);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
char *uname, u32 n_uname, char *aname)
{
- int err;
+ int err = 0;
struct p9_req_t *req;
struct p9_fid *fid;
struct p9_qid qid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
- afid ? afid->fid : -1, uname, aname);
- err = 0;
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n",
+ afid ? afid->fid : -1, uname, aname);
fid = p9_fid_create(clnt);
if (IS_ERR(fid)) {
err = PTR_ERR(fid);
@@ -931,7 +1103,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -991,7 +1163,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1058,7 +1230,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1101,7 +1273,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1146,7 +1318,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1185,7 +1357,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1330,13 +1502,15 @@ int
p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
u32 count)
{
- int err, rsize;
- struct p9_client *clnt;
- struct p9_req_t *req;
char *dataptr;
+ int kernel_buf = 0;
+ struct p9_req_t *req;
+ struct p9_client *clnt;
+ int err, rsize, non_zc = 0;
+
- P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", fid->fid,
- (long long unsigned) offset, count);
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ fid->fid, (long long unsigned) offset, count);
err = 0;
clnt = fid->clnt;
@@ -1348,13 +1522,24 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
rsize = count;
/* Don't bother zerocopy for small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
- rsize, data, udata);
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *indata;
+ if (data) {
+ kernel_buf = 1;
+ indata = data;
+ } else
+ indata = (char *)udata;
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0,
+ 11, kernel_buf, "dqd", fid->fid,
+ offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset,
- rsize);
+ rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
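The literal 11 passed as in_hdrlen above decodes as follows: every 9p reply starts with size[4] + type[1] + tag[2] = 7 bytes of PDU header, and RREAD adds a count[4] field, so the first 7 + 4 = 11 bytes are protocol data that must stay in the inline buffer; everything past byte 11 is file payload that can land directly in the caller's buffer without an extra copy.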
@@ -1363,14 +1548,13 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
- P9_DUMP_PKT(1, req->rc);
- if (!req->tc->pbuf_size) {
+ if (non_zc) {
if (data) {
memmove(data, dataptr, count);
} else {
@@ -1396,6 +1580,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
u64 offset, u32 count)
{
int err, rsize;
+ int kernel_buf = 0;
struct p9_client *clnt;
struct p9_req_t *req;
@@ -1411,19 +1596,24 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
if (count < rsize)
rsize = count;
- /* Don't bother zerocopy form small IO (< 1024) */
- if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
- req = p9_client_rpc(clnt, P9_TWRITE, "dqE", fid->fid, offset,
- rsize, data, udata);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ char *odata;
+ if (data) {
+ kernel_buf = 1;
+ odata = data;
+ } else
+ odata = (char *)udata;
+ req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize,
+ P9_ZC_HDR_SZ, kernel_buf, "dqd",
+ fid->fid, offset, rsize);
} else {
-
if (data)
req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid,
- offset, rsize, data);
+ offset, rsize, data);
else
req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid,
- offset, rsize, udata);
+ offset, rsize, udata);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1432,7 +1622,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
@@ -1472,7 +1662,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1523,7 +1713,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1671,7 +1861,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto error;
}
@@ -1778,7 +1968,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
}
err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
p9_free_req(clnt, req);
goto clunk_fid;
}
@@ -1824,7 +2014,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
{
- int err, rsize;
+ int err, rsize, non_zc = 0;
struct p9_client *clnt;
struct p9_req_t *req;
char *dataptr;
@@ -1842,13 +2032,18 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
if (count < rsize)
rsize = count;
- if ((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
- P9_TRANS_PREF_PAYLOAD_SEP) {
- req = p9_client_rpc(clnt, P9_TREADDIR, "dqF", fid->fid,
- offset, rsize, data);
+ /* Don't bother zerocopy for small IO (< 1024) */
+ if (clnt->trans_mod->zc_request && rsize > 1024) {
+ /*
+ * response header len is 11
+ * PDU Header(7) + IO Size (4)
+ */
+ req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0,
+ 11, 1, "dqd", fid->fid, offset, rsize);
} else {
+ non_zc = 1;
req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid,
- offset, rsize);
+ offset, rsize);
}
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -1857,13 +2052,13 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
- if (!req->tc->pbuf_size && data)
+ if (non_zc)
memmove(data, dataptr, count);
p9_free_req(clnt, req);
@@ -1894,7 +2089,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1925,7 +2120,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1960,7 +2155,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1993,7 +2188,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
&glock->start, &glock->length, &glock->proc_id,
&glock->client_id);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -2021,7 +2216,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
if (err) {
- P9_DUMP_PKT(1, req->rc);
+ trace_9p_protocol_dump(clnt, req->rc);
goto error;
}
P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index df58375..55e10a9 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -37,40 +37,11 @@
#include <net/9p/client.h>
#include "protocol.h"
+#include <trace/events/9p.h>
+
static int
p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
-#ifdef CONFIG_NET_9P_DEBUG
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
- int len = pdu->size;
-
- if ((p9_debug_level & P9_DEBUG_VPKT) != P9_DEBUG_VPKT) {
- if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) {
- if (len > 32)
- len = 32;
- } else {
- /* shouldn't happen */
- return;
- }
- }
-
- if (way)
- print_hex_dump_bytes("[9P] ", DUMP_PREFIX_OFFSET, pdu->sdata,
- len);
- else
- print_hex_dump_bytes("]9P[ ", DUMP_PREFIX_OFFSET, pdu->sdata,
- len);
-}
-#else
-void
-p9pdu_dump(int way, struct p9_fcall *pdu)
-{
-}
-#endif
-EXPORT_SYMBOL(p9pdu_dump);
-
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
@@ -81,7 +52,7 @@ void p9stat_free(struct p9_wstat *stbuf)
}
EXPORT_SYMBOL(p9stat_free);
-static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
{
size_t len = min(pdu->size - pdu->offset, size);
memcpy(data, &pdu->sdata[pdu->offset], len);
@@ -108,26 +79,6 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
return size - len;
}
-static size_t
-pdu_write_urw(struct p9_fcall *pdu, const char *kdata, const char __user *udata,
- size_t size)
-{
- BUG_ON(pdu->size > P9_IOHDRSZ);
- pdu->pubuf = (char __user *)udata;
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
-static size_t
-pdu_write_readdir(struct p9_fcall *pdu, const char *kdata, size_t size)
-{
- BUG_ON(pdu->size > P9_READDIRHDRSZ);
- pdu->pkbuf = (char *)kdata;
- pdu->pbuf_size = size;
- return 0;
-}
-
/*
b - int8_t
w - int16_t
@@ -459,26 +410,6 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
errcode = -EFAULT;
}
break;
- case 'E':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- const char __user *u = va_arg(ap,
- const void __user *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_urw(pdu, k, u, cnt))
- errcode = -EFAULT;
- }
- break;
- case 'F':{
- int32_t cnt = va_arg(ap, int32_t);
- const char *k = va_arg(ap, const void *);
- errcode = p9pdu_writef(pdu, proto_version, "d",
- cnt);
- if (!errcode && pdu_write_readdir(pdu, k, cnt))
- errcode = -EFAULT;
- }
- break;
case 'U':{
int32_t count = va_arg(ap, int32_t);
const char __user *udata =
@@ -591,7 +522,7 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...)
return ret;
}
-int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
+int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st)
{
struct p9_fcall fake_pdu;
int ret;
@@ -601,10 +532,10 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
- P9_DUMP_PKT(0, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
}
return ret;
@@ -617,7 +548,7 @@ int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type)
return p9pdu_writef(pdu, 0, "dbw", 0, type, tag);
}
-int p9pdu_finalize(struct p9_fcall *pdu)
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu)
{
int size = pdu->size;
int err;
@@ -626,7 +557,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
err = p9pdu_writef(pdu, 0, "d", size);
pdu->size = size;
- P9_DUMP_PKT(0, pdu);
+ trace_9p_protocol_dump(clnt, pdu);
P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
pdu->id, pdu->tag);
@@ -637,14 +568,10 @@ void p9pdu_reset(struct p9_fcall *pdu)
{
pdu->offset = 0;
pdu->size = 0;
- pdu->private = NULL;
- pdu->pubuf = NULL;
- pdu->pkbuf = NULL;
- pdu->pbuf_size = 0;
}
-int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
- int proto_version)
+int p9dirent_read(struct p9_client *clnt, char *buf, int len,
+ struct p9_dirent *dirent)
{
struct p9_fcall fake_pdu;
int ret;
@@ -655,11 +582,11 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
fake_pdu.sdata = buf;
fake_pdu.offset = 0;
- ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid,
- &dirent->d_off, &dirent->d_type, &nameptr);
+ ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid,
+ &dirent->d_off, &dirent->d_type, &nameptr);
if (ret) {
P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
- P9_DUMP_PKT(1, &fake_pdu);
+ trace_9p_protocol_dump(clnt, &fake_pdu);
goto out;
}
diff --git a/net/9p/protocol.h b/net/9p/protocol.h
index 2431c0f..2cc525fa 100644
--- a/net/9p/protocol.h
+++ b/net/9p/protocol.h
@@ -29,6 +29,6 @@ int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
va_list ap);
int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
-int p9pdu_finalize(struct p9_fcall *pdu);
-void p9pdu_dump(int, struct p9_fcall *);
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
void p9pdu_reset(struct p9_fcall *pdu);
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9a70ebd..de8df95 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -21,30 +21,25 @@
/**
* p9_release_req_pages - Release pages after the transaction.
- * @*private: PDU's private page of struct trans_rpage_info
*/
-void
-p9_release_req_pages(struct trans_rpage_info *rpinfo)
+void p9_release_pages(struct page **pages, int nr_pages)
{
int i = 0;
-
- while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
- put_page(rpinfo->rp_data[i]);
+ while (pages[i] && nr_pages--) {
+ put_page(pages[i]);
i++;
}
}
-EXPORT_SYMBOL(p9_release_req_pages);
+EXPORT_SYMBOL(p9_release_pages);
/**
* p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
-int
-p9_nr_pages(struct p9_req_t *req)
+int p9_nr_pages(char *data, int len)
{
unsigned long start_page, end_page;
- start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
- end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
- PAGE_SIZE - 1) >> PAGE_SHIFT;
+ start_page = (unsigned long)data >> PAGE_SHIFT;
+ end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
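The rounding in p9_nr_pages() accounts for buffers that straddle a page boundary. A worked example with 4 KiB pages: data = 0x1ff0 and len = 0x20 give start_page = 0x1ff0 >> 12 = 1 and end_page = (0x1ff0 + 0x20 + 0xfff) >> 12 = 3, so this 32-byte buffer still needs 3 - 1 = 2 pages.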
@@ -58,35 +53,17 @@ EXPORT_SYMBOL(p9_nr_pages);
* @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
-int
-p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
- int nr_pages, u8 rw)
-{
- uint32_t first_page_bytes = 0;
- int32_t pdata_mapped_pages;
- struct trans_rpage_info *rpinfo;
-
- *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
- if (*pdata_off)
- first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
- req->tc->pbuf_size);
+int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write)
+{
+ int nr_mapped_pages;
- rpinfo = req->tc->private;
- pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
- nr_pages, rw, &rpinfo->rp_data[0]);
- if (pdata_mapped_pages <= 0)
- return pdata_mapped_pages;
+ nr_mapped_pages = get_user_pages_fast((unsigned long)data,
+ *nr_pages, write, pages);
+ if (nr_mapped_pages <= 0)
+ return nr_mapped_pages;
- rpinfo->rp_nr_pages = pdata_mapped_pages;
- if (*pdata_off) {
- *pdata_len = first_page_bytes;
- *pdata_len += min((req->tc->pbuf_size - *pdata_len),
- ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
- } else {
- *pdata_len = min(req->tc->pbuf_size,
- (size_t)pdata_mapped_pages << PAGE_SHIFT);
- }
+ *nr_pages = nr_mapped_pages;
return 0;
}
EXPORT_SYMBOL(p9_payload_gup);
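Together the reworked helpers give a transport a simple pin/use/release lifecycle for a user buffer. A hedged usage sketch (error handling trimmed; write = 1 means the pinned pages will be stored into, as for a 9p read):

        int nr_pages = p9_nr_pages(udata, len);
        struct page **pages = kmalloc(sizeof(struct page *) * nr_pages,
                                      GFP_NOFS);

        err = p9_payload_gup(udata, &nr_pages, pages, write);
        /* ... pack the pinned pages into a scatter/gather list ... */
        p9_release_pages(pages, nr_pages);
        kfree(pages);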
diff --git a/net/9p/trans_common.h b/net/9p/trans_common.h
index 7630922..173bb55 100644
--- a/net/9p/trans_common.h
+++ b/net/9p/trans_common.h
@@ -12,21 +12,6 @@
*
*/
-/* TRUE if it is user context */
-#define P9_IS_USER_CONTEXT (!segment_eq(get_fs(), KERNEL_DS))
-
-/**
- * struct trans_rpage_info - To store mapped page information in PDU.
- * @rp_alloc:Set if this structure is allocd, not a reuse unused space in pdu.
- * @rp_nr_pages: Number of mapped pages
- * @rp_data: Array of page pointers
- */
-struct trans_rpage_info {
- u8 rp_alloc;
- int rp_nr_pages;
- struct page *rp_data[0];
-};
-
-void p9_release_req_pages(struct trans_rpage_info *);
-int p9_payload_gup(struct p9_req_t *, size_t *, int *, int, u8);
-int p9_nr_pages(struct p9_req_t *);
+void p9_release_pages(struct page **, int);
+int p9_payload_gup(char *, int *, struct page **, int);
+int p9_nr_pages(char *, int);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index e317583..32aa983 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -150,12 +150,10 @@ static void req_done(struct virtqueue *vq)
while (1) {
spin_lock_irqsave(&chan->lock, flags);
rc = virtqueue_get_buf(chan->vq, &len);
-
if (rc == NULL) {
spin_unlock_irqrestore(&chan->lock, flags);
break;
}
-
chan->ring_bufs_avail = 1;
spin_unlock_irqrestore(&chan->lock, flags);
/* Wakeup if anyone waiting for VirtIO ring space. */
@@ -163,17 +161,6 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
req = p9_tag_lookup(chan->client, rc->tag);
- if (req->tc->private) {
- struct trans_rpage_info *rp = req->tc->private;
- int p = rp->rp_nr_pages;
- /*Release pages */
- p9_release_req_pages(rp);
- atomic_sub(p, &vp_pinned);
- wake_up(&vp_wq);
- if (rp->rp_alloc)
- kfree(rp);
- req->tc->private = NULL;
- }
req->status = REQ_STATUS_RCVD;
p9_client_cb(chan->client, req);
}
@@ -193,9 +180,8 @@ static void req_done(struct virtqueue *vq)
*
*/
-static int
-pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
- int count)
+static int pack_sg_list(struct scatterlist *sg, int start,
+ int limit, char *data, int count)
{
int s;
int index = start;
@@ -224,31 +210,36 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
- * @pdata_off: Offset into the first page
* @**pdata: a list of pages to add into sg.
+ * @nr_pages: number of pages to pack into the scatter/gather list
+ * @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*/
static int
-pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
- struct page **pdata, int count)
+pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+ struct page **pdata, int nr_pages, char *data, int count)
{
- int s;
- int i = 0;
+ int i = 0, s;
+ int data_off;
int index = start;
- if (pdata_off) {
- s = min((int)(PAGE_SIZE - pdata_off), count);
- sg_set_page(&sg[index++], pdata[i++], s, pdata_off);
- count -= s;
- }
-
- while (count) {
- BUG_ON(index > limit);
- s = min((int)PAGE_SIZE, count);
- sg_set_page(&sg[index++], pdata[i++], s, 0);
+ BUG_ON(nr_pages > (limit - start));
+ /*
+ * if the first page doesn't start at
+ * page boundary find the offset
+ */
+ data_off = offset_in_page(data);
+ while (nr_pages) {
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+ sg_set_page(&sg[index++], pdata[i++], s, data_off);
+ data_off = 0;
+ data += s;
count -= s;
+ nr_pages--;
}
- return index-start;
+ return index - start;
}
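pack_sg_list_p() now derives the first-entry offset from the data pointer itself instead of taking pdata_off from the caller. A worked example with 4 KiB pages (numbers are illustrative): for data starting at page offset 0xf00 with count = 5000, the loop emits three sg entries: rest_of_page(data) = 4096 - 0xf00 = 256 bytes at offset 0xf00, then a full 4096-byte entry at offset 0, then the remaining 5000 - 256 - 4096 = 648 bytes at offset 0 (rest_of_page() is a helper defined elsewhere in trans_virtio.c).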
/**
@@ -261,114 +252,166 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, size_t pdata_off,
static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
- int in, out, inp, outp;
- struct virtio_chan *chan = client->trans;
+ int err;
+ int in, out;
unsigned long flags;
- size_t pdata_off = 0;
- struct trans_rpage_info *rpinfo = NULL;
- int err, pdata_len = 0;
+ struct virtio_chan *chan = client->trans;
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
req->status = REQ_STATUS_SENT;
+req_retry:
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* Handle out VirtIO ring buffers */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
- if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
- int nr_pages = p9_nr_pages(req);
- int rpinfo_size = sizeof(struct trans_rpage_info) +
- sizeof(struct page *) * nr_pages;
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity);
- if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
- err = wait_event_interruptible(vp_wq,
- atomic_read(&vp_pinned) < chan->p9_max_pages);
+ err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
+ if (err < 0) {
+ if (err == -ENOSPC) {
+ chan->ring_bufs_avail = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ err = wait_event_interruptible(*chan->vc_wq,
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
return err;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
- }
- if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
- /* We can use sdata */
- req->tc->private = req->tc->sdata + req->tc->size;
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 0;
+			P9_DPRINTK(P9_DEBUG_TRANS, "9p: Retry virtio request\n");
+ goto req_retry;
} else {
- req->tc->private = kmalloc(rpinfo_size, GFP_NOFS);
- if (!req->tc->private) {
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: "
- "private kmalloc returned NULL");
- return -ENOMEM;
- }
- rpinfo = (struct trans_rpage_info *)req->tc->private;
- rpinfo->rp_alloc = 1;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ return -EIO;
}
+ }
+ virtqueue_kick(chan->vq);
+ spin_unlock_irqrestore(&chan->lock, flags);
- err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
- req->tc->id == P9_TREAD ? 1 : 0);
- if (err < 0) {
- if (rpinfo->rp_alloc)
- kfree(rpinfo);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
+ return 0;
+}
+
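
A toy model of the -ENOSPC handling above: submissions fail while the ring is full and are retried after completions free descriptors. Everything here is a stand-in; none of it is virtio or 9p API.

#include <stdio.h>

#define RING_SIZE 2

static int ring_used;

static int add_buf(void)
{
	if (ring_used == RING_SIZE)
		return -1;	/* -ENOSPC in the real code */
	ring_used++;
	return 0;
}

static void req_done(void)
{
	ring_used--;		/* a completion frees a descriptor */
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++) {
		while (add_buf() < 0) {
			printf("request %d: ring full, waiting\n", i);
			req_done();	/* stand-in for the wakeup */
		}
		printf("request %d: queued\n", i);
	}
	return 0;
}
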
+static int p9_get_mapped_pages(struct virtio_chan *chan,
+ struct page **pages, char *data,
+ int nr_pages, int write, int kern_buf)
+{
+ int err;
+ if (!kern_buf) {
+ /*
+	 * We allow at most p9_max_pages pinned pages; wait here for
+	 * other zc requests to finish and release their pages.
+ */
+ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+ err = wait_event_interruptible(vp_wq,
+ (atomic_read(&vp_pinned) < chan->p9_max_pages));
+ if (err == -ERESTARTSYS)
+ return err;
+ }
+ err = p9_payload_gup(data, &nr_pages, pages, write);
+ if (err < 0)
return err;
- } else {
- atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
+ atomic_add(nr_pages, &vp_pinned);
+ } else {
+ /* kernel buffer, no need to pin pages */
+ int s, index = 0;
+ int count = nr_pages;
+ while (nr_pages) {
+ s = rest_of_page(data);
+ pages[index++] = virt_to_page(data);
+ data += s;
+ nr_pages--;
}
+ nr_pages = count;
}
+ return nr_pages;
+}
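
The page arrays kmalloc'd by the zero-copy path below are sized by p9_nr_pages(); a stand-alone sketch of the calculation it is assumed to perform for a possibly unaligned buffer (not the kernel implementation):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* pages spanned by a buffer of len bytes starting at addr */
static int nr_pages(unsigned long addr, int len)
{
	unsigned long first = addr / PAGE_SIZE;
	unsigned long last = (addr + len - 1) / PAGE_SIZE;

	return last - first + 1;
}

int main(void)
{
	/* 8 KiB starting 100 bytes into a page spans three pages */
	printf("%d\n", nr_pages(PAGE_SIZE + 100, 8192));
	return 0;
}
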
-req_retry_pinned:
- spin_lock_irqsave(&chan->lock, flags);
+/**
+ * p9_virtio_zc_request - issue a zero copy request
+ * @client: client instance issuing the request
+ * @req: request to be issued
+ * @uidata: user buffer that should be used for zero copy read
+ * @uodata: user buffer that should be used for zero copy write
+ * @inlen: read buffer size
+ * @outlen: write buffer size
+ * @in_hdr_len: read header size; this is the size of the response protocol data
+ *
+ */
+static int
+p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
+ char *uidata, char *uodata, int inlen,
+ int outlen, int in_hdr_len, int kern_buf)
+{
+ int in, out, err;
+ unsigned long flags;
+ int in_nr_pages = 0, out_nr_pages = 0;
+ struct page **in_pages = NULL, **out_pages = NULL;
+ struct virtio_chan *chan = client->trans;
- /* Handle out VirtIO ring buffers */
- out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc->sdata,
- req->tc->size);
-
- if (req->tc->pbuf_size && (req->tc->id == P9_TWRITE)) {
- /* We have additional write payload buffer to take care */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
- outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
- req->tc->pbuf_size);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
+
+ if (uodata) {
+ out_nr_pages = p9_nr_pages(uodata, outlen);
+ out_pages = kmalloc(sizeof(struct page *) * out_nr_pages,
+ GFP_NOFS);
+ if (!out_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata,
+ out_nr_pages, 0, kern_buf);
+ if (out_nr_pages < 0) {
+ err = out_nr_pages;
+ kfree(out_pages);
+ out_pages = NULL;
+ goto err_out;
}
- out += outp;
}
-
- /* Handle in VirtIO ring buffers */
- if (req->tc->pbuf_size &&
- ((req->tc->id == P9_TREAD) || (req->tc->id == P9_TREADDIR))) {
- /*
- * Take care of additional Read payload.
- * 11 is the read/write header = PDU Header(7) + IO Size (4).
- * Arrange in such a way that server places header in the
- * alloced memory and payload onto the user buffer.
- */
- inp = pack_sg_list(chan->sg, out,
- VIRTQUEUE_NUM, req->rc->sdata, 11);
- /*
- * Running executables in the filesystem may result in
- * a read request with kernel buffer as opposed to user buffer.
- */
- if (req->tc->pubuf && P9_IS_USER_CONTEXT) {
- in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
- pdata_off, rpinfo->rp_data, pdata_len);
- } else {
- char *pbuf;
- if (req->tc->pubuf)
- pbuf = (__force char *) req->tc->pubuf;
- else
- pbuf = req->tc->pkbuf;
-
- in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
- pbuf, req->tc->pbuf_size);
+ if (uidata) {
+ in_nr_pages = p9_nr_pages(uidata, inlen);
+ in_pages = kmalloc(sizeof(struct page *) * in_nr_pages,
+ GFP_NOFS);
+ if (!in_pages) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata,
+ in_nr_pages, 1, kern_buf);
+ if (in_nr_pages < 0) {
+ err = in_nr_pages;
+ kfree(in_pages);
+ in_pages = NULL;
+ goto err_out;
}
- in += inp;
- } else {
- in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
- req->rc->sdata, req->rc->capacity);
}
+ req->status = REQ_STATUS_SENT;
+req_retry_pinned:
+ spin_lock_irqsave(&chan->lock, flags);
+ /* out data */
+ out = pack_sg_list(chan->sg, 0,
+ VIRTQUEUE_NUM, req->tc->sdata, req->tc->size);
+
+ if (out_pages)
+ out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+ out_pages, out_nr_pages, uodata, outlen);
+ /*
+	 * Take care of the in data.
+	 * For example, TREAD has an in header of 11 bytes:
+	 * the read/write header = PDU header (7) + IO size (4).
+	 * Arrange the buffers so that the server places the header in the
+	 * allocated memory and the payload into the user buffer.
+ */
+ in = pack_sg_list(chan->sg, out,
+ VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len);
+ if (in_pages)
+ in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+ in_pages, in_nr_pages, uidata, inlen);
err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
if (err < 0) {
@@ -376,28 +419,45 @@ req_retry_pinned:
chan->ring_bufs_avail = 0;
spin_unlock_irqrestore(&chan->lock, flags);
err = wait_event_interruptible(*chan->vc_wq,
- chan->ring_bufs_avail);
+ chan->ring_bufs_avail);
if (err == -ERESTARTSYS)
- return err;
+ goto err_out;
 			P9_DPRINTK(P9_DEBUG_TRANS, "9p: Retry virtio request\n");
goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS,
- "9p debug: "
- "virtio rpc add_buf returned failure");
- if (rpinfo && rpinfo->rp_alloc)
- kfree(rpinfo);
- return -EIO;
+ "9p debug: "
+ "virtio rpc add_buf returned failure");
+ err = -EIO;
+ goto err_out;
}
}
-
virtqueue_kick(chan->vq);
spin_unlock_irqrestore(&chan->lock, flags);
-
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
- return 0;
+ err = wait_event_interruptible(*req->wq,
+ req->status >= REQ_STATUS_RCVD);
+ /*
+	 * Non-kernel buffers are pinned; unpin them here
+ */
+err_out:
+ if (!kern_buf) {
+ if (in_pages) {
+ p9_release_pages(in_pages, in_nr_pages);
+ atomic_sub(in_nr_pages, &vp_pinned);
+ }
+ if (out_pages) {
+ p9_release_pages(out_pages, out_nr_pages);
+ atomic_sub(out_nr_pages, &vp_pinned);
+ }
+ /* wakeup anybody waiting for slots to pin pages */
+ wake_up(&vp_wq);
+ }
+ kfree(in_pages);
+ kfree(out_pages);
+ return err;
}
static ssize_t p9_mount_tag_show(struct device *dev,
@@ -591,8 +651,8 @@ static struct p9_trans_module p9_virtio_trans = {
.create = p9_virtio_create,
.close = p9_virtio_close,
.request = p9_virtio_request,
+ .zc_request = p9_virtio_zc_request,
.cancel = p9_virtio_cancel,
-
/*
* We leave one entry for input and one entry for response
 * headers. We also skip one more entry to accommodate address
@@ -600,7 +660,6 @@ static struct p9_trans_module p9_virtio_trans = {
* page in zero copy.
*/
.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
- .pref = P9_TRANS_PREF_PAYLOAD_SEP,
.def = 0,
.owner = THIS_MODULE,
};
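
For the maxsize above: assuming VIRTQUEUE_NUM is 128 and pages are 4 KiB, three descriptors are held back (request header, response header, and the extra page an unaligned zero-copy buffer can spill into), capping the payload at 125 pages:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define VIRTQUEUE_NUM 128	/* assumption; defined in trans_virtio.c */

int main(void)
{
	/* one descriptor each for the request header, the response
	 * header and the possible spill-over page */
	printf("maxsize = %lu bytes\n", PAGE_SIZE * (VIRTQUEUE_NUM - 3));
	return 0;
}
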
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 1acc695..173a2e8 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -39,6 +39,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
int sysctl_aarp_expiry_time = AARP_EXPIRY_TIME;
int sysctl_aarp_tick_time = AARP_TICK_TIME;
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 6ef0e76..b5b1a22 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -14,6 +14,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/atalk.h>
+#include <linux/export.h>
static __inline__ struct atalk_iface *atalk_get_interface_idx(loff_t pos)
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index b1fe7c3..bfa9ab9 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -951,13 +951,12 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
/* checksum stuff in frags */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
-
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 215c9fa..f1964ca 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -643,7 +643,7 @@ static const struct net_device_ops lec_netdev_ops = {
.ndo_start_xmit = lec_start_xmit,
.ndo_change_mtu = lec_change_mtu,
.ndo_tx_timeout = lec_tx_timeout,
- .ndo_set_multicast_list = lec_set_multicast_list,
+ .ndo_set_rx_mode = lec_set_multicast_list,
};
static const unsigned char lec_ctrl_magic[] = {
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 437ee70..3a73491 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <net/sock.h> /* for sock_no_* */
#include "resources.h" /* devs and vccs */
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 754ee47..1281049 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -20,6 +20,7 @@
#include <linux/bitops.h>
#include <net/sock.h> /* for sock_no_* */
#include <linux/uaccess.h>
+#include <linux/export.h>
#include "resources.h"
#include "common.h" /* common for PVCs and SVCs */
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a169084..87fddab 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -38,6 +38,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static ax25_route *ax25_route_list;
static DEFINE_RWLOCK(ax25_route_lock);
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index d349be9..4c83137 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -37,6 +37,7 @@
#include <linux/stat.h>
#include <linux/netfilter.h>
#include <linux/sysctl.h>
+#include <linux/export.h>
#include <net/ip.h>
#include <net/arp.h>
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 2de93d0..ce68611 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,8 +19,8 @@
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += aggregation.o
batman-adv-y += bat_debugfs.o
+batman-adv-y += bat_iv_ogm.o
batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
batman-adv-y += gateway_client.o
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
deleted file mode 100644
index 69467fe..0000000
--- a/net/batman-adv/aggregation.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "translation-table.h"
-#include "aggregation.h"
-#include "send.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-/* return true if new_packet can be aggregated with forw_packet */
-static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
- struct bat_priv *bat_priv,
- int packet_len,
- unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
-{
- struct batman_packet *batman_packet =
- (struct batman_packet *)forw_packet->skb->data;
- int aggregated_bytes = forw_packet->packet_len + packet_len;
- struct hard_iface *primary_if = NULL;
- bool res = false;
-
- /**
- * we can aggregate the current packet to this aggregated packet
- * if:
- *
- * - the send time is within our MAX_AGGREGATION_MS time
- * - the resulting packet wont be bigger than
- * MAX_AGGREGATION_BYTES
- */
-
- if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
- forw_packet->send_time) &&
- (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
-
- /**
- * check aggregation compatibility
- * -> direct link packets are broadcasted on
- * their interface only
- * -> aggregate packet if the current packet is
- * a "global" packet as well as the base
- * packet
- */
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* packets without direct link flag and high TTL
- * are flooded through the net */
- if ((!directlink) &&
- (!(batman_packet->flags & DIRECTLINK)) &&
- (batman_packet->ttl != 1) &&
-
- /* own packets originating non-primary
- * interfaces leave only that interface */
- ((!forw_packet->own) ||
- (forw_packet->if_incoming == primary_if))) {
- res = true;
- goto out;
- }
-
- /* if the incoming packet is sent via this one
- * interface only - we still can aggregate */
- if ((directlink) &&
- (new_batman_packet->ttl == 1) &&
- (forw_packet->if_incoming == if_incoming) &&
-
- /* packets from direct neighbors or
- * own secondary interface packets
- * (= secondary interface packets in general) */
- (batman_packet->flags & DIRECTLINK ||
- (forw_packet->own &&
- forw_packet->if_incoming != primary_if))) {
- res = true;
- goto out;
- }
- }
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
- return res;
-}
-
-/* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct forw_packet *forw_packet_aggr;
- unsigned char *skb_buff;
-
- if (!atomic_inc_not_zero(&if_incoming->refcount))
- return;
-
- /* own packet should always be scheduled */
- if (!own_packet) {
- if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
- goto out;
- }
- }
-
- forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
- if (!forw_packet_aggr) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- goto out;
- }
-
- if ((atomic_read(&bat_priv->aggregated_ogms)) &&
- (packet_len < MAX_AGGREGATION_BYTES))
- forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- sizeof(struct ethhdr));
- else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len +
- sizeof(struct ethhdr));
-
- if (!forw_packet_aggr->skb) {
- if (!own_packet)
- atomic_inc(&bat_priv->batman_queue_left);
- kfree(forw_packet_aggr);
- goto out;
- }
- skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
-
- INIT_HLIST_NODE(&forw_packet_aggr->list);
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- forw_packet_aggr->packet_len = packet_len;
- memcpy(skb_buff, packet_buff, packet_len);
-
- forw_packet_aggr->own = own_packet;
- forw_packet_aggr->if_incoming = if_incoming;
- forw_packet_aggr->num_packets = 0;
- forw_packet_aggr->direct_link_flags = NO_FLAGS;
- forw_packet_aggr->send_time = send_time;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |= 1;
-
- /* add new packet to packet list */
- spin_lock_bh(&bat_priv->forw_bat_list_lock);
- hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
- /* start timer for this packet */
- INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_packet);
- queue_delayed_work(bat_event_workqueue,
- &forw_packet_aggr->delayed_work,
- send_time - jiffies);
-
- return;
-out:
- hardif_free_ref(if_incoming);
-}
-
-/* aggregate a new packet into the existing aggregation */
-static void aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff, int packet_len,
- bool direct_link)
-{
- unsigned char *skb_buff;
-
- skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
- memcpy(skb_buff, packet_buff, packet_len);
- forw_packet_aggr->packet_len += packet_len;
- forw_packet_aggr->num_packets++;
-
- /* save packet direct link flag status */
- if (direct_link)
- forw_packet_aggr->direct_link_flags |=
- (1 << forw_packet_aggr->num_packets);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming, int own_packet,
- unsigned long send_time)
-{
- /**
- * _aggr -> pointer to the packet we want to aggregate with
- * _pos -> pointer to the position in the queue
- */
- struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
- struct hlist_node *tmp_node;
- struct batman_packet *batman_packet =
- (struct batman_packet *)packet_buff;
- bool direct_link = batman_packet->flags & DIRECTLINK ? 1 : 0;
-
- /* find position for the packet in the forward queue */
- spin_lock_bh(&bat_priv->forw_bat_list_lock);
- /* own packets are not to be aggregated */
- if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node,
- &bat_priv->forw_bat_list, list) {
- if (can_aggregate_with(batman_packet,
- bat_priv,
- packet_len,
- send_time,
- direct_link,
- if_incoming,
- forw_packet_pos)) {
- forw_packet_aggr = forw_packet_pos;
- break;
- }
- }
- }
-
- /* nothing to aggregate with - either aggregation disabled or no
- * suitable aggregation packet found */
- if (!forw_packet_aggr) {
- /* the following section can run without the lock */
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
-
- /**
- * if we could not aggregate this packet with one of the others
- * we hold it back for a while, so that it might be aggregated
- * later on
- */
- if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregated_ogms)))
- send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
-
- new_aggregated_packet(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
- } else {
- aggregate(forw_packet_aggr,
- packet_buff, packet_len,
- direct_link);
- spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- }
-}
-
-/* unpack the aggregated packets and process them one by one */
-void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming)
-{
- struct batman_packet *batman_packet;
- int buff_pos = 0;
- unsigned char *tt_buff;
-
- batman_packet = (struct batman_packet *)packet_buff;
-
- do {
- /* network to host order for our 32bit seqno and the
- orig_interval */
- batman_packet->seqno = ntohl(batman_packet->seqno);
- batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
-
- tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
-
- receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
-
- buff_pos += BAT_PACKET_LEN +
- tt_len(batman_packet->tt_num_changes);
-
- batman_packet = (struct batman_packet *)
- (packet_buff + buff_pos);
- } while (aggregated_packet(buff_pos, packet_len,
- batman_packet->tt_num_changes));
-}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
deleted file mode 100644
index 216337b..0000000
--- a/net/batman-adv/aggregation.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#ifndef _NET_BATMAN_ADV_AGGREGATION_H_
-#define _NET_BATMAN_ADV_AGGREGATION_H_
-
-#include "main.h"
-
-/* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len,
- int tt_num_changes)
-{
- int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
- sizeof(struct tt_change));
-
- return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= MAX_AGGREGATION_BYTES);
-}
-
-void add_bat_packet_to_list(struct bat_priv *bat_priv,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming, int own_packet,
- unsigned long send_time);
-void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
- unsigned char *packet_buff, int packet_len,
- struct hard_iface *if_incoming);
-
-#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
new file mode 100644
index 0000000..3512e25
--- /dev/null
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "bat_ogm.h"
+#include "translation-table.h"
+#include "ring_buffer.h"
+#include "originator.h"
+#include "routing.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "hard-interface.h"
+#include "send.h"
+
+void bat_ogm_init(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ hard_iface->packet_len = BATMAN_OGM_LEN;
+ hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batman_ogm_packet->packet_type = BAT_OGM;
+ batman_ogm_packet->version = COMPAT_VERSION;
+ batman_ogm_packet->flags = NO_FLAGS;
+ batman_ogm_packet->ttl = 2;
+ batman_ogm_packet->tq = TQ_MAX_VALUE;
+ batman_ogm_packet->tt_num_changes = 0;
+ batman_ogm_packet->ttvn = 0;
+}
+
+void bat_ogm_init_primary(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
+ batman_ogm_packet->ttl = TTL;
+}
+
+void bat_ogm_update_mac(struct hard_iface *hard_iface)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ memcpy(batman_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
+ memcpy(batman_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr, ETH_ALEN);
+}
+
+/* when do we schedule our own ogm to be sent */
+static unsigned long bat_ogm_emit_send_time(const struct bat_priv *bat_priv)
+{
+ return jiffies + msecs_to_jiffies(
+ atomic_read(&bat_priv->orig_interval) -
+ JITTER + (random32() % 2*JITTER));
+}
+
+/* when do we schedule an ogm packet to be sent */
+static unsigned long bat_ogm_fwd_send_time(void)
+{
+ return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+}
+
+/* apply hop penalty for a normal link */
+static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
+{
+ int hop_penalty = atomic_read(&bat_priv->hop_penalty);
+ return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
+}
+
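
A worked example of hop_penalty() above, assuming TQ_MAX_VALUE is 255 and an assumed default penalty of 10; the damping compounds per hop:

#include <stdio.h>
#include <stdint.h>

#define TQ_MAX_VALUE 255

static uint8_t hop_penalty(uint8_t tq, int penalty)
{
	return (tq * (TQ_MAX_VALUE - penalty)) / TQ_MAX_VALUE;
}

int main(void)
{
	uint8_t tq = TQ_MAX_VALUE;	/* perfect incoming link */
	int hop;

	/* prints 245, 235, 225: each hop shaves ~4% off the tq */
	for (hop = 1; hop <= 3; hop++) {
		tq = hop_penalty(tq, 10);
		printf("after hop %d: tq = %d\n", hop, tq);
	}
	return 0;
}
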
+/* is there another aggregated packet here? */
+static int bat_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
+{
+ int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);
+
+ return (next_buff_pos <= packet_len) &&
+ (next_buff_pos <= MAX_AGGREGATION_BYTES);
+}
+
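
A worked example of the aggregation bound in bat_ogm_aggr_packet() above; the OGM and tt_change sizes are illustrative stand-ins, not the real batman-adv constants:

#include <stdio.h>

#define OGM_LEN               26	/* stand-in for BATMAN_OGM_LEN */
#define TT_CHANGE_LEN         7	/* stand-in tt_change record size */
#define MAX_AGGREGATION_BYTES 512

static int tt_len(int tt_num_changes)
{
	return tt_num_changes * TT_CHANGE_LEN;
}

/* does the OGM at buff_pos, with its tt changes, fit the aggregate? */
static int aggr_packet(int buff_pos, int packet_len, int tt_num_changes)
{
	int next = buff_pos + OGM_LEN + tt_len(tt_num_changes);

	return next <= packet_len && next <= MAX_AGGREGATION_BYTES;
}

int main(void)
{
	/* an aggregate of two OGMs: the first carries 3 tt changes
	 * (26 + 21 = 47 bytes), the second none (26 bytes): 73 total */
	int packet_len = OGM_LEN + tt_len(3) + OGM_LEN;

	printf("first fits:  %d\n", aggr_packet(0, packet_len, 3));
	printf("second fits: %d\n", aggr_packet(47, packet_len, 0));
	printf("third?       %d\n", aggr_packet(73, packet_len, 0));
	return 0;
}
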
+/* send a batman ogm to a given interface */
+static void bat_ogm_send_to_if(struct forw_packet *forw_packet,
+ struct hard_iface *hard_iface)
+{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ char *fwd_str;
+ uint8_t packet_num;
+ int16_t buff_pos;
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct sk_buff *skb;
+
+ if (hard_iface->if_status != IF_ACTIVE)
+ return;
+
+ packet_num = 0;
+ buff_pos = 0;
+ batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+
+ /* adjust all flags and log packets */
+ while (bat_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batman_ogm_packet->tt_num_changes)) {
+
+ /* we might have aggregated direct link packets with an
+ * ordinary base packet */
+ if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
+ (forw_packet->if_incoming == hard_iface))
+ batman_ogm_packet->flags |= DIRECTLINK;
+ else
+ batman_ogm_packet->flags &= ~DIRECTLINK;
+
+ fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
+ "Sending own" :
+ "Forwarding"));
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
+ " IDF %s, ttvn %d) on interface %s [%pM]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
+ batman_ogm_packet->orig,
+ ntohl(batman_ogm_packet->seqno),
+ batman_ogm_packet->tq, batman_ogm_packet->ttl,
+ (batman_ogm_packet->flags & DIRECTLINK ?
+ "on" : "off"),
+ batman_ogm_packet->ttvn, hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ buff_pos += BATMAN_OGM_LEN +
+ tt_len(batman_ogm_packet->tt_num_changes);
+ packet_num++;
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (forw_packet->skb->data + buff_pos);
+ }
+
+ /* create clone because function is called more than once */
+ skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (skb)
+ send_skb_packet(skb, hard_iface, broadcast_addr);
+}
+
+/* send a batman ogm packet */
+void bat_ogm_emit(struct forw_packet *forw_packet)
+{
+ struct hard_iface *hard_iface;
+ struct net_device *soft_iface;
+ struct bat_priv *bat_priv;
+ struct hard_iface *primary_if = NULL;
+ struct batman_ogm_packet *batman_ogm_packet;
+ unsigned char directlink;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (forw_packet->skb->data);
+ directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+ if (!forw_packet->if_incoming) {
+ pr_err("Error - can't forward packet: incoming iface not "
+ "specified\n");
+ goto out;
+ }
+
+ soft_iface = forw_packet->if_incoming->soft_iface;
+ bat_priv = netdev_priv(soft_iface);
+
+ if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+ goto out;
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+	/* multihomed peer assumed:
+	 * non-primary OGMs are only broadcast on their interface */
+ if ((directlink && (batman_ogm_packet->ttl == 1)) ||
+ (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
+
+ /* FIXME: what about aggregated packets ? */
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "%s packet (originator %pM, seqno %d, TTL %d) "
+ "on interface %s [%pM]\n",
+ (forw_packet->own ? "Sending own" : "Forwarding"),
+ batman_ogm_packet->orig,
+ ntohl(batman_ogm_packet->seqno),
+ batman_ogm_packet->ttl,
+ forw_packet->if_incoming->net_dev->name,
+ forw_packet->if_incoming->net_dev->dev_addr);
+
+		/* skb is only used once and then forw_packet is freed */
+ send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
+ broadcast_addr);
+ forw_packet->skb = NULL;
+
+ goto out;
+ }
+
+ /* broadcast on every interface */
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ if (hard_iface->soft_iface != soft_iface)
+ continue;
+
+ bat_ogm_send_to_if(forw_packet, hard_iface);
+ }
+ rcu_read_unlock();
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+/* return true if new_packet can be aggregated with forw_packet */
+static bool bat_ogm_can_aggregate(const struct batman_ogm_packet
+ *new_batman_ogm_packet,
+ struct bat_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct hard_iface *if_incoming,
+ const struct forw_packet *forw_packet)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+ int aggregated_bytes = forw_packet->packet_len + packet_len;
+ struct hard_iface *primary_if = NULL;
+ bool res = false;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+
+ /**
+ * we can aggregate the current packet to this aggregated packet
+ * if:
+ *
+ * - the send time is within our MAX_AGGREGATION_MS time
+	 *   - the resulting packet won't be bigger than
+ * MAX_AGGREGATION_BYTES
+ */
+
+ if (time_before(send_time, forw_packet->send_time) &&
+ time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
+ forw_packet->send_time) &&
+ (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
+
+ /**
+ * check aggregation compatibility
+ * -> direct link packets are broadcasted on
+ * their interface only
+ * -> aggregate packet if the current packet is
+ * a "global" packet as well as the base
+ * packet
+ */
+
+ primary_if = primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ /* packets without direct link flag and high TTL
+ * are flooded through the net */
+ if ((!directlink) &&
+ (!(batman_ogm_packet->flags & DIRECTLINK)) &&
+ (batman_ogm_packet->ttl != 1) &&
+
+ /* own packets originating non-primary
+ * interfaces leave only that interface */
+ ((!forw_packet->own) ||
+ (forw_packet->if_incoming == primary_if))) {
+ res = true;
+ goto out;
+ }
+
+ /* if the incoming packet is sent via this one
+ * interface only - we still can aggregate */
+ if ((directlink) &&
+ (new_batman_ogm_packet->ttl == 1) &&
+ (forw_packet->if_incoming == if_incoming) &&
+
+ /* packets from direct neighbors or
+ * own secondary interface packets
+ * (= secondary interface packets in general) */
+ (batman_ogm_packet->flags & DIRECTLINK ||
+ (forw_packet->own &&
+ forw_packet->if_incoming != primary_if))) {
+ res = true;
+ goto out;
+ }
+ }
+
+out:
+ if (primary_if)
+ hardif_free_ref(primary_if);
+ return res;
+}
+
+/* create a new aggregated packet and add this packet to it */
+static void bat_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct hard_iface *if_incoming,
+ int own_packet)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct forw_packet *forw_packet_aggr;
+ unsigned char *skb_buff;
+
+ if (!atomic_inc_not_zero(&if_incoming->refcount))
+ return;
+
+ /* own packet should always be scheduled */
+ if (!own_packet) {
+ if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "batman packet queue full\n");
+ goto out;
+ }
+ }
+
+ forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
+ if (!forw_packet_aggr) {
+ if (!own_packet)
+ atomic_inc(&bat_priv->batman_queue_left);
+ goto out;
+ }
+
+ if ((atomic_read(&bat_priv->aggregated_ogms)) &&
+ (packet_len < MAX_AGGREGATION_BYTES))
+ forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
+ sizeof(struct ethhdr));
+ else
+ forw_packet_aggr->skb = dev_alloc_skb(packet_len +
+ sizeof(struct ethhdr));
+
+ if (!forw_packet_aggr->skb) {
+ if (!own_packet)
+ atomic_inc(&bat_priv->batman_queue_left);
+ kfree(forw_packet_aggr);
+ goto out;
+ }
+ skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
+
+ INIT_HLIST_NODE(&forw_packet_aggr->list);
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ forw_packet_aggr->packet_len = packet_len;
+ memcpy(skb_buff, packet_buff, packet_len);
+
+ forw_packet_aggr->own = own_packet;
+ forw_packet_aggr->if_incoming = if_incoming;
+ forw_packet_aggr->num_packets = 0;
+ forw_packet_aggr->direct_link_flags = NO_FLAGS;
+ forw_packet_aggr->send_time = send_time;
+
+ /* save packet direct link flag status */
+ if (direct_link)
+ forw_packet_aggr->direct_link_flags |= 1;
+
+ /* add new packet to packet list */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /* start timer for this packet */
+ INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
+ send_outstanding_bat_ogm_packet);
+ queue_delayed_work(bat_event_workqueue,
+ &forw_packet_aggr->delayed_work,
+ send_time - jiffies);
+
+ return;
+out:
+ hardif_free_ref(if_incoming);
+}
+
+/* aggregate a new packet into the existing ogm packet */
+static void bat_ogm_aggregate(struct forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
+{
+ unsigned char *skb_buff;
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ memcpy(skb_buff, packet_buff, packet_len);
+ forw_packet_aggr->packet_len += packet_len;
+ forw_packet_aggr->num_packets++;
+
+ /* save packet direct link flag status */
+ if (direct_link)
+ forw_packet_aggr->direct_link_flags |=
+ (1 << forw_packet_aggr->num_packets);
+}
+
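
The direct_link_flags bookkeeping above amounts to a per-packet bitmask: bat_ogm_aggregate_new() sets bit 0 for the base packet, each aggregated packet gets bit num_packets after the counter is bumped, and bat_ogm_send_to_if() later tests bit packet_num. A small sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t direct_link_flags = 0;
	int num_packets = 0;
	int i;

	/* base packet, direct link: aggregate_new() sets bit 0 */
	direct_link_flags |= 1;

	/* second packet, not direct link: counter moves to 1, no bit */
	num_packets++;

	/* third packet, direct link: counter moves to 2, bit 2 set */
	num_packets++;
	direct_link_flags |= (1 << num_packets);

	/* send_to_if() tests bit packet_num for each packet */
	for (i = 0; i <= num_packets; i++)
		printf("packet %d direct link: %d\n",
		       i, !!(direct_link_flags & (1 << i)));
	return 0;
}
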
+static void bat_ogm_queue_add(struct bat_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
+{
+ /**
+ * _aggr -> pointer to the packet we want to aggregate with
+ * _pos -> pointer to the position in the queue
+ */
+ struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
+ struct hlist_node *tmp_node;
+ struct batman_ogm_packet *batman_ogm_packet;
+ bool direct_link;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+ direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
+
+ /* find position for the packet in the forward queue */
+ spin_lock_bh(&bat_priv->forw_bat_list_lock);
+ /* own packets are not to be aggregated */
+ if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
+ hlist_for_each_entry(forw_packet_pos, tmp_node,
+ &bat_priv->forw_bat_list, list) {
+ if (bat_ogm_can_aggregate(batman_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
+ forw_packet_aggr = forw_packet_pos;
+ break;
+ }
+ }
+ }
+
+ /* nothing to aggregate with - either aggregation disabled or no
+ * suitable aggregation packet found */
+ if (!forw_packet_aggr) {
+ /* the following section can run without the lock */
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+
+ /**
+ * if we could not aggregate this packet with one of the others
+ * we hold it back for a while, so that it might be aggregated
+ * later on
+ */
+ if ((!own_packet) &&
+ (atomic_read(&bat_priv->aggregated_ogms)))
+ send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
+
+ bat_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
+ } else {
+ bat_ogm_aggregate(forw_packet_aggr, packet_buff, packet_len,
+ direct_link);
+ spin_unlock_bh(&bat_priv->forw_bat_list_lock);
+ }
+}
+
+static void bat_ogm_forward(struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ int directlink, struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct neigh_node *router;
+ uint8_t in_tq, in_ttl, tq_avg = 0;
+ uint8_t tt_num_changes;
+
+ if (batman_ogm_packet->ttl <= 1) {
+ bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ return;
+ }
+
+ router = orig_node_get_router(orig_node);
+
+ in_tq = batman_ogm_packet->tq;
+ in_ttl = batman_ogm_packet->ttl;
+ tt_num_changes = batman_ogm_packet->tt_num_changes;
+
+ batman_ogm_packet->ttl--;
+ memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
+
+	/* rebroadcast the tq of our best-ranking neighbor to ensure we
+	 * always rebroadcast our best tq value */
+ if (router && router->tq_avg != 0) {
+
+ /* rebroadcast ogm of best ranking neighbor as is */
+ if (!compare_eth(router->addr, ethhdr->h_source)) {
+ batman_ogm_packet->tq = router->tq_avg;
+
+ if (router->last_ttl)
+ batman_ogm_packet->ttl = router->last_ttl - 1;
+ }
+
+ tq_avg = router->tq_avg;
+ }
+
+ if (router)
+ neigh_node_free_ref(router);
+
+ /* apply hop penalty */
+ batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq_orig: %i, tq_avg: %i, "
+ "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
+ in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
+ batman_ogm_packet->ttl);
+
+ batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
+ batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
+
+	/* switch off the primaries first hop flag when forwarding */
+ batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
+ if (directlink)
+ batman_ogm_packet->flags |= DIRECTLINK;
+ else
+ batman_ogm_packet->flags &= ~DIRECTLINK;
+
+ bat_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
+ BATMAN_OGM_LEN + tt_len(tt_num_changes),
+ if_incoming, 0, bat_ogm_fwd_send_time());
+}
+
+void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
+{
+ struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batman_ogm_packet *batman_ogm_packet;
+ struct hard_iface *primary_if;
+ int vis_server;
+
+ vis_server = atomic_read(&bat_priv->vis_mode);
+ primary_if = primary_if_get_selected(bat_priv);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+
+ /* change sequence number to network order */
+ batman_ogm_packet->seqno =
+ htonl((uint32_t)atomic_read(&hard_iface->seqno));
+
+ batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batman_ogm_packet->tt_crc = htons((uint16_t)
+ atomic_read(&bat_priv->tt_crc));
+ if (tt_num_changes >= 0)
+ batman_ogm_packet->tt_num_changes = tt_num_changes;
+
+ if (vis_server == VIS_TYPE_SERVER_SYNC)
+ batman_ogm_packet->flags |= VIS_SERVER;
+ else
+ batman_ogm_packet->flags &= ~VIS_SERVER;
+
+ if ((hard_iface == primary_if) &&
+ (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
+ batman_ogm_packet->gw_flags =
+ (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
+ else
+ batman_ogm_packet->gw_flags = NO_FLAGS;
+
+ atomic_inc(&hard_iface->seqno);
+
+ slide_own_bcast_window(hard_iface);
+ bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ bat_ogm_emit_send_time(bat_priv));
+
+ if (primary_if)
+ hardif_free_ref(primary_if);
+}
+
+static void bat_ogm_orig_update(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
+ *batman_ogm_packet,
+ struct hard_iface *if_incoming,
+ const unsigned char *tt_buff, int is_duplicate)
+{
+ struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+ struct neigh_node *router = NULL;
+ struct orig_node *orig_node_tmp;
+ struct hlist_node *node;
+ uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+
+ bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
+ "Searching and updating originator entry of received packet\n");
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_node->neigh_list, list) {
+ if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+ (tmp_neigh_node->if_incoming == if_incoming) &&
+ atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ neigh_node = tmp_neigh_node;
+ continue;
+ }
+
+ if (is_duplicate)
+ continue;
+
+ spin_lock_bh(&tmp_neigh_node->tq_lock);
+ ring_buffer_set(tmp_neigh_node->tq_recv,
+ &tmp_neigh_node->tq_index, 0);
+ tmp_neigh_node->tq_avg =
+ ring_buffer_avg(tmp_neigh_node->tq_recv);
+ spin_unlock_bh(&tmp_neigh_node->tq_lock);
+ }
+
+ if (!neigh_node) {
+ struct orig_node *orig_tmp;
+
+ orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
+ if (!orig_tmp)
+ goto unlock;
+
+ neigh_node = create_neighbor(orig_node, orig_tmp,
+ ethhdr->h_source, if_incoming);
+
+ orig_node_free_ref(orig_tmp);
+ if (!neigh_node)
+ goto unlock;
+ } else
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Updating existing last-hop neighbor of originator\n");
+
+ rcu_read_unlock();
+
+ orig_node->flags = batman_ogm_packet->flags;
+ neigh_node->last_valid = jiffies;
+
+ spin_lock_bh(&neigh_node->tq_lock);
+ ring_buffer_set(neigh_node->tq_recv,
+ &neigh_node->tq_index,
+ batman_ogm_packet->tq);
+ neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
+ spin_unlock_bh(&neigh_node->tq_lock);
+
+ if (!is_duplicate) {
+ orig_node->last_ttl = batman_ogm_packet->ttl;
+ neigh_node->last_ttl = batman_ogm_packet->ttl;
+ }
+
+ bonding_candidate_add(orig_node, neigh_node);
+
+ /* if this neighbor already is our next hop there is nothing
+ * to change */
+ router = orig_node_get_router(orig_node);
+ if (router == neigh_node)
+ goto update_tt;
+
+ /* if this neighbor does not offer a better TQ we won't consider it */
+ if (router && (router->tq_avg > neigh_node->tq_avg))
+ goto update_tt;
+
+ /* if the TQ is the same and the link not more symmetric we
+ * won't consider it either */
+ if (router && (neigh_node->tq_avg == router->tq_avg)) {
+ orig_node_tmp = router->orig_node;
+ spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ bcast_own_sum_orig =
+ orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+ orig_node_tmp = neigh_node->orig_node;
+ spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+ bcast_own_sum_neigh =
+ orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+ spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+
+ if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+ goto update_tt;
+ }
+
+ update_route(bat_priv, orig_node, neigh_node);
+
+update_tt:
+	/* translation table changes have to be checked only if the OGM
+	 * has been sent through a primary interface */
+ if (((batman_ogm_packet->orig != ethhdr->h_source) &&
+ (batman_ogm_packet->ttl > 2)) ||
+ (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
+ tt_update_orig(bat_priv, orig_node, tt_buff,
+ batman_ogm_packet->tt_num_changes,
+ batman_ogm_packet->ttvn,
+ batman_ogm_packet->tt_crc);
+
+ if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
+ gw_node_update(bat_priv, orig_node,
+ batman_ogm_packet->gw_flags);
+
+ orig_node->gw_flags = batman_ogm_packet->gw_flags;
+
+ /* restart gateway selection if fast or late switching was enabled */
+ if ((orig_node->gw_flags) &&
+ (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
+ (atomic_read(&bat_priv->gw_sel_class) > 2))
+ gw_check_election(bat_priv, orig_node);
+
+ goto out;
+
+unlock:
+ rcu_read_unlock();
+out:
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ if (router)
+ neigh_node_free_ref(router);
+}
+
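
A condensed sketch of the next-hop decision in bat_ogm_orig_update() above: keep the current router unless the new neighbor offers a better TQ, or the same TQ with more of our own broadcasts echoed back (the bcast_own_sum tie-break). The helper below is a simplification, not the kernel code:

#include <stdio.h>

static int switch_router(int have_router, int router_tq, int neigh_tq,
			 int router_bcast_own_sum, int neigh_bcast_own_sum)
{
	if (!have_router)
		return 1;	/* no route yet, take the neighbor */
	if (router_tq > neigh_tq)
		return 0;	/* current router is strictly better */
	if (router_tq == neigh_tq &&
	    router_bcast_own_sum >= neigh_bcast_own_sum)
		return 0;	/* tie, current link no less symmetric */
	return 1;		/* update_route() in the real code */
}

int main(void)
{
	printf("%d\n", switch_router(1, 200, 210, 30, 30)); /* better tq */
	printf("%d\n", switch_router(1, 210, 210, 30, 40)); /* tie-break */
	printf("%d\n", switch_router(1, 210, 210, 40, 30)); /* keep      */
	return 0;
}
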
+static int bat_ogm_calc_tq(struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ struct batman_ogm_packet *batman_ogm_packet,
+ struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+ struct hlist_node *node;
+ uint8_t total_count;
+ uint8_t orig_eq_count, neigh_rq_count, tq_own;
+ int tq_asym_penalty, ret = 0;
+
+ /* find corresponding one hop neighbor */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_neigh_node->neigh_list, list) {
+
+ if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != if_incoming)
+ continue;
+
+ if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ neigh_node = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (!neigh_node)
+ neigh_node = create_neighbor(orig_neigh_node,
+ orig_neigh_node,
+ orig_neigh_node->orig,
+ if_incoming);
+
+ if (!neigh_node)
+ goto out;
+
+ /* if orig_node is direct neighbor update neigh_node last_valid */
+ if (orig_node == orig_neigh_node)
+ neigh_node->last_valid = jiffies;
+
+ orig_node->last_valid = jiffies;
+
+ /* find packet count of corresponding one hop neighbor */
+ spin_lock_bh(&orig_node->ogm_cnt_lock);
+ orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
+ neigh_rq_count = neigh_node->real_packet_count;
+ spin_unlock_bh(&orig_node->ogm_cnt_lock);
+
+	/* take care not to get a value bigger than 100% */
+ total_count = (orig_eq_count > neigh_rq_count ?
+ neigh_rq_count : orig_eq_count);
+
+	/* if we have too few packets (too little data) we set tq_own to
+	 * zero; if we receive too few packets the link is not considered
+	 * bidirectional */
+ if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
+ (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+ tq_own = 0;
+ else
+ /* neigh_node->real_packet_count is never zero as we
+ * only purge old information when getting new
+ * information */
+ tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
+
+ /*
+ * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
+ * affect the nearly-symmetric links only a little, but
+ * punishes asymmetric links more. This will give a value
+ * between 0 and TQ_MAX_VALUE
+ */
+ tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
+ (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
+ (TQ_LOCAL_WINDOW_SIZE *
+ TQ_LOCAL_WINDOW_SIZE *
+ TQ_LOCAL_WINDOW_SIZE);
+
+ batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
+ * tq_asym_penalty) /
+ (TQ_MAX_VALUE * TQ_MAX_VALUE));
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "bidirectional: "
+ "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
+ "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
+ "total tq: %3i\n",
+ orig_node->orig, orig_neigh_node->orig, total_count,
+ neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
+
+ /* if link has the minimum required transmission quality
+ * consider it bidirectional */
+ if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
+ ret = 1;
+
+out:
+ if (neigh_node)
+ neigh_node_free_ref(neigh_node);
+ return ret;
+}
+
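
A worked example of the TQ computation in bat_ogm_calc_tq() above, with TQ_LOCAL_WINDOW_SIZE assumed at its default of 64: 40 of our own OGMs echoed back against 48 received from the neighbor gives tq_own = 212, an asymmetry penalty of 252/255, and an incoming tq of 255 drops to 209:

#include <stdio.h>

#define TQ_MAX_VALUE         255
#define TQ_LOCAL_WINDOW_SIZE 64	/* assumed default */

int main(void)
{
	int orig_eq_count = 40;		/* our OGMs echoed back */
	int neigh_rq_count = 48;	/* OGMs received from neighbor */
	int total_count = orig_eq_count < neigh_rq_count ?
			  orig_eq_count : neigh_rq_count;
	int tq = TQ_MAX_VALUE;		/* incoming tq */
	int tq_own, tq_asym_penalty;

	tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x)^3), normalized to TQ_MAX_VALUE */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
		(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
		(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
		(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
		(TQ_LOCAL_WINDOW_SIZE * TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	tq = (tq * tq_own * tq_asym_penalty) /
	     (TQ_MAX_VALUE * TQ_MAX_VALUE);

	printf("tq_own=%d asym_penalty=%d total tq=%d\n",
	       tq_own, tq_asym_penalty, tq);
	return 0;
}
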
+/* processes a batman packet for all interfaces, adjusts the sequence number and
+ * finds out whether it is a duplicate.
+ * returns:
+ * 1 the packet is a duplicate
+ * 0 the packet has not yet been received
+ * -1 the packet is old and has been received while the seqno window
+ * was protected. Caller should drop it.
+ */
+static int bat_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batman_ogm_packet
+ *batman_ogm_packet,
+ const struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct orig_node *orig_node;
+ struct neigh_node *tmp_neigh_node;
+ struct hlist_node *node;
+ int is_duplicate = 0;
+ int32_t seq_diff;
+ int need_update = 0;
+ int set_mark, ret = -1;
+
+ orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ if (!orig_node)
+ return 0;
+
+ spin_lock_bh(&orig_node->ogm_cnt_lock);
+ seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
+
+	/* signal to the caller that the packet is to be dropped */
+ if (window_protected(bat_priv, seq_diff,
+ &orig_node->batman_seqno_reset))
+ goto out;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, node,
+ &orig_node->neigh_list, list) {
+
+ is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ batman_ogm_packet->seqno);
+
+ if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
+ (tmp_neigh_node->if_incoming == if_incoming))
+ set_mark = 1;
+ else
+ set_mark = 0;
+
+ /* if the window moved, set the update flag. */
+ need_update |= bit_get_packet(bat_priv,
+ tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
+
+ tmp_neigh_node->real_packet_count =
+ bit_packet_count(tmp_neigh_node->real_bits);
+ }
+ rcu_read_unlock();
+
+ if (need_update) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "updating last_seqno: old %d, new %d\n",
+ orig_node->last_real_seqno, batman_ogm_packet->seqno);
+ orig_node->last_real_seqno = batman_ogm_packet->seqno;
+ }
+
+ ret = is_duplicate;
+
+out:
+ spin_unlock_bh(&orig_node->ogm_cnt_lock);
+ orig_node_free_ref(orig_node);
+ return ret;
+}
+
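
The -1 return above comes from window_protected(); a condensed sketch of the seqno window check it is assumed to apply, with both constants assumed defaults and the seqno-reset timeout omitted:

#include <stdio.h>
#include <stdint.h>

#define TQ_LOCAL_WINDOW_SIZE 64	/* assumed default */
#define EXPECTED_SEQNO_RANGE 65536	/* assumed default */

/* out-of-window seqnos are dropped while the window is protected */
static int window_protected(int32_t seq_diff)
{
	return seq_diff < -TQ_LOCAL_WINDOW_SIZE ||
	       seq_diff >= EXPECTED_SEQNO_RANGE;
}

int main(void)
{
	uint32_t last_real_seqno = 1000;
	uint32_t seqnos[] = { 1001, 1000, 990, 900, 70000 };
	int i;

	for (i = 0; i < 5; i++) {
		int32_t seq_diff = seqnos[i] - last_real_seqno;

		printf("seqno %5u diff %6d -> %s\n", seqnos[i], seq_diff,
		       window_protected(seq_diff) ? "drop (protected)"
						  : "check dup bits");
	}
	return 0;
}
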
+static void bat_ogm_process(const struct ethhdr *ethhdr,
+ struct batman_ogm_packet *batman_ogm_packet,
+ const unsigned char *tt_buff,
+ struct hard_iface *if_incoming)
+{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct hard_iface *hard_iface;
+ struct orig_node *orig_neigh_node, *orig_node;
+ struct neigh_node *router = NULL, *router_router = NULL;
+ struct neigh_node *orig_neigh_router = NULL;
+ int has_directlink_flag;
+ int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
+ int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+ int is_duplicate;
+ uint32_t if_incoming_seqno;
+
+ /* Silently drop when the batman packet is actually not a
+ * correct packet.
+ *
+ * This might happen if a packet is padded (e.g. Ethernet has a
+ * minimum frame length of 64 byte) and the aggregation interprets
+ * it as an additional length.
+ *
+ * TODO: A more sane solution would be to have a bit in the
+ * batman_ogm_packet to detect whether the packet is the last
+ * packet in an aggregation. Here we expect that the padding
+ * is always zero (or not 0x01)
+ */
+ if (batman_ogm_packet->packet_type != BAT_OGM)
+ return;
+
+ /* could be changed by schedule_own_packet() */
+ if_incoming_seqno = atomic_read(&if_incoming->seqno);
+
+ has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+
+ is_single_hop_neigh = (compare_eth(ethhdr->h_source,
+ batman_ogm_packet->orig) ? 1 : 0);
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
+ "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
+ "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
+ batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
+ batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
+ batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
+ batman_ogm_packet->ttl, batman_ogm_packet->version,
+ has_directlink_flag);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ if (hard_iface->if_status != IF_ACTIVE)
+ continue;
+
+ if (hard_iface->soft_iface != if_incoming->soft_iface)
+ continue;
+
+ if (compare_eth(ethhdr->h_source,
+ hard_iface->net_dev->dev_addr))
+ is_my_addr = 1;
+
+ if (compare_eth(batman_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr))
+ is_my_orig = 1;
+
+ if (compare_eth(batman_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr))
+ is_my_oldorig = 1;
+
+ if (is_broadcast_ether_addr(ethhdr->h_source))
+ is_broadcast = 1;
+ }
+ rcu_read_unlock();
+
+ if (batman_ogm_packet->version != COMPAT_VERSION) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batman_ogm_packet->version);
+ return;
+ }
+
+ if (is_my_addr) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: received my own broadcast (sender: %pM"
+ ")\n",
+ ethhdr->h_source);
+ return;
+ }
+
+ if (is_broadcast) {
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
+ "ignoring all packets with broadcast source addr (sender: %pM"
+ ")\n", ethhdr->h_source);
+ return;
+ }
+
+ if (is_my_orig) {
+ unsigned long *word;
+ int offset;
+
+ orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
+ if (!orig_neigh_node)
+ return;
+
+ /* neighbor has to indicate direct link and it has to
+ * come via the corresponding interface */
+ /* save packet seqno for bidirectional check */
+ if (has_directlink_flag &&
+ compare_eth(if_incoming->net_dev->dev_addr,
+ batman_ogm_packet->orig)) {
+ offset = if_incoming->if_num * NUM_WORDS;
+
+ spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
+ word = &(orig_neigh_node->bcast_own[offset]);
+ bit_mark(word,
+ if_incoming_seqno -
+ batman_ogm_packet->seqno - 2);
+ orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
+ bit_packet_count(word);
+ spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+ }
+
+ bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
+ "originator packet from myself (via neighbor)\n");
+ orig_node_free_ref(orig_neigh_node);
+ return;
+ }
+
+ if (is_my_oldorig) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast echos (sender: "
+ "%pM)\n", ethhdr->h_source);
+ return;
+ }
+
+ orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ if (!orig_node)
+ return;
+
+ is_duplicate = bat_ogm_update_seqnos(ethhdr, batman_ogm_packet,
+ if_incoming);
+
+ if (is_duplicate == -1) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: packet within seqno protection time "
+ "(sender: %pM)\n", ethhdr->h_source);
+ goto out;
+ }
+
+ if (batman_ogm_packet->tq == 0) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with tq equal 0\n");
+ goto out;
+ }
+
+ router = orig_node_get_router(orig_node);
+ if (router)
+ router_router = orig_node_get_router(router->orig_node);
+
+ /* avoid temporary routing loops */
+ if (router && router_router &&
+ (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
+ !(compare_eth(batman_ogm_packet->orig,
+ batman_ogm_packet->prev_sender)) &&
+ (compare_eth(router->addr, router_router->addr))) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast packets that "
+ "may make me loop (sender: %pM)\n", ethhdr->h_source);
+ goto out;
+ }
+
+ /* if sender is a direct neighbor the sender mac equals
+ * originator mac */
+ orig_neigh_node = (is_single_hop_neigh ?
+ orig_node :
+ get_orig_node(bat_priv, ethhdr->h_source));
+ if (!orig_neigh_node)
+ goto out;
+
+ orig_neigh_router = orig_node_get_router(orig_neigh_node);
+
+ /* drop packet if sender is not a direct neighbor and if we
+ * don't route towards it */
+ if (!is_single_hop_neigh && (!orig_neigh_router)) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
+ goto out_neigh;
+ }
+
+ is_bidirectional = bat_ogm_calc_tq(orig_node, orig_neigh_node,
+ batman_ogm_packet, if_incoming);
+
+ bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
+
+ /* update ranking if it is not a duplicate or has the same
+ * seqno and similar ttl as the non-duplicate */
+ if (is_bidirectional &&
+ (!is_duplicate ||
+ ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
+ (orig_node->last_ttl - 3 <= batman_ogm_packet->ttl))))
+ bat_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batman_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);
+
+ /* is single hop (direct) neighbor */
+ if (is_single_hop_neigh) {
+
+ /* mark direct link on incoming interface */
+ bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
+ 1, if_incoming);
+
+ bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
+ "rebroadcast neighbor packet with direct link flag\n");
+ goto out_neigh;
+ }
+
+ /* multihop originator */
+ if (!is_bidirectional) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: not received via bidirectional link\n");
+ goto out_neigh;
+ }
+
+ if (is_duplicate) {
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
+ goto out_neigh;
+ }
+
+ bat_dbg(DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast originator packet\n");
+ bat_ogm_forward(orig_node, ethhdr, batman_ogm_packet, 0, if_incoming);
+
+out_neigh:
+ if ((orig_neigh_node) && (!is_single_hop_neigh))
+ orig_node_free_ref(orig_neigh_node);
+out:
+ if (router)
+ neigh_node_free_ref(router);
+ if (router_router)
+ neigh_node_free_ref(router_router);
+ if (orig_neigh_router)
+ neigh_node_free_ref(orig_neigh_router);
+
+ orig_node_free_ref(orig_node);
+}
+
+void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming)
+{
+ struct batman_ogm_packet *batman_ogm_packet;
+ int buff_pos = 0;
+ unsigned char *tt_buff;
+
+ batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+
+ /* unpack the aggregated packets and process them one by one */
+ do {
+ /* network to host order for our 32bit seqno and the
+ tt_crc */
+ batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
+ batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
+
+ tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;
+
+ bat_ogm_process(ethhdr, batman_ogm_packet,
+ tt_buff, if_incoming);
+
+ buff_pos += BATMAN_OGM_LEN +
+ tt_len(batman_ogm_packet->tt_num_changes);
+
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (packet_buff + buff_pos);
+ } while (bat_ogm_aggr_packet(buff_pos, packet_len,
+ batman_ogm_packet->tt_num_changes));
+}
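A note on the unpacking loop in bat_ogm_receive() above: every OGM in an aggregate is followed by its own TT change records, so the walk advances by the fixed OGM header length plus the variable TT payload. The plain C model below illustrates only that arithmetic; struct ogm_hdr, TT_CHANGE_LEN and walk_aggregate() are simplified stand-ins, not the kernel's definitions.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct ogm_hdr {                /* stand-in for batman_ogm_packet */
            uint8_t  packet_type;
            uint8_t  tt_num_changes;
            uint32_t seqno;
    } __attribute__((packed));

    #define OGM_LEN       sizeof(struct ogm_hdr)
    #define TT_CHANGE_LEN 4         /* assumed size of one TT change */

    static size_t tt_len(uint8_t tt_num_changes)
    {
            return (size_t)tt_num_changes * TT_CHANGE_LEN;
    }

    static void walk_aggregate(const uint8_t *buff, size_t buff_len)
    {
            size_t pos = 0;

            /* same idea as bat_ogm_aggr_packet(): stop as soon as the
             * next header plus its TT payload no longer fits */
            while (pos + OGM_LEN <= buff_len) {
                    const struct ogm_hdr *ogm = (const void *)(buff + pos);

                    if (pos + OGM_LEN + tt_len(ogm->tt_num_changes) > buff_len)
                            break;
                    printf("OGM at offset %zu, %u TT changes\n", pos,
                           (unsigned)ogm->tt_num_changes);
                    pos += OGM_LEN + tt_len(ogm->tt_num_changes);
            }
    }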
diff --git a/net/batman-adv/bat_ogm.h b/net/batman-adv/bat_ogm.h
new file mode 100644
index 0000000..69329c1
--- /dev/null
+++ b/net/batman-adv/bat_ogm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner, Simon Wunderlich
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_OGM_H_
+#define _NET_BATMAN_ADV_OGM_H_
+
+#include "main.h"
+
+void bat_ogm_init(struct hard_iface *hard_iface);
+void bat_ogm_init_primary(struct hard_iface *hard_iface);
+void bat_ogm_update_mac(struct hard_iface *hard_iface);
+void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes);
+void bat_ogm_emit(struct forw_packet *forw_packet);
+void bat_ogm_receive(const struct ethhdr *ethhdr, unsigned char *packet_buff,
+ int packet_len, struct hard_iface *if_incoming);
+
+#endif /* _NET_BATMAN_ADV_OGM_H_ */
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd15deb..b8a7414 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -380,6 +380,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
+BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
BAT_ATTR_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
@@ -396,6 +397,7 @@ static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
&bat_attr_fragmentation,
+ &bat_attr_ap_isolation,
&bat_attr_vis_mode,
&bat_attr_gw_mode,
&bat_attr_orig_interval,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index c1f4bfc..0be9ff3 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -97,12 +97,12 @@ static void bit_shift(unsigned long *seq_bits, int32_t n)
(seq_bits[i - word_num - 1] >>
(WORD_BIT_SIZE-word_offset));
/* and the upper part of the right half and shift it left to
- * it's position */
+ * its position */
/* for our example that would be: word[0] = 9800 + 0076 =
* 9876 */
}
- /* now for our last word, i==word_num, we only have the it's "left"
- * half. that's the 1000 word in our example.*/
+ /* now for our last word, i==word_num, we only have its "left" half.
+ * that's the 1000 word in our example.*/
seq_bits[i] = (seq_bits[i - word_num] << word_offset);
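The comment fixes above belong to the sliding-window bitmap: shifting it by n bits rebuilds each word from the lower half of the word n/WORD_BIT_SIZE positions away plus the upper half of its right neighbour. A hedged 32-bit model of that shift follows; the kernel version works on unsigned long and clamps n before it gets here.

    #include <stdint.h>

    #define WORD_BIT_SIZE 32

    static void bit_shift32(uint32_t *seq_bits, int n_words, int n)
    {
            int word_num = n / WORD_BIT_SIZE;    /* whole-word distance */
            int word_offset = n % WORD_BIT_SIZE; /* leftover bit offset */
            int i;

            if (word_num >= n_words) {           /* shifted out entirely */
                    for (i = 0; i < n_words; i++)
                            seq_bits[i] = 0;
                    return;
            }

            for (i = n_words - 1; i > word_num; i--) {
                    /* lower ("left") half of the source word ... */
                    seq_bits[i] = seq_bits[i - word_num] << word_offset;
                    /* ... plus the upper part of its right neighbour,
                     * shifted to its position -- the "9800 + 0076 = 9876"
                     * example in the comment above */
                    if (word_offset)
                            seq_bits[i] |= seq_bits[i - word_num - 1] >>
                                           (WORD_BIT_SIZE - word_offset);
            }
            /* the last word, i == word_num, only keeps its "left" half */
            seq_bits[i] = seq_bits[i - word_num] << word_offset;

            /* everything below has been shifted out */
            for (i--; i >= 0; i--)
                    seq_bits[i] = 0;
    }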
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 056180e..619fb73 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -532,14 +532,14 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
/* Access the dhcp option lists. Each entry is made up by:
- * - octect 1: option type
- * - octect 2: option data len (only if type != 255 and 0)
- * - octect 3: option data */
+ * - octet 1: option type
+ * - octet 2: option data len (only if type != 255 and 0)
+ * - octet 3: option data */
while (*p != 255 && !ret) {
- /* p now points to the first octect: option type */
+ /* p now points to the first octet: option type */
if (*p == 53) {
/* type 53 is the message type option.
- * Jump the len octect and go to the data octect */
+ * Jump the len octet and go to the data octet */
if (pkt_len < 2)
goto out;
p += 2;
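The corrected comments describe the DHCP options region: each entry is one type octet, one length octet (except for the pad and end types) and the data; option 53 carries the DHCP message type. Below is a hedged standalone sketch of such a scan — the kernel helper only walks far enough to test for a DHCPREQUEST, and its bounds handling differs in detail.

    #include <stdint.h>
    #include <stddef.h>

    #define DHCP_OPT_PAD        0
    #define DHCP_OPT_MSG_TYPE  53
    #define DHCP_OPT_END      255
    #define DHCPREQUEST         3

    static int is_dhcprequest_opts(const uint8_t *p, size_t len)
    {
            size_t i = 0;

            while (i < len && p[i] != DHCP_OPT_END) {
                    if (p[i] == DHCP_OPT_PAD) {     /* pad: a lone octet */
                            i++;
                            continue;
                    }
                    if (i + 1 >= len)               /* no room for len octet */
                            break;
                    if (p[i] == DHCP_OPT_MSG_TYPE && p[i + 1] == 1 &&
                        i + 2 < len)
                            /* jump the len octet, read the data octet */
                            return p[i + 2] == DHCPREQUEST;
                    i += 2 + p[i + 1];              /* skip len + data */
            }
            return 0;
    }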
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index db7aacf..7704df4 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"
+#include "bat_ogm.h"
#include <linux/if_arp.h>
@@ -131,7 +132,6 @@ static void primary_if_select(struct bat_priv *bat_priv,
struct hard_iface *new_hard_iface)
{
struct hard_iface *curr_hard_iface;
- struct batman_packet *batman_packet;
ASSERT_RTNL();
@@ -147,10 +147,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
if (!new_hard_iface)
return;
- batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
- batman_packet->flags = PRIMARIES_FIRST_HOP;
- batman_packet->ttl = TTL;
-
+ bat_ogm_init_primary(new_hard_iface);
primary_if_update_addr(bat_priv);
}
@@ -162,14 +159,6 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
return false;
}
-static void update_mac_addresses(struct hard_iface *hard_iface)
-{
- memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
- memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
- hard_iface->net_dev->dev_addr, ETH_ALEN);
-}
-
static void check_known_mac_addr(const struct net_device *net_dev)
{
const struct hard_iface *hard_iface;
@@ -244,12 +233,12 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
bat_priv = netdev_priv(hard_iface->soft_iface);
- update_mac_addresses(hard_iface);
+ bat_ogm_update_mac(hard_iface);
hard_iface->if_status = IF_TO_BE_ACTIVATED;
/**
* the first active interface becomes our primary interface or
- * the next active interface after the old primay interface was removed
+ * the next active interface after the old primary interface was removed
*/
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
@@ -283,7 +272,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
const char *iface_name)
{
struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
struct net_device *soft_iface;
int ret;
@@ -318,8 +306,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->soft_iface = soft_iface;
bat_priv = netdev_priv(hard_iface->soft_iface);
- hard_iface->packet_len = BAT_PACKET_LEN;
- hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
+
+ bat_ogm_init(hard_iface);
if (!hard_iface->packet_buff) {
bat_err(hard_iface->soft_iface, "Can't add interface packet "
@@ -328,15 +316,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
goto err;
}
- batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
- batman_packet->packet_type = BAT_PACKET;
- batman_packet->version = COMPAT_VERSION;
- batman_packet->flags = NO_FLAGS;
- batman_packet->ttl = 2;
- batman_packet->tq = TQ_MAX_VALUE;
- batman_packet->tt_num_changes = 0;
- batman_packet->ttvn = 0;
-
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
hard_iface->if_status = IF_INACTIVE;
@@ -381,7 +360,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
- schedule_own_packet(hard_iface);
+ schedule_bat_ogm(hard_iface);
out:
return 0;
@@ -455,11 +434,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
dev_hold(net_dev);
hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
- if (!hard_iface) {
- pr_err("Can't add interface (%s): out of memory\n",
- net_dev->name);
+ if (!hard_iface)
goto release_dev;
- }
ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
if (ret)
@@ -551,7 +527,7 @@ static int hard_if_event(struct notifier_block *this,
goto hardif_put;
check_known_mac_addr(hard_iface->net_dev);
- update_mac_addresses(hard_iface);
+ bat_ogm_update_mac(hard_iface);
bat_priv = netdev_priv(hard_iface->soft_iface);
primary_if = primary_if_get_selected(bat_priv);
@@ -573,14 +549,14 @@ out:
return NOTIFY_DONE;
}
-/* receive a packet with the batman ethertype coming on a hard
+/* incoming packets with the batman ethertype received on any active hard
* interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev)
{
struct bat_priv *bat_priv;
- struct batman_packet *batman_packet;
+ struct batman_ogm_packet *batman_ogm_packet;
struct hard_iface *hard_iface;
int ret;
@@ -612,22 +588,22 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (hard_iface->if_status != IF_ACTIVE)
goto err_free;
- batman_packet = (struct batman_packet *)skb->data;
+ batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
- if (batman_packet->version != COMPAT_VERSION) {
+ if (batman_ogm_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
+ batman_ogm_packet->version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb. */
- switch (batman_packet->packet_type) {
+ switch (batman_ogm_packet->packet_type) {
/* batman originator packet */
- case BAT_PACKET:
- ret = recv_bat_packet(skb, hard_iface);
+ case BAT_OGM:
+ ret = recv_bat_ogm_packet(skb, hard_iface);
break;
/* batman icmp packet */
@@ -681,6 +657,36 @@ err_out:
return NET_RX_DROP;
}
+/* This function returns true if the interface represented by ifindex is an
+ * 802.11 wireless device */
+bool is_wifi_iface(int ifindex)
+{
+ struct net_device *net_device = NULL;
+ bool ret = false;
+
+ if (ifindex == NULL_IFINDEX)
+ goto out;
+
+ net_device = dev_get_by_index(&init_net, ifindex);
+ if (!net_device)
+ goto out;
+
+#ifdef CONFIG_WIRELESS_EXT
+ /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
+ * check for wireless_handlers != NULL */
+ if (net_device->wireless_handlers)
+ ret = true;
+ else
+#endif
+ /* cfg80211 drivers have to set ieee80211_ptr */
+ if (net_device->ieee80211_ptr)
+ ret = true;
+out:
+ if (net_device)
+ dev_put(net_device);
+ return ret;
+}
+
struct notifier_block hard_if_notifier = {
.notifier_call = hard_if_event,
};
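is_wifi_iface() above covers both driver generations: pre-cfg80211 drivers expose wireless_handlers (WEXT), cfg80211 drivers set ieee80211_ptr, and NULL_IFINDEX short-circuits the lookup for callers without a real interface. A hedged sketch of the kind of caller this enables — the helper name is hypothetical, but TT_CLIENT_WIFI is the flag introduced in packet.h below, and ap_isolation is the sysfs knob added earlier in this series:

    /* sketch only: flag a translation-table client learned on a
     * wireless interface so AP isolation can act on it later;
     * assumes tt_local_entry carries a flags field */
    static void tt_local_mark_wifi(struct tt_local_entry *tt_local_entry,
                                   int ifindex)
    {
            if (is_wifi_iface(ifindex))
                    tt_local_entry->flags |= TT_CLIENT_WIFI;
    }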
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 442eacb..67f78d1 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -42,6 +42,7 @@ void hardif_remove_interfaces(void);
int hardif_min_mtu(struct net_device *soft_iface);
void update_min_mtu(struct net_device *soft_iface);
void hardif_free_rcu(struct rcu_head *rcu);
+bool is_wifi_iface(int ifindex);
static inline void hardif_free_ref(struct hard_iface *hard_iface)
{
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index dd5c9fd..d20aa71 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -76,19 +76,30 @@ static inline void hash_delete(struct hashtable_t *hash,
hash_destroy(hash);
}
-/* adds data to the hashtable. returns 0 on success, -1 on error */
+/**
+ * hash_add - adds data to the hashtable
+ * @hash: storage hash table
+ * @compare: callback to determine if 2 hash elements are identical
+ * @choose: callback calculating the hash index
+ * @data: data passed to the aforementioned callbacks as argument
+ * @data_node: the element to be added
+ *
+ * Returns 0 on success, 1 if the element is already in the hash
+ * and -1 on error.
+ */
+
static inline int hash_add(struct hashtable_t *hash,
hashdata_compare_cb compare,
hashdata_choose_cb choose,
const void *data, struct hlist_node *data_node)
{
- int index;
+ int index, ret = -1;
struct hlist_head *head;
struct hlist_node *node;
spinlock_t *list_lock; /* spinlock to protect write access */
if (!hash)
- goto err;
+ goto out;
index = choose(data, hash->size);
head = &hash->table[index];
@@ -99,6 +110,7 @@ static inline int hash_add(struct hashtable_t *hash,
if (!compare(node, data))
continue;
+ ret = 1;
goto err_unlock;
}
rcu_read_unlock();
@@ -108,12 +120,13 @@ static inline int hash_add(struct hashtable_t *hash,
hlist_add_head_rcu(data_node, head);
spin_unlock_bh(list_lock);
- return 0;
+ ret = 0;
+ goto out;
err_unlock:
rcu_read_unlock();
-err:
- return -1;
+out:
+ return ret;
}
/* removes data from hash, if found. returns pointer to data on success, so you
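With the reworked return convention, callers of hash_add() can now tell "already present" (1) apart from a real failure (-1); get_orig_node() in originator.c below is adjusted to bail out on any nonzero value. A hedged sketch of a caller that keeps the two cases separate — the wrapper name and error codes are illustrative, compare_orig/choose_orig mirror the real call site:

    static int orig_hash_insert(struct bat_priv *bat_priv,
                                struct orig_node *orig_node)
    {
            int ret = hash_add(bat_priv->orig_hash, compare_orig,
                               choose_orig, orig_node,
                               &orig_node->hash_entry);

            if (ret == 1)           /* an equal entry is already hashed */
                    return -EEXIST;
            if (ret == -1)          /* no hash table, or insert failed */
                    return -ENOMEM;
            return 0;               /* freshly inserted */
    }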
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b0f9068..fb87bdc 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -107,7 +107,7 @@ int mesh_init(struct net_device *soft_iface)
if (tt_init(bat_priv) < 1)
goto err;
- tt_local_add(soft_iface, soft_iface->dev_addr);
+ tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
if (vis_init(bat_priv) < 1)
goto err;
@@ -117,8 +117,6 @@ int mesh_init(struct net_device *soft_iface)
goto end;
err:
- pr_err("Unable to allocate memory for mesh information structures: "
- "out of mem ?\n");
mesh_free(soft_iface);
return -1;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a6df61a..964ad4d 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -28,7 +28,7 @@
#define DRIVER_DEVICE "batman-adv"
#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2011.3.0"
+#define SOURCE_VERSION "2011.4.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -44,7 +44,7 @@
#define PURGE_TIMEOUT 200
#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
#define TT_CLIENT_ROAM_TIMEOUT 600
-/* sliding packet range of received originator messages in squence numbers
+/* sliding packet range of received originator messages in sequence numbers
* (should be a multiple of our word size) */
#define TQ_LOCAL_WINDOW_SIZE 64
#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
@@ -62,6 +62,8 @@
#define NO_FLAGS 0
+#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+
#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -133,7 +135,7 @@ enum dbg_level {
#include <linux/mutex.h> /* mutex */
#include <linux/module.h> /* needed by all modules */
#include <linux/netdevice.h> /* netdevice */
-#include <linux/etherdevice.h> /* ethernet address classifaction */
+#include <linux/etherdevice.h> /* ethernet address classification */
#include <linux/if_ether.h> /* ethernet header */
#include <linux/poll.h> /* poll_table */
#include <linux/kthread.h> /* kernel threads */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index f3c3f62..0e5b772 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -252,7 +252,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
hash_added = hash_add(bat_priv->orig_hash, compare_orig,
choose_orig, orig_node, &orig_node->hash_entry);
- if (hash_added < 0)
+ if (hash_added != 0)
goto free_bcast_own_sum;
return orig_node;
@@ -336,8 +336,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
} else {
if (purge_orig_neighbors(bat_priv, orig_node,
&best_neigh_node)) {
- update_routes(bat_priv, orig_node,
- best_neigh_node);
+ update_route(bat_priv, orig_node, best_neigh_node);
}
}
@@ -493,10 +492,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own,
(max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
@@ -504,10 +501,8 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
orig_node->bcast_own = data_ptr;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own_sum,
(max_if_num - 1) * sizeof(uint8_t));
@@ -562,10 +557,8 @@ static int orig_node_del_if(struct orig_node *orig_node,
chunk_size = sizeof(unsigned long) * NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
/* copy first part */
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -583,10 +576,8 @@ free_bcast_own:
goto free_own_sum;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
- if (!data_ptr) {
- pr_err("Can't resize orig: out of memory\n");
+ if (!data_ptr)
return -1;
- }
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b76b4be..4d9e54c 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,14 +25,14 @@
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
enum bat_packettype {
- BAT_PACKET = 0x01,
- BAT_ICMP = 0x02,
- BAT_UNICAST = 0x03,
- BAT_BCAST = 0x04,
- BAT_VIS = 0x05,
+ BAT_OGM = 0x01,
+ BAT_ICMP = 0x02,
+ BAT_UNICAST = 0x03,
+ BAT_BCAST = 0x04,
+ BAT_VIS = 0x05,
BAT_UNICAST_FRAG = 0x06,
- BAT_TT_QUERY = 0x07,
- BAT_ROAM_ADV = 0x08
+ BAT_TT_QUERY = 0x07,
+ BAT_ROAM_ADV = 0x08
};
/* this file is included by batctl which needs these defines */
@@ -84,12 +84,13 @@ enum tt_query_flags {
enum tt_client_flags {
TT_CLIENT_DEL = 1 << 0,
TT_CLIENT_ROAM = 1 << 1,
+ TT_CLIENT_WIFI = 1 << 2,
TT_CLIENT_NOPURGE = 1 << 8,
TT_CLIENT_NEW = 1 << 9,
TT_CLIENT_PENDING = 1 << 10
};
-struct batman_packet {
+struct batman_ogm_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
@@ -104,7 +105,7 @@ struct batman_packet {
uint16_t tt_crc;
} __packed;
-#define BAT_PACKET_LEN sizeof(struct batman_packet)
+#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet)
struct icmp_packet {
uint8_t packet_type;
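The tt_client_flags layout above keeps the wire-visible markers (DEL, ROAM and the new WIFI flag) in the low byte while the node-local state bits start at bit 8; reading the split that way is an assumption of this note, not something the hunk states. A minimal sketch under that assumption:

    #include <stdint.h>

    #define TT_CLIENT_WIRE_MASK 0x00ffu     /* hypothetical name */

    /* keep only the flags that may travel inside TT change records */
    static uint8_t tt_flags_for_wire(uint16_t flags)
    {
            return (uint8_t)(flags & TT_CLIENT_WIRE_MASK);
    }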
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 0f32c81..f961cc5 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -22,18 +22,14 @@
#include "main.h"
#include "routing.h"
#include "send.h"
-#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
-#include "ring_buffer.h"
#include "vis.h"
-#include "aggregation.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
#include "unicast.h"
+#include "bat_ogm.h"
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
@@ -64,69 +60,9 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
}
}
-static void update_transtable(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *tt_buff,
- uint8_t tt_num_changes, uint8_t ttvn,
- uint16_t tt_crc)
-{
- uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
- bool full_table = true;
-
- /* the ttvn increased by one -> we can apply the attached changes */
- if (ttvn - orig_ttvn == 1) {
- /* the OGM could not contain the changes because they were too
- * many to fit in one frame or because they have already been
- * sent TT_OGM_APPEND_MAX times. In this case send a tt
- * request */
- if (!tt_num_changes) {
- full_table = false;
- goto request_table;
- }
-
- tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
- (struct tt_change *)tt_buff);
-
- /* Even if we received the crc into the OGM, we prefer
- * to recompute it to spot any possible inconsistency
- * in the global table */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
-
- /* The ttvn alone is not enough to guarantee consistency
- * because a single value could repesent different states
- * (due to the wrap around). Thus a node has to check whether
- * the resulting table (after applying the changes) is still
- * consistent or not. E.g. a node could disconnect while its
- * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
- * checking the CRC value is mandatory to detect the
- * inconsistency */
- if (orig_node->tt_crc != tt_crc)
- goto request_table;
-
- /* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
- orig_node->tt_poss_change = false;
- } else {
- /* if we missed more than one change or our tables are not
- * in sync anymore -> request fresh tt data */
- if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
-request_table:
- bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
- "Need to retrieve the correct information "
- "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
- "%u num_changes: %u)\n", orig_node->orig, ttvn,
- orig_ttvn, tt_crc, orig_node->tt_crc,
- tt_num_changes);
- send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
- full_table);
- return;
- }
- }
-}
-
-static void update_route(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+static void _update_route(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct neigh_node *curr_router;
@@ -170,8 +106,8 @@ static void update_route(struct bat_priv *bat_priv,
neigh_node_free_ref(curr_router);
}
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct neigh_node *router = NULL;
@@ -181,116 +117,13 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
router = orig_node_get_router(orig_node);
if (router != neigh_node)
- update_route(bat_priv, orig_node, neigh_node);
+ _update_route(bat_priv, orig_node, neigh_node);
out:
if (router)
neigh_node_free_ref(router);
}
-static int is_bidirectional_neigh(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_packet *batman_packet,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
- struct hlist_node *node;
- uint8_t total_count;
- uint8_t orig_eq_count, neigh_rq_count, tq_own;
- int tq_asym_penalty, ret = 0;
-
- /* find corresponding one hop neighbor */
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_neigh_node->neigh_list, list) {
-
- if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
- continue;
-
- if (tmp_neigh_node->if_incoming != if_incoming)
- continue;
-
- if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
- continue;
-
- neigh_node = tmp_neigh_node;
- break;
- }
- rcu_read_unlock();
-
- if (!neigh_node)
- neigh_node = create_neighbor(orig_neigh_node,
- orig_neigh_node,
- orig_neigh_node->orig,
- if_incoming);
-
- if (!neigh_node)
- goto out;
-
- /* if orig_node is direct neighbour update neigh_node last_valid */
- if (orig_node == orig_neigh_node)
- neigh_node->last_valid = jiffies;
-
- orig_node->last_valid = jiffies;
-
- /* find packet count of corresponding one hop neighbor */
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
- neigh_rq_count = neigh_node->real_packet_count;
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
- /* pay attention to not get a value bigger than 100 % */
- total_count = (orig_eq_count > neigh_rq_count ?
- neigh_rq_count : orig_eq_count);
-
- /* if we have too few packets (too less data) we set tq_own to zero */
- /* if we receive too few packets it is not considered bidirectional */
- if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
- (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
- tq_own = 0;
- else
- /* neigh_node->real_packet_count is never zero as we
- * only purge old information when getting new
- * information */
- tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
-
- /*
- * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
- * affect the nearly-symmetric links only a little, but
- * punishes asymmetric links more. This will give a value
- * between 0 and TQ_MAX_VALUE
- */
- tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE);
-
- batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: "
- "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
- "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
- "total tq: %3i\n",
- orig_node->orig, orig_neigh_node->orig, total_count,
- neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
-
- /* if link has the minimum required transmission quality
- * consider it bidirectional */
- if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
- ret = 1;
-
-out:
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- return ret;
-}
-
/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node)
@@ -308,8 +141,8 @@ out:
return;
}
-static void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void bonding_candidate_add(struct orig_node *orig_node,
+ struct neigh_node *neigh_node)
{
struct hlist_node *node;
struct neigh_node *tmp_neigh_node, *router = NULL;
@@ -379,162 +212,23 @@ out:
}
/* copy primary address for bonding */
-static void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_packet *batman_packet)
+void bonding_save_primary(const struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ const struct batman_ogm_packet *batman_ogm_packet)
{
- if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
return;
memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
-static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_packet *batman_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff, int is_duplicate)
-{
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- struct neigh_node *router = NULL;
- struct orig_node *orig_node_tmp;
- struct hlist_node *node;
- uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
-
- bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
- "Searching and updating originator entry of received packet\n");
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_node->neigh_list, list) {
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming) &&
- atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- neigh_node = tmp_neigh_node;
- continue;
- }
-
- if (is_duplicate)
- continue;
-
- spin_lock_bh(&tmp_neigh_node->tq_lock);
- ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
- tmp_neigh_node->tq_avg =
- ring_buffer_avg(tmp_neigh_node->tq_recv);
- spin_unlock_bh(&tmp_neigh_node->tq_lock);
- }
-
- if (!neigh_node) {
- struct orig_node *orig_tmp;
-
- orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
- if (!orig_tmp)
- goto unlock;
-
- neigh_node = create_neighbor(orig_node, orig_tmp,
- ethhdr->h_source, if_incoming);
-
- orig_node_free_ref(orig_tmp);
- if (!neigh_node)
- goto unlock;
- } else
- bat_dbg(DBG_BATMAN, bat_priv,
- "Updating existing last-hop neighbor of originator\n");
-
- rcu_read_unlock();
-
- orig_node->flags = batman_packet->flags;
- neigh_node->last_valid = jiffies;
-
- spin_lock_bh(&neigh_node->tq_lock);
- ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
- batman_packet->tq);
- neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
- spin_unlock_bh(&neigh_node->tq_lock);
-
- if (!is_duplicate) {
- orig_node->last_ttl = batman_packet->ttl;
- neigh_node->last_ttl = batman_packet->ttl;
- }
-
- bonding_candidate_add(orig_node, neigh_node);
-
- /* if this neighbor already is our next hop there is nothing
- * to change */
- router = orig_node_get_router(orig_node);
- if (router == neigh_node)
- goto update_tt;
-
- /* if this neighbor does not offer a better TQ we won't consider it */
- if (router && (router->tq_avg > neigh_node->tq_avg))
- goto update_tt;
-
- /* if the TQ is the same and the link not more symetric we
- * won't consider it either */
- if (router && (neigh_node->tq_avg == router->tq_avg)) {
- orig_node_tmp = router->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_orig =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
-
- orig_node_tmp = neigh_node->orig_node;
- spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
- bcast_own_sum_neigh =
- orig_node_tmp->bcast_own_sum[if_incoming->if_num];
- spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
-
- if (bcast_own_sum_orig >= bcast_own_sum_neigh)
- goto update_tt;
- }
-
- update_routes(bat_priv, orig_node, neigh_node);
-
-update_tt:
- /* I have to check for transtable changes only if the OGM has been
- * sent through a primary interface */
- if (((batman_packet->orig != ethhdr->h_source) &&
- (batman_packet->ttl > 2)) ||
- (batman_packet->flags & PRIMARIES_FIRST_HOP))
- update_transtable(bat_priv, orig_node, tt_buff,
- batman_packet->tt_num_changes,
- batman_packet->ttvn,
- batman_packet->tt_crc);
-
- if (orig_node->gw_flags != batman_packet->gw_flags)
- gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
-
- orig_node->gw_flags = batman_packet->gw_flags;
-
- /* restart gateway selection if fast or late switching was enabled */
- if ((orig_node->gw_flags) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
- (atomic_read(&bat_priv->gw_sel_class) > 2))
- gw_check_election(bat_priv, orig_node);
-
- goto out;
-
-unlock:
- rcu_read_unlock();
-out:
- if (neigh_node)
- neigh_node_free_ref(neigh_node);
- if (router)
- neigh_node_free_ref(router);
-}
-
/* checks whether the host restarted and is in the protection time.
* returns:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-static int window_protected(struct bat_priv *bat_priv,
- int32_t seq_num_diff,
- unsigned long *last_reset)
+int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset)
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
@@ -552,330 +246,12 @@ static int window_protected(struct bat_priv *bat_priv,
return 0;
}
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- * 1 the packet is a duplicate
- * 0 the packet has not yet been received
- * -1 the packet is old and has been received while the seqno window
- * was protected. Caller should drop it.
- */
-static int count_real_packets(const struct ethhdr *ethhdr,
- const struct batman_packet *batman_packet,
- const struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *tmp_neigh_node;
- struct hlist_node *node;
- int is_duplicate = 0;
- int32_t seq_diff;
- int need_update = 0;
- int set_mark, ret = -1;
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (!orig_node)
- return 0;
-
- spin_lock_bh(&orig_node->ogm_cnt_lock);
- seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
-
- /* signalize caller that the packet is to be dropped. */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
- goto out;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(tmp_neigh_node, node,
- &orig_node->neigh_list, list) {
-
- is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_packet->seqno);
-
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming))
- set_mark = 1;
- else
- set_mark = 0;
-
- /* if the window moved, set the update flag. */
- need_update |= bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
- seq_diff, set_mark);
-
- tmp_neigh_node->real_packet_count =
- bit_packet_count(tmp_neigh_node->real_bits);
- }
- rcu_read_unlock();
-
- if (need_update) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %d, new %d\n",
- orig_node->last_real_seqno, batman_packet->seqno);
- orig_node->last_real_seqno = batman_packet->seqno;
- }
-
- ret = is_duplicate;
-
-out:
- spin_unlock_bh(&orig_node->ogm_cnt_lock);
- orig_node_free_ref(orig_node);
- return ret;
-}
-
-void receive_bat_packet(const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct hard_iface *hard_iface;
- struct orig_node *orig_neigh_node, *orig_node;
- struct neigh_node *router = NULL, *router_router = NULL;
- struct neigh_node *orig_neigh_router = NULL;
- int has_directlink_flag;
- int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
- int is_duplicate;
- uint32_t if_incoming_seqno;
-
- /* Silently drop when the batman packet is actually not a
- * correct packet.
- *
- * This might happen if a packet is padded (e.g. Ethernet has a
- * minimum frame length of 64 byte) and the aggregation interprets
- * it as an additional length.
- *
- * TODO: A more sane solution would be to have a bit in the
- * batman_packet to detect whether the packet is the last
- * packet in an aggregation. Here we expect that the padding
- * is always zero (or not 0x01)
- */
- if (batman_packet->packet_type != BAT_PACKET)
- return;
-
- /* could be changed by schedule_own_packet() */
- if_incoming_seqno = atomic_read(&if_incoming->seqno);
-
- has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- is_single_hop_neigh = (compare_eth(ethhdr->h_source,
- batman_packet->orig) ? 1 : 0);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
- "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
- "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->net_dev->name,
- if_incoming->net_dev->dev_addr, batman_packet->orig,
- batman_packet->prev_sender, batman_packet->seqno,
- batman_packet->ttvn, batman_packet->tt_crc,
- batman_packet->tt_num_changes, batman_packet->tq,
- batman_packet->ttl, batman_packet->version,
- has_directlink_flag);
-
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
- continue;
-
- if (hard_iface->soft_iface != if_incoming->soft_iface)
- continue;
-
- if (compare_eth(ethhdr->h_source,
- hard_iface->net_dev->dev_addr))
- is_my_addr = 1;
-
- if (compare_eth(batman_packet->orig,
- hard_iface->net_dev->dev_addr))
- is_my_orig = 1;
-
- if (compare_eth(batman_packet->prev_sender,
- hard_iface->net_dev->dev_addr))
- is_my_oldorig = 1;
-
- if (is_broadcast_ether_addr(ethhdr->h_source))
- is_broadcast = 1;
- }
- rcu_read_unlock();
-
- if (batman_packet->version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_packet->version);
- return;
- }
-
- if (is_my_addr) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM"
- ")\n",
- ethhdr->h_source);
- return;
- }
-
- if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "ignoring all packets with broadcast source addr (sender: %pM"
- ")\n", ethhdr->h_source);
- return;
- }
-
- if (is_my_orig) {
- unsigned long *word;
- int offset;
-
- orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
- if (!orig_neigh_node)
- return;
-
- /* neighbor has to indicate direct link and it has to
- * come via the corresponding interface */
- /* save packet seqno for bidirectional check */
- if (has_directlink_flag &&
- compare_eth(if_incoming->net_dev->dev_addr,
- batman_packet->orig)) {
- offset = if_incoming->if_num * NUM_WORDS;
-
- spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
- word = &(orig_neigh_node->bcast_own[offset]);
- bit_mark(word,
- if_incoming_seqno - batman_packet->seqno - 2);
- orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bit_packet_count(word);
- spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
- }
-
- bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
- "originator packet from myself (via neighbor)\n");
- orig_node_free_ref(orig_neigh_node);
- return;
- }
-
- if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: "
- "%pM)\n", ethhdr->h_source);
- return;
- }
-
- orig_node = get_orig_node(bat_priv, batman_packet->orig);
- if (!orig_node)
- return;
-
- is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
-
- if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time "
- "(sender: %pM)\n", ethhdr->h_source);
- goto out;
- }
-
- if (batman_packet->tq == 0) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet with tq equal 0\n");
- goto out;
- }
-
- router = orig_node_get_router(orig_node);
- if (router)
- router_router = orig_node_get_router(router->orig_node);
-
- /* avoid temporary routing loops */
- if (router && router_router &&
- (compare_eth(router->addr, batman_packet->prev_sender)) &&
- !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
- (compare_eth(router->addr, router_router->addr))) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that "
- "may make me loop (sender: %pM)\n", ethhdr->h_source);
- goto out;
- }
-
- /* if sender is a direct neighbor the sender mac equals
- * originator mac */
- orig_neigh_node = (is_single_hop_neigh ?
- orig_node :
- get_orig_node(bat_priv, ethhdr->h_source));
- if (!orig_neigh_node)
- goto out;
-
- orig_neigh_router = orig_node_get_router(orig_neigh_node);
-
- /* drop packet if sender is not a direct neighbor and if we
- * don't route towards it */
- if (!is_single_hop_neigh && (!orig_neigh_router)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: OGM via unknown neighbor!\n");
- goto out_neigh;
- }
-
- is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
- batman_packet, if_incoming);
-
- bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
-
- /* update ranking if it is not a duplicate or has the same
- * seqno and similar ttl as the non-duplicate */
- if (is_bidirectional &&
- (!is_duplicate ||
- ((orig_node->last_real_seqno == batman_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_packet->ttl))))
- update_orig(bat_priv, orig_node, ethhdr, batman_packet,
- if_incoming, tt_buff, is_duplicate);
-
- /* is single hop (direct) neighbor */
- if (is_single_hop_neigh) {
-
- /* mark direct link on incoming interface */
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 1, if_incoming);
-
- bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
- "rebroadcast neighbor packet with direct link flag\n");
- goto out_neigh;
- }
-
- /* multihop originator */
- if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: not received via bidirectional link\n");
- goto out_neigh;
- }
-
- if (is_duplicate) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: duplicate packet received\n");
- goto out_neigh;
- }
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast originator packet\n");
- schedule_forward_packet(orig_node, ethhdr, batman_packet,
- 0, if_incoming);
-
-out_neigh:
- if ((orig_neigh_node) && (!is_single_hop_neigh))
- orig_node_free_ref(orig_neigh_node);
-out:
- if (router)
- neigh_node_free_ref(router);
- if (router_router)
- neigh_node_free_ref(router_router);
- if (orig_neigh_router)
- neigh_node_free_ref(orig_neigh_router);
-
- orig_node_free_ref(orig_node);
-}
-
-int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
+int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
struct ethhdr *ethhdr;
/* drop packet if it doesn't have the necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
+ if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -898,10 +274,7 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- receive_aggr_bat_packet(ethhdr,
- skb->data,
- skb_headlen(skb),
- hard_iface);
+ bat_ogm_receive(ethhdr, skb->data, skb_headlen(skb), hard_iface);
kfree_skb(skb);
return NET_RX_SUCCESS;
@@ -1243,7 +616,7 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
}
break;
case TT_RESPONSE:
- /* packet needs to be linearised to access the TT changes */
+ /* packet needs to be linearized to access the TT changes */
if (skb_linearize(skb) < 0)
goto out;
@@ -1300,7 +673,7 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
roam_adv_packet->client);
tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
- atomic_read(&orig_node->last_ttvn) + 1, true);
+ atomic_read(&orig_node->last_ttvn) + 1, true, false);
/* Roaming phase starts: I have new information but the ttvn has not
* been incremented yet. This flag will make me check all the incoming
@@ -1536,7 +909,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv,
ethhdr = (struct ethhdr *)(skb->data +
sizeof(struct unicast_packet));
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
if (!orig_node) {
if (!is_my_client(bat_priv, ethhdr->h_dest))
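window_protected(), exported above, treats a sequence-number difference far outside the sliding window as a restarted originator and asks the caller to drop the packet until a protection timeout expires. A hedged standalone model with the constants from main.h — the timeout handling of the real function is omitted, and EXPECTED_SEQNO_RANGE is assumed to be 65536 here:

    #include <stdint.h>
    #include <stdio.h>

    #define TQ_LOCAL_WINDOW_SIZE    64
    #define EXPECTED_SEQNO_RANGE 65536      /* assumed value */

    static int seqno_window_protected(int32_t seq_num_diff)
    {
            return seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE ||
                   seq_num_diff >= EXPECTED_SEQNO_RANGE;
    }

    int main(void)
    {
            printf("%d\n", seqno_window_protected(3));     /* 0: in window */
            printf("%d\n", seqno_window_protected(-70));   /* 1: restart?  */
            printf("%d\n", seqno_window_protected(70000)); /* 1: too far   */
            return 0;
    }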
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index fb14e95..7aaee0f 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,19 +23,15 @@
#define _NET_BATMAN_ADV_ROUTING_H_
void slide_own_bcast_window(struct hard_iface *hard_iface);
-void receive_bat_packet(const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming);
-void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node);
+void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node);
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
struct neigh_node *find_router(struct bat_priv *bat_priv,
@@ -43,5 +39,12 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
const struct hard_iface *recv_if);
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node);
+void bonding_candidate_add(struct orig_node *orig_node,
+ struct neigh_node *neigh_node);
+void bonding_save_primary(const struct orig_node *orig_node,
+ struct orig_node *orig_neigh_node,
+ const struct batman_ogm_packet *batman_ogm_packet);
+int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 58d1447..8a684eb 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -26,33 +26,12 @@
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
-#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"
+#include "bat_ogm.h"
static void send_outstanding_bcast_packet(struct work_struct *work);
-/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
-{
- int hop_penalty = atomic_read(&bat_priv->hop_penalty);
- return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
-}
-
-/* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(const struct bat_priv *bat_priv)
-{
- return jiffies + msecs_to_jiffies(
- atomic_read(&bat_priv->orig_interval) -
- JITTER + (random32() % 2*JITTER));
-}
-
-/* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(void)
-{
- return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
-}
-
/* send out an already prepared packet to the given address via the
* specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
@@ -99,141 +78,17 @@ send_skb_err:
return NET_XMIT_DROP;
}
-/* Send a packet to a given interface */
-static void send_packet_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- char *fwd_str;
- uint8_t packet_num;
- int16_t buff_pos;
- struct batman_packet *batman_packet;
- struct sk_buff *skb;
-
- if (hard_iface->if_status != IF_ACTIVE)
- return;
-
- packet_num = 0;
- buff_pos = 0;
- batman_packet = (struct batman_packet *)forw_packet->skb->data;
-
- /* adjust all flags and log packets */
- while (aggregated_packet(buff_pos,
- forw_packet->packet_len,
- batman_packet->tt_num_changes)) {
-
- /* we might have aggregated direct link packets with an
- * ordinary base packet */
- if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
- (forw_packet->if_incoming == hard_iface))
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
-
- fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
- "Sending own" :
- "Forwarding"));
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
- " IDF %s, hvn %d) on interface %s [%pM]\n",
- fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->tq, batman_packet->ttl,
- (batman_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_packet->ttvn, hard_iface->net_dev->name,
- hard_iface->net_dev->dev_addr);
-
- buff_pos += sizeof(*batman_packet) +
- tt_len(batman_packet->tt_num_changes);
- packet_num++;
- batman_packet = (struct batman_packet *)
- (forw_packet->skb->data + buff_pos);
- }
-
- /* create clone because function is called more than once */
- skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, hard_iface, broadcast_addr);
-}
-
-/* send a batman packet */
-static void send_packet(struct forw_packet *forw_packet)
-{
- struct hard_iface *hard_iface;
- struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
- struct batman_packet *batman_packet =
- (struct batman_packet *)(forw_packet->skb->data);
- int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
-
- if (!forw_packet->if_incoming) {
- pr_err("Error - can't forward packet: incoming iface not "
- "specified\n");
- goto out;
- }
-
- soft_iface = forw_packet->if_incoming->soft_iface;
- bat_priv = netdev_priv(soft_iface);
-
- if (forw_packet->if_incoming->if_status != IF_ACTIVE)
- goto out;
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- /* multihomed peer assumed */
- /* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_packet->ttl == 1)) ||
- (forw_packet->own && (forw_packet->if_incoming != primary_if))) {
-
- /* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %d, TTL %d) "
- "on interface %s [%pM]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->ttl,
- forw_packet->if_incoming->net_dev->name,
- forw_packet->if_incoming->net_dev->dev_addr);
-
- /* skb is only used once and than forw_packet is free'd */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
- forw_packet->skb = NULL;
-
- goto out;
- }
-
- /* broadcast on every interface */
- rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->soft_iface != soft_iface)
- continue;
-
- send_packet_to_if(forw_packet, hard_iface);
- }
- rcu_read_unlock();
-
-out:
- if (primary_if)
- hardif_free_ref(primary_if);
-}
-
static void realloc_packet_buffer(struct hard_iface *hard_iface,
- int new_len)
+ int new_len)
{
unsigned char *new_buff;
- struct batman_packet *batman_packet;
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
if (new_buff) {
memcpy(new_buff, hard_iface->packet_buff,
- sizeof(*batman_packet));
+ BATMAN_OGM_LEN);
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = new_buff;
@@ -242,60 +97,48 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
}
/* when calling this function (hard_iface == primary_if) has to be true */
-static void prepare_packet_buffer(struct bat_priv *bat_priv,
+static int prepare_packet_buffer(struct bat_priv *bat_priv,
struct hard_iface *hard_iface)
{
int new_len;
- struct batman_packet *batman_packet;
- new_len = BAT_PACKET_LEN +
+ new_len = BATMAN_OGM_LEN +
tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented */
if (new_len > hard_iface->soft_iface->mtu)
- new_len = BAT_PACKET_LEN;
+ new_len = BATMAN_OGM_LEN;
realloc_packet_buffer(hard_iface, new_len);
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
/* reset the sending counter */
atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
- batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
- hard_iface->packet_buff + BAT_PACKET_LEN,
- hard_iface->packet_len - BAT_PACKET_LEN);
-
+ return tt_changes_fill_buffer(bat_priv,
+ hard_iface->packet_buff + BATMAN_OGM_LEN,
+ hard_iface->packet_len - BATMAN_OGM_LEN);
}
-static void reset_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
+static int reset_packet_buffer(struct bat_priv *bat_priv,
+ struct hard_iface *hard_iface)
{
- struct batman_packet *batman_packet;
-
- realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
-
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
- batman_packet->tt_num_changes = 0;
+ realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
+ return 0;
}
-void schedule_own_packet(struct hard_iface *hard_iface)
+void schedule_bat_ogm(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hard_iface *primary_if;
- unsigned long send_time;
- struct batman_packet *batman_packet;
- int vis_server;
+ int tt_num_changes = -1;
if ((hard_iface->if_status == IF_NOT_IN_USE) ||
(hard_iface->if_status == IF_TO_BE_REMOVED))
return;
- vis_server = atomic_read(&bat_priv->vis_mode);
- primary_if = primary_if_get_selected(bat_priv);
-
/**
* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
@@ -306,124 +149,26 @@ void schedule_own_packet(struct hard_iface *hard_iface)
if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
hard_iface->if_status = IF_ACTIVE;
+ primary_if = primary_if_get_selected(bat_priv);
+
if (hard_iface == primary_if) {
/* if at least one change happened */
if (atomic_read(&bat_priv->tt_local_changes) > 0) {
tt_commit_changes(bat_priv);
- prepare_packet_buffer(bat_priv, hard_iface);
+ tt_num_changes = prepare_packet_buffer(bat_priv,
+ hard_iface);
}
- /* if the changes have been sent enough times */
+ /* if the changes have been sent often enough */
if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
- reset_packet_buffer(bat_priv, hard_iface);
+ tt_num_changes = reset_packet_buffer(bat_priv,
+ hard_iface);
}
- /**
- * NOTE: packet_buff might just have been re-allocated in
- * prepare_packet_buffer() or in reset_packet_buffer()
- */
- batman_packet = (struct batman_packet *)hard_iface->packet_buff;
-
- /* change sequence number to network order */
- batman_packet->seqno =
- htonl((uint32_t)atomic_read(&hard_iface->seqno));
-
- batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
-
- if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_packet->flags |= VIS_SERVER;
- else
- batman_packet->flags &= ~VIS_SERVER;
-
- if ((hard_iface == primary_if) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
- batman_packet->gw_flags =
- (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
- else
- batman_packet->gw_flags = NO_FLAGS;
-
- atomic_inc(&hard_iface->seqno);
-
- slide_own_bcast_window(hard_iface);
- send_time = own_send_time(bat_priv);
- add_bat_packet_to_list(bat_priv,
- hard_iface->packet_buff,
- hard_iface->packet_len,
- hard_iface, 1, send_time);
-
if (primary_if)
hardif_free_ref(primary_if);
-}
-
-void schedule_forward_packet(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- int directlink,
- struct hard_iface *if_incoming)
-{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *router;
- uint8_t in_tq, in_ttl, tq_avg = 0;
- unsigned long send_time;
- uint8_t tt_num_changes;
-
- if (batman_packet->ttl <= 1) {
- bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
- return;
- }
-
- router = orig_node_get_router(orig_node);
-
- in_tq = batman_packet->tq;
- in_ttl = batman_packet->ttl;
- tt_num_changes = batman_packet->tt_num_changes;
-
- batman_packet->ttl--;
- memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
-
- /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
- * of our best tq value */
- if (router && router->tq_avg != 0) {
-
- /* rebroadcast ogm of best ranking neighbor as is */
- if (!compare_eth(router->addr, ethhdr->h_source)) {
- batman_packet->tq = router->tq_avg;
-
- if (router->last_ttl)
- batman_packet->ttl = router->last_ttl - 1;
- }
-
- tq_avg = router->tq_avg;
- }
-
- if (router)
- neigh_node_free_ref(router);
-
- /* apply hop penalty */
- batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq_orig: %i, tq_avg: %i, "
- "tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
- in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
- batman_packet->ttl);
-
- batman_packet->seqno = htonl(batman_packet->seqno);
- batman_packet->tt_crc = htons(batman_packet->tt_crc);
-
- /* switch of primaries first hop flag when forwarding */
- batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
- if (directlink)
- batman_packet->flags |= DIRECTLINK;
- else
- batman_packet->flags &= ~DIRECTLINK;
- send_time = forward_send_time();
- add_bat_packet_to_list(bat_priv,
- (unsigned char *)batman_packet,
- sizeof(*batman_packet) + tt_len(tt_num_changes),
- if_incoming, 0, send_time);
+ bat_ogm_schedule(hard_iface, tt_num_changes);
}
static void forw_packet_free(struct forw_packet *forw_packet)
@@ -454,7 +199,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
}
/* add a broadcast packet to the queue and setup timers. broadcast packets
- * are sent multiple times to increase probability for beeing received.
+ * are sent multiple times to increase probability for being received.
*
* This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
* errors.
@@ -557,7 +302,7 @@ out:
atomic_inc(&bat_priv->bcast_queue_left);
}
-void send_outstanding_bat_packet(struct work_struct *work)
+void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
@@ -573,7 +318,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
- send_packet(forw_packet);
+ bat_ogm_emit(forw_packet);
/**
* we have to have at least one packet in the queue
@@ -581,7 +326,7 @@ void send_outstanding_bat_packet(struct work_struct *work)
* shutting down
*/
if (forw_packet->own)
- schedule_own_packet(forw_packet->if_incoming);
+ schedule_bat_ogm(forw_packet->if_incoming);
out:
/* don't count own packet */
@@ -612,7 +357,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
&bat_priv->forw_bcast_list, list) {
/**
- * if purge_outstanding_packets() was called with an argmument
+ * if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -641,7 +386,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
&bat_priv->forw_bat_list, list) {
/**
- * if purge_outstanding_packets() was called with an argmument
+ * if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
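The send.c hunks above replace the OGM-specific schedule_own_packet()/schedule_forward_packet()/send_packet() entry points with generic bat_ogm_schedule()/bat_ogm_emit() calls, so the generic queueing and purge code no longer knows how a B.A.T.M.A.N. IV OGM is built. A minimal userspace sketch of the indirection this prepares; the ops struct below is hypothetical (at this stage the calls are still direct functions), names are illustrative only:

	#include <stdio.h>

	struct forw_packet;   /* queued-packet handle (opaque here) */
	struct hard_iface;    /* outgoing interface (opaque here) */

	struct bat_algo_ops { /* hypothetical name, not the kernel's API */
		void (*ogm_schedule)(struct hard_iface *iface, int tt_num_changes);
		void (*ogm_emit)(struct forw_packet *forw_packet);
	};

	static void iv_ogm_schedule(struct hard_iface *iface, int tt_num_changes)
	{
		(void)iface;
		printf("schedule OGM, %d TT changes appended\n", tt_num_changes);
	}

	static void iv_ogm_emit(struct forw_packet *forw_packet)
	{
		(void)forw_packet;
		printf("emit queued OGM\n");
	}

	static const struct bat_algo_ops batman_iv = {
		.ogm_schedule = iv_ogm_schedule,
		.ogm_emit = iv_ogm_emit,
	};

	int main(void)
	{
		batman_iv.ogm_schedule(NULL, 2);
		batman_iv.ogm_emit(NULL);
		return 0;
	}

The payoff of this boundary is that callers such as send_outstanding_bat_ogm_packet() stay protocol-agnostic.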
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 1f2d1e8..c8ca3ef 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -24,15 +24,10 @@
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
const uint8_t *dst_addr);
-void schedule_own_packet(struct hard_iface *hard_iface);
-void schedule_forward_packet(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_packet *batman_packet,
- int directlink,
- struct hard_iface *if_outgoing);
+void schedule_bat_ogm(struct hard_iface *hard_iface);
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_packet(struct work_struct *work);
+void send_outstanding_bat_ogm_packet(struct work_struct *work);
void purge_outstanding_packets(struct bat_priv *bat_priv,
const struct hard_iface *hard_iface);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 05dd351..f9cc957 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -445,30 +445,31 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
{
struct bat_priv *bat_priv = netdev_priv(dev);
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct batman_packet *batman_packet;
+ struct batman_ogm_packet *batman_ogm_packet;
struct softif_neigh *softif_neigh = NULL;
struct hard_iface *primary_if = NULL;
struct softif_neigh *curr_softif_neigh = NULL;
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
- batman_packet = (struct batman_packet *)
+ batman_ogm_packet = (struct batman_ogm_packet *)
(skb->data + ETH_HLEN + VLAN_HLEN);
else
- batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
+ batman_ogm_packet = (struct batman_ogm_packet *)
+ (skb->data + ETH_HLEN);
- if (batman_packet->version != COMPAT_VERSION)
+ if (batman_ogm_packet->version != COMPAT_VERSION)
goto out;
- if (batman_packet->packet_type != BAT_PACKET)
+ if (batman_ogm_packet->packet_type != BAT_OGM)
goto out;
- if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
goto out;
- if (is_my_mac(batman_packet->orig))
+ if (is_my_mac(batman_ogm_packet->orig))
goto out;
- softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
+ softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
if (!softif_neigh)
goto out;
@@ -532,11 +533,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* only modify transtable if it has been initialised before */
+ /* only modify transtable if it has been initialized before */
if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
tt_local_remove(bat_priv, dev->dev_addr,
"mac address changed", false);
- tt_local_add(dev, addr->sa_data);
+ tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -595,11 +596,12 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
goto dropped;
/* Register the client MAC in the transtable */
- tt_local_add(soft_iface, ethhdr->h_source);
+ tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ orig_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
do_bcast = is_multicast_ether_addr(ethhdr->h_dest);
- if (do_bcast || (orig_node && orig_node->gw_flags)) {
+ if (do_bcast || (orig_node && orig_node->gw_flags)) {
ret = gw_is_target(bat_priv, skb, orig_node);
if (ret < 0)
@@ -739,6 +741,9 @@ void interface_rx(struct net_device *soft_iface,
soft_iface->last_rx = jiffies;
+ if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+ goto dropped;
+
netif_rx(skb);
goto out;
@@ -796,10 +801,8 @@ struct net_device *softif_create(const char *name)
soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
- if (!soft_iface) {
- pr_err("Unable to allocate the batman interface: %s\n", name);
+ if (!soft_iface)
goto out;
- }
ret = register_netdevice(soft_iface);
if (ret < 0) {
@@ -812,6 +815,7 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
+ atomic_set(&bat_priv->ap_isolation, 0);
atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
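The soft-interface.c changes wire up AP isolation on both paths: interface_rx() drops local delivery when is_ap_isolated() says so, and interface_tx() passes the source MAC into transtable_search() so the route lookup itself can refuse wifi-to-wifi traffic. A simplified sketch of the core predicate, assuming only the both-endpoints-are-wireless rule shown in _is_ap_isolated() below (the flag bit value is illustrative, and the real kernel helper also consults the local/global TT hashes):

	#include <stdbool.h>
	#include <stdio.h>

	#define TT_CLIENT_WIFI (1u << 4)   /* illustrative bit position */

	struct client { unsigned int flags; };

	static bool ap_isolation_enabled = true;

	static bool isolated(const struct client *src, const struct client *dst)
	{
		if (!ap_isolation_enabled)
			return false;
		/* drop only if BOTH endpoints were learned on wireless ifaces */
		return (src->flags & TT_CLIENT_WIFI) &&
		       (dst->flags & TT_CLIENT_WIFI);
	}

	int main(void)
	{
		struct client wifi  = { TT_CLIENT_WIFI };
		struct client wired = { 0 };

		printf("wifi->wifi dropped: %d\n", isolated(&wifi, &wifi));
		printf("wifi->wired dropped: %d\n", isolated(&wifi, &wired));
		return 0;
	}

The TT_CLIENT_WIFI flag itself is set at learning time via is_wifi_iface(ifindex), which is why tt_local_add() now takes the ifindex parameter.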
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index fb6931d..c7aafc7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -137,10 +137,22 @@ static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
kfree_rcu(tt_local_entry, rcu);
}
+static void tt_global_entry_free_rcu(struct rcu_head *rcu)
+{
+ struct tt_global_entry *tt_global_entry;
+
+ tt_global_entry = container_of(rcu, struct tt_global_entry, rcu);
+
+ if (tt_global_entry->orig_node)
+ orig_node_free_ref(tt_global_entry->orig_node);
+
+ kfree(tt_global_entry);
+}
+
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
if (atomic_dec_and_test(&tt_global_entry->refcount))
- kfree_rcu(tt_global_entry, rcu);
+ call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu);
}
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -183,7 +195,8 @@ static int tt_local_init(struct bat_priv *bat_priv)
return 1;
}
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex)
{
struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct tt_local_entry *tt_local_entry = NULL;
@@ -207,6 +220,8 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
memcpy(tt_local_entry->addr, addr, ETH_ALEN);
tt_local_entry->last_seen = jiffies;
tt_local_entry->flags = NO_FLAGS;
+ if (is_wifi_iface(ifindex))
+ tt_local_entry->flags |= TT_CLIENT_WIFI;
atomic_set(&tt_local_entry->refcount, 2);
/* the batman interface mac address should never be purged */
@@ -329,7 +344,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
__hlist_for_each_rcu(node, head)
- buf_size += 21;
+ buf_size += 29;
rcu_read_unlock();
}
@@ -348,8 +363,19 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_local_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 22, " * %pM\n",
- tt_local_entry->addr);
+ pos += snprintf(buff + pos, 30, " * %pM "
+ "[%c%c%c%c%c]\n",
+ tt_local_entry->addr,
+ (tt_local_entry->flags &
+ TT_CLIENT_ROAM ? 'R' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_NOPURGE ? 'P' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_NEW ? 'N' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_PENDING ? 'X' : '.'),
+ (tt_local_entry->flags &
+ TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
@@ -369,8 +395,8 @@ static void tt_local_set_pending(struct bat_priv *bat_priv,
tt_local_event(bat_priv, tt_local_entry->addr,
tt_local_entry->flags | flags);
- /* The local client has to be merked as "pending to be removed" but has
- * to be kept in the table in order to send it in an full tables
+ /* The local client has to be marked as "pending to be removed" but has
+ * to be kept in the table in order to send it in a full table
* response issued before the net ttvn increment (consistency check) */
tt_local_entry->flags |= TT_CLIENT_PENDING;
}
@@ -495,7 +521,8 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
+ const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
+ bool wifi)
{
struct tt_global_entry *tt_global_entry;
struct orig_node *orig_node_tmp;
@@ -537,6 +564,9 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
tt_global_entry->roam_at = 0;
}
+ if (wifi)
+ tt_global_entry->flags |= TT_CLIENT_WIFI;
+
bat_dbg(DBG_TT, bat_priv,
"Creating new global tt entry: %pM (via %pM)\n",
tt_global_entry->addr, orig_node->orig);
@@ -582,8 +612,8 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Globally announced TT entries received via the mesh %s\n",
net_dev->name);
- seq_printf(seq, " %-13s %s %-15s %s\n",
- "Client", "(TTVN)", "Originator", "(Curr TTVN)");
+ seq_printf(seq, " %-13s %s %-15s %s %s\n",
+ "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
@@ -593,7 +623,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
__hlist_for_each_rcu(node, head)
- buf_size += 59;
+ buf_size += 67;
rcu_read_unlock();
}
@@ -612,14 +642,20 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_global_entry, node,
head, hash_entry) {
- pos += snprintf(buff + pos, 61,
- " * %pM (%3u) via %pM (%3u)\n",
- tt_global_entry->addr,
+ pos += snprintf(buff + pos, 69,
+ " * %pM (%3u) via %pM (%3u) "
+ "[%c%c%c]\n", tt_global_entry->addr,
tt_global_entry->ttvn,
tt_global_entry->orig_node->orig,
(uint8_t) atomic_read(
&tt_global_entry->orig_node->
- last_ttvn));
+ last_ttvn),
+ (tt_global_entry->flags &
+ TT_CLIENT_ROAM ? 'R' : '.'),
+ (tt_global_entry->flags &
+ TT_CLIENT_PENDING ? 'X' : '.'),
+ (tt_global_entry->flags &
+ TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
@@ -686,6 +722,9 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
+ if (!hash)
+ return;
+
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
list_lock = &hash->list_locks[i];
@@ -774,30 +813,56 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
bat_priv->tt_global_hash = NULL;
}
+static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
+ struct tt_global_entry *tt_global_entry)
+{
+ bool ret = false;
+
+ if (tt_local_entry->flags & TT_CLIENT_WIFI &&
+ tt_global_entry->flags & TT_CLIENT_WIFI)
+ ret = true;
+
+ return ret;
+}
+
struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *addr)
+ const uint8_t *src, const uint8_t *addr)
{
- struct tt_global_entry *tt_global_entry;
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
struct orig_node *orig_node = NULL;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ if (src && atomic_read(&bat_priv->ap_isolation)) {
+ tt_local_entry = tt_local_hash_find(bat_priv, src);
+ if (!tt_local_entry)
+ goto out;
+ }
+ tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
+ /* check whether the clients should not communicate due to AP
+ * isolation */
+ if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
- goto free_tt;
+ goto out;
/* A global client marked as PENDING has already moved from that
* originator */
if (tt_global_entry->flags & TT_CLIENT_PENDING)
- goto free_tt;
+ goto out;
orig_node = tt_global_entry->orig_node;
-free_tt:
- tt_global_entry_free_ref(tt_global_entry);
out:
+ if (tt_global_entry)
+ tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
+
return orig_node;
}
@@ -999,7 +1064,6 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
tt_response = (struct tt_query_packet *)skb_put(skb,
tt_query_size + tt_len);
tt_response->ttvn = ttvn;
- tt_response->tt_data = htons(tt_tot);
tt_change = (struct tt_change *)(skb->data + tt_query_size);
tt_count = 0;
@@ -1025,12 +1089,17 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
}
rcu_read_unlock();
+ /* store in the message the number of entries we have successfully
+ * copied */
+ tt_response->tt_data = htons(tt_count);
+
out:
return skb;
}
-int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
- uint8_t ttvn, uint16_t tt_crc, bool full_table)
+static int send_tt_request(struct bat_priv *bat_priv,
+ struct orig_node *dst_orig_node,
+ uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
struct sk_buff *skb = NULL;
struct tt_query_packet *tt_request;
@@ -1137,12 +1206,12 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
req_ttvn = tt_request->ttvn;
- /* I have not the requested data */
+ /* I don't have the requested data */
if (orig_ttvn != req_ttvn ||
tt_request->tt_data != req_dst_orig_node->tt_crc)
goto out;
- /* If it has explicitly been requested the full table */
+ /* If the full table has been explicitly requested */
if (tt_request->flags & TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
@@ -1363,7 +1432,9 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
(tt_change + i)->flags & TT_CLIENT_ROAM);
else
if (!tt_global_add(bat_priv, orig_node,
- (tt_change + i)->addr, ttvn, false))
+ (tt_change + i)->addr, ttvn, false,
+ (tt_change + i)->flags &
+ TT_CLIENT_WIFI))
/* In case of problem while storing a
* global_entry, we stop the updating
* procedure without committing the
@@ -1403,9 +1474,10 @@ out:
orig_node_free_ref(orig_node);
}
-void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change)
+static void tt_update_changes(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ uint16_t tt_num_changes, uint8_t ttvn,
+ struct tt_change *tt_change)
{
_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
ttvn);
@@ -1668,6 +1740,8 @@ static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_local_entry, node,
head, hash_entry) {
+ if (!(tt_local_entry->flags & flags))
+ continue;
tt_local_entry->flags &= ~flags;
atomic_inc(&bat_priv->num_local_tt);
}
@@ -1720,3 +1794,90 @@ void tt_commit_changes(struct bat_priv *bat_priv)
atomic_inc(&bat_priv->ttvn);
bat_priv->tt_poss_change = false;
}
+
+bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+{
+ struct tt_local_entry *tt_local_entry = NULL;
+ struct tt_global_entry *tt_global_entry = NULL;
+ bool ret = true;
+
+ if (!atomic_read(&bat_priv->ap_isolation))
+ return false;
+
+ tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ if (!tt_local_entry)
+ goto out;
+
+ tt_global_entry = tt_global_hash_find(bat_priv, src);
+ if (!tt_global_entry)
+ goto out;
+
+ if (_is_ap_isolated(tt_local_entry, tt_global_entry))
+ goto out;
+
+ ret = false;
+
+out:
+ if (tt_global_entry)
+ tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
+ return ret;
+}
+
+void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc)
+{
+ uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+ bool full_table = true;
+
+ /* the ttvn increased by one -> we can apply the attached changes */
+ if (ttvn - orig_ttvn == 1) {
+ /* the OGM could not contain the changes due to their size or
+ * because they have already been sent TT_OGM_APPEND_MAX times.
+ * In this case send a tt request */
+ if (!tt_num_changes) {
+ full_table = false;
+ goto request_table;
+ }
+
+ tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
+ (struct tt_change *)tt_buff);
+
+ /* Even if we received the precomputed crc with the OGM, we
+ * prefer to recompute it to spot any possible inconsistency
+ * in the global table */
+ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+
+ /* The ttvn alone is not enough to guarantee consistency
+ * because a single value could represent different states
+ * (due to the wrap around). Thus a node has to check whether
+ * the resulting table (after applying the changes) is still
+ * consistent or not. E.g. a node could disconnect while its
+ * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
+ * checking the CRC value is mandatory to detect the
+ * inconsistency */
+ if (orig_node->tt_crc != tt_crc)
+ goto request_table;
+
+ /* Roaming phase is over: tables are in sync again. I can
+ * unset the flag */
+ orig_node->tt_poss_change = false;
+ } else {
+ /* if we missed more than one change or our tables are not
+ * in sync anymore -> request fresh tt data */
+ if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+request_table:
+ bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
+ "Need to retrieve the correct information "
+ "(ttvn: %u last_ttvn: %u crc: %u last_crc: "
+ "%u num_changes: %u)\n", orig_node->orig, ttvn,
+ orig_ttvn, tt_crc, orig_node->tt_crc,
+ tt_num_changes);
+ send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
+ full_table);
+ return;
+ }
+ }
+}
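The new tt_update_orig() centralizes the decision of whether attached TT changes can be applied incrementally or a (full) table must be requested. A compact model of that decision flow, under the simplifying assumption that the ttvn comparison and the CRC check are the only inputs; names are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	enum tt_action { TT_APPLY_CHANGES, TT_REQUEST_DIFF,
			 TT_REQUEST_FULL, TT_NOOP };

	static enum tt_action tt_decide(uint8_t orig_ttvn, uint8_t ttvn,
					uint16_t local_crc, uint16_t ogm_crc,
					uint8_t num_changes)
	{
		if (ttvn - orig_ttvn == 1) {
			if (!num_changes)
				return TT_REQUEST_DIFF; /* changes didn't fit in the OGM */
			return TT_APPLY_CHANGES;        /* then re-verify the CRC */
		}
		if (ttvn != orig_ttvn || local_crc != ogm_crc)
			return TT_REQUEST_FULL;         /* out of sync: fetch full table */
		return TT_NOOP;
	}

	int main(void)
	{
		printf("%d\n", tt_decide(4, 5, 0xbeef, 0xbeef, 3)); /* 0: apply */
		printf("%d\n", tt_decide(4, 7, 0xbeef, 0xbeef, 0)); /* 2: full table */
		return 0;
	}

Note that because both uint8_t operands are promoted to int before the subtraction, the 255 -> 0 wrap does not satisfy the == 1 test; it falls through to the full-table request, which is the safe fallback. The comment in the kernel code about a node reconnecting at ttvn = X + TTVN_MAX is the reason the CRC comparison remains mandatory even when the ttvn looks consistent.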
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index d4122cb..30efd49 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -26,15 +26,16 @@ int tt_len(int changes_num);
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
unsigned char *buff, int buff_len);
int tt_init(struct bat_priv *bat_priv);
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
+void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex);
void tt_local_remove(struct bat_priv *bat_priv,
const uint8_t *addr, const char *message, bool roaming);
int tt_local_seq_print_text(struct seq_file *seq, void *offset);
void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, int tt_buff_len);
-int tt_global_add(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const unsigned char *addr,
- uint8_t ttvn, bool roaming);
+int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *addr, uint8_t ttvn, bool roaming,
+ bool wifi);
int tt_global_seq_print_text(struct seq_file *seq, void *offset);
void tt_global_del_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node, const char *message);
@@ -42,25 +43,23 @@ void tt_global_del(struct bat_priv *bat_priv,
struct orig_node *orig_node, const unsigned char *addr,
const char *message, bool roaming);
struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *addr);
+ const uint8_t *src, const uint8_t *addr);
void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
const unsigned char *tt_buff, uint8_t tt_num_changes);
uint16_t tt_local_crc(struct bat_priv *bat_priv);
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
void tt_free(struct bat_priv *bat_priv);
-int send_tt_request(struct bat_priv *bat_priv,
- struct orig_node *dst_orig_node, uint8_t hvn,
- uint16_t tt_crc, bool full_table);
bool send_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_request);
-void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change);
bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
void handle_tt_response(struct bat_priv *bat_priv,
struct tt_query_packet *tt_response);
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
struct orig_node *orig_node);
void tt_commit_changes(struct bat_priv *bat_priv);
+bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
+void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 25bd1db..ab8d0fe 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -57,7 +57,7 @@ struct hard_iface {
* @batman_seqno_reset: time when the batman seqno window was reset
* @gw_flags: flags related to gateway class
* @flags: for now only VIS_SERVER flag
- * @last_real_seqno: last and best known squence number
+ * @last_real_seqno: last and best known sequence number
* @last_ttl: ttl of last received packet
* @last_bcast_seqno: last broadcast sequence number received by this host
*
@@ -146,6 +146,7 @@ struct bat_priv {
atomic_t aggregated_ogms; /* boolean */
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
+ atomic_t ap_isolation; /* boolean */
atomic_t vis_mode; /* VIS_TYPE_* */
atomic_t gw_mode; /* GW_MODE_* */
atomic_t gw_sel_class; /* uint */
@@ -156,7 +157,7 @@ struct bat_priv {
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
- atomic_t ttvn; /* tranlation table version number */
+ atomic_t ttvn; /* translation table version number */
atomic_t tt_ogm_append_cnt;
atomic_t tt_local_changes; /* changes registered in a OGM interval */
/* The tt_poss_change flag is used to detect an ongoing roaming phase.
@@ -223,22 +224,22 @@ struct socket_packet {
struct tt_local_entry {
uint8_t addr[ETH_ALEN];
+ struct hlist_node hash_entry;
unsigned long last_seen;
uint16_t flags;
atomic_t refcount;
struct rcu_head rcu;
- struct hlist_node hash_entry;
};
struct tt_global_entry {
uint8_t addr[ETH_ALEN];
+ struct hlist_node hash_entry; /* entry in the global table */
struct orig_node *orig_node;
uint8_t ttvn;
uint16_t flags; /* only TT_GLOBAL_ROAM is used */
unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
atomic_t refcount;
struct rcu_head rcu;
- struct hlist_node hash_entry; /* entry in the global table */
};
struct tt_change_node {
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 32b125f..07d1c1d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -299,8 +299,10 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
goto find_router;
}
- /* check for tt host - increases orig_node refcount */
- orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+ /* check for tt host - increases orig_node refcount.
+ * returns NULL in case of AP isolation */
+ orig_node = transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
find_router:
/**
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 62f54b9..8fd5535 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -24,7 +24,7 @@
#include "packet.h"
-#define FRAG_TIMEOUT 10000 /* purge frag list entrys after time in ms */
+#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 8a1b985..f81a6b6 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -131,7 +131,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
return;
}
- /* its a new address, add it to the list */
+ /* it's a new address, add it to the list */
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return;
@@ -465,7 +465,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
/* try to add it */
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
info, &info->hash_entry);
- if (hash_added < 0) {
+ if (hash_added != 0) {
/* did not work (for some reason) */
kref_put(&info->refcount, free_info);
info = NULL;
@@ -887,10 +887,8 @@ int vis_init(struct bat_priv *bat_priv)
}
bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
- if (!bat_priv->my_vis_info) {
- pr_err("Can't initialize vis packet\n");
+ if (!bat_priv->my_vis_info)
goto err;
- }
bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
MAX_VIS_PACKET_SIZE +
@@ -920,7 +918,7 @@ int vis_init(struct bat_priv *bat_priv)
hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
bat_priv->my_vis_info,
&bat_priv->my_vis_info->hash_entry);
- if (hash_added < 0) {
+ if (hash_added != 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
kref_put(&bat_priv->my_vis_info->refcount, free_info);
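The two vis.c hunks tighten the hash_add() error handling from "hash_added < 0" to "hash_added != 0". A sketch of why that matters, assuming the return convention 0 = inserted, 1 = duplicate already present, -1 = error (an assumption; the convention is not shown in this excerpt):

	#include <stdio.h>

	static int hash_add_stub(int duplicate, int alloc_fails)
	{
		if (alloc_fails)
			return -1;  /* error */
		if (duplicate)
			return 1;   /* already hashed, nothing inserted */
		return 0;           /* inserted */
	}

	int main(void)
	{
		int ret = hash_add_stub(1, 0);

		if (ret != 0)   /* catches the duplicate AND the error case */
			printf("drop reference, free info\n");
		else
			printf("info is owned by the hash now\n");
		return 0;
	}

With the old "< 0" test, a duplicate insertion would have leaked a reference: the element was never added to the hash, yet its kref was not dropped.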
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 117e0d1..062124c 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -349,7 +349,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
chunk = min_t(unsigned int, skb->len, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
if (!copied)
copied = -EFAULT;
@@ -361,7 +361,33 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
sock_recv_ts_and_drops(msg, sk, skb);
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ int skb_len = skb_headlen(skb);
+
+ if (chunk <= skb_len) {
+ __skb_pull(skb, chunk);
+ } else {
+ struct sk_buff *frag;
+
+ __skb_pull(skb, skb_len);
+ chunk -= skb_len;
+
+ skb_walk_frags(skb, frag) {
+ if (chunk <= frag->len) {
+ /* Pulling partial data */
+ skb->len -= chunk;
+ skb->data_len -= chunk;
+ __skb_pull(frag, chunk);
+ break;
+ } else if (frag->len) {
+ /* Pulling all frag data */
+ chunk -= frag->len;
+ skb->len -= frag->len;
+ skb->data_len -= frag->len;
+ __skb_pull(frag, frag->len);
+ }
+ }
+ }
+
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
break;
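The recvmsg hunk above switches from memcpy_toiovec() to skb_copy_datagram_iovec(), which can read from paged fragments, and therefore the non-PEEK consume path can no longer use a plain skb_pull(): the consumed bytes may span the linear head and the frag list. A userspace model of the new accounting, assuming a simplified skb where len counts head plus fragments and data_len counts fragments only (the invariant the kernel code maintains):

	#include <stdio.h>

	struct frag { unsigned int len; struct frag *next; };
	struct model_skb {
		unsigned int head_len;  /* bytes in the linear area */
		unsigned int len;       /* head + all fragments */
		unsigned int data_len;  /* fragments only */
		struct frag *frags;
	};

	static void pull(struct model_skb *skb, unsigned int chunk)
	{
		struct frag *f;

		if (chunk <= skb->head_len) {   /* all from the head */
			skb->head_len -= chunk;
			skb->len -= chunk;
			return;
		}
		chunk -= skb->head_len;
		skb->len -= skb->head_len;
		skb->head_len = 0;

		for (f = skb->frags; f && chunk; f = f->next) {
			unsigned int n = chunk < f->len ? chunk : f->len;

			f->len -= n;        /* partial or full frag consume */
			skb->len -= n;
			skb->data_len -= n;
			chunk -= n;
		}
	}

	int main(void)
	{
		struct frag f2 = { 300, NULL }, f1 = { 200, &f2 };
		struct model_skb skb = { 100, 600, 500, &f1 };

		pull(&skb, 250);  /* 100 from the head, 150 from frag 1 */
		printf("len=%u data_len=%u\n", skb.len, skb.data_len); /* 350 350 */
		return 0;
	}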
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index d9edfe8..91bcd3a 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -492,7 +492,10 @@ static int bnep_session(void *arg)
/* RX */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- bnep_rx_frame(s, skb);
+ if (!skb_linearize(skb))
+ bnep_rx_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state != BT_CONNECTED)
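The skb_linearize() guard added here (and in the cmtp, hidp and rfcomm sessions below) follows from the recvmsg change: frames dequeued from the socket may now carry paged fragments, while these protocol parsers index directly through skb->data and only see the linear head. A tiny sketch of the condition being defended against, using the simplified skb model from above:

	#include <stdbool.h>
	#include <stdio.h>

	struct model_skb { unsigned int len, data_len; };

	/* a parser reading via a pointer into the linear area never reaches
	 * the data_len bytes that live in fragments */
	static bool can_parse_directly(const struct model_skb *skb)
	{
		return skb->data_len == 0;   /* fully linear */
	}

	int main(void)
	{
		struct model_skb linear = { 48, 0 }, paged = { 48, 32 };

		printf("linear ok: %d, paged ok: %d\n",
		       can_parse_directly(&linear), can_parse_directly(&paged));
		return 0;
	}

skb_linearize() copies the fragment data into the head; if that allocation fails the frame is dropped with kfree_skb() rather than parsed from incomplete data.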
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d4f5dff..bc40864 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -217,7 +217,7 @@ static const struct net_device_ops bnep_netdev_ops = {
.ndo_stop = bnep_net_close,
.ndo_start_xmit = bnep_net_xmit,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_multicast_list = bnep_net_set_mc_list,
+ .ndo_set_rx_mode = bnep_net_set_mc_list,
.ndo_set_mac_address = bnep_net_set_mac_addr,
.ndo_tx_timeout = bnep_net_timeout,
.ndo_change_mtu = eth_change_mtu,
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 521baa4..7d00ddf 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -302,7 +302,10 @@ static int cmtp_session(void *arg)
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- cmtp_recv_frame(session, skb);
+ if (!skb_linearize(skb))
+ cmtp_recv_frame(session, skb);
+ else
+ kfree_skb(skb);
}
cmtp_process_transmit(session);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index ea7f031..c1c597e 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -56,15 +56,15 @@ static void hci_le_connect(struct hci_conn *conn)
conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
- cp.scan_interval = cpu_to_le16(0x0004);
- cp.scan_window = cpu_to_le16(0x0004);
+ cp.scan_interval = cpu_to_le16(0x0060);
+ cp.scan_window = cpu_to_le16(0x0030);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = cpu_to_le16(0x0008);
- cp.conn_interval_max = cpu_to_le16(0x0100);
- cp.supervision_timeout = cpu_to_le16(0x0064);
- cp.min_ce_len = cpu_to_le16(0x0001);
- cp.max_ce_len = cpu_to_le16(0x0001);
+ cp.conn_interval_min = cpu_to_le16(0x0028);
+ cp.conn_interval_max = cpu_to_le16(0x0038);
+ cp.supervision_timeout = cpu_to_le16(0x002a);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
@@ -218,7 +218,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
cp.handle = cpu_to_le16(conn->handle);
memcpy(cp.ltk, ltk, sizeof(cp.ltk));
cp.ediv = ediv;
- memcpy(cp.rand, rand, sizeof(rand));
+ memcpy(cp.rand, rand, sizeof(cp.rand));
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
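The one-byte-looking change from sizeof(rand) to sizeof(cp.rand) fixes a classic C pitfall: an array parameter decays to a pointer, so sizeof(rand) inside the function yields the pointer size (4 or 8 bytes), not the 8 bytes of the random value. A self-contained demonstration (struct and field names mirror the fix but are illustrative):

	#include <stdio.h>

	struct cp { unsigned char rand[8]; };

	static void show(unsigned char rand[8])
	{
		struct cp cp;

		(void)cp;
		/* sizeof on the parameter measures a pointer, not the array */
		printf("sizeof(rand)    = %zu (pointer!)\n", sizeof(rand));
		printf("sizeof(cp.rand) = %zu (what the memcpy needs)\n",
		       sizeof(cp.rand));
	}

	int main(void)
	{
		unsigned char r[8] = { 0 };

		show(r);
		return 0;
	}

On a 32-bit kernel the old code copied only half of the LTK rand value into the HCI command.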
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 56943ad..be84ae3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -613,7 +613,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
if (!test_bit(HCI_RAW, &hdev->flags)) {
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(250));
+ msecs_to_jiffies(HCI_INIT_TIMEOUT));
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -1312,59 +1312,41 @@ int hci_blacklist_clear(struct hci_dev *hdev)
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- int err;
if (bacmp(bdaddr, BDADDR_ANY) == 0)
return -EBADF;
- hci_dev_lock_bh(hdev);
-
- if (hci_blacklist_lookup(hdev, bdaddr)) {
- err = -EEXIST;
- goto err;
- }
+ if (hci_blacklist_lookup(hdev, bdaddr))
+ return -EEXIST;
entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto err;
- }
+ if (!entry)
+ return -ENOMEM;
bacpy(&entry->bdaddr, bdaddr);
list_add(&entry->list, &hdev->blacklist);
- err = 0;
-
-err:
- hci_dev_unlock_bh(hdev);
- return err;
+ return mgmt_device_blocked(hdev->id, bdaddr);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct bdaddr_list *entry;
- int err = 0;
-
- hci_dev_lock_bh(hdev);
if (bacmp(bdaddr, BDADDR_ANY) == 0) {
- hci_blacklist_clear(hdev);
- goto done;
+ return hci_blacklist_clear(hdev);
}
entry = hci_blacklist_lookup(hdev, bdaddr);
if (!entry) {
- err = -ENOENT;
- goto done;
+ return -ENOENT;
}
list_del(&entry->list);
kfree(entry);
-done:
- hci_dev_unlock_bh(hdev);
- return err;
+ return mgmt_device_unblocked(hdev->id, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
@@ -1523,11 +1505,6 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->workqueue)
goto nomem;
- hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hdev->tfm))
- BT_INFO("Failed to load transform for ecb(aes): %ld",
- PTR_ERR(hdev->tfm));
-
hci_register_sysfs(hdev);
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
@@ -1576,9 +1553,6 @@ int hci_unregister_dev(struct hci_dev *hdev)
!test_bit(HCI_SETUP, &hdev->flags))
mgmt_index_removed(hdev->id);
- if (!IS_ERR(hdev->tfm))
- crypto_free_blkcipher(hdev->tfm);
-
hci_notify(hdev, HCI_DEV_UNREG);
if (hdev->rfkill) {
@@ -2074,6 +2048,9 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
min = c->sent;
conn = c;
}
+
+ if (hci_conn_num(hdev, type) == num)
+ break;
}
if (conn) {
@@ -2131,6 +2108,9 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, ACL_LINK))
+ return;
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
@@ -2162,6 +2142,9 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, SCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2182,6 +2165,9 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, ESCO_LINK))
+ return;
+
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
@@ -2202,6 +2188,9 @@ static inline void hci_sched_le(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
+
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
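The scheduler hunks above add two related optimizations: each hci_sched_*() pass bails out immediately when there are no links of its type, and hci_low_sent() stops scanning the global connection list once it has inspected every connection of the requested type. A sketch of both, with illustrative link types and counts:

	#include <stdio.h>

	struct conn { int type, sent; };

	static struct conn conns[] = {
		{ 1 /* ACL */, 3 }, { 2 /* SCO */, 0 },
		{ 1 /* ACL */, 1 }, { 3 /* LE  */, 5 },
	};

	static int conn_num(int type)
	{
		int i, n = 0;

		for (i = 0; i < 4; i++)
			if (conns[i].type == type)
				n++;
		return n;
	}

	static struct conn *low_sent(int type)
	{
		struct conn *best = NULL;
		int i, seen = 0, total = conn_num(type);

		if (!total)
			return NULL;      /* nothing to schedule for this type */

		for (i = 0; i < 4; i++) {
			if (conns[i].type != type)
				continue;
			if (!best || conns[i].sent < best->sent)
				best = &conns[i];
			if (++seen == total)
				break;    /* no need to walk the rest */
		}
		return best;
	}

	int main(void)
	{
		struct conn *c = low_sent(1);

		printf("best ACL candidate sent=%d\n", c ? c->sent : -1);
		return 0;
	}

Since the TX tasklet runs on every transmit opportunity, skipping the list walk for absent link types is a cheap win on the hot path.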
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7ef4eb4..d7d96b6 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -898,16 +898,15 @@ static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
if (!cp)
return;
- hci_dev_lock(hdev);
-
if (cp->enable == 0x01) {
del_timer(&hdev->adv_timer);
+
+ hci_dev_lock(hdev);
hci_adv_entries_clear(hdev);
+ hci_dev_unlock(hdev);
} else if (cp->enable == 0x00) {
mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
}
-
- hci_dev_unlock(hdev);
}
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1103,9 +1102,10 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
return 0;
/* Only request authentication for SSP connections or non-SSP
- * devices with sec_level HIGH */
+ * devices with sec_level HIGH or if MITM protection is requested */
if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
- conn->pending_sec_level != BT_SECURITY_HIGH)
+ conn->pending_sec_level != BT_SECURITY_HIGH &&
+ !(conn->auth_type & 0x01))
return 0;
return 1;
@@ -1412,7 +1412,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
conn->state = BT_CONFIG;
hci_conn_hold(conn);
conn->disc_timeout = HCI_DISCONN_TIMEOUT;
- mgmt_connected(hdev->id, &ev->bdaddr);
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
} else
conn->state = BT_CONNECTED;
@@ -2174,7 +2174,10 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
- if (conn && conn->state == BT_CONNECTED) {
+ if (!conn)
+ goto unlock;
+
+ if (conn->state == BT_CONNECTED) {
hci_conn_hold(conn);
conn->disc_timeout = HCI_PAIRING_TIMEOUT;
hci_conn_put(conn);
@@ -2194,6 +2197,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
}
+unlock:
hci_dev_unlock(hdev);
}
@@ -2816,7 +2820,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
}
- mgmt_connected(hdev->id, &ev->bdaddr);
+ mgmt_connected(hdev->id, &ev->bdaddr, conn->type);
conn->sec_level = BT_SECURITY_LOW;
conn->handle = __le16_to_cpu(ev->handle);
@@ -2834,19 +2838,17 @@ unlock:
static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
struct sk_buff *skb)
{
- struct hci_ev_le_advertising_info *ev;
- u8 num_reports;
-
- num_reports = skb->data[0];
- ev = (void *) &skb->data[1];
+ u8 num_reports = skb->data[0];
+ void *ptr = &skb->data[1];
hci_dev_lock(hdev);
- hci_add_adv_entry(hdev, ev);
+ while (num_reports--) {
+ struct hci_ev_le_advertising_info *ev = ptr;
- while (--num_reports) {
- ev = (void *) (ev->data + ev->length + 1);
hci_add_adv_entry(hdev, ev);
+
+ ptr += sizeof(*ev) + ev->length + 1;
}
hci_dev_unlock(hdev);
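The hci_le_adv_report_evt() rewrite replaces the special-cased first report plus "--num_reports" walk with a single uniform loop, which also behaves correctly for a packet announcing zero reports. A sketch of the record walk, assuming a simplified report header of just a length byte followed by the payload and one trailing RSSI byte (the real struct also carries event type and address fields):

	#include <stdio.h>

	struct adv_info { unsigned char length; /* payload follows, then RSSI */ };

	static void walk_reports(const unsigned char *data, size_t total)
	{
		unsigned char num_reports = data[0];
		const unsigned char *ptr = data + 1;

		while (num_reports--) {
			const struct adv_info *ev = (const void *)ptr;

			printf("report with %d payload bytes\n", ev->length);
			/* header + variable payload + RSSI byte */
			ptr += sizeof(*ev) + ev->length + 1;
		}
		(void)total; /* a robust parser would bounds-check against total */
	}

	int main(void)
	{
		/* two records: payload lengths 2 and 0 */
		unsigned char pkt[] = { 2, 2, 0xaa, 0xbb, 0xc8, 0, 0xc8 };

		walk_reports(pkt, sizeof(pkt));
		return 0;
	}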
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index ff02cf5..f6afe3d 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -183,21 +183,35 @@ static int hci_sock_release(struct socket *sock)
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- return hci_blacklist_add(hdev, &bdaddr);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_blacklist_add(hdev, &bdaddr);
+
+ hci_dev_unlock_bh(hdev);
+
+ return err;
}
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
bdaddr_t bdaddr;
+ int err;
if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
return -EFAULT;
- return hci_blacklist_del(hdev, &bdaddr);
+ hci_dev_lock_bh(hdev);
+
+ err = hci_blacklist_del(hdev, &bdaddr);
+
+ hci_dev_unlock_bh(hdev);
+
+ return err;
}
/* Ioctls that require bound socket */
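These hci_sock.c hunks are the caller side of the hci_core.c locking change above: hci_blacklist_add()/del() no longer take hci_dev_lock_bh() themselves, so each entry point (the ioctl here, block_device()/unblock_device() in mgmt.c below) acquires it around the call. A sketch of the resulting lock ownership rule, with pthread standing in for the kernel lock:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

	/* helper assumes the device lock is already held by the caller */
	static int blacklist_add_locked(int addr)
	{
		printf("add %d to blacklist\n", addr);
		return 0;
	}

	static int ioctl_blacklist_add(int addr)
	{
		int err;

		pthread_mutex_lock(&dev_lock);
		err = blacklist_add_locked(addr);
		pthread_mutex_unlock(&dev_lock);
		return err;
	}

	int main(void)
	{
		return ioctl_blacklist_add(42);
	}

Pushing the lock to the callers lets the mgmt path hold it across the pending-command bookkeeping it now performs around the same helpers.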
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index a6c3aa8..661b461 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -5,6 +5,7 @@
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -23,6 +24,8 @@ static inline char *link_typetostr(int type)
return "SCO";
case ESCO_LINK:
return "eSCO";
+ case LE_LINK:
+ return "LE";
default:
return "UNKNOWN";
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index fb68f34..075a3e9 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -716,12 +716,18 @@ static int hidp_session(void *arg)
while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_ctrl_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_ctrl_frame(session, skb);
+ else
+ kfree_skb(skb);
}
while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) {
skb_orphan(skb);
- hidp_recv_intr_frame(session, skb);
+ if (!skb_linearize(skb))
+ hidp_recv_intr_frame(session, skb);
+ else
+ kfree_skb(skb);
}
hidp_process_transmit(session);
@@ -872,6 +878,9 @@ static int hidp_start(struct hid_device *hid)
struct hidp_session *session = hid->driver_data;
struct hid_report *report;
+ if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
+ return 0;
+
list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
report_list, list)
hidp_send_report(session, report);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b3bdb48..8cd1291 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -907,6 +907,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (!conn->hcon->out && conn->hcon->type == LE_LINK)
l2cap_le_conn_ready(conn);
+ if (conn->hcon->out && conn->hcon->type == LE_LINK)
+ smp_conn_security(conn, conn->hcon->pending_sec_level);
+
read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -986,8 +989,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
del_timer(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
hcon->l2cap_data = NULL;
kfree(conn);
@@ -1240,7 +1245,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
__clear_retrans_timer(chan);
}
-void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
+static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct hci_conn *hcon = chan->conn->hcon;
u16 flags;
@@ -1256,7 +1261,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
hci_send_acl(hcon, skb, flags);
}
-void l2cap_streaming_send(struct l2cap_chan *chan)
+static void l2cap_streaming_send(struct l2cap_chan *chan)
{
struct sk_buff *skb;
u16 control, fcs;
@@ -1322,7 +1327,7 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
l2cap_do_send(chan, tx_skb);
}
-int l2cap_ertm_send(struct l2cap_chan *chan)
+static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
u16 control, fcs;
@@ -1460,7 +1465,7 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
return sent;
}
-struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1490,7 +1495,7 @@ struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr
return skb;
}
-struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1519,7 +1524,9 @@ struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *m
return skb;
}
-struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
+ struct msghdr *msg, size_t len,
+ u16 control, u16 sdulen)
{
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
@@ -1565,7 +1572,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *
return skb;
}
-int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
+static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
struct sk_buff *skb;
struct sk_buff_head sar_queue;
@@ -3121,102 +3128,104 @@ static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb,
return 0;
}
-static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+static void append_skb_frag(struct sk_buff *skb,
+ struct sk_buff *new_frag, struct sk_buff **last_frag)
{
- struct sk_buff *_skb;
- int err;
+ /* skb->len reflects data in skb as well as all fragments
+ * skb->data_len reflects only data in fragments
+ */
+ if (!skb_has_frag_list(skb))
+ skb_shinfo(skb)->frag_list = new_frag;
+
+ new_frag->next = NULL;
+
+ (*last_frag)->next = new_frag;
+ *last_frag = new_frag;
+
+ skb->len += new_frag->len;
+ skb->data_len += new_frag->len;
+ skb->truesize += new_frag->truesize;
+}
+
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
+{
+ int err = -EINVAL;
switch (control & L2CAP_CTRL_SAR) {
case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
+ if (chan->sdu)
+ break;
- return chan->ops->recv(chan->data, skb);
+ err = chan->ops->recv(chan->data, skb);
+ break;
case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto drop;
+ if (chan->sdu)
+ break;
chan->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
- if (chan->sdu_len > chan->imtu)
- goto disconnect;
-
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu)
- return -ENOMEM;
+ if (chan->sdu_len > chan->imtu) {
+ err = -EMSGSIZE;
+ break;
+ }
- /* pull sdu_len bytes only after alloc, because of Local Busy
- * condition we have to be sure that this will be executed
- * only once, i.e., when alloc does not fail */
- skb_pull(skb, 2);
+ if (skb->len >= chan->sdu_len)
+ break;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ chan->sdu = skb;
+ chan->sdu_last_frag = skb;
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
+ skb = NULL;
+ err = 0;
break;
case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
if (!chan->sdu)
- goto disconnect;
+ break;
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- goto drop;
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ if (chan->sdu->len >= chan->sdu_len)
+ break;
+ err = 0;
break;
case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- goto disconnect;
-
if (!chan->sdu)
- goto disconnect;
-
- chan->partial_sdu_len += skb->len;
-
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
+ break;
- if (chan->partial_sdu_len != chan->sdu_len)
- goto drop;
+ append_skb_frag(chan->sdu, skb,
+ &chan->sdu_last_frag);
+ skb = NULL;
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
+ if (chan->sdu->len != chan->sdu_len)
+ break;
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- if (!_skb) {
- return -ENOMEM;
- }
+ err = chan->ops->recv(chan->data, chan->sdu);
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0) {
- kfree_skb(_skb);
- return err;
+ if (!err) {
+ /* Reassembly complete */
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
}
-
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
-
- kfree_skb(chan->sdu);
break;
}
- kfree_skb(skb);
- return 0;
-
-drop:
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
+ if (err) {
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
-disconnect:
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- kfree_skb(skb);
- return 0;
+ return err;
}
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
@@ -3270,99 +3279,6 @@ void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
}
}
-static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
-{
- struct sk_buff *_skb;
- int err = -EINVAL;
-
- /*
- * TODO: We have to notify the userland if some data is lost with the
- * Streaming Mode.
- */
-
- switch (control & L2CAP_CTRL_SAR) {
- case L2CAP_SDU_UNSEGMENTED:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
- }
-
- err = chan->ops->recv(chan->data, skb);
- if (!err)
- return 0;
-
- break;
-
- case L2CAP_SDU_START:
- if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
- kfree_skb(chan->sdu);
- break;
- }
-
- chan->sdu_len = get_unaligned_le16(skb->data);
- skb_pull(skb, 2);
-
- if (chan->sdu_len > chan->imtu) {
- err = -EMSGSIZE;
- break;
- }
-
- chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
- if (!chan->sdu) {
- err = -ENOMEM;
- break;
- }
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- set_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len = skb->len;
- err = 0;
- break;
-
- case L2CAP_SDU_CONTINUE:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- break;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- chan->partial_sdu_len += skb->len;
- if (chan->partial_sdu_len > chan->sdu_len)
- kfree_skb(chan->sdu);
- else
- err = 0;
-
- break;
-
- case L2CAP_SDU_END:
- if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
- break;
-
- memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
-
- clear_bit(CONN_SAR_SDU, &chan->conn_state);
- chan->partial_sdu_len += skb->len;
-
- if (chan->partial_sdu_len > chan->imtu)
- goto drop;
-
- if (chan->partial_sdu_len == chan->sdu_len) {
- _skb = skb_clone(chan->sdu, GFP_ATOMIC);
- err = chan->ops->recv(chan->data, _skb);
- if (err < 0)
- kfree_skb(_skb);
- }
- err = 0;
-
-drop:
- kfree_skb(chan->sdu);
- break;
- }
-
- kfree_skb(skb);
- return err;
-}
-
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
struct sk_buff *skb;
@@ -3377,7 +3293,7 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
skb = skb_dequeue(&chan->srej_q);
control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
- err = l2cap_ertm_reassembly_sdu(chan, skb, control);
+ err = l2cap_reassemble_sdu(chan, skb, control);
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3537,7 +3453,7 @@ expected:
return 0;
}
- err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
+ err = l2cap_reassemble_sdu(chan, skb, rx_control);
chan->buffer_seq = (chan->buffer_seq + 1) % 64;
if (err < 0) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
@@ -3853,12 +3769,20 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
tx_seq = __get_txseq(control);
- if (chan->expected_tx_seq == tx_seq)
- chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
- else
- chan->expected_tx_seq = (tx_seq + 1) % 64;
+ if (chan->expected_tx_seq != tx_seq) {
+ /* Frame(s) missing - must discard partial SDU */
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+
+ /* TODO: Notify userland of missing data */
+ }
+
+ chan->expected_tx_seq = (tx_seq + 1) % 64;
- l2cap_streaming_reassembly_sdu(chan, skb, control);
+ if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto done;
@@ -4093,6 +4017,11 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p", conn);
+ if (hcon->type == LE_LINK) {
+ smp_distribute_keys(conn, 0);
+ del_timer(&conn->security_timer);
+ }
+
read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -4105,9 +4034,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
chan->sec_level = hcon->sec_level;
- del_timer(&conn->security_timer);
l2cap_chan_ready(sk);
- smp_distribute_keys(conn, 0);
}
bh_unlock_sock(sk);
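The large l2cap_core.c rework above merges the ERTM and streaming reassembly paths into one l2cap_reassemble_sdu() and, via append_skb_frag(), stops copying: instead of allocating an sdu_len buffer and memcpy()ing every segment into it, segments are chained as fragments and only the length bookkeeping is updated. A userspace model of that accumulation; field and function names are simplified:

	#include <stdio.h>

	struct seg { unsigned int len; struct seg *next; };
	struct sdu { unsigned int len; struct seg *frags, *last; };

	static void append_frag(struct sdu *s, struct seg *f)
	{
		f->next = NULL;
		if (!s->frags)
			s->frags = f;
		else
			s->last->next = f;
		s->last = f;
		s->len += f->len;   /* no copy: just account for the bytes */
	}

	int main(void)
	{
		struct sdu s = { 0, NULL, NULL };
		struct seg a = { 100 }, b = { 60 }, c = { 40 };
		unsigned int sdu_len = 200; /* announced in the SDU_START frame */

		append_frag(&s, &a);
		append_frag(&s, &b);
		append_frag(&s, &c);

		if (s.len == sdu_len)
			printf("reassembly complete: deliver %u bytes\n", s.len);
		else if (s.len > sdu_len)
			printf("oversized: drop partial SDU\n");
		return 0;
	}

This is also why the streaming receive path now explicitly frees the partial SDU when a sequence gap is detected: the chain under chan->sdu is the only copy of the data.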
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e829236..5c406d3 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,6 +27,7 @@
/* Bluetooth L2CAP sockets. */
#include <linux/security.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 53e109e..2c76342 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -23,6 +23,7 @@
/* Bluetooth HCI Management interface */
#include <linux/uaccess.h>
+#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -147,8 +148,6 @@ static int read_index_list(struct sock *sk)
hci_del_off_timer(d);
- set_bit(HCI_MGMT, &d->flags);
-
if (test_bit(HCI_SETUP, &d->flags))
continue;
@@ -908,7 +907,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
struct hci_dev *hdev;
struct mgmt_cp_load_keys *cp;
u16 key_count, expected_len;
- int i, err;
+ int i;
cp = (void *) data;
@@ -918,9 +917,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
key_count = get_unaligned_le16(&cp->key_count);
expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
- if (expected_len > len) {
- BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
- expected_len, len);
+ if (expected_len != len) {
+ BT_ERR("load_keys: expected %u bytes, got %u bytes",
+ expected_len, len);
return -EINVAL;
}
@@ -942,36 +941,17 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
else
clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
- len -= sizeof(*cp);
- i = 0;
-
- while (i < len) {
- struct mgmt_key_info *key = (void *) cp->keys + i;
-
- i += sizeof(*key) + key->dlen;
-
- if (key->type == HCI_LK_SMP_LTK) {
- struct key_master_id *id = (void *) key->data;
-
- if (key->dlen != sizeof(struct key_master_id))
- continue;
-
- hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
- id->ediv, id->rand, key->val);
-
- continue;
- }
+ for (i = 0; i < key_count; i++) {
+ struct mgmt_key_info *key = &cp->keys[i];
hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
key->pin_len);
}
- err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
-
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
- return err;
+ return 0;
}
static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -1347,6 +1327,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
struct hci_dev *hdev;
struct mgmt_cp_pair_device *cp;
struct pending_cmd *cmd;
+ struct adv_entry *entry;
u8 sec_level, auth_type;
struct hci_conn *conn;
int err;
@@ -1364,15 +1345,20 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
hci_dev_lock_bh(hdev);
- if (cp->io_cap == 0x03) {
- sec_level = BT_SECURITY_MEDIUM;
+ sec_level = BT_SECURITY_MEDIUM;
+ if (cp->io_cap == 0x03)
auth_type = HCI_AT_DEDICATED_BONDING;
- } else {
- sec_level = BT_SECURITY_HIGH;
+ else
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
- }
- conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type);
+ entry = hci_find_adv_entry(hdev, &cp->bdaddr);
+ if (entry)
+ conn = hci_connect(hdev, LE_LINK, &cp->bdaddr, sec_level,
+ auth_type);
+ else
+ conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level,
+ auth_type);
+
if (IS_ERR(conn)) {
err = PTR_ERR(conn);
goto unlock;
@@ -1391,7 +1377,10 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
goto unlock;
}
- conn->connect_cfm_cb = pairing_complete_cb;
+ /* For LE, just connecting isn't a proof that the pairing finished */
+ if (!entry)
+ conn->connect_cfm_cb = pairing_complete_cb;
+
conn->security_cfm_cb = pairing_complete_cb;
conn->disconn_cfm_cb = pairing_complete_cb;
conn->io_capability = cp->io_cap;
@@ -1689,13 +1678,12 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_block_device *cp;
+ struct pending_cmd *cmd;
+ struct mgmt_cp_block_device *cp = (void *) data;
int err;
BT_DBG("hci%u", index);
- cp = (void *) data;
-
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
EINVAL);
@@ -1705,6 +1693,14 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
ENODEV);
+ hci_dev_lock_bh(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
err = hci_blacklist_add(hdev, &cp->bdaddr);
if (err < 0)
@@ -1712,6 +1708,11 @@ static int block_device(struct sock *sk, u16 index, unsigned char *data,
else
err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
NULL, 0);
+
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
@@ -1721,13 +1722,12 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
u16 len)
{
struct hci_dev *hdev;
- struct mgmt_cp_unblock_device *cp;
+ struct pending_cmd *cmd;
+ struct mgmt_cp_unblock_device *cp = (void *) data;
int err;
BT_DBG("hci%u", index);
- cp = (void *) data;
-
if (len != sizeof(*cp))
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
EINVAL);
@@ -1737,6 +1737,14 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
ENODEV);
+ hci_dev_lock_bh(hdev);
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_UNBLOCK_DEVICE, index, NULL, 0);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
err = hci_blacklist_del(hdev, &cp->bdaddr);
if (err < 0)
@@ -1744,6 +1752,67 @@ static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
else
err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
NULL, 0);
+
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock_bh(hdev);
+ hci_dev_put(hdev);
+
+ return err;
+}
+
+static int set_fast_connectable(struct sock *sk, u16 index,
+ unsigned char *data, u16 len)
+{
+ struct hci_dev *hdev;
+ struct mgmt_cp_set_fast_connectable *cp = (void *) data;
+ struct hci_cp_write_page_scan_activity acp;
+ u8 type;
+ int err;
+
+ BT_DBG("hci%u", index);
+
+ if (len != sizeof(*cp))
+ return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ EINVAL);
+
+ hdev = hci_dev_get(index);
+ if (!hdev)
+ return cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ ENODEV);
+
+ hci_dev_lock(hdev);
+
+ if (cp->enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+ acp.interval = 0x0024; /* 22.5 msec page scan interval */
+ } else {
+ type = PAGE_SCAN_TYPE_STANDARD; /* default */
+ acp.interval = 0x0800; /* default 1.28 sec page scan */
+ }
+
+ acp.window = 0x0012; /* default 11.25 msec page scan window */
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(acp), &acp);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ -err);
+ goto done;
+ }
+
+ err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
+ if (err < 0) {
+ err = cmd_status(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ -err);
+ goto done;
+ }
+
+ err = cmd_complete(sk, index, MGMT_OP_SET_FAST_CONNECTABLE,
+ NULL, 0);
+done:
+ hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
@@ -1869,6 +1938,10 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
case MGMT_OP_UNBLOCK_DEVICE:
err = unblock_device(sk, index, buf + sizeof(*hdr), len);
break;
+ case MGMT_OP_SET_FAST_CONNECTABLE:
+ err = set_fast_connectable(sk, index, buf + sizeof(*hdr),
+ len);
+ break;
default:
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode, 0x01);
@@ -1977,35 +2050,25 @@ int mgmt_connectable(u16 index, u8 connectable)
int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
{
- struct mgmt_ev_new_key *ev;
- int err, total;
-
- total = sizeof(struct mgmt_ev_new_key) + key->dlen;
- ev = kzalloc(total, GFP_ATOMIC);
- if (!ev)
- return -ENOMEM;
-
- bacpy(&ev->key.bdaddr, &key->bdaddr);
- ev->key.type = key->type;
- memcpy(ev->key.val, key->val, 16);
- ev->key.pin_len = key->pin_len;
- ev->key.dlen = key->dlen;
- ev->store_hint = persistent;
+ struct mgmt_ev_new_key ev;
- memcpy(ev->key.data, key->data, key->dlen);
-
- err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
+ memset(&ev, 0, sizeof(ev));
- kfree(ev);
+ ev.store_hint = persistent;
+ bacpy(&ev.key.bdaddr, &key->bdaddr);
+ ev.key.type = key->type;
+ memcpy(ev.key.val, key->val, 16);
+ ev.key.pin_len = key->pin_len;
- return err;
+ return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
}
-int mgmt_connected(u16 index, bdaddr_t *bdaddr)
+int mgmt_connected(u16 index, bdaddr_t *bdaddr, u8 link_type)
{
struct mgmt_ev_connected ev;
bacpy(&ev.bdaddr, bdaddr);
+ ev.link_type = link_type;
return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL);
}
@@ -2260,12 +2323,14 @@ int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
memset(&ev, 0, sizeof(ev));
bacpy(&ev.bdaddr, bdaddr);
- memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
ev.rssi = rssi;
if (eir)
memcpy(ev.eir, eir, sizeof(ev.eir));
+ if (dev_class)
+ memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+
return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
}
@@ -2286,3 +2351,29 @@ int mgmt_discovering(u16 index, u8 discovering)
return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
sizeof(discovering), NULL);
}
+
+int mgmt_device_blocked(u16 index, bdaddr_t *bdaddr)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_blocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
+
+int mgmt_device_unblocked(u16 index, bdaddr_t *bdaddr)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_ev_device_unblocked ev;
+
+ cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, index);
+
+ bacpy(&ev.bdaddr, bdaddr);
+
+ return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, index, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+}
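
The point of wrapping hci_blacklist_add()/del() in mgmt_pending_add()/mgmt_pending_remove() shows up in the two new event helpers: mgmt_device_blocked()/mgmt_device_unblocked() look the pending command up and pass its socket as the skip_sk argument of mgmt_event(), so the issuer, which already gets cmd_complete(), is excluded from the broadcast. A condensed sketch of the pattern; perform_op() and emit_event() are illustrative names, the mgmt_* helpers are the ones used above:

static int do_op(struct sock *sk, u16 index)
{
	struct pending_cmd *cmd;
	int err;

	/* record who issued the command before doing the work */
	cmd = mgmt_pending_add(sk, MGMT_OP_BLOCK_DEVICE, index, NULL, 0);
	if (!cmd)
		return -ENOMEM;

	err = perform_op();		/* may trigger the event below */

	mgmt_pending_remove(cmd);
	return err;
}

static int emit_event(u16 index, struct mgmt_ev_device_blocked *ev)
{
	struct pending_cmd *cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, index);

	/* skip the issuing socket: it is answered via cmd_complete() */
	return mgmt_event(MGMT_EV_DEVICE_BLOCKED, index, ev, sizeof(*ev),
			  cmd ? cmd->sk : NULL);
}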
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 5ba3f6d..4e32e18 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1802,6 +1802,11 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
continue;
}
+ if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
+ __rfcomm_dlc_close(d, ECONNREFUSED);
+ continue;
+ }
+
if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (d->out) {
@@ -1853,7 +1858,10 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- rfcomm_recv_frame(s, skb);
+ if (!skb_linearize(skb))
+ rfcomm_recv_frame(s, skb);
+ else
+ kfree_skb(skb);
}
if (sk->sk_state == BT_CLOSED) {
@@ -2074,7 +2082,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
rfcomm_dlc_clear_timer(d);
if (status || encrypt == 0x00) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
@@ -2085,7 +2093,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
continue;
} else if (d->sec_level == BT_SECURITY_HIGH) {
- __rfcomm_dlc_close(d, ECONNREFUSED);
+ set_bit(RFCOMM_ENC_DROP, &d->flags);
continue;
}
}
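
rfcomm_recv_frame() parses header fields by dereferencing skb->data directly, which is only valid for a linear buffer; an skb with paged fragments would be misparsed. skb_linearize() is a no-op on already-linear skbs and returns 0 on success, so the reworked loop drops only frames whose fragments cannot be pulled together (allocation failure). Annotated shape of the fix:

/* sketch of the fixed receive loop */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
	skb_orphan(skb);
	if (!skb_linearize(skb))	/* 0: skb->data now covers the frame */
		rfcomm_recv_frame(s, skb);
	else
		kfree_skb(skb);		/* -ENOMEM: drop rather than misparse */
}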
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 391888b..759b635 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -182,18 +182,9 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
return;
hci_send_acl(conn->hcon, skb, 0);
-}
-
-static __u8 seclevel_to_authreq(__u8 level)
-{
- switch (level) {
- case BT_SECURITY_HIGH:
- /* Right now we don't support bonding */
- return SMP_AUTH_MITM;
- default:
- return SMP_AUTH_NONE;
- }
+ mod_timer(&conn->security_timer, jiffies +
+ msecs_to_jiffies(SMP_TIMEOUT));
}
static void build_pairing_cmd(struct l2cap_conn *conn,
@@ -205,7 +196,7 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
dist_keys = 0;
if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
- dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
+ dist_keys = SMP_DIST_ENC_KEY;
authreq |= SMP_AUTH_BONDING;
}
@@ -229,24 +220,184 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
{
+ struct smp_chan *smp = conn->smp_chan;
+
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
return SMP_ENC_KEY_SIZE;
- conn->smp_key_size = max_key_size;
+ smp->smp_key_size = max_key_size;
return 0;
}
+static void confirm_work(struct work_struct *work)
+{
+ struct smp_chan *smp = container_of(work, struct smp_chan, confirm);
+ struct l2cap_conn *conn = smp->conn;
+ struct crypto_blkcipher *tfm;
+ struct smp_cmd_pairing_confirm cp;
+ int ret;
+ u8 res[16], reason;
+
+ BT_DBG("conn %p", conn);
+
+ tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ smp->tfm = tfm;
+
+ if (conn->hcon->out)
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
+ conn->src, conn->hcon->dst_type, conn->dst,
+ res);
+ else
+ ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+ conn->hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ swap128(res, cp.confirm_val);
+ smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+
+ return;
+
+error:
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
+ smp_chan_destroy(conn);
+}
+
+static void random_work(struct work_struct *work)
+{
+ struct smp_chan *smp = container_of(work, struct smp_chan, random);
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct crypto_blkcipher *tfm = smp->tfm;
+ u8 reason, confirm[16], res[16], key[16];
+ int ret;
+
+ if (IS_ERR_OR_NULL(tfm)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+
+ if (hcon->out)
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
+ conn->src, hcon->dst_type, conn->dst,
+ res);
+ else
+ ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+ hcon->dst_type, conn->dst, 0, conn->src,
+ res);
+ if (ret) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ swap128(res, confirm);
+
+ if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ BT_ERR("Pairing failed (confirmation values mismatch)");
+ reason = SMP_CONFIRM_FAILED;
+ goto error;
+ }
+
+ if (hcon->out) {
+ u8 stk[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ smp_s1(tfm, smp->tk, smp->rrnd, smp->prnd, key);
+ swap128(key, stk);
+
+ memset(stk + smp->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend)) {
+ reason = SMP_UNSPECIFIED;
+ goto error;
+ }
+
+ hci_le_start_enc(hcon, ediv, rand, stk);
+ hcon->enc_key_size = smp->smp_key_size;
+ } else {
+ u8 stk[16], r[16], rand[8];
+ __le16 ediv;
+
+ memset(rand, 0, sizeof(rand));
+ ediv = 0;
+
+ swap128(smp->prnd, r);
+ smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+
+ smp_s1(tfm, smp->tk, smp->prnd, smp->rrnd, key);
+ swap128(key, stk);
+
+ memset(stk + smp->smp_key_size, 0,
+ SMP_MAX_ENC_KEY_SIZE - smp->smp_key_size);
+
+ hci_add_ltk(hcon->hdev, 0, conn->dst, smp->smp_key_size,
+ ediv, rand, stk);
+ }
+
+ return;
+
+error:
+ smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason);
+ smp_chan_destroy(conn);
+}
+
+static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+{
+ struct smp_chan *smp;
+
+ smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+ if (!smp)
+ return NULL;
+
+ INIT_WORK(&smp->confirm, confirm_work);
+ INIT_WORK(&smp->random, random_work);
+
+ smp->conn = conn;
+ conn->smp_chan = smp;
+
+ hci_conn_hold(conn->hcon);
+
+ return smp;
+}
+
+void smp_chan_destroy(struct l2cap_conn *conn)
+{
+ kfree(conn->smp_chan);
+ hci_conn_put(conn->hcon);
+}
+
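
confirm_work() and random_work() exist to move the blocking AES computations (smp_c1()/smp_s1() on the "ecb(aes)" blkcipher) out of the L2CAP receive path: the command handlers only stash the peer's values in struct smp_chan and queue the work on hdev->workqueue. A minimal sketch of the same deferral pattern; struct names and process() are illustrative:

struct my_chan {
	struct work_struct work;
	struct l2cap_conn *conn;
	u8 rrnd[16];
};

static void my_work(struct work_struct *work)
{
	struct my_chan *ch = container_of(work, struct my_chan, work);

	process(ch->conn, ch->rrnd);	/* may sleep / run crypto here */
}

/* at channel creation, mirroring smp_chan_create(): */
INIT_WORK(&ch->work, my_work);

/* in the RX handler, mirroring smp_cmd_pairing_random(): */
memcpy(ch->rrnd, skb->data, sizeof(ch->rrnd));
queue_work(hdev->workqueue, &ch->work);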
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct smp_chan *smp;
u8 key_size;
+ int ret;
BT_DBG("conn %p", conn);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], req, sizeof(*req));
+ if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ smp = smp_chan_create(conn);
+
+ smp = conn->smp_chan;
+
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
if (req->oob_flag)
@@ -260,32 +411,33 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_ENC_KEY_SIZE;
/* Just works */
- memset(conn->tk, 0, sizeof(conn->tk));
+ memset(smp->tk, 0, sizeof(smp->tk));
+
+ ret = smp_rand(smp->prnd);
+ if (ret)
+ return SMP_UNSPECIFIED;
- conn->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
return 0;
}
static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
- struct smp_cmd_pairing_confirm cp;
- struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
+ u8 key_size;
int ret;
- u8 res[16], key_size;
BT_DBG("conn %p", conn);
skb_pull(skb, sizeof(*rsp));
- req = (void *) &conn->preq[1];
+ req = (void *) &smp->preq[1];
key_size = min(req->max_key_size, rsp->max_key_size);
if (check_enc_key_size(conn, key_size))
@@ -295,222 +447,154 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
return SMP_OOB_NOT_AVAIL;
/* Just works */
- memset(conn->tk, 0, sizeof(conn->tk));
-
- conn->prsp[0] = SMP_CMD_PAIRING_RSP;
- memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
-
- ret = smp_rand(conn->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
+ memset(smp->tk, 0, sizeof(smp->tk));
- ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst, res);
+ ret = smp_rand(smp->prnd);
if (ret)
return SMP_UNSPECIFIED;
- swap128(res, cp.confirm_val);
+ smp->prsp[0] = SMP_CMD_PAIRING_RSP;
+ memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
- smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ queue_work(hdev->workqueue, &smp->confirm);
return 0;
}
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
- memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
- skb_pull(skb, sizeof(conn->pcnf));
+ memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
+ skb_pull(skb, sizeof(smp->pcnf));
if (conn->hcon->out) {
u8 random[16];
- swap128(conn->prnd, random);
+ swap128(smp->prnd, random);
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
random);
} else {
- struct smp_cmd_pairing_confirm cp;
- int ret;
- u8 res[16];
-
- ret = smp_rand(conn->prnd);
- if (ret)
- return SMP_UNSPECIFIED;
-
- ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
- conn->hcon->dst_type, conn->dst,
- 0, conn->src, res);
- if (ret)
- return SMP_CONFIRM_FAILED;
-
- swap128(res, cp.confirm_val);
-
- smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ queue_work(hdev->workqueue, &smp->confirm);
}
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
return 0;
}
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct hci_conn *hcon = conn->hcon;
- struct crypto_blkcipher *tfm = hcon->hdev->tfm;
- int ret;
- u8 key[16], res[16], random[16], confirm[16];
+ struct smp_chan *smp = conn->smp_chan;
+ struct hci_dev *hdev = conn->hcon->hdev;
- swap128(skb->data, random);
- skb_pull(skb, sizeof(random));
-
- if (conn->hcon->out)
- ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
- conn->src, conn->hcon->dst_type, conn->dst,
- res);
- else
- ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
- conn->hcon->dst_type, conn->dst, 0, conn->src,
- res);
- if (ret)
- return SMP_UNSPECIFIED;
-
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
-
- swap128(res, confirm);
-
- if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
- BT_ERR("Pairing failed (confirmation values mismatch)");
- return SMP_CONFIRM_FAILED;
- }
-
- if (conn->hcon->out) {
- u8 stk[16], rand[8];
- __le16 ediv;
-
- memset(rand, 0, sizeof(rand));
- ediv = 0;
+ BT_DBG("conn %p", conn);
- smp_s1(tfm, conn->tk, random, conn->prnd, key);
- swap128(key, stk);
+ swap128(skb->data, smp->rrnd);
+ skb_pull(skb, sizeof(smp->rrnd));
- memset(stk + conn->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+ queue_work(hdev->workqueue, &smp->random);
- hci_le_start_enc(hcon, ediv, rand, stk);
- hcon->enc_key_size = conn->smp_key_size;
- } else {
- u8 stk[16], r[16], rand[8];
- __le16 ediv;
+ return 0;
+}
- memset(rand, 0, sizeof(rand));
- ediv = 0;
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
+{
+ struct link_key *key;
+ struct key_master_id *master;
+ struct hci_conn *hcon = conn->hcon;
- swap128(conn->prnd, r);
- smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
+ key = hci_find_link_key_type(hcon->hdev, conn->dst,
+ HCI_LK_SMP_LTK);
+ if (!key)
+ return 0;
- smp_s1(tfm, conn->tk, conn->prnd, random, key);
- swap128(key, stk);
+ if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND,
+ &hcon->pend))
+ return 1;
- memset(stk + conn->smp_key_size, 0,
- SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
+ master = (void *) key->data;
+ hci_le_start_enc(hcon, master->ediv, master->rand,
+ key->val);
+ hcon->enc_key_size = key->pin_len;
- hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
- ediv, rand, stk);
- }
+ return 1;
- return 0;
}
-
static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_security_req *rp = (void *) skb->data;
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp;
BT_DBG("conn %p", conn);
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+ hcon->pending_sec_level = BT_SECURITY_MEDIUM;
+
+ if (smp_ltk_encrypt(conn))
return 0;
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ return 0;
+
+ smp = smp_chan_create(conn);
+
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], &cp, sizeof(cp));
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
-
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
-
return 0;
}
int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
{
struct hci_conn *hcon = conn->hcon;
- __u8 authreq;
+ struct smp_chan *smp = conn->smp_chan;
BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
if (!lmp_host_le_capable(hcon->hdev))
return 1;
- if (IS_ERR(hcon->hdev->tfm))
- return 1;
-
- if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
- return 0;
-
if (sec_level == BT_SECURITY_LOW)
return 1;
if (hcon->sec_level >= sec_level)
return 1;
- authreq = seclevel_to_authreq(sec_level);
-
- if (hcon->link_mode & HCI_LM_MASTER) {
- struct smp_cmd_pairing cp;
- struct link_key *key;
+ if (hcon->link_mode & HCI_LM_MASTER)
+ if (smp_ltk_encrypt(conn))
+ goto done;
- key = hci_find_link_key_type(hcon->hdev, conn->dst,
- HCI_LK_SMP_LTK);
- if (key) {
- struct key_master_id *master = (void *) key->data;
+ if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
+ return 0;
- hci_le_start_enc(hcon, master->ediv, master->rand,
- key->val);
- hcon->enc_key_size = key->pin_len;
+ smp = smp_chan_create(conn);
- goto done;
- }
-
- build_pairing_cmd(conn, &cp, NULL, authreq);
- conn->preq[0] = SMP_CMD_PAIRING_REQ;
- memcpy(&conn->preq[1], &cp, sizeof(cp));
+ if (hcon->link_mode & HCI_LM_MASTER) {
+ struct smp_cmd_pairing cp;
- mod_timer(&conn->security_timer, jiffies +
- msecs_to_jiffies(SMP_TIMEOUT));
+ build_pairing_cmd(conn, &cp, NULL, SMP_AUTH_NONE);
+ smp->preq[0] = SMP_CMD_PAIRING_REQ;
+ memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
} else {
struct smp_cmd_security_req cp;
- cp.auth_req = authreq;
+ cp.auth_req = SMP_AUTH_NONE;
smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
}
done:
hcon->pending_sec_level = sec_level;
- set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
return 0;
}
@@ -518,10 +602,11 @@ done:
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
skb_pull(skb, sizeof(*rp));
- memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
+ memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
return 0;
}
@@ -529,11 +614,12 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_master_ident *rp = (void *) skb->data;
+ struct smp_chan *smp = conn->smp_chan;
skb_pull(skb, sizeof(*rp));
- hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
- rp->ediv, rp->rand, conn->tk);
+ hci_add_ltk(conn->hcon->hdev, 1, conn->src, smp->smp_key_size,
+ rp->ediv, rp->rand, smp->tk);
smp_distribute_keys(conn, 1);
@@ -552,12 +638,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
goto done;
}
- if (IS_ERR(conn->hcon->hdev->tfm)) {
- err = PTR_ERR(conn->hcon->hdev->tfm);
- reason = SMP_PAIRING_NOTSUPP;
- goto done;
- }
-
skb_pull(skb, sizeof(code));
switch (code) {
@@ -621,20 +701,21 @@ done:
int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
{
struct smp_cmd_pairing *req, *rsp;
+ struct smp_chan *smp = conn->smp_chan;
__u8 *keydist;
BT_DBG("conn %p force %d", conn, force);
- if (IS_ERR(conn->hcon->hdev->tfm))
- return PTR_ERR(conn->hcon->hdev->tfm);
+ if (!test_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend))
+ return 0;
- rsp = (void *) &conn->prsp[1];
+ rsp = (void *) &smp->prsp[1];
/* The responder sends its keys first */
if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
return 0;
- req = (void *) &conn->preq[1];
+ req = (void *) &smp->preq[1];
if (conn->hcon->out) {
keydist = &rsp->init_key_dist;
@@ -658,7 +739,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
- hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
+ hci_add_ltk(conn->hcon->hdev, 1, conn->dst, smp->smp_key_size,
ediv, ident.rand, enc.ltk);
ident.ediv = cpu_to_le16(ediv);
@@ -698,5 +779,11 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
*keydist &= ~SMP_DIST_SIGN;
}
+ if (conn->hcon->out || force) {
+ clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->pend);
+ del_timer(&conn->security_timer);
+ smp_chan_destroy(conn);
+ }
+
return 0;
}
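
Taken together, the smp_conn_security() rework reduces to the control flow below: a stored LTK of type HCI_LK_SMP_LTK short-circuits pairing on reconnection, HCI_CONN_LE_SMP_PEND guarantees a single smp_chan per link, and the auth requirement is fixed to SMP_AUTH_NONE now that seclevel_to_authreq() is gone. Condensed from the hunks above, with locking and error paths elided:

if (hcon->link_mode & HCI_LM_MASTER)
	if (smp_ltk_encrypt(conn))		/* reuse stored LTK */
		goto done;

if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend))
	return 0;				/* SMP already running */

smp = smp_chan_create(conn);			/* per-pairing state */
/* master: send SMP_CMD_PAIRING_REQ; slave: send SMP_CMD_SECURITY_REQ */

done:
hcon->pending_sec_level = sec_level;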
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ff3ed60..feb77ea 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -301,7 +301,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_start_xmit = br_dev_xmit,
.ndo_get_stats64 = br_get_stats64,
.ndo_set_mac_address = br_set_mac_address,
- .ndo_set_multicast_list = br_dev_set_multicast_list,
+ .ndo_set_rx_mode = br_dev_set_multicast_list,
.ndo_change_mtu = br_change_mtu,
.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -358,6 +358,8 @@ void br_dev_setup(struct net_device *dev)
memcpy(br->group_addr, br_group_address, ETH_ALEN);
br->stp_enabled = BR_NO_STP;
+ br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
+
br->designated_root = br->bridge_id;
br->bridge_max_age = br->max_age = 20 * HZ;
br->bridge_hello_time = br->hello_time = 2 * HZ;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 68def3b..c8e7861 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -558,19 +558,28 @@ skip:
/* Create new static fdb entry */
static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
- __u16 state)
+ __u16 state, __u16 flags)
{
struct net_bridge *br = source->br;
struct hlist_head *head = &br->hash[br_mac_hash(addr)];
struct net_bridge_fdb_entry *fdb;
fdb = fdb_find(head, addr);
- if (fdb)
- return -EEXIST;
+ if (fdb == NULL) {
+ if (!(flags & NLM_F_CREATE))
+ return -ENOENT;
- fdb = fdb_create(head, source, addr);
- if (!fdb)
- return -ENOMEM;
+ fdb = fdb_create(head, source, addr);
+ if (!fdb)
+ return -ENOMEM;
+ } else {
+ if (flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ if (flags & NLM_F_REPLACE)
+ fdb->updated = fdb->used = jiffies;
+ fdb->is_local = fdb->is_static = 0;
+ }
if (state & NUD_PERMANENT)
fdb->is_local = fdb->is_static = 1;
@@ -626,7 +635,7 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
}
spin_lock_bh(&p->br->hash_lock);
- err = fdb_add_entry(p, addr, ndm->ndm_state);
+ err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags);
spin_unlock_bh(&p->br->hash_lock);
return err;
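
fdb_add_entry() now implements the standard netlink object semantics: NLM_F_CREATE is required to instantiate a missing entry, NLM_F_EXCL refuses to touch an existing one, and an existing entry is otherwise updated (NLM_F_REPLACE also refreshes the timestamps). The decision table as a self-contained userspace model:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <linux/netlink.h>

static int fdb_add(bool exists, unsigned int flags)
{
	if (!exists)
		return (flags & NLM_F_CREATE) ? 0 : -ENOENT;
	if (flags & NLM_F_EXCL)
		return -EEXIST;
	return 0;	/* update; NLM_F_REPLACE also refreshes timestamps */
}

int main(void)
{
	printf("%d\n", fdb_add(false, 0));			   /* -2  */
	printf("%d\n", fdb_add(false, NLM_F_CREATE));		   /*  0  */
	printf("%d\n", fdb_add(true, NLM_F_CREATE | NLM_F_EXCL)); /* -17 */
	printf("%d\n", fdb_add(true, NLM_F_REPLACE));		   /*  0  */
	return 0;
}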
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1d420f6..f603e5b 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
@@ -33,20 +34,18 @@
*/
static int port_cost(struct net_device *dev)
{
- if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
-
- if (!dev_ethtool_get_settings(dev, &ecmd)) {
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_10000:
- return 2;
- case SPEED_1000:
- return 4;
- case SPEED_100:
- return 19;
- case SPEED_10:
- return 100;
- }
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(dev, &ecmd)) {
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_10000:
+ return 2;
+ case SPEED_1000:
+ return 4;
+ case SPEED_100:
+ return 19;
+ case SPEED_10:
+ return 100;
}
}
@@ -325,7 +324,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
/* Don't allow bridging non-ethernet like devices */
if ((dev->flags & IFF_LOOPBACK) ||
- dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN)
+ dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
+ !is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
/* No bridging of bridges */
@@ -353,10 +353,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
SYSFS_BRIDGE_PORT_ATTR);
if (err)
- goto err0;
-
- err = br_fdb_insert(br, p, dev->dev_addr);
- if (err)
goto err1;
err = br_sysfs_addif(p);
@@ -397,6 +393,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev_set_mtu(br->dev, br_min_mtu(br));
+ if (br_fdb_insert(br, p, dev->dev_addr))
+ netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
@@ -406,11 +405,9 @@ err4:
err3:
sysfs_remove_link(br->ifobj, p->dev->name);
err2:
- br_fdb_delete_by_port(br, p, 1);
-err1:
kobject_put(&p->kobj);
p = NULL; /* kobject_put frees */
-err0:
+err1:
dev_set_promiscuity(dev, -1);
put_back:
dev_put(dev);
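
The new is_valid_ether_addr() check in br_add_if() rejects devices whose MAC address is all-zero or has the multicast (I/G) bit set, since neither can serve as the port's unicast source address. Its logic reduces to the following userspace equivalent:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool valid_ether_addr(const uint8_t a[6])
{
	static const uint8_t zero[6];

	return !(a[0] & 0x01) &&	/* not multicast/broadcast */
	       memcmp(a, zero, 6) != 0;	/* not 00:00:00:00:00:00 */
}

int main(void)
{
	const uint8_t ok[6]  = { 0x02, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const uint8_t mc[6]  = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
	const uint8_t nil[6] = { 0 };

	printf("%d %d %d\n", valid_ether_addr(ok),
	       valid_ether_addr(mc), valid_ether_addr(nil)); /* 1 0 0 */
	return 0;
}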
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f06ee39..5a31731 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
+#include <linux/export.h>
#include "br_private.h"
/* Bridge group multicast address 802.1d (pg 51). */
@@ -162,14 +163,37 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
p = br_port_get_rcu(skb->dev);
if (unlikely(is_link_local(dest))) {
- /* Pause frames shouldn't be passed up by driver anyway */
- if (skb->protocol == htons(ETH_P_PAUSE))
+ /*
+ * See IEEE 802.1D Table 7-10 Reserved addresses
+ *
+ * Assignment Value
+ * Bridge Group Address 01-80-C2-00-00-00
+ * (MAC Control) 802.3 01-80-C2-00-00-01
+ * (Link Aggregation) 802.3 01-80-C2-00-00-02
+ * 802.1X PAE address 01-80-C2-00-00-03
+ *
+ * 802.1AB LLDP 01-80-C2-00-00-0E
+ *
+ * Others reserved for future standardization
+ */
+ switch (dest[5]) {
+ case 0x00: /* Bridge Group Address */
+ /* If STP is turned off,
+ then must forward to keep loop detection */
+ if (p->br->stp_enabled == BR_NO_STP)
+ goto forward;
+ break;
+
+ case 0x01: /* IEEE MAC (Pause) */
goto drop;
- /* If STP is turned off, then forward */
- if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
- goto forward;
+ default:
+ /* Allow selective forwarding for most other protocols */
+ if (p->br->group_fwd_mask & (1u << dest[5]))
+ goto forward;
+ }
+ /* Deliver packet to local host only */
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
NULL, br_handle_local_finish)) {
return RX_HANDLER_CONSUMED; /* consumed by filter */
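
The switch keys on the last octet of the reserved 01-80-C2-00-00-XX range, so bit N of group_fwd_mask enables forwarding of address ...-0N; the sysfs hunk further down rejects any mask overlapping BR_GROUPFWD_RESTRICTED = 0x4007, i.e. bits 0 (STP), 1 (MAC Pause), 2 (LACP) and 14 (LLDP). For example, forwarding 802.1X PAE frames (...-03) needs bit 3:

#include <stdio.h>

#define BR_GROUPFWD_RESTRICTED 0x4007u	/* STP, Pause, LACP, LLDP */

int main(void)
{
	unsigned int mask = 1u << 0x03;	/* 0x8: 01-80-C2-00-00-03 (802.1X) */

	if (mask & BR_GROUPFWD_RESTRICTED)
		printf("rejected: restricted control protocol\n");
	else
		printf("group_fwd_mask %#x accepted\n", mask);
	return 0;
}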
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 857a021..d7d6fb0 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -29,6 +29,11 @@
#define BR_VERSION "2.3"
+/* Control of forwarding link local multicast */
+#define BR_GROUPFWD_DEFAULT 0
+/* Don't allow forwarding control protocols like STP and LLDP */
+#define BR_GROUPFWD_RESTRICTED 0x4007u
+
/* Path to usermode spanning tree program */
#define BR_STP_PROG "/sbin/bridge-stp"
@@ -193,6 +198,8 @@ struct net_bridge
unsigned long flags;
#define BR_SET_MAC_ADDR 0x00000001
+ u16 group_fwd_mask;
+
/* STP */
bridge_id designated_root;
bridge_id bridge_id;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 10eda3c..19308e3 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -12,6 +12,7 @@
*/
#include <linux/kernel.h>
+#include <linux/kmod.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 68b893e..c236c0e 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -149,6 +149,39 @@ static ssize_t store_stp_state(struct device *d,
static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
store_stp_state);
+static ssize_t show_group_fwd_mask(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%#x\n", br->group_fwd_mask);
+}
+
+
+static ssize_t store_group_fwd_mask(struct device *d,
+ struct device_attribute *attr, const char *buf,
+ size_t len)
+{
+ struct net_bridge *br = to_bridge(d);
+ char *endp;
+ unsigned long val;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ return -EINVAL;
+
+ if (val & BR_GROUPFWD_RESTRICTED)
+ return -EINVAL;
+
+ br->group_fwd_mask = val;
+
+ return len;
+}
+static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
+ store_group_fwd_mask);
+
static ssize_t show_priority(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -652,6 +685,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_max_age.attr,
&dev_attr_ageing_time.attr,
&dev_attr_stp_state.attr,
+ &dev_attr_group_fwd_mask.attr,
&dev_attr_priority.attr,
&dev_attr_bridge_id.attr,
&dev_attr_root_id.attr,
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index bf2a333..5449294 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -102,16 +102,15 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
unsigned int n;
n = max(size, nlbufsiz);
- skb = alloc_skb(n, GFP_ATOMIC);
+ skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
- pr_debug("cannot alloc whole buffer of size %ub!\n", n);
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- pr_debug("cannot even allocate "
- "buffer of size %ub\n", size);
+ pr_debug("cannot even allocate buffer of size %ub\n",
+ size);
}
}
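
__GFP_NOWARN matters here because the oversized first attempt (nlbufsiz may be much larger than the current packet needs) is allowed to fail routinely; without the flag every such failure would dump an allocation backtrace. Only the minimal fallback keeps the default warning. The idiom, condensed:

/* try-big-then-shrink allocation (sketch of the code above) */
n = max(size, nlbufsiz);
skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);	/* quiet on failure */
if (!skb && n > size)
	skb = alloc_skb(size, GFP_ATOMIC);	/* minimal size, may warn */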
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 1bcaf36a..40d8258 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,14 +87,14 @@ static int __init ebtable_broute_init(void)
if (ret < 0)
return ret;
/* see br_input.c */
- rcu_assign_pointer(br_should_route_hook,
+ RCU_INIT_POINTER(br_should_route_hook,
(br_should_route_hook_t *)ebt_broute);
return 0;
}
static void __exit ebtable_broute_fini(void)
{
- rcu_assign_pointer(br_should_route_hook, NULL);
+ RCU_INIT_POINTER(br_should_route_hook, NULL);
synchronize_net();
unregister_pernet_subsys(&broute_net_ops);
}
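
rcu_assign_pointer() includes a write barrier so readers dereferencing the pointer are guaranteed to see the pointee fully initialized. That ordering is pointless when the stored value is NULL, or when the pointee (here, a function) needs no prior initialization; RCU_INIT_POINTER() expresses exactly that and compiles to a plain store. Sketch of when each applies:

struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

init_foo(p);
rcu_assign_pointer(gp, p);	/* barrier: init visible before publish */

/* ... teardown ... */
RCU_INIT_POINTER(gp, NULL);	/* plain store: NULL needs no ordering */
synchronize_net();		/* wait out readers before freeing */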
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 7f9ac07..f1fa1f6 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -16,6 +16,7 @@
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
@@ -212,8 +213,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
enum cfcnfg_phy_preference pref;
enum cfcnfg_phy_type phy_type;
struct cfcnfg *cfg;
- struct caif_device_entry_list *caifdevs =
- caif_device_list(dev_net(dev));
+ struct caif_device_entry_list *caifdevs;
if (dev->type != ARPHRD_CAIF)
return 0;
@@ -222,6 +222,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
if (cfg == NULL)
return 0;
+ caifdevs = caif_device_list(dev_net(dev));
+
switch (what) {
case NETDEV_REGISTER:
caifd = caif_device_alloc(dev);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 52fe33b..00523ec 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -78,10 +78,8 @@ struct cfcnfg *cfcnfg_create(void)
/* Initiate this layer */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ if (!this)
return NULL;
- }
this->mux = cfmuxl_create();
if (!this->mux)
goto out_of_mem;
@@ -108,8 +106,6 @@ struct cfcnfg *cfcnfg_create(void)
return this;
out_of_mem:
- pr_warn("Out of memory\n");
-
synchronize_rcu();
kfree(this->mux);
@@ -448,10 +444,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
"- unknown channel type\n");
goto unlock;
}
- if (!servicel) {
- pr_warn("Out of memory\n");
+ if (!servicel)
goto unlock;
- }
layer_set_dn(servicel, cnfg->mux);
cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
layer_set_up(servicel, adapt_layer);
@@ -473,7 +467,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
{
struct cflayer *frml;
struct cflayer *phy_driver = NULL;
- struct cfcnfg_phyinfo *phyinfo;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
int i;
u8 phyid;
@@ -488,25 +482,25 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
goto got_phyid;
}
pr_warn("Too many CAIF Link Layers (max 6)\n");
- goto out;
+ goto out_err;
got_phyid:
phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
+ if (!phyinfo)
+ goto out_err;
switch (phy_type) {
case CFPHYTYPE_FRAG:
phy_driver =
cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
- if (!phy_driver) {
- pr_warn("Out of memory\n");
- goto out;
- }
+ if (!phy_driver)
+ goto out_err;
break;
case CFPHYTYPE_CAIF:
phy_driver = NULL;
break;
default:
- goto out;
+ goto out_err;
}
phy_layer->id = phyid;
phyinfo->pref = pref;
@@ -520,11 +514,8 @@ got_phyid:
frml = cffrml_create(phyid, fcs);
- if (!frml) {
- pr_warn("Out of memory\n");
- kfree(phyinfo);
- goto out;
- }
+ if (!frml)
+ goto out_err;
phyinfo->frm_layer = frml;
layer_set_up(frml, cnfg->mux);
@@ -540,7 +531,12 @@ got_phyid:
}
list_add_rcu(&phyinfo->node, &cnfg->phys);
-out:
+ mutex_unlock(&cnfg->lock);
+ return;
+
+out_err:
+ kfree(phy_driver);
+ kfree(phyinfo);
mutex_unlock(&cnfg->lock);
}
EXPORT_SYMBOL(cfcnfg_add_phy_layer);
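
The consolidated out_err label works because phyinfo and phy_driver are initialized to NULL and kfree(NULL) is defined as a no-op, so both frees can run unconditionally no matter which allocation failed. The idiom in isolation:

struct a *x = NULL;
struct b *y = NULL;

x = kzalloc(sizeof(*x), GFP_ATOMIC);
if (!x)
	goto out_err;

y = kzalloc(sizeof(*y), GFP_ATOMIC);
if (!y)
	goto out_err;
/* ... success path ... */
return;

out_err:
kfree(y);	/* kfree(NULL) is a no-op */
kfree(x);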
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index e22671b..5cf5222 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -35,15 +35,12 @@ struct cflayer *cfctrl_create(void)
{
struct dev_info dev_info;
struct cfctrl *this =
- kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
memset(&dev_info, 0, sizeof(dev_info));
dev_info.id = 0xff;
- memset(this, 0, sizeof(*this));
cfsrvl_init(&this->serv, 0, &dev_info, false);
atomic_set(&this->req_seq_no, 1);
atomic_set(&this->rsp_seq_no, 1);
@@ -180,10 +177,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return;
- }
if (!dn) {
pr_debug("not able to send enum request\n");
return;
@@ -224,10 +219,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
}
pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
@@ -275,10 +268,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req) {
- pr_warn("Out of memory\n");
+ if (!req)
return -ENOMEM;
- }
req->client_layer = user_layer;
req->cmd = CFCTRL_CMD_LINK_SETUP;
req->param = *param;
@@ -312,10 +303,8 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (!dn) {
pr_debug("not able to send link-down request\n");
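
All of the caif allocation hunks in this series are the same two-part conversion: kzalloc(n, flags) is exactly kmalloc(n, flags) followed by zeroing the whole allocation, and the per-callsite "Out of memory" prints go away because the page allocator already logs failed allocations unless __GFP_NOWARN is passed. Equivalence:

/* these are equivalent; the diffs pick the first form */
this = kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);

this = kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
if (this)
	memset(this, 0, sizeof(struct cfctrl));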
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 11a2af4..65d6ef3 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -19,13 +19,10 @@ static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!dbg) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *dbg = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dbg)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(dbg, 0, sizeof(struct cfsrvl));
cfsrvl_init(dbg, channel_id, dev_info, false);
dbg->layer.receive = cfdbgl_receive;
dbg->layer.transmit = cfdbgl_transmit;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 0382dec..0f5ff27 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -26,13 +26,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!dgm) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *dgm = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dgm)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(dgm, 0, sizeof(struct cfsrvl));
cfsrvl_init(dgm, channel_id, dev_info, true);
dgm->layer.receive = cfdgml_receive;
dgm->layer.transmit = cfdgml_transmit;
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 04204b2..f399211 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -34,11 +34,9 @@ static u32 cffrml_rcv_error;
static u32 cffrml_rcv_checsum_error;
struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
{
- struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ struct cffrml *this = kzalloc(sizeof(struct cffrml), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
this->pcpu_refcnt = alloc_percpu(int);
if (this->pcpu_refcnt == NULL) {
kfree(this);
@@ -47,7 +45,6 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
caif_assert(offsetof(struct cffrml, layer) == 0);
- memset(this, 0, sizeof(struct cflayer));
this->layer.receive = cffrml_receive;
this->layer.transmit = cffrml_transmit;
this->layer.ctrlcmd = cffrml_ctrlcmd;
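
Beyond the cleanup, the cffrml hunk fixes a latent bug visible in the removed line: the allocation is sizeof(struct cffrml), but the old memset() zeroed only sizeof(struct cflayer), the embedded first member, leaving the trailing cffrml fields uninitialized. kzalloc() zeroes the full object. Illustration with generic stand-in types (names hypothetical):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct layer { void *receive, *transmit, *ctrlcmd; };
struct frml  { struct layer layer; int dofcs; void *pcpu_refcnt; };

int main(void)
{
	struct frml *f = malloc(sizeof(*f));

	memset(f, 0, sizeof(struct layer));	/* the old bug: tail left dirty */
	printf("zeroed %zu of %zu bytes\n",
	       sizeof(struct layer), sizeof(struct frml));
	free(f);
	return 0;
}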
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index c23979e..b36f24a 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -108,7 +108,7 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
int idx = phyid % DN_CACHE_SIZE;
spin_lock_bh(&muxl->transmit_lock);
- rcu_assign_pointer(muxl->dn_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->dn_cache[idx], NULL);
dn = get_from_id(&muxl->frml_list, phyid);
if (dn == NULL)
goto out;
@@ -164,7 +164,7 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
if (up == NULL)
goto out;
- rcu_assign_pointer(muxl->up_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
list_del_rcu(&up->node);
out:
spin_unlock_bh(&muxl->receive_lock);
@@ -261,7 +261,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
idx = layer->id % UP_CACHE_SIZE;
spin_lock_bh(&muxl->receive_lock);
- rcu_assign_pointer(muxl->up_cache[idx], NULL);
+ RCU_INIT_POINTER(muxl->up_cache[idx], NULL);
list_del_rcu(&layer->node);
spin_unlock_bh(&muxl->receive_lock);
}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 75d4bfa..df08c47 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,6 +9,7 @@
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/hardirq.h>
+#include <linux/export.h>
#include <net/caif/cfpkt.h>
#define PKT_PREFIX 48
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 0deabb4..81660f8 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -46,13 +46,10 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
int mtu_size)
{
int tmp;
- struct cfrfml *this =
- kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+ struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ if (!this)
return NULL;
- }
cfsrvl_init(&this->serv, channel_id, dev_info, false);
this->serv.release = cfrfml_release;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 2715c84..797c8d1 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -33,13 +33,10 @@ static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
- struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
- if (!this) {
- pr_warn("Out of memory\n");
+ struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);
+ if (!this)
return NULL;
- }
caif_assert(offsetof(struct cfserl, layer) == 0);
- memset(this, 0, sizeof(struct cfserl));
this->layer.receive = cfserl_receive;
this->layer.transmit = cfserl_transmit;
this->layer.ctrlcmd = cfserl_ctrlcmd;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 535a1e7..b99f5b2 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -108,10 +108,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_on = SRVL_FLOW_ON;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
pr_err("Packet is erroneous!\n");
@@ -130,10 +128,8 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
struct caif_payload_info *info;
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
- if (!pkt) {
- pr_warn("Out of memory\n");
+ if (!pkt)
return -ENOMEM;
- }
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
pr_err("Packet is erroneous!\n");
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 98e027d..53e49f3 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -26,13 +26,10 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!util) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *util = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!util)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(util, 0, sizeof(struct cfsrvl));
cfsrvl_init(util, channel_id, dev_info, true);
util->layer.receive = cfutill_receive;
util->layer.transmit = cfutill_transmit;
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3ec83fb..910ab06 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -25,13 +25,10 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!vei) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *vei = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vei)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(vei, 0, sizeof(struct cfsrvl));
cfsrvl_init(vei, channel_id, dev_info, true);
vei->layer.receive = cfvei_receive;
vei->layer.transmit = cfvei_transmit;
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index b2f5989..e3f37db 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -21,14 +21,11 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
{
- struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
- if (!vid) {
- pr_warn("Out of memory\n");
+ struct cfsrvl *vid = kzalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vid)
return NULL;
- }
caif_assert(offsetof(struct cfsrvl, layer) == 0);
- memset(vid, 0, sizeof(struct cfsrvl));
cfsrvl_init(vid, channel_id, dev_info, false);
vid->layer.receive = cfvidl_receive;
vid->layer.transmit = cfvidl_transmit;
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 89395b2..0320069 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -40,5 +40,16 @@ config CAN_BCM
CAN messages are used on the bus (e.g. in automotive environments).
To use the Broadcast Manager, use AF_CAN with protocol CAN_BCM.
+config CAN_GW
+ tristate "CAN Gateway/Router (with netlink configuration)"
+ depends on CAN
+	default n
+ ---help---
+ The CAN Gateway/Router is used to route (and modify) CAN frames.
+ It is based on the PF_CAN core infrastructure for msg filtering and
+ msg sending and can optionally modify routed CAN frames on the fly.
+ CAN frames can be routed between CAN network interfaces (one hop).
+ They can be modified with AND/OR/XOR/SET operations as configured
+ by the netlink configuration interface known e.g. from iptables.
source "drivers/net/can/Kconfig"
diff --git a/net/can/Makefile b/net/can/Makefile
index 2d3894b3..cef49eb 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -10,3 +10,6 @@ can-raw-y := raw.o
obj-$(CONFIG_CAN_BCM) += can-bcm.o
can-bcm-y := bcm.o
+
+obj-$(CONFIG_CAN_GW) += can-gw.o
+can-gw-y := gw.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 9b0c32a..0ce2ad0 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -38,8 +38,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
@@ -719,7 +717,7 @@ int can_proto_register(const struct can_proto *cp)
proto);
err = -EBUSY;
} else
- rcu_assign_pointer(proto_tab[proto], cp);
+ RCU_INIT_POINTER(proto_tab[proto], cp);
mutex_unlock(&proto_tab_lock);
@@ -740,7 +738,7 @@ void can_proto_unregister(const struct can_proto *cp)
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[proto] != cp);
- rcu_assign_pointer(proto_tab[proto], NULL);
+ RCU_INIT_POINTER(proto_tab[proto], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 34253b8..fd882db 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -35,8 +35,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#ifndef AF_CAN_H
diff --git a/net/can/bcm.c b/net/can/bcm.c
index c84963d..151b773 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/can/gw.c b/net/can/gw.c
new file mode 100644
index 0000000..3d79b12
--- /dev/null
+++ b/net/can/gw.c
@@ -0,0 +1,957 @@
+/*
+ * gw.c - CAN frame Gateway/Router/Bridge with netlink interface
+ *
+ * Copyright (c) 2011 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/core.h>
+#include <linux/can/gw.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+#define CAN_GW_VERSION "20101209"
+static __initdata const char banner[] =
+ KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";
+
+MODULE_DESCRIPTION("PF_CAN netlink gateway");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
+MODULE_ALIAS("can-gw");
+
+HLIST_HEAD(cgw_list);
+static struct notifier_block notifier;
+
+static struct kmem_cache *cgw_cache __read_mostly;
+
+/* structure that contains the (on-the-fly) CAN frame modifications */
+struct cf_mod {
+ struct {
+ struct can_frame and;
+ struct can_frame or;
+ struct can_frame xor;
+ struct can_frame set;
+ } modframe;
+ struct {
+ u8 and;
+ u8 or;
+ u8 xor;
+ u8 set;
+ } modtype;
+ void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
+ struct cf_mod *mod);
+
+ /* CAN frame checksum calculation after CAN frame modifications */
+ struct {
+ struct cgw_csum_xor xor;
+ struct cgw_csum_crc8 crc8;
+ } csum;
+ struct {
+ void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
+ void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
+ } csumfunc;
+};
+
+
+/*
+ * So far we just support CAN -> CAN routing and frame modifications.
+ *
+ * The internal can_can_gw structure contains data and attributes for
+ * a CAN -> CAN gateway job.
+ */
+struct can_can_gw {
+ struct can_filter filter;
+ int src_idx;
+ int dst_idx;
+};
+
+/* list entry for CAN gateways jobs */
+struct cgw_job {
+ struct hlist_node list;
+ struct rcu_head rcu;
+ u32 handled_frames;
+ u32 dropped_frames;
+ struct cf_mod mod;
+ union {
+ /* CAN frame data source */
+ struct net_device *dev;
+ } src;
+ union {
+ /* CAN frame data destination */
+ struct net_device *dev;
+ } dst;
+ union {
+ struct can_can_gw ccgw;
+ /* tbc */
+ };
+ u8 gwtype;
+ u16 flags;
+};
+
+/* modification functions that are invoked in the hot path in can_can_gw_rcv */
+
+#define MODFUNC(func, op) static void func(struct can_frame *cf, \
+ struct cf_mod *mod) { op ; }
+
+MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
+MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
+MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
+MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
+MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
+MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
+MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
+MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
+MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
+MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
+MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
+MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
+
+static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
+{
+ /*
+ * Copy the struct members separately to ensure that no uninitialized
+ * data are copied in the 3 bytes hole of the struct. This is needed
+ * to make easy compares of the data in the struct cf_mod.
+ */
+
+ dst->can_id = src->can_id;
+ dst->can_dlc = src->can_dlc;
+ *(u64 *)dst->data = *(u64 *)src->data;
+}
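
The hole exists because data[8] in struct can_frame is 8-byte aligned while can_id (4 bytes) plus can_dlc (1 byte) end at offset 5. Copying member-wise keeps those 3 padding bytes in their kzalloc'd all-zero state, so whole-struct comparisons of stored modification frames stay deterministic. A standalone check with a can_frame-shaped struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct frame {
	uint32_t can_id;
	uint8_t  can_dlc;
	uint8_t  data[8] __attribute__((aligned(8)));
};

int main(void)
{
	size_t end_dlc = offsetof(struct frame, can_dlc) + 1;	/* 5 */
	size_t data_at = offsetof(struct frame, data);		/* 8 */

	printf("%zu-byte hole between can_dlc and data\n",
	       data_at - end_dlc);				/* 3 */
	return 0;
}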
+
+static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
+{
+ /*
+ * absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
+ * relative to received dlc -1 .. -8 :
+ * e.g. for received dlc = 8
+ * -1 => index = 7 (data[7])
+ * -3 => index = 5 (data[5])
+ * -8 => index = 0 (data[0])
+ */
+
+ if (fr > -9 && fr < 8 &&
+ to > -9 && to < 8 &&
+ re > -9 && re < 8)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static inline int calc_idx(int idx, int rx_dlc)
+{
+ if (idx < 0)
+ return rx_dlc + idx;
+ else
+ return idx;
+}
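
So for a received 8-byte frame, -1 addresses data[7] and -8 addresses data[0], exactly as the comment in cgw_chk_csum_parms() describes. cgw_chk_csum_parms() only validates the static -8..7 range; the per-frame clamp happens in calc_idx(), whose negative result (e.g. -3 on a 2-byte frame) makes the _rel checksum functions bail out. Runnable check:

#include <stdio.h>

static int calc_idx(int idx, int rx_dlc)
{
	return idx < 0 ? rx_dlc + idx : idx;
}

int main(void)
{
	printf("%d\n", calc_idx(-1, 8));	/* 7: last data byte */
	printf("%d\n", calc_idx(-8, 8));	/* 0: first data byte */
	printf("%d\n", calc_idx(-3, 2));	/* -1: invalid, caller returns */
	printf("%d\n", calc_idx(5, 8));		/* 5: absolute index */
	return 0;
}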
+
+static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ int from = calc_idx(xor->from_idx, cf->can_dlc);
+ int to = calc_idx(xor->to_idx, cf->can_dlc);
+ int res = calc_idx(xor->result_idx, cf->can_dlc);
+ u8 val = xor->init_xor_val;
+ int i;
+
+ if (from < 0 || to < 0 || res < 0)
+ return;
+
+ if (from <= to) {
+ for (i = from; i <= to; i++)
+ val ^= cf->data[i];
+ } else {
+ for (i = from; i >= to; i--)
+ val ^= cf->data[i];
+ }
+
+ cf->data[res] = val;
+}
+
+static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ u8 val = xor->init_xor_val;
+ int i;
+
+ for (i = xor->from_idx; i <= xor->to_idx; i++)
+ val ^= cf->data[i];
+
+ cf->data[xor->result_idx] = val;
+}
+
+static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
+{
+ u8 val = xor->init_xor_val;
+ int i;
+
+ for (i = xor->from_idx; i >= xor->to_idx; i--)
+ val ^= cf->data[i];
+
+ cf->data[xor->result_idx] = val;
+}
+
+static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ int from = calc_idx(crc8->from_idx, cf->can_dlc);
+ int to = calc_idx(crc8->to_idx, cf->can_dlc);
+ int res = calc_idx(crc8->result_idx, cf->can_dlc);
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ if (from < 0 || to < 0 || res < 0)
+ return;
+
+	if (from <= to) {
+		for (i = from; i <= to; i++)
+			crc = crc8->crctab[crc^cf->data[i]];
+	} else {
+		for (i = from; i >= to; i--)
+			crc = crc8->crctab[crc^cf->data[i]];
+	}
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+
+ }
+
+	cf->data[res] = crc^crc8->final_xor_val;
+}
+
+static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ for (i = crc8->from_idx; i <= crc8->to_idx; i++)
+ crc = crc8->crctab[crc^cf->data[i]];
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+ }
+
+ cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
+}
+
+static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
+{
+ u8 crc = crc8->init_crc_val;
+ int i;
+
+ for (i = crc8->from_idx; i >= crc8->to_idx; i--)
+ crc = crc8->crctab[crc^cf->data[i]];
+
+ switch (crc8->profile) {
+
+ case CGW_CRC8PRF_1U8:
+ crc = crc8->crctab[crc^crc8->profile_data[0]];
+ break;
+
+ case CGW_CRC8PRF_16U8:
+ crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
+ break;
+
+ case CGW_CRC8PRF_SFFID_XOR:
+ crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
+ (cf->can_id >> 8 & 0xFF)];
+ break;
+ }
+
+ cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
+}
+
+/* the receive & process & send function */
+static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+{
+ struct cgw_job *gwj = (struct cgw_job *)data;
+ struct can_frame *cf;
+ struct sk_buff *nskb;
+ int modidx = 0;
+
+ /* do not handle already routed frames - see comment below */
+ if (skb_mac_header_was_set(skb))
+ return;
+
+ if (!(gwj->dst.dev->flags & IFF_UP)) {
+ gwj->dropped_frames++;
+ return;
+ }
+
+ /*
+ * clone the given skb, which has not been done in can_rcv()
+ *
+ * When there is at least one modification function activated,
+ * we need to copy the skb as we want to modify skb->data.
+ */
+ if (gwj->mod.modfunc[0])
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ else
+ nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!nskb) {
+ gwj->dropped_frames++;
+ return;
+ }
+
+ /*
+ * Mark routed frames by setting some mac header length which is
+ * not relevant for the CAN frames located in the skb->data section.
+ *
+ * As dev->header_ops is not set in CAN netdevices no one is ever
+ * accessing the various header offsets in the CAN skbuffs anyway.
+ * E.g. using the packet socket to read CAN frames is still working.
+ */
+ skb_set_mac_header(nskb, 8);
+ nskb->dev = gwj->dst.dev;
+
+ /* pointer to modifiable CAN frame */
+ cf = (struct can_frame *)nskb->data;
+
+ /* perform preprocessed modification functions if there are any */
+ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+ (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
+
+ /* check for checksum updates when the CAN frame has been modified */
+ if (modidx) {
+ if (gwj->mod.csumfunc.crc8)
+ (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+
+ if (gwj->mod.csumfunc.xor)
+ (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
+
+ /* clear the skb timestamp if not configured the other way */
+ if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
+ nskb->tstamp.tv64 = 0;
+
+ /* send to netdevice */
+ if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
+ gwj->dropped_frames++;
+ else
+ gwj->handled_frames++;
+}
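
The per-frame cost of an unmodified route is a single NULL check because modfunc[] is populated once, at job-creation time, with only the operations actually configured (the population code lives in the netlink job parsing, outside this excerpt). A hedged sketch of how such a table is filled; the CGW_MOD_ID/DLC/DATA bit names follow linux/can/gw.h, the function itself is illustrative:

static void setup_modfuncs(struct cf_mod *mod)
{
	int i = 0;

	if (mod->modtype.and & CGW_MOD_ID)
		mod->modfunc[i++] = mod_and_id;
	if (mod->modtype.and & CGW_MOD_DLC)
		mod->modfunc[i++] = mod_and_dlc;
	if (mod->modtype.and & CGW_MOD_DATA)
		mod->modfunc[i++] = mod_and_data;
	/* ... likewise for or/xor/set; unused slots stay NULL (kzalloc) */
}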
+
+static inline int cgw_register_filter(struct cgw_job *gwj)
+{
+ return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv,
+ gwj, "gw");
+}
+
+static inline void cgw_unregister_filter(struct cgw_job *gwj)
+{
+ can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
+}
+
+static int cgw_notifier(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return NOTIFY_DONE;
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+
+ if (msg == NETDEV_UNREGISTER) {
+
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+
+ ASSERT_RTNL();
+
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+
+ if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
+{
+ struct cgw_frame_mod mb;
+ struct rtcanmsg *rtcan;
+ struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rtcan = nlmsg_data(nlh);
+ rtcan->can_family = AF_CAN;
+ rtcan->gwtype = gwj->gwtype;
+ rtcan->flags = gwj->flags;
+
+ /* add statistics if available */
+
+ if (gwj->handled_frames) {
+ if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ if (gwj->dropped_frames) {
+ if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ /* check non default settings of attributes */
+
+ if (gwj->mod.modtype.and) {
+ memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.and;
+ if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.or) {
+ memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.or;
+ if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.xor) {
+ memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.xor;
+ if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.modtype.set) {
+ memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+ mb.modtype = gwj->mod.modtype.set;
+ if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
+ }
+
+ if (gwj->mod.csumfunc.crc8) {
+ if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+ &gwj->mod.csum.crc8) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + \
+ NLA_ALIGN(CGW_CS_CRC8_LEN);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+ &gwj->mod.csum.xor) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + \
+ NLA_ALIGN(CGW_CS_XOR_LEN);
+ }
+
+ if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
+
+ if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
+ if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
+ &gwj->ccgw.filter) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN +
+ NLA_ALIGN(sizeof(struct can_filter));
+ }
+
+ if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+
+ if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
+ goto cancel;
+ else
+ nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
+ }
+
+ return skb->len;
+
+cancel:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
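
/*
 * Worked example of the manual length accounting above: cgw_put_job()
 * never calls nlmsg_end(), so each attribute bumps nlmsg_len by hand.
 * For a u32 attribute, NLA_HDRLEN is 4 and NLA_ALIGN(sizeof(u32)) is 4,
 * so CGW_HANDLED adds 8 bytes in total; for a CGW_MOD_* attribute with
 * a packed 17-byte struct cgw_frame_mod (an assumption about the uapi
 * layout), NLA_ALIGN(17) is 20 and the attribute adds 24 bytes.
 */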
+
+/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
+static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n;
+ int idx = 0;
+ int s_idx = cb->args[0];
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
+ if (idx < s_idx)
+ goto cont;
+
+ if (cgw_put_job(skb, gwj) < 0)
+ break;
+cont:
+ idx++;
+ }
+ rcu_read_unlock();
+
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+/* check for common and gwtype specific attributes */
+static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
+ u8 gwtype, void *gwtypeattr)
+{
+ struct nlattr *tb[CGW_MAX+1];
+ struct cgw_frame_mod mb;
+ int modidx = 0;
+ int err = 0;
+
+ /* initialize modification & checksum data space */
+ memset(mod, 0, sizeof(*mod));
+
+ err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
+ if (err < 0)
+ return err;
+
+ /* check for AND/OR/XOR/SET modifications */
+
+ if (tb[CGW_MOD_AND] &&
+ nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.and, &mb.cf);
+ mod->modtype.and = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_and_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_and_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_and_data;
+ }
+
+ if (tb[CGW_MOD_OR] &&
+ nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.or, &mb.cf);
+ mod->modtype.or = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_or_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_or_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_or_data;
+ }
+
+ if (tb[CGW_MOD_XOR] &&
+ nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.xor, &mb.cf);
+ mod->modtype.xor = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_xor_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_xor_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_xor_data;
+ }
+
+ if (tb[CGW_MOD_SET] &&
+ nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
+ nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
+
+ canframecpy(&mod->modframe.set, &mb.cf);
+ mod->modtype.set = mb.modtype;
+
+ if (mb.modtype & CGW_MOD_ID)
+ mod->modfunc[modidx++] = mod_set_id;
+
+ if (mb.modtype & CGW_MOD_DLC)
+ mod->modfunc[modidx++] = mod_set_dlc;
+
+ if (mb.modtype & CGW_MOD_DATA)
+ mod->modfunc[modidx++] = mod_set_data;
+ }
+
+ /* check for checksum operations after CAN frame modifications */
+ if (modidx) {
+
+ if (tb[CGW_CS_CRC8] &&
+ nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
+
+ struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
+ nla_data(tb[CGW_CS_CRC8]);
+
+ err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
+ c->result_idx);
+ if (err)
+ return err;
+
+ nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
+ CGW_CS_CRC8_LEN);
+
+ /*
+ * select dedicated processing function to reduce
+ * runtime operations in receive hot path.
+ */
+ if (c->from_idx < 0 || c->to_idx < 0 ||
+ c->result_idx < 0)
+ mod->csumfunc.crc8 = cgw_csum_crc8_rel;
+ else if (c->from_idx <= c->to_idx)
+ mod->csumfunc.crc8 = cgw_csum_crc8_pos;
+ else
+ mod->csumfunc.crc8 = cgw_csum_crc8_neg;
+ }
+
+ if (tb[CGW_CS_XOR] &&
+ nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
+
+ struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
+ nla_data(tb[CGW_CS_XOR]);
+
+ err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
+ c->result_idx);
+ if (err)
+ return err;
+
+ nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
+ CGW_CS_XOR_LEN);
+
+ /*
+ * select dedicated processing function to reduce
+ * runtime operations in receive hot path.
+ */
+ if (c->from_idx < 0 || c->to_idx < 0 ||
+ c->result_idx < 0)
+ mod->csumfunc.xor = cgw_csum_xor_rel;
+ else if (c->from_idx <= c->to_idx)
+ mod->csumfunc.xor = cgw_csum_xor_pos;
+ else
+ mod->csumfunc.xor = cgw_csum_xor_neg;
+ }
+ }
+
+ if (gwtype == CGW_TYPE_CAN_CAN) {
+
+ /* check CGW_TYPE_CAN_CAN specific attributes */
+
+ struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
+ memset(ccgw, 0, sizeof(*ccgw));
+
+ /* check for can_filter in attributes */
+ if (tb[CGW_FILTER] &&
+ nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
+ nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
+ sizeof(struct can_filter));
+
+ err = -ENODEV;
+
+ /* specifying two interfaces is mandatory */
+ if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
+ return err;
+
+ if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
+ nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
+ sizeof(u32));
+
+ if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
+ nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
+ sizeof(u32));
+
+ /* both indices set to 0 for flushing all routing entries */
+ if (!ccgw->src_idx && !ccgw->dst_idx)
+ return 0;
+
+ /* only one index set to 0 is an error */
+ if (!ccgw->src_idx || !ccgw->dst_idx)
+ return err;
+ }
+
+ /* add the checks for other gwtypes here */
+
+ return 0;
+}
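
/*
 * Worked example of the checksum selector logic above: from_idx=0,
 * to_idx=3, result_idx=7 are all non-negative with from_idx <= to_idx,
 * so the forward walker cgw_csum_crc8_pos() is chosen and covers
 * data[0..3]; from_idx=3, to_idx=0 picks the backward walker
 * cgw_csum_crc8_neg(); any negative index selects cgw_csum_crc8_rel(),
 * which resolves the indices at run time (relative addressing, per the
 * _rel suffix).
 */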
+
+static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ void *arg)
+{
+ struct rtcanmsg *r;
+ struct cgw_job *gwj;
+ int err = 0;
+
+ if (nlmsg_len(nlh) < sizeof(*r))
+ return -EINVAL;
+
+ r = nlmsg_data(nlh);
+ if (r->can_family != AF_CAN)
+ return -EPFNOSUPPORT;
+
+ /* so far we only support CAN -> CAN routings */
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+ gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+ if (!gwj)
+ return -ENOMEM;
+
+ gwj->handled_frames = 0;
+ gwj->dropped_frames = 0;
+ gwj->flags = r->flags;
+ gwj->gwtype = r->gwtype;
+
+ err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
+ if (err < 0)
+ goto out;
+
+ err = -ENODEV;
+
+ /* ifindex == 0 is not allowed for job creation */
+ if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
+ goto out;
+
+ gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);
+
+ if (!gwj->src.dev)
+ goto out;
+
+	/* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
+ if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
+ goto put_src_out;
+
+ gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);
+
+ if (!gwj->dst.dev)
+ goto put_src_out;
+
+	/* check for CAN netdev not using header_ops - see can_can_gw_rcv() */
+ if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
+ goto put_src_dst_out;
+
+ ASSERT_RTNL();
+
+ err = cgw_register_filter(gwj);
+ if (!err)
+ hlist_add_head_rcu(&gwj->list, &cgw_list);
+
+put_src_dst_out:
+ dev_put(gwj->dst.dev);
+put_src_out:
+ dev_put(gwj->src.dev);
+out:
+ if (err)
+ kmem_cache_free(cgw_cache, gwj);
+
+ return err;
+}
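
/*
 * A minimal userspace sketch of the RTM_NEWROUTE request that
 * cgw_create_job() parses (assumptions: struct rtcanmsg and the CGW_*
 * ids come from a uapi header such as linux/can/gw.h added elsewhere in
 * this patch set; error handling and the sendmsg() plumbing are omitted):
 */
#include <linux/can/gw.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static void add_u32_attr(struct nlmsghdr *nlh, unsigned short type, __u32 val)
{
	struct nlattr *nla = (struct nlattr *)
		((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

	nla->nla_type = type;
	nla->nla_len = NLA_HDRLEN + sizeof(val);
	memcpy((char *)nla + NLA_HDRLEN, &val, sizeof(val));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + NLA_ALIGN(nla->nla_len);
}

static void build_cgw_add(char *buf, const char *src, const char *dst)
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
	struct rtcanmsg *rtcan;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*rtcan));
	nlh->nlmsg_type = RTM_NEWROUTE;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	rtcan = NLMSG_DATA(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = CGW_TYPE_CAN_CAN;
	rtcan->flags = 0;

	/* both interfaces are mandatory, see cgw_parse_attr() */
	add_u32_attr(nlh, CGW_SRC_IF, if_nametoindex(src));
	add_u32_attr(nlh, CGW_DST_IF, if_nametoindex(dst));
}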
+
+static void cgw_remove_all_jobs(void)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+
+ ASSERT_RTNL();
+
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ }
+}
+
+static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct cgw_job *gwj = NULL;
+ struct hlist_node *n, *nx;
+ struct rtcanmsg *r;
+ struct cf_mod mod;
+ struct can_can_gw ccgw;
+ int err = 0;
+
+ if (nlmsg_len(nlh) < sizeof(*r))
+ return -EINVAL;
+
+ r = nlmsg_data(nlh);
+ if (r->can_family != AF_CAN)
+ return -EPFNOSUPPORT;
+
+ /* so far we only support CAN -> CAN routings */
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
+ if (err < 0)
+ return err;
+
+ /* two interface indices both set to 0 => remove all entries */
+ if (!ccgw.src_idx && !ccgw.dst_idx) {
+ cgw_remove_all_jobs();
+ return 0;
+ }
+
+ err = -EINVAL;
+
+ ASSERT_RTNL();
+
+ /* remove only the first matching entry */
+ hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
+
+ if (gwj->flags != r->flags)
+ continue;
+
+ if (memcmp(&gwj->mod, &mod, sizeof(mod)))
+ continue;
+
+		/* gwtype CGW_TYPE_CAN_CAN is already verified at this point */
+ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+ continue;
+
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(gwj);
+ kfree(gwj);
+ err = 0;
+ break;
+ }
+
+ return err;
+}
+
+static __init int cgw_module_init(void)
+{
+ printk(banner);
+
+ cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
+ 0, 0, NULL);
+
+ if (!cgw_cache)
+ return -ENOMEM;
+
+ /* set notifier */
+ notifier.notifier_call = cgw_notifier;
+ register_netdevice_notifier(&notifier);
+
+ if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
+ unregister_netdevice_notifier(&notifier);
+ kmem_cache_destroy(cgw_cache);
+ return -ENOBUFS;
+ }
+
+ /* Only the first call to __rtnl_register can fail */
+ __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
+ __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);
+
+ return 0;
+}
+
+static __exit void cgw_module_exit(void)
+{
+ rtnl_unregister_all(PF_CAN);
+
+ unregister_netdevice_notifier(&notifier);
+
+ rtnl_lock();
+ cgw_remove_all_jobs();
+ rtnl_unlock();
+
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+ kmem_cache_destroy(cgw_cache);
+}
+
+module_init(cgw_module_init);
+module_exit(cgw_module_exit);
diff --git a/net/can/proc.c b/net/can/proc.c
index 0016f73..ba873c3 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/can/raw.c b/net/can/raw.c
index dea99a6..cde1b4a 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -37,8 +37,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
- * Send feedback to <socketcan-users@lists.berlios.de>
- *
*/
#include <linux/module.h>
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index be683f2..cc04dd6 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -27,3 +27,17 @@ config CEPH_LIB_PRETTYDEBUG
If unsure, say N.
+config CEPH_LIB_USE_DNS_RESOLVER
+ bool "Use in-kernel support for DNS lookup"
+ depends on CEPH_LIB
+ select DNS_RESOLVER
+ default n
+ help
+ If you say Y here, hostnames (e.g. monitor addresses) will
+ be resolved using the CONFIG_DNS_RESOLVER facility.
+
+ For information on how to use CONFIG_DNS_RESOLVER consult
+ Documentation/networking/dns_resolver.txt
+
+ If unsure, say N.
+
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 2883ea0..97f70e5 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -432,9 +432,12 @@ EXPORT_SYMBOL(ceph_client_id);
/*
* create a fresh client instance
*/
-struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
+ unsigned supported_features,
+ unsigned required_features)
{
struct ceph_client *client;
+ struct ceph_entity_addr *myaddr = NULL;
int err = -ENOMEM;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -449,15 +452,27 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
client->auth_err = 0;
client->extra_mon_dispatch = NULL;
- client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT;
- client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT;
-
- client->msgr = NULL;
+ client->supported_features = CEPH_FEATURE_SUPPORTED_DEFAULT |
+ supported_features;
+ client->required_features = CEPH_FEATURE_REQUIRED_DEFAULT |
+ required_features;
+
+ /* msgr */
+ if (ceph_test_opt(client, MYIP))
+ myaddr = &client->options->my_addr;
+ client->msgr = ceph_messenger_create(myaddr,
+ client->supported_features,
+ client->required_features);
+ if (IS_ERR(client->msgr)) {
+ err = PTR_ERR(client->msgr);
+ goto fail;
+ }
+ client->msgr->nocrc = ceph_test_opt(client, NOCRC);
/* subsystems */
err = ceph_monc_init(&client->monc, client);
if (err < 0)
- goto fail;
+ goto fail_msgr;
err = ceph_osdc_init(&client->osdc, client);
if (err < 0)
goto fail_monc;
@@ -466,6 +481,8 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
fail_monc:
ceph_monc_stop(&client->monc);
+fail_msgr:
+ ceph_messenger_destroy(client->msgr);
fail:
kfree(client);
return ERR_PTR(err);
@@ -490,8 +507,7 @@ void ceph_destroy_client(struct ceph_client *client)
ceph_debugfs_client_cleanup(client);
- if (client->msgr)
- ceph_messenger_destroy(client->msgr);
+ ceph_messenger_destroy(client->msgr);
ceph_destroy_options(client->options);
@@ -514,24 +530,9 @@ static int have_mon_and_osd_map(struct ceph_client *client)
*/
int __ceph_open_session(struct ceph_client *client, unsigned long started)
{
- struct ceph_entity_addr *myaddr = NULL;
int err;
unsigned long timeout = client->options->mount_timeout * HZ;
- /* initialize the messenger */
- if (client->msgr == NULL) {
- if (ceph_test_opt(client, MYIP))
- myaddr = &client->options->my_addr;
- client->msgr = ceph_messenger_create(myaddr,
- client->supported_features,
- client->required_features);
- if (IS_ERR(client->msgr)) {
- client->msgr = NULL;
- return PTR_ERR(client->msgr);
- }
- client->msgr->nocrc = ceph_test_opt(client, NOCRC);
- }
-
/* open session, and wait for mon and osd maps */
err = ceph_monc_open_session(&client->monc);
if (err < 0)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9918e9e..ad5b708 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -11,12 +11,14 @@
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/dns_resolver.h>
#include <net/tcp.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
+#include <linux/export.h>
/*
* Ceph uses the messenger to exchange ceph_msg messages with other
@@ -1078,6 +1080,101 @@ static void addr_set_port(struct sockaddr_storage *ss, int p)
}
/*
+ * Unlike the other *_pton functions, zero indicates success here.
+ */
+static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
+ char delim, const char **ipend)
+{
+ struct sockaddr_in *in4 = (void *)ss;
+ struct sockaddr_in6 *in6 = (void *)ss;
+
+ memset(ss, 0, sizeof(*ss));
+
+ if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
+ ss->ss_family = AF_INET;
+ return 0;
+ }
+
+ if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
+ ss->ss_family = AF_INET6;
+ return 0;
+ }
+
+ return -EINVAL;
+}
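
/*
 * Usage sketch: in4_pton()/in6_pton() return non-zero on success, but
 * ceph_pton() deliberately returns 0 / -EINVAL so the callers below can
 * chain it with the DNS fallback in -errno style, e.g.:
 *
 *	struct sockaddr_storage ss;
 *
 *	if (ceph_pton("10.1.2.3", 8, &ss, -1, NULL) == 0)
 *		dout("parsed, family %d\n", ss.ss_family);
 */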
+
+/*
+ * Extract hostname string and resolve using kernel DNS facility.
+ */
+#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
+static int ceph_dns_resolve_name(const char *name, size_t namelen,
+ struct sockaddr_storage *ss, char delim, const char **ipend)
+{
+ const char *end, *delim_p;
+ char *colon_p, *ip_addr = NULL;
+ int ip_len, ret;
+
+ /*
+	 * The hostname ends immediately before the delimiter or the port
+	 * marker (':'), whichever occurs first.
+ */
+ delim_p = memchr(name, delim, namelen);
+ colon_p = memchr(name, ':', namelen);
+
+ if (delim_p && colon_p)
+ end = delim_p < colon_p ? delim_p : colon_p;
+ else if (!delim_p && colon_p)
+ end = colon_p;
+ else {
+ end = delim_p;
+ if (!end) /* case: hostname:/ */
+ end = name + namelen;
+ }
+
+ if (end <= name)
+ return -EINVAL;
+
+ /* do dns_resolve upcall */
+ ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
+ if (ip_len > 0)
+ ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
+ else
+ ret = -ESRCH;
+
+ kfree(ip_addr);
+
+ *ipend = end;
+
+ pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
+ ret, ret ? "failed" : ceph_pr_addr(ss));
+
+ return ret;
+}
+#else
+static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
+ struct sockaddr_storage *ss, char delim, const char **ipend)
+{
+ return -EINVAL;
+}
+#endif
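
/*
 * Worked example of the end-of-hostname scan above, with delim == ',':
 * for "mon1.example.com:6789,mon2" the colon precedes the comma, so end
 * points at ':' and "mon1.example.com" is handed to dns_query(); for
 * "mon1.example.com,mon2" only delim_p matches and end points at ',';
 * for a bare "mon1.example.com" neither matches and end falls back to
 * name + namelen, resolving the whole string.
 */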
+
+/*
+ * Parse a server name (IP or hostname). If a valid IP address is not found
+ * then try to extract a hostname to resolve using userspace DNS upcall.
+ */
+static int ceph_parse_server_name(const char *name, size_t namelen,
+ struct sockaddr_storage *ss, char delim, const char **ipend)
+{
+ int ret;
+
+ ret = ceph_pton(name, namelen, ss, delim, ipend);
+ if (ret)
+ ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
+
+ return ret;
+}
+
+/*
* Parse an ip[:port] list into an addr array. Use the default
* monitor port if a port isn't specified.
*/
@@ -1085,15 +1182,13 @@ int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
int max_count, int *count)
{
- int i;
+ int i, ret = -EINVAL;
const char *p = c;
dout("parse_ips on '%.*s'\n", (int)(end-c), c);
for (i = 0; i < max_count; i++) {
const char *ipend;
struct sockaddr_storage *ss = &addr[i].in_addr;
- struct sockaddr_in *in4 = (void *)ss;
- struct sockaddr_in6 *in6 = (void *)ss;
int port;
char delim = ',';
@@ -1102,15 +1197,11 @@ int ceph_parse_ips(const char *c, const char *end,
p++;
}
- memset(ss, 0, sizeof(*ss));
- if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
- delim, &ipend))
- ss->ss_family = AF_INET;
- else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
- delim, &ipend))
- ss->ss_family = AF_INET6;
- else
+ ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
+ if (ret)
goto bad;
+ ret = -EINVAL;
+
p = ipend;
if (delim == ']') {
@@ -1155,7 +1246,7 @@ int ceph_parse_ips(const char *c, const char *end,
bad:
pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
- return -EINVAL;
+ return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
@@ -2281,7 +2372,8 @@ EXPORT_SYMBOL(ceph_con_keepalive);
* construct a new message with given type, size
* the new msg has a ref count of 1.
*/
-struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
+struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
+ bool can_fail)
{
struct ceph_msg *m;
@@ -2333,7 +2425,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
m->front.iov_base = kmalloc(front_len, flags);
}
if (m->front.iov_base == NULL) {
- pr_err("msg_new can't allocate %d bytes\n",
+ dout("ceph_msg_new can't allocate %d bytes\n",
front_len);
goto out2;
}
@@ -2348,7 +2440,14 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
out2:
ceph_msg_put(m);
out:
- pr_err("msg_new can't create type %d front %d\n", type, front_len);
+ if (!can_fail) {
+ pr_err("msg_new can't create type %d front %d\n", type,
+ front_len);
+ WARN_ON(1);
+ } else {
+ dout("msg_new can't create type %d front %d\n", type,
+ front_len);
+ }
return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
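
/*
 * Caller-side sketch of the new can_fail flag: paths that preallocate
 * or can recover pass true and only get a dout() on failure, while
 * paths that should never fail pass false and trigger the pr_err() and
 * WARN_ON(1) above:
 *
 *	msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, true);
 *	if (!msg)
 *		return -ENOMEM;		quiet failure path
 */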
@@ -2398,7 +2497,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
}
if (!msg) {
*skip = 0;
- msg = ceph_msg_new(type, front_len, GFP_NOFS);
+ msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
if (!msg) {
pr_err("unable to allocate msg type %d len %d\n",
type, front_len);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index cbe31fa..0b62dea 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -116,14 +116,12 @@ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
*/
static void __close_session(struct ceph_mon_client *monc)
{
- if (monc->con) {
- dout("__close_session closing mon%d\n", monc->cur_mon);
- ceph_con_revoke(monc->con, monc->m_auth);
- ceph_con_close(monc->con);
- monc->cur_mon = -1;
- monc->pending_auth = 0;
- ceph_auth_reset(monc->auth);
- }
+ dout("__close_session closing mon%d\n", monc->cur_mon);
+ ceph_con_revoke(monc->con, monc->m_auth);
+ ceph_con_close(monc->con);
+ monc->cur_mon = -1;
+ monc->pending_auth = 0;
+ ceph_auth_reset(monc->auth);
}
/*
@@ -302,15 +300,6 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
*/
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
- if (!monc->con) {
- monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
- if (!monc->con)
- return -ENOMEM;
- ceph_con_init(monc->client->msgr, monc->con);
- monc->con->private = monc;
- monc->con->ops = &mon_con_ops;
- }
-
mutex_lock(&monc->mutex);
__open_session(monc);
__schedule_delayed(monc);
@@ -528,10 +517,12 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
init_completion(&req->completion);
err = -ENOMEM;
- req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS);
+ req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
+ true);
if (!req->request)
goto out;
- req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS);
+ req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
+ true);
if (!req->reply)
goto out;
@@ -626,10 +617,12 @@ int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
init_completion(&req->completion);
err = -ENOMEM;
- req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS);
+ req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
+ true);
if (!req->request)
goto out;
- req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS);
+ req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
+ true);
if (!req->reply)
goto out;
@@ -755,13 +748,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
if (err)
goto out;
- monc->con = NULL;
+ /* connection */
+ monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
+ if (!monc->con)
+ goto out_monmap;
+ ceph_con_init(monc->client->msgr, monc->con);
+ monc->con->private = monc;
+ monc->con->ops = &mon_con_ops;
/* authentication */
monc->auth = ceph_auth_init(cl->options->name,
cl->options->key);
- if (IS_ERR(monc->auth))
- return PTR_ERR(monc->auth);
+ if (IS_ERR(monc->auth)) {
+ err = PTR_ERR(monc->auth);
+ goto out_con;
+ }
monc->auth->want_keys =
CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
@@ -770,19 +771,21 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
err = -ENOMEM;
monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
sizeof(struct ceph_mon_subscribe_ack),
- GFP_NOFS);
+ GFP_NOFS, true);
if (!monc->m_subscribe_ack)
- goto out_monmap;
+ goto out_auth;
- monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS);
+ monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
+ true);
if (!monc->m_subscribe)
goto out_subscribe_ack;
- monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS);
+ monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
+ true);
if (!monc->m_auth_reply)
goto out_subscribe;
- monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS);
+ monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
monc->pending_auth = 0;
if (!monc->m_auth)
goto out_auth_reply;
@@ -808,6 +811,10 @@ out_subscribe:
ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
ceph_msg_put(monc->m_subscribe_ack);
+out_auth:
+ ceph_auth_destroy(monc->auth);
+out_con:
+ monc->con->ops->put(monc->con);
out_monmap:
kfree(monc->monmap);
out:
@@ -822,11 +829,11 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
mutex_lock(&monc->mutex);
__close_session(monc);
- if (monc->con) {
- monc->con->private = NULL;
- monc->con->ops->put(monc->con);
- monc->con = NULL;
- }
+
+ monc->con->private = NULL;
+ monc->con->ops->put(monc->con);
+ monc->con = NULL;
+
mutex_unlock(&monc->mutex);
ceph_auth_destroy(monc->auth);
@@ -973,7 +980,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
- m = ceph_msg_new(type, front_len, GFP_NOFS);
+ m = ceph_msg_new(type, front_len, GFP_NOFS, false);
break;
}
@@ -1000,7 +1007,7 @@ static void mon_fault(struct ceph_connection *con)
if (!con->private)
goto out;
- if (monc->con && !monc->hunting)
+ if (!monc->hunting)
pr_info("mon%d %s session lost, "
"hunting for new mon\n", monc->cur_mon,
ceph_pr_addr(&monc->con->peer_addr.in_addr));
diff --git a/net/ceph/msgpool.c b/net/ceph/msgpool.c
index 1f4cb30..11d5f41 100644
--- a/net/ceph/msgpool.c
+++ b/net/ceph/msgpool.c
@@ -12,7 +12,7 @@ static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
struct ceph_msgpool *pool = arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+ msg = ceph_msg_new(0, pool->front_len, gfp_mask, true);
if (!msg) {
dout("msgpool_alloc %s failed\n", pool->name);
} else {
@@ -61,7 +61,7 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
WARN_ON(1);
/* try to alloc a fresh message */
- return ceph_msg_new(0, front_len, GFP_NOFS);
+ return ceph_msg_new(0, front_len, GFP_NOFS, false);
}
msg = mempool_alloc(pool->pool, GFP_NOFS);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 88ad8a2..733e4600 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -227,7 +227,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
else
msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
- OSD_OPREPLY_FRONT_LEN, gfp_flags);
+ OSD_OPREPLY_FRONT_LEN, gfp_flags, true);
if (!msg) {
ceph_osdc_put_request(req);
return NULL;
@@ -250,7 +250,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
if (use_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
+ msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
if (!msg) {
ceph_osdc_put_request(req);
return NULL;
@@ -943,7 +943,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
* Caller should hold map_sem for read and request_mutex.
*/
static int __map_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+ struct ceph_osd_request *req, int force_resend)
{
struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
struct ceph_pg pgid;
@@ -967,7 +967,8 @@ static int __map_request(struct ceph_osd_client *osdc,
num = err;
}
- if ((req->r_osd && req->r_osd->o_osd == o &&
+ if ((!force_resend &&
+ req->r_osd && req->r_osd->o_osd == o &&
req->r_sent >= req->r_osd->o_incarnation &&
req->r_num_pg_osds == num &&
memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
@@ -1289,18 +1290,18 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
*
* Caller should hold map_sem for read and request_mutex.
*/
-static void kick_requests(struct ceph_osd_client *osdc)
+static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
{
struct ceph_osd_request *req, *nreq;
struct rb_node *p;
int needmap = 0;
int err;
- dout("kick_requests\n");
+ dout("kick_requests %s\n", force_resend ? " (force resend)" : "");
mutex_lock(&osdc->request_mutex);
for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
req = rb_entry(p, struct ceph_osd_request, r_node);
- err = __map_request(osdc, req);
+ err = __map_request(osdc, req, force_resend);
if (err < 0)
continue; /* error */
if (req->r_osd == NULL) {
@@ -1318,7 +1319,7 @@ static void kick_requests(struct ceph_osd_client *osdc)
r_linger_item) {
dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
- err = __map_request(osdc, req);
+ err = __map_request(osdc, req, force_resend);
if (err == 0)
continue; /* no change and no osd was specified */
if (err < 0)
@@ -1395,7 +1396,7 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
ceph_osdmap_destroy(osdc->osdmap);
osdc->osdmap = newmap;
}
- kick_requests(osdc);
+ kick_requests(osdc, 0);
reset_changed_osds(osdc);
} else {
dout("ignoring incremental map %u len %d\n",
@@ -1423,6 +1424,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
"older than our %u\n", epoch, maplen,
osdc->osdmap->epoch);
} else {
+ int skipped_map = 0;
+
dout("taking full map %u len %d\n", epoch, maplen);
newmap = osdmap_decode(&p, p+maplen);
if (IS_ERR(newmap)) {
@@ -1432,9 +1435,12 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
BUG_ON(!newmap);
oldmap = osdc->osdmap;
osdc->osdmap = newmap;
- if (oldmap)
+ if (oldmap) {
+ if (oldmap->epoch + 1 < newmap->epoch)
+ skipped_map = 1;
ceph_osdmap_destroy(oldmap);
- kick_requests(osdc);
+ }
+ kick_requests(osdc, skipped_map);
}
p += maplen;
nr_maps--;
@@ -1707,7 +1713,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
 * the request hasn't been touched yet.
*/
if (req->r_sent == 0) {
- rc = __map_request(osdc, req);
+ rc = __map_request(osdc, req, 0);
if (rc < 0) {
if (nofail) {
dout("osdc_start_request failed map, "
@@ -2032,7 +2038,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
if (front > req->r_reply->front.iov_len) {
pr_warning("get_reply front %d > preallocated %d\n",
front, (int)req->r_reply->front.iov_len);
- m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS);
+ m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS, false);
if (!m)
goto out;
ceph_msg_put(req->r_reply);
@@ -2080,7 +2086,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_WATCH_NOTIFY:
- return ceph_msg_new(type, front, GFP_NOFS);
+ return ceph_msg_new(type, front, GFP_NOFS, false);
case CEPH_MSG_OSD_OPREPLY:
return get_reply(con, hdr, skip);
default:
diff --git a/net/compat.c b/net/compat.c
index c578d93..6def90e 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -22,6 +22,7 @@
#include <linux/filter.h>
#include <linux/compat.h>
#include <linux/security.h>
+#include <linux/export.h>
#include <net/scm.h>
#include <net/sock.h>
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 18ac112..68bbf9f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -324,15 +324,15 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -410,15 +410,15 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -500,15 +500,15 @@ int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
int err;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
@@ -585,16 +585,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
__wsum csum2;
int err = 0;
u8 *vaddr;
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
diff --git a/net/core/dev.c b/net/core/dev.c
index b10ff0a..6ba50a1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,10 @@
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
+#include <linux/net_tstamp.h>
#include "net-sysfs.h"
@@ -1474,6 +1478,57 @@ static inline void net_timestamp_check(struct sk_buff *skb)
__net_timestamp(skb);
}
+static int net_hwtstamp_validate(struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ enum hwtstamp_tx_types tx_type;
+ enum hwtstamp_rx_filters rx_filter;
+ int tx_type_valid = 0;
+ int rx_filter_valid = 0;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ tx_type = cfg.tx_type;
+ rx_filter = cfg.rx_filter;
+
+ switch (tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ tx_type_valid = 1;
+ break;
+ }
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ rx_filter_valid = 1;
+ break;
+ }
+
+ if (!tx_type_valid || !rx_filter_valid)
+ return -ERANGE;
+
+ return 0;
+}
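
/*
 * A minimal userspace sketch of the request this validates: the
 * hwtstamp_config travels via ifr_data in a SIOCSHWTSTAMP ioctl on any
 * socket fd (the values here are just an example configuration):
 */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int request_rx_timestamps(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.flags = 0,			/* reserved, must be zero */
		.tx_type = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* invalid tx_type/rx_filter values now fail early with -ERANGE */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}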
+
static inline bool is_skb_forwardable(struct net_device *dev,
struct sk_buff *skb)
{
@@ -1955,9 +2010,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
#ifdef CONFIG_HIGHMEM
int i;
if (!(dev->features & NETIF_F_HIGHDMA)) {
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (PageHighMem(skb_frag_page(frag)))
return 1;
+ }
}
if (PCI_DMA_BUS_IS_PHYS) {
@@ -1966,7 +2023,8 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
if (!pdev)
return 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t addr = page_to_phys(skb_frag_page(frag));
if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
return 1;
}
@@ -2527,25 +2585,31 @@ static inline void ____napi_schedule(struct softnet_data *sd,
/*
* __skb_get_rxhash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers. Returns a non-zero hash number on success
- * and 0 on failure.
+ * and src/dst port numbers. Sets rxhash in the skb to a non-zero hash
+ * value on success; zero indicates no valid hash. Also sets l4_rxhash
+ * in the skb if the hash is a canonical 4-tuple hash over transport
+ * ports.
*/
-__u32 __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_rxhash(struct sk_buff *skb)
{
int nhoff, hash = 0, poff;
const struct ipv6hdr *ip6;
const struct iphdr *ip;
+ const struct vlan_hdr *vlan;
u8 ip_proto;
- u32 addr1, addr2, ihl;
+ u32 addr1, addr2;
+ u16 proto;
union {
u32 v32;
u16 v16[2];
} ports;
nhoff = skb_network_offset(skb);
+ proto = skb->protocol;
- switch (skb->protocol) {
+again:
+ switch (proto) {
case __constant_htons(ETH_P_IP):
+ip:
if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
goto done;
@@ -2556,9 +2620,10 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
ip_proto = ip->protocol;
addr1 = (__force u32) ip->saddr;
addr2 = (__force u32) ip->daddr;
- ihl = ip->ihl;
+ nhoff += ip->ihl * 4;
break;
case __constant_htons(ETH_P_IPV6):
+ipv6:
if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
goto done;
@@ -2566,20 +2631,71 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
ip_proto = ip6->nexthdr;
addr1 = (__force u32) ip6->saddr.s6_addr32[3];
addr2 = (__force u32) ip6->daddr.s6_addr32[3];
- ihl = (40 >> 2);
+ nhoff += 40;
break;
+ case __constant_htons(ETH_P_8021Q):
+ if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
+ goto done;
+ vlan = (const struct vlan_hdr *) (skb->data + nhoff);
+ proto = vlan->h_vlan_encapsulated_proto;
+ nhoff += sizeof(*vlan);
+ goto again;
+ case __constant_htons(ETH_P_PPP_SES):
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
+ goto done;
+ proto = *((__be16 *) (skb->data + nhoff +
+ sizeof(struct pppoe_hdr)));
+ nhoff += PPPOE_SES_HLEN;
+ switch (proto) {
+ case __constant_htons(PPP_IP):
+ goto ip;
+ case __constant_htons(PPP_IPV6):
+ goto ipv6;
+ default:
+ goto done;
+ }
default:
goto done;
}
+ switch (ip_proto) {
+ case IPPROTO_GRE:
+ if (pskb_may_pull(skb, nhoff + 16)) {
+ u8 *h = skb->data + nhoff;
+ __be16 flags = *(__be16 *)h;
+
+ /*
+ * Only look inside GRE if version zero and no
+ * routing
+ */
+ if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
+ proto = *(__be16 *)(h + 2);
+ nhoff += 4;
+ if (flags & GRE_CSUM)
+ nhoff += 4;
+ if (flags & GRE_KEY)
+ nhoff += 4;
+ if (flags & GRE_SEQ)
+ nhoff += 4;
+ goto again;
+ }
+ }
+ break;
+ case IPPROTO_IPIP:
+ goto again;
+ default:
+ break;
+ }
+
ports.v32 = 0;
poff = proto_ports_offset(ip_proto);
if (poff >= 0) {
- nhoff += ihl * 4 + poff;
+ nhoff += poff;
if (pskb_may_pull(skb, nhoff + 4)) {
ports.v32 = * (__force u32 *) (skb->data + nhoff);
if (ports.v16[1] < ports.v16[0])
swap(ports.v16[0], ports.v16[1]);
+ skb->l4_rxhash = 1;
}
}
@@ -2592,7 +2708,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
hash = 1;
done:
- return hash;
+ skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
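
/*
 * Worked trace of the header peeling above, assuming an IPv4 packet
 * carried over PPPoE: skb->protocol is ETH_P_PPP_SES, so nhoff advances
 * past the PPPoE header, the inner PPP protocol (PPP_IP) redirects the
 * switch to the ip: label, and nhoff then moves past the IP header
 * (ihl * 4). The transport ports are finally read at nhoff + poff and
 * stored smaller-port-first, so both directions of a flow produce the
 * same rxhash, and l4_rxhash flags the canonical 4-tuple hash.
 */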
@@ -2606,10 +2722,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
- u16 tcpu;
-
- tcpu = rflow->cpu = next_cpu;
- if (tcpu != RPS_NO_CPU) {
+ if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
@@ -2637,16 +2750,16 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
goto out;
old_rflow = rflow;
rflow = &flow_table->flows[flow_id];
- rflow->cpu = next_cpu;
rflow->filter = rc;
if (old_rflow->filter == rflow->filter)
old_rflow->filter = RPS_NO_FILTER;
out:
#endif
rflow->last_qtail =
- per_cpu(softnet_data, tcpu).input_queue_head;
+ per_cpu(softnet_data, next_cpu).input_queue_head;
}
+ rflow->cpu = next_cpu;
return rflow;
}
@@ -2681,13 +2794,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
map = rcu_dereference(rxqueue->rps_map);
if (map) {
if (map->len == 1 &&
- !rcu_dereference_raw(rxqueue->rps_flow_table)) {
+ !rcu_access_pointer(rxqueue->rps_flow_table)) {
tcpu = map->cpus[0];
if (cpu_online(tcpu))
cpu = tcpu;
goto done;
}
- } else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
+ } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
goto done;
}
@@ -3102,8 +3215,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
{
ASSERT_RTNL();
- rcu_assign_pointer(dev->rx_handler, NULL);
- rcu_assign_pointer(dev->rx_handler_data, NULL);
+ RCU_INIT_POINTER(dev->rx_handler, NULL);
+ RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
@@ -3171,6 +3284,17 @@ ncls:
#endif
rx_handler = rcu_dereference(skb->dev->rx_handler);
+ if (vlan_tx_tag_present(skb)) {
+ if (pt_prev) {
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = NULL;
+ }
+ if (vlan_do_receive(&skb, !rx_handler))
+ goto another_round;
+ else if (unlikely(!skb))
+ goto out;
+ }
+
if (rx_handler) {
if (pt_prev) {
ret = deliver_skb(skb, pt_prev, orig_dev);
@@ -3190,18 +3314,6 @@ ncls:
}
}
- if (vlan_tx_tag_present(skb)) {
- if (pt_prev) {
- ret = deliver_skb(skb, pt_prev, orig_dev);
- pt_prev = NULL;
- }
- if (vlan_do_receive(&skb)) {
- ret = __netif_receive_skb(skb);
- goto out;
- } else if (unlikely(!skb))
- goto out;
- }
-
/* deliver only exact match when indicated */
null_or_dev = deliver_exact ? skb->dev : NULL;
@@ -3429,10 +3541,10 @@ pull:
skb->data_len -= grow;
skb_shinfo(skb)->frags[0].page_offset += grow;
- skb_shinfo(skb)->frags[0].size -= grow;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
- if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
- put_page(skb_shinfo(skb)->frags[0].page);
+ if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
+ skb_frag_unref(skb, 0);
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
@@ -3496,11 +3608,10 @@ void skb_gro_reset_offset(struct sk_buff *skb)
NAPI_GRO_CB(skb)->frag0_len = 0;
if (skb->mac_header == skb->tail &&
- !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
+ !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
NAPI_GRO_CB(skb)->frag0 =
- page_address(skb_shinfo(skb)->frags[0].page) +
- skb_shinfo(skb)->frags[0].page_offset;
- NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
+ skb_frag_address(&skb_shinfo(skb)->frags[0]);
+ NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
}
}
EXPORT_SYMBOL(skb_gro_reset_offset);
@@ -3982,6 +4093,60 @@ static int dev_ifconf(struct net *net, char __user *arg)
}
#ifdef CONFIG_PROC_FS
+
+#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
+
+struct dev_iter_state {
+ struct seq_net_private p;
+ unsigned int pos; /* bucket << BUCKET_SPACE + offset */
+};
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
+{
+ struct dev_iter_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct net_device *dev;
+ struct hlist_node *p;
+ struct hlist_head *h;
+ unsigned int count, bucket, offset;
+
+ bucket = get_bucket(state->pos);
+ offset = get_offset(state->pos);
+ h = &net->dev_name_head[bucket];
+ count = 0;
+ hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
+ if (count++ == offset) {
+ state->pos = set_bucket_offset(bucket, count);
+ return dev;
+ }
+ }
+
+ return NULL;
+}
+
+static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
+{
+ struct dev_iter_state *state = seq->private;
+ struct net_device *dev;
+ unsigned int bucket;
+
+ bucket = get_bucket(state->pos);
+ do {
+ dev = dev_from_same_bucket(seq);
+ if (dev)
+ return dev;
+
+ bucket++;
+ state->pos = set_bucket_offset(bucket, 0);
+ } while (bucket < NETDEV_HASHENTRIES);
+
+ return NULL;
+}
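
/*
 * Worked example of the position encoding above, assuming
 * NETDEV_HASHBITS == 8 (256 name-hash buckets): BUCKET_SPACE is 24, so
 * state->pos = (bucket << 24) | offset. For pos == 0x02000005,
 * get_bucket() yields 2 and get_offset() yields 5, i.e. the iterator
 * resumes at the sixth device hashed into bucket 2.
 */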
+
/*
* This is invoked by the /proc filesystem handler to display a device
* in detail.
@@ -3989,33 +4154,33 @@ static int dev_ifconf(struct net *net, char __user *arg)
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct net *net = seq_file_net(seq);
- loff_t off;
- struct net_device *dev;
+ struct dev_iter_state *state = seq->private;
rcu_read_lock();
if (!*pos)
return SEQ_START_TOKEN;
- off = 1;
- for_each_netdev_rcu(net, dev)
- if (off++ == *pos)
- return dev;
+ /* check for end of the hash */
+ if (state->pos == 0 && *pos > 1)
+ return NULL;
- return NULL;
+ return dev_from_new_bucket(seq);
}
void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct net_device *dev = v;
+ struct net_device *dev;
+
+ ++*pos;
if (v == SEQ_START_TOKEN)
- dev = first_net_device_rcu(seq_file_net(seq));
- else
- dev = next_net_device_rcu(dev);
+ return dev_from_new_bucket(seq);
- ++*pos;
- return dev;
+ dev = dev_from_same_bucket(seq);
+ if (dev)
+ return dev;
+
+ return dev_from_new_bucket(seq);
}
void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4114,7 +4279,7 @@ static const struct seq_operations dev_seq_ops = {
static int dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &dev_seq_ops,
- sizeof(struct seq_net_private));
+ sizeof(struct dev_iter_state));
}
static const struct file_operations dev_seq_fops = {
@@ -4497,9 +4662,7 @@ void __dev_set_rx_mode(struct net_device *dev)
if (!netif_device_present(dev))
return;
- if (ops->ndo_set_rx_mode)
- ops->ndo_set_rx_mode(dev);
- else {
+ if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
@@ -4510,10 +4673,10 @@ void __dev_set_rx_mode(struct net_device *dev)
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = false;
}
-
- if (ops->ndo_set_multicast_list)
- ops->ndo_set_multicast_list(dev);
}
+
+ if (ops->ndo_set_rx_mode)
+ ops->ndo_set_rx_mode(dev);
}
void dev_set_rx_mode(struct net_device *dev)
@@ -4524,30 +4687,6 @@ void dev_set_rx_mode(struct net_device *dev)
}
/**
- * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
- * @dev: device
- * @cmd: memory area for ethtool_ops::get_settings() result
- *
- * The cmd arg is initialized properly (cleared and
- * ethtool_cmd::cmd field set to ETHTOOL_GSET).
- *
- * Return device's ethtool_ops::get_settings() result value or
- * -EOPNOTSUPP when device doesn't expose
- * ethtool_ops::get_settings() operation.
- */
-int dev_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
- return -EOPNOTSUPP;
-
- memset(cmd, 0, sizeof(struct ethtool_cmd));
- cmd->cmd = ETHTOOL_GSET;
- return dev->ethtool_ops->get_settings(dev, cmd);
-}
-EXPORT_SYMBOL(dev_ethtool_get_settings);
-
-/**
* dev_get_flags - get flags reported to userspace
* @dev: device
*
@@ -4863,7 +5002,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return -EOPNOTSUPP;
case SIOCADDMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
@@ -4871,7 +5010,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
case SIOCDELMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ if (!ops->ndo_set_rx_mode ||
ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
return -EINVAL;
if (!netif_device_present(dev))
@@ -4888,6 +5027,12 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
ifr->ifr_newname[IFNAMSIZ-1] = '\0';
return dev_change_name(dev, ifr->ifr_newname);
+ case SIOCSHWTSTAMP:
+ err = net_hwtstamp_validate(ifr);
+ if (err)
+ return err;
+ /* fall through */
+
/*
* Unknown or private ioctl
*/
@@ -5202,7 +5347,7 @@ static void rollback_registered_many(struct list_head *head)
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- rcu_barrier();
+ synchronize_net();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
@@ -5715,6 +5860,12 @@ void netdev_run_todo(void)
__rtnl_unlock();
+ /* Wait for rcu callbacks to finish before attempting to drain
+ * the device list. This usually avoids a 250ms wait.
+ */
+ if (!list_empty(&list))
+ rcu_barrier();
+
while (!list_empty(&list)) {
struct net_device *dev
= list_first_entry(&list, struct net_device, todo_list);
@@ -5735,8 +5886,8 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(netdev_refcnt_read(dev));
- WARN_ON(rcu_dereference_raw(dev->ip_ptr));
- WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip_ptr));
+ WARN_ON(rcu_access_pointer(dev->ip6_ptr));
WARN_ON(dev->dn_ptr);
if (dev->destructor)
@@ -5940,7 +6091,7 @@ void free_netdev(struct net_device *dev)
kfree(dev->_rx);
#endif
- kfree(rcu_dereference_raw(dev->ingress_queue));
+ kfree(rcu_dereference_protected(dev->ingress_queue, 1));
/* Flush device addresses */
dev_addr_flush(dev);
@@ -6115,6 +6266,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
/*
* Flush the unicast and multicast chains
@@ -6298,7 +6450,7 @@ const char *netdev_drivername(const struct net_device *dev)
return empty;
}
-static int __netdev_printk(const char *level, const struct net_device *dev,
+int __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{
int r;
@@ -6313,6 +6465,7 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
return r;
}
+EXPORT_SYMBOL(__netdev_printk);
int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e6693..277faef 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -13,6 +13,7 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
@@ -591,8 +592,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
* addresses that have no users left. The source device must be
* locked by netif_tx_lock_bh.
*
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of layered software devices.
*/
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf..d5e2c4c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst_init_metrics(dst, dst_default_metrics, true);
dst->expires = 0UL;
dst->path = dst;
- dst->_neighbour = NULL;
+ RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
smp_rmb();
again:
- neigh = dst->_neighbour;
+ neigh = rcu_dereference_protected(dst->_neighbour, 1);
child = dst->child;
if (neigh) {
- dst->_neighbour = NULL;
+ RCU_INIT_POINTER(dst->_neighbour, NULL);
neigh_release(neigh);
}
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
if (!unregister) {
dst->input = dst->output = dst_discard;
} else {
+ struct neighbour *neigh;
+
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
- if (dst->_neighbour && dst->_neighbour->dev == dev) {
- dst->_neighbour->dev = dst->dev;
+ rcu_read_lock();
+ neigh = dst_get_neighbour(dst);
+ if (neigh && neigh->dev == dev) {
+ neigh->dev = dst->dev;
dev_hold(dst->dev);
dev_put(dev);
}
+ rcu_read_unlock();
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6cdba5f..f444817 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -569,15 +569,25 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
return 0;
}
-static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
- int err;
+ ASSERT_RTNL();
- if (!dev->ethtool_ops->get_settings)
+ if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
- err = dev->ethtool_ops->get_settings(dev, &cmd);
+ memset(cmd, 0, sizeof(struct ethtool_cmd));
+ cmd->cmd = ETHTOOL_GSET;
+ return dev->ethtool_ops->get_settings(dev, cmd);
+}
+EXPORT_SYMBOL(__ethtool_get_settings);
+
+static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+ int err;
+ struct ethtool_cmd cmd;
+
+ err = __ethtool_get_settings(dev, &cmd);
if (err < 0)
return err;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 27071ee..c02e63c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
@@ -490,7 +491,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
if (ops->nr_goto_rules > 0) {
list_for_each_entry(tmp, &ops->rules_list, list) {
if (rtnl_dereference(tmp->ctarget) == rule) {
- rcu_assign_pointer(tmp->ctarget, NULL);
+ RCU_INIT_POINTER(tmp->ctarget, NULL);
ops->unresolved_rules++;
}
}
@@ -548,7 +549,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->flags = rule->flags;
if (rule->action == FR_ACT_GOTO &&
- rcu_dereference_raw(rule->ctarget) == NULL)
+ rcu_access_pointer(rule->ctarget) == NULL)
frh->flags |= FIB_RULE_UNRESOLVED;
if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975f..5dea452 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -436,7 +436,7 @@ error:
*
* Returns 0 if the rule set is legal or -EINVAL if not.
*/
-int sk_chk_filter(struct sock_filter *filter, int flen)
+int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
/*
* Valid instructions are initialized to non-0.
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
filter = rcu_dereference_protected(sk->sk_filter,
sock_owned_by_user(sk));
if (filter) {
- rcu_assign_pointer(sk->sk_filter, NULL);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
ret = 0;
}
diff --git a/net/core/flow.c b/net/core/flow.c
index 555a456..8ae42de 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -413,7 +413,7 @@ static int __init flow_cache_init(struct flow_cache *fc)
for_each_online_cpu(i) {
if (flow_cache_cpu_prepare(fc, i))
- return -ENOMEM;
+ goto err;
}
fc->hotcpu_notifier = (struct notifier_block){
.notifier_call = flow_cache_cpu,
@@ -426,6 +426,18 @@ static int __init flow_cache_init(struct flow_cache *fc)
add_timer(&fc->rnd_timer);
return 0;
+
+err:
+ for_each_possible_cpu(i) {
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
+ kfree(fcp->hash_table);
+ fcp->hash_table = NULL;
+ }
+
+ free_percpu(fc->percpu);
+ fc->percpu = NULL;
+
+ return -ENOMEM;
}
static int __init flow_cache_init_global(void)
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
index 283c2b99..81e1ed7 100644
--- a/net/core/kmap_skb.h
+++ b/net/core/kmap_skb.h
@@ -7,7 +7,7 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
local_bh_disable();
#endif
- return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+ return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ);
}
static inline void kunmap_skb_frag(void *vaddr)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 357bd4e..c3519c6 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -78,8 +78,13 @@ static void rfc2863_policy(struct net_device *dev)
static bool linkwatch_urgent_event(struct net_device *dev)
{
- return netif_running(dev) && netif_carrier_ok(dev) &&
- qdisc_tx_changing(dev);
+ if (!netif_running(dev))
+ return false;
+
+ if (dev->ifindex != dev->iflink)
+ return true;
+
+ return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1334d7e..039d51e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
skb_queue_purge(&neigh->arp_queue);
}
+static void neigh_probe(struct neighbour *neigh)
+ __releases(neigh->lock)
+{
+ struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ /* keep skb alive even if arp_queue overflows */
+ if (skb)
+ skb = skb_copy(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ kfree_skb(skb);
+}
+
/* Called when a timer expires for a neighbour entry. */
static void neigh_timer_handler(unsigned long arg)
@@ -859,12 +872,8 @@ static void neigh_timer_handler(unsigned long arg)
now = jiffies;
next = now + HZ;
- if (!(state & NUD_IN_TIMER)) {
-#ifndef CONFIG_SMP
- printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
-#endif
+ if (!(state & NUD_IN_TIMER))
goto out;
- }
if (state & NUD_REACHABLE) {
if (time_before_eq(now,
@@ -920,14 +929,7 @@ static void neigh_timer_handler(unsigned long arg)
neigh_hold(neigh);
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
- struct sk_buff *skb = skb_peek(&neigh->arp_queue);
- /* keep skb alive even if arp_queue overflows */
- if (skb)
- skb = skb_copy(skb, GFP_ATOMIC);
- write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
- atomic_inc(&neigh->probes);
- kfree_skb(skb);
+ neigh_probe(neigh);
} else {
out:
write_unlock(&neigh->lock);
@@ -942,7 +944,7 @@ out:
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
int rc;
- unsigned long now;
+ bool immediate_probe = false;
write_lock_bh(&neigh->lock);
@@ -950,14 +952,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
goto out_unlock_bh;
- now = jiffies;
-
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ unsigned long next, now = jiffies;
+
atomic_set(&neigh->probes, neigh->parms->ucast_probes);
neigh->nud_state = NUD_INCOMPLETE;
- neigh->updated = jiffies;
- neigh_add_timer(neigh, now + 1);
+ neigh->updated = now;
+ next = now + max(neigh->parms->retrans_time, HZ/2);
+ neigh_add_timer(neigh, next);
+ immediate_probe = true;
} else {
neigh->nud_state = NUD_FAILED;
neigh->updated = jiffies;
@@ -989,7 +993,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
rc = 1;
}
out_unlock_bh:
- write_unlock_bh(&neigh->lock);
+ if (immediate_probe)
+ neigh_probe(neigh);
+ else
+ write_unlock(&neigh->lock);
+ local_bh_enable();
return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
@@ -1156,10 +1164,14 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n2, *n1 = neigh;
write_unlock_bh(&neigh->lock);
+
+ rcu_read_lock();
/* On shaper/eql skb->dst->neighbour != neigh :( */
if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
n1 = n2;
n1->output(n1, skb);
+ rcu_read_unlock();
+
write_lock_bh(&neigh->lock);
}
skb_queue_purge(&neigh->arp_queue);
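
/*
 * The exit-path shape introduced above, as a standalone sketch (names
 * are illustrative; neigh_probe() itself is file-local to
 * net/core/neighbour.c). write_lock_bh() is taken once; neigh_probe()
 * is annotated __releases(neigh->lock) and drops the write lock itself,
 * so both exits leave only the BH-disable half to undo.
 */
#include <linux/netdevice.h>
#include <net/neighbour.h>

static int example_event_send(struct neighbour *neigh, bool immediate_probe)
{
	write_lock_bh(&neigh->lock);
	/* ... state-machine work that may set immediate_probe ... */
	if (immediate_probe)
		neigh_probe(neigh);		/* releases neigh->lock */
	else
		write_unlock(&neigh->lock);
	local_bh_enable();			/* pairs with write_lock_bh() */
	return 0;
}
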
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5d..c71c434 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -20,6 +20,7 @@
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <net/wext.h>
#include "net-sysfs.h"
@@ -147,7 +148,7 @@ static ssize_t show_speed(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
- if (!dev_ethtool_get_settings(netdev, &cmd))
+ if (!__ethtool_get_settings(netdev, &cmd))
ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
}
rtnl_unlock();
@@ -165,7 +166,7 @@ static ssize_t show_duplex(struct device *dev,
if (netif_running(netdev)) {
struct ethtool_cmd cmd;
- if (!dev_ethtool_get_settings(netdev, &cmd))
+ if (!__ethtool_get_settings(netdev, &cmd))
ret = sprintf(buf, "%s\n",
cmd.duplex ? "full" : "half");
}
@@ -712,13 +713,13 @@ static void rx_queue_release(struct kobject *kobj)
struct rps_dev_flow_table *flow_table;
- map = rcu_dereference_raw(queue->rps_map);
+ map = rcu_dereference_protected(queue->rps_map, 1);
if (map) {
RCU_INIT_POINTER(queue->rps_map, NULL);
kfree_rcu(map, rcu);
}
- flow_table = rcu_dereference_raw(queue->rps_flow_table);
+ flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
if (flow_table) {
RCU_INIT_POINTER(queue->rps_flow_table, NULL);
call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
@@ -987,10 +988,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
}
if (nonempty)
- rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+ RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
else {
kfree(new_dev_maps);
- rcu_assign_pointer(dev->xps_maps, NULL);
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
}
if (dev_maps)
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 52380b1..ba3c012 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -11,6 +11,7 @@
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 5bbdbf0..aefcd7a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -10,6 +10,7 @@
#include <linux/nsproxy.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
diff --git a/net/core/netevent.c b/net/core/netevent.c
index 865f0ce..f17ccd2 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -15,6 +15,7 @@
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
+#include <linux/export.h>
#include <net/netevent.h>
static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 5262251..cf64c1f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -23,6 +23,7 @@
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
@@ -762,7 +763,7 @@ int __netpoll_setup(struct netpoll *np)
}
/* last thing to do is link it to the net device structure */
- rcu_assign_pointer(ndev->npinfo, npinfo);
+ RCU_INIT_POINTER(ndev->npinfo, npinfo);
return 0;
@@ -903,7 +904,7 @@ void __netpoll_cleanup(struct netpoll *np)
if (ops->ndo_netpoll_cleanup)
ops->ndo_netpoll_cleanup(np->dev);
- rcu_assign_pointer(np->dev->npinfo, NULL);
+ RCU_INIT_POINTER(np->dev->npinfo, NULL);
/* avoid racing with NAPI reading npinfo */
synchronize_rcu_bh();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index e35a6fb..0001c24 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2145,9 +2145,12 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
}
start_time = ktime_now();
- if (remaining < 100000)
- ndelay(remaining); /* really small just spin */
- else {
+ if (remaining < 100000) {
+ /* for small delays (<100us), just loop until limit is reached */
+ do {
+ end_time = ktime_now();
+ } while (ktime_lt(end_time, spin_until));
+ } else {
/* see do_nanosleep */
hrtimer_init_sleeper(&t, current);
do {
@@ -2162,8 +2165,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
hrtimer_cancel(&t.timer);
} while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING);
+ end_time = ktime_now();
}
- end_time = ktime_now();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
@@ -2602,18 +2605,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
if (!pkt_dev->page)
break;
}
- skb_shinfo(skb)->frags[i].page = pkt_dev->page;
get_page(pkt_dev->page);
+ skb_frag_set_page(skb, i, pkt_dev->page);
skb_shinfo(skb)->frags[i].page_offset = 0;
/* last fragment, fill rest of data */
if (i == (frags - 1))
- skb_shinfo(skb)->frags[i].size =
- (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i],
+ (datalen < PAGE_SIZE ? datalen : PAGE_SIZE));
else
- skb_shinfo(skb)->frags[i].size = frag_len;
- datalen -= skb_shinfo(skb)->frags[i].size;
- skb->len += skb_shinfo(skb)->frags[i].size;
- skb->data_len += skb_shinfo(skb)->frags[i].size;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len);
+ datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
i++;
skb_shinfo(skb)->nr_frags = i;
}
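
/*
 * A minimal sketch of the accessor style pktgen is converted to above:
 * instead of poking skb_shinfo(skb)->frags[i].page/.size directly, the
 * frag is filled through helpers (example_attach_frag is illustrative).
 */
#include <linux/skbuff.h>
#include <linux/mm.h>

static void example_attach_frag(struct sk_buff *skb, int i,
				struct page *page, unsigned int len)
{
	get_page(page);			/* the frag holds its own reference */
	skb_frag_set_page(skb, i, page);
	skb_shinfo(skb)->frags[i].page_offset = 0;
	skb_frag_size_set(&skb_shinfo(skb)->frags[i], len);

	skb->len += len;
	skb->data_len += len;
	skb_shinfo(skb)->nr_frags = i + 1;
}
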
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e95..9083e82 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -731,7 +731,8 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev)
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
nla_total_size(sizeof(struct ifla_vf_vlan)) +
- nla_total_size(sizeof(struct ifla_vf_tx_rate)));
+ nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
+ nla_total_size(sizeof(struct ifla_vf_spoofchk)));
return size;
} else
return 0;
@@ -954,13 +955,27 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_tx_rate vf_tx_rate;
+ struct ifla_vf_spoofchk vf_spoofchk;
+
+ /*
+ * Not all SR-IOV capable drivers support the
+ * spoofcheck query. Preset to -1 so the user
+ * space tool can detect that the driver didn't
+ * report anything.
+ */
+ ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
- vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf;
+ vf_mac.vf =
+ vf_vlan.vf =
+ vf_tx_rate.vf =
+ vf_spoofchk.vf = ivi.vf;
+
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
+ vf_spoofchk.setting = ivi.spoofchk;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
@@ -968,7 +983,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
}
NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac);
NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan);
- NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate);
+ NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+ &vf_tx_rate);
+ NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+ &vf_spoofchk);
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
@@ -1202,6 +1220,15 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
ivt->rate);
break;
}
+ case IFLA_VF_SPOOFCHK: {
+ struct ifla_vf_spoofchk *ivs;
+ ivs = nla_data(vf);
+ err = -EOPNOTSUPP;
+ if (ops->ndo_set_vf_spoofchk)
+ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+ ivs->setting);
+ break;
+ }
default:
err = -EINVAL;
break;
@@ -1604,7 +1631,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
- dev->real_num_tx_queues = real_num_queues;
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
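
/*
 * Hypothetical driver stub for the callback wired up above: the
 * rtnetlink core invokes ndo_set_vf_spoofchk() when userspace sends an
 * IFLA_VF_SPOOFCHK attribute and returns -EOPNOTSUPP when a driver
 * leaves it unset. Everything below is an illustrative sketch, not a
 * real driver.
 */
#include <linux/netdevice.h>

static int example_set_vf_spoofchk(struct net_device *dev, int vf,
				   bool setting)
{
	/* validate the VF index and program the NIC anti-spoof filter */
	return 0;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_set_vf_spoofchk	= example_set_vf_spoofchk,
};
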
diff --git a/net/core/scm.c b/net/core/scm.c
index 811b53f..ff52ad0 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -173,7 +173,7 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
if (err)
goto error;
- if (pid_vnr(p->pid) != p->creds.pid) {
+ if (!p->pid || pid_vnr(p->pid) != p->creds.pid) {
struct pid *pid;
err = -ESRCH;
pid = find_get_pid(p->creds.pid);
@@ -183,8 +183,9 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
p->pid = pid;
}
- if ((p->cred->euid != p->creds.uid) ||
- (p->cred->egid != p->creds.gid)) {
+ if (!p->cred ||
+ (p->cred->euid != p->creds.uid) ||
+ (p->cred->egid != p->creds.gid)) {
struct cred *cred;
err = -ENOMEM;
cred = prepare_creds();
@@ -193,7 +194,8 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
cred->uid = cred->euid = p->creds.uid;
cred->gid = cred->egid = p->creds.gid;
- put_cred(p->cred);
+ if (p->cred)
+ put_cred(p->cred);
p->cred = cred;
}
break;
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 45329d7..025233d 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq)
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport)
{
u32 secret[MD5_MESSAGE_BYTES / 4];
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 387703f..18a3ceb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -184,11 +184,21 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
goto out;
prefetchw(skb);
+ /* We do our best to align skb_shared_info on a separate cache
+ * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
+ * aligned memory blocks, unless SLUB/SLAB debug is enabled.
+ * Both skb->head and skb_shared_info are cache line aligned.
+ */
size = SKB_DATA_ALIGN(size);
- data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
- gfp_mask, node);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc_node_track_caller(size, gfp_mask, node);
if (!data)
goto nodata;
+ /* kmalloc(size) might give us more room than requested.
+ * Put skb_shared_info exactly at the end of the allocated zone,
+ * to allow the maximum possible filling before reallocation.
+ */
+ size = SKB_WITH_OVERHEAD(ksize(data));
prefetchw(data + size);
/*
@@ -197,7 +207,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->truesize = size + sizeof(struct sk_buff);
+ /* Account for allocated memory: skb + skb->head */
+ skb->truesize = SKB_TRUESIZE(size);
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -326,7 +337,7 @@ static void skb_release_data(struct sk_buff *skb)
if (skb_shinfo(skb)->nr_frags) {
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
}
/*
@@ -475,6 +486,30 @@ void consume_skb(struct sk_buff *skb)
EXPORT_SYMBOL(consume_skb);
/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+ struct skb_shared_info *shinfo;
+
+ skb_release_head_state(skb);
+
+ shinfo = skb_shinfo(skb);
+ memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+ atomic_set(&shinfo->dataref, 1);
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb->data = skb->head + NET_SKB_PAD;
+ skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
* skb_recycle_check - check if skb can be reused for receive
* @skb: buffer
* @skb_size: minimum receive buffer size
@@ -488,33 +523,10 @@ EXPORT_SYMBOL(consume_skb);
*/
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
- struct skb_shared_info *shinfo;
-
- if (irqs_disabled())
- return false;
-
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
- return false;
-
- if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ if (!skb_is_recycleable(skb, skb_size))
return false;
- skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
- if (skb_end_pointer(skb) - skb->head < skb_size)
- return false;
-
- if (skb_shared(skb) || skb_cloned(skb))
- return false;
-
- skb_release_head_state(skb);
-
- shinfo = skb_shinfo(skb);
- memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
- atomic_set(&shinfo->dataref, 1);
-
- memset(skb, 0, offsetof(struct sk_buff, tail));
- skb->data = skb->head + NET_SKB_PAD;
- skb_reset_tail_pointer(skb);
+ skb_recycle(skb);
return true;
}
@@ -529,6 +541,8 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->mac_header = old->mac_header;
skb_dst_copy(new, old);
new->rxhash = old->rxhash;
+ new->ooo_okay = old->ooo_okay;
+ new->l4_rxhash = old->l4_rxhash;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
@@ -647,7 +661,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
memcpy(page_address(page),
- vaddr + f->page_offset, f->size);
+ vaddr + f->page_offset, skb_frag_size(f));
kunmap_skb_frag(vaddr);
page->private = (unsigned long)head;
head = page;
@@ -655,14 +669,14 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags release userspace buffers */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
uarg->callback(uarg);
/* skb frags point to kernel buffers */
for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
- skb_shinfo(skb)->frags[i - 1].page_offset = 0;
- skb_shinfo(skb)->frags[i - 1].page = head;
+ __skb_fill_page_desc(skb, i-1, head, 0,
+ skb_shinfo(skb)->frags[i - 1].size);
head = (struct page *)head->private;
}
@@ -820,7 +834,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
- get_page(skb_shinfo(n)->frags[i].page);
+ skb_frag_ref(skb, i);
}
skb_shinfo(n)->nr_frags = i;
}
@@ -911,7 +925,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nofrags;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_ref(skb, i);
if (skb_has_frag_list(skb))
skb_clone_fraglist(skb);
@@ -1178,20 +1192,20 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
goto drop_pages;
for (; i < nfrags; i++) {
- int end = offset + skb_shinfo(skb)->frags[i].size;
+ int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (end < len) {
offset = end;
continue;
}
- skb_shinfo(skb)->frags[i++].size = len - offset;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
drop_pages:
skb_shinfo(skb)->nr_frags = i;
for (; i < nfrags; i++)
- put_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_unref(skb, i);
if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
@@ -1294,9 +1308,11 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Estimate size of pulled pages. */
eat = delta;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size >= eat)
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size >= eat)
goto pull_pages;
- eat -= skb_shinfo(skb)->frags[i].size;
+ eat -= size;
}
/* If we need update frag list, we are in troubles.
@@ -1359,14 +1375,16 @@ pull_pages:
eat = delta;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size <= eat) {
- put_page(skb_shinfo(skb)->frags[i].page);
- eat -= skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size <= eat) {
+ skb_frag_unref(skb, i);
+ eat -= size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
if (eat) {
skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_shinfo(skb)->frags[k].size -= eat;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
eat = 0;
}
k++;
@@ -1421,7 +1439,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1619,7 +1637,8 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
- if (__splice_segment(f->page, f->page_offset, f->size,
+ if (__splice_segment(skb_frag_page(f),
+ f->page_offset, skb_frag_size(f),
offset, len, skb, spd, 0, sk, pipe))
return 1;
}
@@ -1729,7 +1748,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
WARN_ON(start > offset + len);
- end = start + frag->size;
+ end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
u8 *vaddr;
@@ -1802,7 +1821,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -1877,7 +1896,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
__wsum csum2;
u8 *vaddr;
@@ -2150,7 +2169,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
skb->data_len = len - pos;
for (i = 0; i < nfrags; i++) {
- int size = skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
if (pos + size > len) {
skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
@@ -2164,10 +2183,10 @@ static inline void skb_split_no_header(struct sk_buff *skb,
* where splitting is expensive.
 * 2. Split accurately. This is what we do.
*/
- get_page(skb_shinfo(skb)->frags[i].page);
+ skb_frag_ref(skb, i);
skb_shinfo(skb1)->frags[0].page_offset += len - pos;
- skb_shinfo(skb1)->frags[0].size -= len - pos;
- skb_shinfo(skb)->frags[i].size = len - pos;
+ skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
skb_shinfo(skb)->nr_frags++;
}
k++;
@@ -2239,12 +2258,13 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
* commit all, so that we don't have to undo partial changes
*/
if (!to ||
- !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
+ !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
+ fragfrom->page_offset)) {
merge = -1;
} else {
merge = to - 1;
- todo -= fragfrom->size;
+ todo -= skb_frag_size(fragfrom);
if (todo < 0) {
if (skb_prepare_for_shift(skb) ||
skb_prepare_for_shift(tgt))
@@ -2254,8 +2274,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[merge];
- fragto->size += shiftlen;
- fragfrom->size -= shiftlen;
+ skb_frag_size_add(fragto, shiftlen);
+ skb_frag_size_sub(fragfrom, shiftlen);
fragfrom->page_offset += shiftlen;
goto onlymerged;
@@ -2279,20 +2299,20 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[from];
fragto = &skb_shinfo(tgt)->frags[to];
- if (todo >= fragfrom->size) {
+ if (todo >= skb_frag_size(fragfrom)) {
*fragto = *fragfrom;
- todo -= fragfrom->size;
+ todo -= skb_frag_size(fragfrom);
from++;
to++;
} else {
- get_page(fragfrom->page);
+ __skb_frag_ref(fragfrom);
fragto->page = fragfrom->page;
fragto->page_offset = fragfrom->page_offset;
- fragto->size = todo;
+ skb_frag_size_set(fragto, todo);
fragfrom->page_offset += todo;
- fragfrom->size -= todo;
+ skb_frag_size_sub(fragfrom, todo);
todo = 0;
to++;
@@ -2307,8 +2327,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
fragfrom = &skb_shinfo(skb)->frags[0];
fragto = &skb_shinfo(tgt)->frags[merge];
- fragto->size += fragfrom->size;
- put_page(fragfrom->page);
+ skb_frag_size_add(fragto, skb_frag_size(fragfrom));
+ __skb_frag_unref(fragfrom);
}
/* Reposition in the original skb */
@@ -2405,7 +2425,7 @@ next_skb:
while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
- block_limit = frag->size + st->stepped_offset;
+ block_limit = skb_frag_size(frag) + st->stepped_offset;
if (abs_offset < block_limit) {
if (!st->frag_data)
@@ -2423,7 +2443,7 @@ next_skb:
}
st->frag_idx++;
- st->stepped_offset += frag->size;
+ st->stepped_offset += skb_frag_size(frag);
}
if (st->frag_data) {
@@ -2553,14 +2573,13 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
left = PAGE_SIZE - frag->page_offset;
copy = (length > left)? left : length;
- ret = getfrag(from, (page_address(frag->page) +
- frag->page_offset + frag->size),
+ ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
offset, copy, 0, skb);
if (ret < 0)
return -EFAULT;
/* copy was successful so update the size parameters */
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
offset += copy;
@@ -2706,12 +2725,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
while (pos < offset + len && i < nfrags) {
*frag = skb_shinfo(skb)->frags[i];
- get_page(frag->page);
- size = frag->size;
+ __skb_frag_ref(frag);
+ size = skb_frag_size(frag);
if (pos < offset) {
frag->page_offset += offset - pos;
- frag->size -= offset - pos;
+ skb_frag_size_sub(frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
@@ -2720,7 +2739,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
i++;
pos += size;
} else {
- frag->size -= pos + size - (offset + len);
+ skb_frag_size_sub(frag, pos + size - (offset + len));
goto skip_fraglist;
}
@@ -2800,7 +2819,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
} while (--i);
frag->page_offset += offset;
- frag->size -= offset;
+ skb_frag_size_sub(frag, offset);
skb->truesize -= skb->data_len;
skb->len -= skb->data_len;
@@ -2852,7 +2871,7 @@ merge:
unsigned int eat = offset - headlen;
skbinfo->frags[0].page_offset += eat;
- skbinfo->frags[0].size -= eat;
+ skb_frag_size_sub(&skbinfo->frags[0], eat);
skb->data_len -= eat;
skb->len -= eat;
offset = headlen;
@@ -2923,13 +2942,13 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (copy > len)
copy = len;
- sg_set_page(&sg[elt], frag->page, copy,
+ sg_set_page(&sg[elt], skb_frag_page(frag), copy,
frag->page_offset+offset-start);
elt++;
if (!(len -= copy))
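
/*
 * Sketch (assumed driver context, illustrative names) of how a receive
 * path might use the split API above: skb_recycle_check() decides
 * whether the completed skb is reusable and, via skb_recycle(), resets
 * it as if it had just come from __alloc_skb(); otherwise fall back to
 * a fresh allocation.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *example_get_rx_skb(struct net_device *dev,
					  struct sk_buff *done_skb,
					  unsigned int bufsize)
{
	if (done_skb && skb_recycle_check(done_skb, bufsize))
		return done_skb;	/* reset in place, ready to refill */

	return netdev_alloc_skb(dev, bufsize);
}
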
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d0..4ed7b1d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -207,7 +207,7 @@ static struct lock_class_key af_callback_keys[AF_MAX];
* not depend upon such differences.
*/
#define _SK_MEM_PACKETS 256
-#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
+#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
- rcu_assign_pointer(sk->sk_dst_cache, NULL);
+ RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
}
@@ -738,10 +738,7 @@ set_rcvbuf:
/* We implement the SO_SNDLOWAT etc to
not be settable (1003.1g 5.3) */
case SO_RXQ_OVFL:
- if (valbool)
- sock_set_flag(sk, SOCK_RXQ_OVFL);
- else
- sock_reset_flag(sk, SOCK_RXQ_OVFL);
+ sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
break;
default:
ret = -ENOPROTOOPT;
@@ -1158,7 +1155,7 @@ static void __sk_free(struct sock *sk)
atomic_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
- rcu_assign_pointer(sk->sk_filter, NULL);
+ RCU_INIT_POINTER(sk->sk_filter, NULL);
}
sock_disable_timestamp(sk, SOCK_TIMESTAMP);
@@ -1260,6 +1257,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
+ bh_unlock_sock(newsk);
sk_free(newsk);
newsk = NULL;
goto out;
@@ -1533,7 +1531,6 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
skb_shinfo(skb)->nr_frags = npages;
for (i = 0; i < npages; i++) {
struct page *page;
- skb_frag_t *frag;
page = alloc_pages(sk->sk_allocation, 0);
if (!page) {
@@ -1543,12 +1540,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;
}
- frag = &skb_shinfo(skb)->frags[i];
- frag->page = page;
- frag->page_offset = 0;
- frag->size = (data_len >= PAGE_SIZE ?
- PAGE_SIZE :
- data_len);
+ __skb_fill_page_desc(skb, i,
+ page, 0,
+ (data_len >= PAGE_SIZE ?
+ PAGE_SIZE :
+ data_len));
data_len -= PAGE_SIZE;
}
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 98a5264..661b5a4 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -21,6 +21,7 @@
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
static struct sock_filter ptp_filter[] = {
PTP_FILTER
@@ -57,9 +58,13 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
case PTP_CLASS_V2_VLAN:
phydev = skb->dev->phydev;
if (likely(phydev->drv->txtstamp)) {
+ if (!atomic_inc_not_zero(&sk->sk_refcnt))
+ return;
clone = skb_clone(skb, GFP_ATOMIC);
- if (!clone)
+ if (!clone) {
+ sock_put(sk);
return;
+ }
clone->sk = sk;
phydev->drv->txtstamp(phydev, clone, type);
}
@@ -77,8 +82,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
struct sock_exterr_skb *serr;
int err;
- if (!hwtstamps)
+ if (!hwtstamps) {
+ sock_put(sk);
+ kfree_skb(skb);
return;
+ }
*skb_hwtstamps(skb) = *hwtstamps;
serr = SKB_EXT_ERR(skb);
@@ -87,6 +95,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
skb->sk = NULL;
err = sock_queue_err_skb(sk, skb);
+ sock_put(sk);
if (err)
kfree_skb(skb);
}
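
/*
 * The general pattern applied above, hedged as a sketch: before handing
 * an skb clone to an asynchronous completion path, pin its socket with
 * atomic_inc_not_zero() (which fails if the socket is already dying),
 * and make every exit of the completion call sock_put().
 */
#include <net/sock.h>
#include <linux/skbuff.h>

static bool example_pin_sock_for_async(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
		return false;	/* socket is going away: drop the work */

	return true;		/* completion must call sock_put(sk) */
}
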
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index 25d717e..1b5fefd 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -27,6 +27,7 @@
#include <linux/dmaengine.h>
#include <linux/socket.h>
+#include <linux/export.h>
#include <net/tcp.h>
#include <net/netdma.h>
@@ -71,14 +72,14 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
/* Copy paged appendix. Hmm... why does this look so complicated? */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
WARN_ON(start > offset + len);
- end = start + skb_shinfo(skb)->frags[i].size;
+ end = start + skb_frag_size(frag);
copy = end - offset;
if (copy > 0) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- struct page *page = frag->page;
+ struct page *page = skb_frag_page(frag);
if (copy > len)
copy = len;
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
index 665a880..1d9eb7c 100644
--- a/net/dcb/dcbevent.c
+++ b/net/dcb/dcbevent.c
@@ -19,6 +19,7 @@
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
+#include <linux/export.h>
static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3cb56af..d860530 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -25,6 +25,7 @@
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
+#include <linux/module.h>
#include <net/sock.h>
/**
@@ -1255,7 +1256,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
spin_lock(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
- if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ if (itr->ifindex == netdev->ifindex) {
err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
&itr->app);
if (err) {
@@ -1412,7 +1413,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
goto dcb_unlock;
list_for_each_entry(itr, &dcb_app_list, list) {
- if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+ if (itr->ifindex == netdev->ifindex) {
struct nlattr *app_nest = nla_nest_start(skb,
DCB_ATTR_APP);
if (!app_nest)
@@ -2050,7 +2051,7 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
prio = itr->app.priority;
break;
}
@@ -2073,15 +2074,17 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
struct dcb_app_type *itr;
struct dcb_app_type event;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and replace */
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == new->selector &&
itr->app.protocol == new->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -2101,7 +2104,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
}
memcpy(&entry->app, new, sizeof(*new));
- strncpy(entry->name, dev->name, IFNAMSIZ);
+ entry->ifindex = dev->ifindex;
list_add(&entry->list, &dcb_app_list);
}
out:
@@ -2127,7 +2130,7 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
list_for_each_entry(itr, &dcb_app_list, list) {
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
prio |= 1 << itr->app.priority;
}
}
@@ -2150,8 +2153,10 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
struct dcb_app_type event;
int err = 0;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and abort if found */
@@ -2159,7 +2164,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
if (itr->app.selector == new->selector &&
itr->app.protocol == new->protocol &&
itr->app.priority == new->priority &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
err = -EEXIST;
goto out;
}
@@ -2173,7 +2178,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
}
memcpy(&entry->app, new, sizeof(*new));
- strncpy(entry->name, dev->name, IFNAMSIZ);
+ entry->ifindex = dev->ifindex;
list_add(&entry->list, &dcb_app_list);
out:
spin_unlock(&dcb_lock);
@@ -2194,8 +2199,10 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
struct dcb_app_type event;
int err = -ENOENT;
- memcpy(&event.name, dev->name, sizeof(event.name));
+ event.ifindex = dev->ifindex;
memcpy(&event.app, del, sizeof(event.app));
+ if (dev->dcbnl_ops->getdcbx)
+ event.dcbx = dev->dcbnl_ops->getdcbx(dev);
spin_lock(&dcb_lock);
/* Search for existing match and remove it. */
@@ -2203,7 +2210,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
if (itr->app.selector == del->selector &&
itr->app.protocol == del->protocol &&
itr->app.priority == del->priority &&
- (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+ itr->ifindex == dev->ifindex) {
list_del(&itr->list);
kfree(itr);
err = 0;
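
/*
 * Illustrative lookup against the app table now keyed by ifindex (the
 * change above): unlike the interface name, an ifindex survives device
 * renames, so stored entries keep matching the device that registered
 * them. example_app_priority() is a sketch, not kernel API.
 */
#include <net/dcbnl.h>
#include <linux/dcbnl.h>
#include <linux/netdevice.h>

static u8 example_app_priority(struct net_device *dev, u16 ethtype)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ethtype,
	};

	return dcb_getapp(dev, &app);
}
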
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 25b7a8d..ba07824 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -12,6 +12,7 @@
#include "dccp.h"
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
static struct kmem_cache *dccp_ackvec_slab;
static struct kmem_cache *dccp_ackvec_record_slab;
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 0462040..67164bb 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -85,7 +85,6 @@ static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
{
- struct dccp_sock *dp = dccp_sk(sk);
u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
/*
@@ -98,14 +97,33 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
val = max_ratio;
}
- if (val > DCCPF_ACK_RATIO_MAX)
- val = DCCPF_ACK_RATIO_MAX;
+ dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
+ min_t(u32, val, DCCPF_ACK_RATIO_MAX));
+}
- if (val == dp->dccps_l_ack_ratio)
- return;
+static void ccid2_check_l_ack_ratio(struct sock *sk)
+{
+ struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- ccid2_pr_debug("changing local ack ratio to %u\n", val);
- dp->dccps_l_ack_ratio = val;
+ /*
+ * After a loss, idle period, application limited period, or RTO we
+ * need to check that the ack ratio is still less than the congestion
+ * window. Otherwise, we will send an entire congestion window of
+ * packets and got no response because we haven't sent ack ratio
+ * packets yet.
+ * If the ack ratio does need to be reduced, we reduce it to half of
+ * the congestion window (or 1 if that's zero) instead of to the
+ * congestion window. This prevents problems if one ack is lost.
+ */
+ if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
+ ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
+}
+
+static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
+{
+ dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
+ clamp_val(val, DCCPF_SEQ_WMIN,
+ DCCPF_SEQ_WMAX));
}
static void ccid2_hc_tx_rto_expire(unsigned long data)
@@ -187,6 +205,8 @@ static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
}
hc->tx_cwnd_used = 0;
hc->tx_cwnd_stamp = now;
+
+ ccid2_check_l_ack_ratio(sk);
}
/* This borrows the code of tcp_cwnd_restart() */
@@ -205,6 +225,8 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
hc->tx_cwnd_stamp = now;
hc->tx_cwnd_used = 0;
+
+ ccid2_check_l_ack_ratio(sk);
}
static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
@@ -405,17 +427,37 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_cwnd < hc->tx_ssthresh) {
- if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
+
+ if (hc->tx_cwnd < dp->dccps_l_seq_win &&
+ r_seq_used < dp->dccps_r_seq_win) {
+ if (hc->tx_cwnd < hc->tx_ssthresh) {
+ if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
+ hc->tx_cwnd += 1;
+ *maxincr -= 1;
+ hc->tx_packets_acked = 0;
+ }
+ } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
hc->tx_cwnd += 1;
- *maxincr -= 1;
hc->tx_packets_acked = 0;
}
- } else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
- hc->tx_cwnd += 1;
- hc->tx_packets_acked = 0;
}
+
+ /*
+ * Adjust the local sequence window and the ack ratio to allow about
+ * 5 times the number of packets in the network (RFC 4340, 7.5.2).
+ */
+ if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
+ else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
+ ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
+
+ if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
+ else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
+ ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
+
/*
* FIXME: RTT is sampled several times per acknowledgment (for each
* entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
@@ -441,9 +483,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
- /* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
- if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
- ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
+ ccid2_check_l_ack_ratio(sk);
}
static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
@@ -494,8 +534,16 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
if (hc->tx_rpdupack >= NUMDUPACK) {
hc->tx_rpdupack = -1; /* XXX lame */
hc->tx_rpseq = 0;
-
+#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
+ /*
+ * FIXME: Ack Congestion Control is broken; in
+ * its current state, instabilities occurred with
+ * Ack Ratios greater than 1, causing hang-ups
+ * and long RTO timeouts. This needs to be fixed
+ * before opening up dynamic changes. -- gerrit
+ */
ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
+#endif
}
}
}
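
/*
 * Worked example (illustrative numbers, EX_* names made up) of the
 * CCID2_WIN_CHANGE_FACTOR logic above: with cwnd = 20 and Ack Ratio = 2,
 * the sender consumes about cwnd / ack_ratio = 10 entries of the peer's
 * sequence window per RTT; once 5 * 10 >= the remote sequence window,
 * the Ack Ratio is doubled to slow that consumption, and the local
 * sequence window is doubled once 5 * cwnd reaches it.
 */
#include <linux/types.h>

#define EX_WIN_CHANGE_FACTOR	5	/* mirrors CCID2_WIN_CHANGE_FACTOR */

static bool ex_should_double_ack_ratio(u32 cwnd, u32 ack_ratio,
				       u64 r_seq_win)
{
	u64 r_seq_used = cwnd / ack_ratio;

	return r_seq_used * EX_WIN_CHANGE_FACTOR >= r_seq_win;
}
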
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index f585d33..18c9754 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -43,6 +43,12 @@ struct ccid2_seq {
#define CCID2_SEQBUF_LEN 1024
#define CCID2_SEQBUF_MAX 128
+/*
+ * Multiple of congestion window to keep the sequence window at
+ * (RFC 4340 7.5.2)
+ */
+#define CCID2_WIN_CHANGE_FACTOR 5
+
/**
* struct ccid2_hc_tx_sock - CCID2 TX half connection
* @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index 4902029..1f94b7e 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -4,6 +4,7 @@
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
*/
+#include <linux/moduleparam.h>
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5fdb072..583490a 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -474,6 +474,7 @@ static inline int dccp_ack_pending(const struct sock *sk)
return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
}
+extern int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
extern int dccp_feat_finalise_settings(struct dccp_sock *dp);
extern int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
extern int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 568def9..23cea0e 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -12,6 +12,7 @@
* -----------
* o Feature negotiation is coordinated with connection setup (as in TCP), wild
* changes of parameters of an established connection are not supported.
+ * o Changing non-negotiable (NN) values is supported in state OPEN/PARTOPEN.
* o All currently known SP features have 1-byte quantities. If in the future
* extensions of RFCs 4340..42 define features with item lengths larger than
* one byte, a feature-specific extension of the code will be required.
@@ -343,6 +344,20 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
}
+/**
+ * dccp_feat_activate - Activate feature value on socket
+ * @sk: fully connected DCCP socket (after handshake is complete)
+ * @feat_num: feature to activate, one of %dccp_feature_numbers
+ * @local: whether local (1) or remote (0) @feat_num is meant
+ * @fval: the value (SP or NN) to activate, or NULL to use the default value
+ * For general use, this function is preferable to __dccp_feat_activate().
+ */
+static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
+ dccp_feat_val const *fval)
+{
+ return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
+}
+
/* Test for "Req'd" feature (RFC 4340, 6.4) */
static inline int dccp_feat_must_be_understood(u8 feat_num)
{
@@ -650,11 +665,22 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
return -1;
if (pos->needs_mandatory && dccp_insert_option_mandatory(skb))
return -1;
- /*
- * Enter CHANGING after transmitting the Change option (6.6.2).
- */
- if (pos->state == FEAT_INITIALISING)
- pos->state = FEAT_CHANGING;
+
+ if (skb->sk->sk_state == DCCP_OPEN &&
+ (opt == DCCPO_CONFIRM_R || opt == DCCPO_CONFIRM_L)) {
+ /*
+ * Confirms don't get retransmitted (6.6.3) once the
+ * connection is in state OPEN
+ */
+ dccp_feat_list_pop(pos);
+ } else {
+ /*
+ * Enter CHANGING after transmitting the Change
+ * option (6.6.2).
+ */
+ if (pos->state == FEAT_INITIALISING)
+ pos->state = FEAT_CHANGING;
+ }
}
return 0;
}
@@ -730,6 +756,70 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
0, list, len);
}
+/**
+ * dccp_feat_nn_get - Query current/pending value of NN feature
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * For a known NN feature, returns the value currently being negotiated,
+ * or the current (confirmed) value if no negotiation is going on.
+ */
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
+{
+ if (dccp_feat_type(feat) == FEAT_NN) {
+ struct dccp_sock *dp = dccp_sk(sk);
+ struct dccp_feat_entry *entry;
+
+ entry = dccp_feat_list_lookup(&dp->dccps_featneg, feat, 1);
+ if (entry != NULL)
+ return entry->val.nn;
+
+ switch (feat) {
+ case DCCPF_ACK_RATIO:
+ return dp->dccps_l_ack_ratio;
+ case DCCPF_SEQUENCE_WINDOW:
+ return dp->dccps_l_seq_win;
+ }
+ }
+ DCCP_BUG("attempt to look up unsupported feature %u", feat);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
+
+/**
+ * dccp_feat_signal_nn_change - Update NN values for an established connection
+ * @sk: DCCP socket of an established connection
+ * @feat: NN feature number from %dccp_feature_numbers
+ * @nn_val: the new value to use
+ * This function is used to communicate NN updates out-of-band.
+ */
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ dccp_feat_val fval = { .nn = nn_val };
+ struct dccp_feat_entry *entry;
+
+ if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
+ return 0;
+
+ if (dccp_feat_type(feat) != FEAT_NN ||
+ !dccp_feat_is_valid_nn_val(feat, nn_val))
+ return -EINVAL;
+
+ if (nn_val == dccp_feat_nn_get(sk, feat))
+ return 0; /* already set or negotiation under way */
+
+ entry = dccp_feat_list_lookup(fn, feat, 1);
+ if (entry != NULL) {
+ dccp_pr_debug("Clobbering existing NN entry %llu -> %llu\n",
+ (unsigned long long)entry->val.nn,
+ (unsigned long long)nn_val);
+ dccp_feat_list_pop(entry);
+ }
+
+ inet_csk_schedule_ack(sk);
+ return dccp_feat_push_change(fn, feat, 1, 0, &fval);
+}
+EXPORT_SYMBOL_GPL(dccp_feat_signal_nn_change);
/*
* Tracking features whose value depend on the choice of CCID
@@ -1187,6 +1277,100 @@ confirmation_failed:
}
/**
+ * dccp_feat_handle_nn_established - Fast-path reception of NN options
+ * @sk: socket of an established DCCP connection
+ * @mandatory: whether @opt was preceded by a Mandatory option
+ * @opt: %DCCPO_CHANGE_L | %DCCPO_CONFIRM_R (NN only)
+ * @feat: NN number, one of %dccp_feature_numbers
+ * @val: NN value
+ * @len: length of @val in bytes
+ * This function combines the functionality of change_recv/confirm_recv, with
+ * the following differences (reset codes are the same):
+ * - cleanup after receiving the Confirm;
+ * - values are directly activated after successful parsing;
+ * - deliberately restricted to NN features.
+ * The restriction to NN features is essential, since SP features can have
+ * unpredictable outcomes (depending on the remote configuration) and are
+ * interdependent (CCIDs, for instance, cause further dependencies).
+ */
+static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
+ u8 feat, u8 *val, u8 len)
+{
+ struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
+ const bool local = (opt == DCCPO_CONFIRM_R);
+ struct dccp_feat_entry *entry;
+ u8 type = dccp_feat_type(feat);
+ dccp_feat_val fval;
+
+ dccp_feat_print_opt(opt, feat, val, len, mandatory);
+
+ /* Ignore non-mandatory unknown and non-NN features */
+ if (type == FEAT_UNKNOWN) {
+ if (local && !mandatory)
+ return 0;
+ goto fast_path_unknown;
+ } else if (type != FEAT_NN) {
+ return 0;
+ }
+
+ /*
+ * We don't accept empty Confirms, since in fast-path feature
+ * negotiation the values are enabled immediately after sending
+ * the Change option.
+ * Empty Changes, on the other hand, are invalid (RFC 4340, 6.1).
+ */
+ if (len == 0 || len > sizeof(fval.nn))
+ goto fast_path_unknown;
+
+ if (opt == DCCPO_CHANGE_L) {
+ fval.nn = dccp_decode_value_var(val, len);
+ if (!dccp_feat_is_valid_nn_val(feat, fval.nn))
+ goto fast_path_unknown;
+
+ if (dccp_feat_push_confirm(fn, feat, local, &fval) ||
+ dccp_feat_activate(sk, feat, local, &fval))
+ return DCCP_RESET_CODE_TOO_BUSY;
+
+ /* set the `Ack Pending' flag to piggyback a Confirm */
+ inet_csk_schedule_ack(sk);
+
+ } else if (opt == DCCPO_CONFIRM_R) {
+ entry = dccp_feat_list_lookup(fn, feat, local);
+ if (entry == NULL || entry->state != FEAT_CHANGING)
+ return 0;
+
+ fval.nn = dccp_decode_value_var(val, len);
+ /*
+ * Just ignore a value that doesn't match our current value.
+ * If the option changes twice within two RTTs, then at least
+ * one CONFIRM will be received for the old value after a
+ * new CHANGE was sent.
+ */
+ if (fval.nn != entry->val.nn)
+ return 0;
+
+ /* Only activate after receiving the Confirm option (6.6.1). */
+ dccp_feat_activate(sk, feat, local, &fval);
+
+ /* It has been confirmed - so remove the entry */
+ dccp_feat_list_pop(entry);
+
+ } else {
+ DCCP_WARN("Received illegal option %u\n", opt);
+ goto fast_path_failed;
+ }
+ return 0;
+
+fast_path_unknown:
+ if (!mandatory)
+ return dccp_push_empty_confirm(fn, feat, local);
+
+fast_path_failed:
+ return mandatory ? DCCP_RESET_CODE_MANDATORY_ERROR
+ : DCCP_RESET_CODE_OPTION_ERROR;
+}
+
+/**
* dccp_feat_parse_options - Process Feature-Negotiation Options
* @sk: for general use and used by the client during connection setup
* @dreq: used by the server during connection setup
@@ -1221,6 +1405,14 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
return dccp_feat_confirm_recv(fn, mandatory, opt, feat,
val, len, server);
}
+ break;
+ /*
+ * Support for exchanging NN options on an established connection.
+ */
+ case DCCP_OPEN:
+ case DCCP_PARTOPEN:
+ return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
+ val, len);
}
return 0; /* ignore FN options in all other states */
}
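
/*
 * Sketch of how a CCID drives the new out-of-band NN update path (this
 * mirrors ccid2_change_l_ack_ratio() earlier in the series; the
 * example_* wrapper is illustrative): the new value is queued as a
 * Change option, an Ack is scheduled to carry it, and dccp_feat_nn_get()
 * keeps reporting the value under negotiation until the peer's Confirm
 * activates it.
 */
#include <linux/kernel.h>
#include "dccp.h"
#include "feat.h"

static int example_double_ack_ratio(struct sock *sk)
{
	u64 cur = dccp_feat_nn_get(sk, DCCPF_ACK_RATIO);

	return dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
					  min_t(u64, cur * 2,
						DCCPF_ACK_RATIO_MAX));
}
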
diff --git a/net/dccp/feat.h b/net/dccp/feat.h
index e56a4e5..90b957d 100644
--- a/net/dccp/feat.h
+++ b/net/dccp/feat.h
@@ -129,6 +129,7 @@ extern int dccp_feat_clone_list(struct list_head const *, struct list_head *);
extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
extern u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+extern u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
extern int dccp_insert_option_mandatory(struct sk_buff *skb);
extern int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 332639b..90a919a 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -433,6 +433,7 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
+ bh_unlock_sock(newsk);
sock_put(newsk);
goto exit;
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index b74f761..17ee85c 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -271,7 +271,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr);
- err = ip6_xmit(sk, skb, &fl6, opt);
+ err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -326,7 +326,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL);
+ ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
return;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 152975d..e742f90 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -184,7 +184,6 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
dp->dccps_rate_last = jiffies;
dp->dccps_role = DCCP_ROLE_UNDEFINED;
dp->dccps_service = DCCP_SERVICE_CODE_IS_ABSENT;
- dp->dccps_l_ack_ratio = dp->dccps_r_ack_ratio = 1;
dp->dccps_tx_qlen = sysctl_dccp_tx_qlen;
dccp_init_xmit_timers(sk);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 7587870..16f0b22 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -12,6 +12,7 @@
#include <linux/dccp.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include "dccp.h"
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ba4face..2ab16e1 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -388,7 +388,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
}
ifa->ifa_next = dn_db->ifa_list;
- rcu_assign_pointer(dn_db->ifa_list, ifa);
+ RCU_INIT_POINTER(dn_db->ifa_list, ifa);
dn_ifaddr_notify(RTM_NEWADDR, ifa);
blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -1093,7 +1093,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
- rcu_assign_pointer(dev->dn_ptr, dn_db);
+ RCU_INIT_POINTER(dev->dn_ptr, dn_db);
dn_db->dev = dev;
init_timer(&dn_db->timer);
@@ -1101,7 +1101,7 @@ static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
if (!dn_db->neigh_parms) {
- rcu_assign_pointer(dev->dn_ptr, NULL);
+ RCU_INIT_POINTER(dev->dn_ptr, NULL);
kfree(dn_db);
return NULL;
}
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 43450c1..a77d161 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -77,6 +77,7 @@
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
+#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f0efb0c..f65c9dd 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 3fb14b7..0dc1589 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -12,6 +12,7 @@
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/dsa.h>
#include "dsa_priv.h"
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0a47b6c..56cf9b8 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -301,7 +301,6 @@ static const struct net_device_ops dsa_netdev_ops = {
.ndo_start_xmit = dsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -314,7 +313,6 @@ static const struct net_device_ops edsa_netdev_ops = {
.ndo_start_xmit = edsa_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
@@ -327,7 +325,6 @@ static const struct net_device_ops trailer_netdev_ops = {
.ndo_start_xmit = trailer_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_multicast_list = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
new file mode 100644
index 0000000..19d6aef
--- /dev/null
+++ b/net/ieee802154/6lowpan.c
@@ -0,0 +1,891 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define DEBUG
+
+#include <linux/bitops.h>
+#include <linux/if_arp.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ipv6.h>
+
+#include "6lowpan.h"
+
+/* TTL uncompression values */
+static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
+
+static LIST_HEAD(lowpan_devices);
+
+/*
+ * Uncompression of link-local addresses:
+ * 0 -> 16 bytes from packet
+ * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
+ * 2 -> 2 bytes from prefix - zeroes + 2 from packet
+ * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
+ *
+ * NOTE: the uncompress function turns a nibble of 0xf into 0x10 (16)
+ * NOTE: 0x00 => no-autoconfig => unspecified
+ */
+static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
+
+/*
+ * Uncompression of ctx-based:
+ * 0 -> 0 bits from packet [unspecified / reserved]
+ * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
+ * 2 -> 8 bytes from prefix - zeroes + 2 from packet
+ * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
+ */
+static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
+
+/*
+ * Uncompression of ctx-based multicast addresses:
+ * 0 -> 16 bytes from packet
+ * 1 -> 2 bytes from prefix - zeroes + 5 from packet
+ * 2 -> 2 bytes from prefix - zeroes + 3 from packet
+ * 3 -> 2 bytes from prefix - zeroes + 1 from packet
+ */
+static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
+
+/* Link local prefix */
+static const u8 lowpan_llprefix[] = {0xfe, 0x80};
+
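Each entry in the three lowpan_unc_* tables above packs two counts into one byte: the high nibble is the number of bytes taken from the prefix, the low nibble the number taken from the packet, and 15 stands for 16 (see lowpan_uncompress_addr() below). A self-contained sketch of that decoding (the helper name is made up):

    #include <stdio.h>

    static void decode_unc_entry(unsigned char entry)
    {
        unsigned int pref = entry >> 4;
        unsigned int post = entry & 0x0f;

        pref = (pref == 15) ? 16 : pref;
        post = (post == 15) ? 16 : post;

        printf("0x%02x -> %u prefix bytes, %u bytes from packet\n",
               entry, pref, post);
    }

    int main(void)
    {
        decode_unc_entry(0x28);  /* link-local: 2 prefix, 8 from packet */
        decode_unc_entry(0x0f);  /* full address: 0 prefix, 16 from packet */
        return 0;
    }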
+/* private device info */
+struct lowpan_dev_info {
+ struct net_device *real_dev; /* real WPAN device ptr */
+ struct mutex dev_list_mtx; /* mutex for list ops */
+};
+
+struct lowpan_dev_record {
+ struct net_device *ldev;
+ struct list_head list;
+};
+
+static inline struct
+lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
+{
+ return netdev_priv(dev);
+}
+
+static inline void lowpan_address_flip(u8 *src, u8 *dest)
+{
+ int i;
+ for (i = 0; i < IEEE802154_ADDR_LEN; i++)
+ (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
+}
+
+/* list of all 6lowpan devices, used for packet delivery */
+/* print data in line */
+static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+#ifdef DEBUG
+ if (msg)
+ pr_debug("(%s) %s: ", caller, msg);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
+ 16, 1, buf, len, false);
+#endif /* DEBUG */
+}
+
+/*
+ * print data in a table format:
+ *
+ * addr: xx xx xx xx xx xx
+ * addr: xx xx xx xx xx xx
+ * ...
+ */
+static inline void lowpan_raw_dump_table(const char *caller, char *msg,
+ unsigned char *buf, int len)
+{
+#ifdef DEBUG
+ if (msg)
+ pr_debug("(%s) %s:\n", caller, msg);
+ print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
+#endif /* DEBUG */
+}
+
+static u8
+lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
+ const unsigned char *lladdr)
+{
+ u8 val = 0;
+
+ if (is_addr_mac_addr_based(ipaddr, lladdr))
+ val = 3; /* 0-bits */
+ else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
+ /* compress IID to 16 bits xxxx::XXXX */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
+ *hc06_ptr += 2;
+ val = 2; /* 16-bits */
+ } else {
+ /* do not compress IID => xxxx::IID */
+ memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
+ *hc06_ptr += 8;
+ val = 1; /* 64-bits */
+ }
+
+ return rol8(val, shift);
+}
+
+static void
+lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+{
+ memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN);
+	/* the second bit (Universal/Local) is flipped according to RFC 2464 */
+ ipaddr->s6_addr[8] ^= 0x02;
+}
+
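To see the flip in isolation: a link-layer EUI-64 whose first byte is 0x02 maps to an IID starting with 0x00, and vice versa. A minimal standalone check (plain C, names hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char lladdr[8] = { 0x02, 0x11, 0x22, 0x33,
                                    0x44, 0x55, 0x66, 0x77 };
        unsigned char iid[8];

        memcpy(iid, lladdr, sizeof(iid));
        iid[0] ^= 0x02;               /* flip the Universal/Local bit */

        printf("iid[0] = 0x%02x\n", iid[0]);  /* prints 0x00 */
        return 0;
    }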
+/*
+ * Uncompress addresses based on a prefix and a postfix with zeroes in
+ * between. If the postfix is zero in length it will use the link address
+ * to configure the IP address (autoconf style).
+ * pref_post_count takes a byte whose first nibble specifies the prefix
+ * count and whose second nibble the postfix count (NOTE: 15/0xf => 16
+ * bytes are copied).
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
+ u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+{
+ u8 prefcount = pref_post_count >> 4;
+ u8 postcount = pref_post_count & 0x0f;
+
+ /* full nibble 15 => 16 */
+ prefcount = (prefcount == 15 ? 16 : prefcount);
+ postcount = (postcount == 15 ? 16 : postcount);
+
+ if (lladdr)
+ lowpan_raw_dump_inline(__func__, "linklocal address",
+ lladdr, IEEE802154_ALEN);
+ if (prefcount > 0)
+ memcpy(ipaddr, prefix, prefcount);
+
+ if (prefcount + postcount < 16)
+ memset(&ipaddr->s6_addr[prefcount], 0,
+ 16 - (prefcount + postcount));
+
+ if (postcount > 0) {
+ memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
+ skb_pull(skb, postcount);
+ } else if (prefcount > 0) {
+ if (lladdr == NULL)
+ return -EINVAL;
+
+ /* no IID based configuration if no prefix and no data */
+ lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+ }
+
+ pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
+ postcount);
+ lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+
+ return 0;
+}
+
+static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
+{
+ u8 ret;
+
+ ret = skb->data[0];
+ skb_pull(skb, 1);
+
+ return ret;
+}
+
+static int lowpan_header_create(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned len)
+{
+ u8 tmp, iphc0, iphc1, *hc06_ptr;
+ struct ipv6hdr *hdr;
+ const u8 *saddr = _saddr;
+ const u8 *daddr = _daddr;
+ u8 *head;
+ struct ieee802154_addr sa, da;
+
+ if (type != ETH_P_IPV6)
+ return 0;
+	/* TODO:
+	 * if this packet isn't an IPv6 one, where should it be routed?
+	 */
+ head = kzalloc(100, GFP_KERNEL);
+ if (head == NULL)
+ return -ENOMEM;
+
+ hdr = ipv6_hdr(skb);
+ hc06_ptr = head + 2;
+
+ pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
+ "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
+ hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
+ hdr->hop_limit);
+
+ lowpan_raw_dump_table(__func__, "raw skb network header dump",
+ skb_network_header(skb), sizeof(struct ipv6hdr));
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+
+ lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
+
+	/*
+	 * As we copy some bit-length fields into the IPHC encoding bytes, we
+	 * sometimes use |=. If the field is 0 and the current bit value in
+	 * memory is 1, this does not work, so we reset the IPHC encoding here.
+	 */
+ iphc0 = LOWPAN_DISPATCH_IPHC;
+ iphc1 = 0;
+
+ /* TODO: context lookup */
+
+ lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
+
+	/*
+	 * Traffic class, flow label
+	 * If the flow label is 0, compress it. If the traffic class is 0,
+	 * compress it. We have to process both at the same time, as the
+	 * offset of the traffic class depends on the presence of the
+	 * version and flow label fields.
+	 */
+
+	/* hc06 format of TC is ECN | DSCP, the original one is DSCP | ECN */
+ tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
+ tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
+
+ if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
+ (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
+ /* flow label can be compressed */
+ iphc0 |= LOWPAN_IPHC_FL_C;
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress (elide) all */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ } else {
+ /* compress only the flow label */
+ *hc06_ptr = tmp;
+ hc06_ptr += 1;
+ }
+ } else {
+ /* Flow label cannot be compressed */
+ if ((hdr->priority == 0) &&
+ ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+ /* compress only traffic class */
+ iphc0 |= LOWPAN_IPHC_TC_C;
+ *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
+ memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
+ hc06_ptr += 3;
+ } else {
+ /* compress nothing */
+ memcpy(hc06_ptr, &hdr, 4);
+ /* replace the top byte with new ECN | DSCP format */
+ *hc06_ptr = tmp;
+ hc06_ptr += 4;
+ }
+ }
+
+ /* NOTE: payload length is always compressed */
+
+	/* Next Header is compressed if it is UDP */
+ if (hdr->nexthdr == UIP_PROTO_UDP)
+ iphc0 |= LOWPAN_IPHC_NH_C;
+
+/* TODO: next header compression */
+
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ *hc06_ptr = hdr->nexthdr;
+ hc06_ptr += 1;
+ }
+
+ /*
+ * Hop limit
+ * if 1: compress, encoding is 01
+ * if 64: compress, encoding is 10
+ * if 255: compress, encoding is 11
+ * else do not compress
+ */
+ switch (hdr->hop_limit) {
+ case 1:
+ iphc0 |= LOWPAN_IPHC_TTL_1;
+ break;
+ case 64:
+ iphc0 |= LOWPAN_IPHC_TTL_64;
+ break;
+ case 255:
+ iphc0 |= LOWPAN_IPHC_TTL_255;
+ break;
+ default:
+ *hc06_ptr = hdr->hop_limit;
+ break;
+ }
+
+ /* source address compression */
+ if (is_addr_unspecified(&hdr->saddr)) {
+ pr_debug("(%s): source address is unspecified, setting SAC\n",
+ __func__);
+ iphc1 |= LOWPAN_IPHC_SAC;
+ /* TODO: context lookup */
+ } else if (is_addr_link_local(&hdr->saddr)) {
+ pr_debug("(%s): source address is link-local\n", __func__);
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
+ } else {
+ pr_debug("(%s): send the full source address\n", __func__);
+ memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+
+ /* destination address compression */
+ if (is_addr_mcast(&hdr->daddr)) {
+ pr_debug("(%s): destination address is multicast", __func__);
+ iphc1 |= LOWPAN_IPHC_M;
+ if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
+ pr_debug("compressed to 1 octet\n");
+ iphc1 |= LOWPAN_IPHC_DAM_11;
+ /* use last byte */
+ *hc06_ptr = hdr->daddr.s6_addr[15];
+ hc06_ptr += 1;
+ } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
+ pr_debug("compressed to 4 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_10;
+ /* second byte + the last three */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
+ hc06_ptr += 4;
+ } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
+ pr_debug("compressed to 6 octets\n");
+ iphc1 |= LOWPAN_IPHC_DAM_01;
+ /* second byte + the last five */
+ *hc06_ptr = hdr->daddr.s6_addr[1];
+ memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
+ hc06_ptr += 6;
+ } else {
+ pr_debug("using full address\n");
+ iphc1 |= LOWPAN_IPHC_DAM_00;
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
+ hc06_ptr += 16;
+ }
+ } else {
+ pr_debug("(%s): destination address is unicast: ", __func__);
+ /* TODO: context lookup */
+ if (is_addr_link_local(&hdr->daddr)) {
+ pr_debug("destination address is link-local\n");
+ iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
+ LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
+ } else {
+ pr_debug("using full address\n");
+ memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
+ hc06_ptr += 16;
+ }
+ }
+
+ /* TODO: UDP header compression */
+ /* TODO: Next Header compression */
+
+ head[0] = iphc0;
+ head[1] = iphc1;
+
+ skb_pull(skb, sizeof(struct ipv6hdr));
+ memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
+
+ kfree(head);
+
+ lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
+ skb->len);
+
+ /*
+	 * NOTE1: I'm still unsure whether compression and the WPAN header
+	 * should be created here or later in xmit, so we wait for an opinion
+	 * from the net maintainers.
+ */
+ /*
+ * NOTE2: to be absolutely correct, we must derive PANid information
+ * from MAC subif of the 'dev' and 'real_dev' network devices, but
+ * this isn't implemented in mainline yet, so currently we assign 0xff
+ */
+ {
+ /* prepare wpan address data */
+ sa.addr_type = IEEE802154_ADDR_LONG;
+ sa.pan_id = 0xff;
+
+ da.addr_type = IEEE802154_ADDR_LONG;
+ da.pan_id = 0xff;
+
+ memcpy(&(da.hwaddr), daddr, 8);
+ memcpy(&(sa.hwaddr), saddr, 8);
+
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+ return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+ type, (void *)&da, (void *)&sa, skb->len);
+ }
+}
+
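One detail of lowpan_header_create() worth isolating is the traffic-class rotation near its top: the IPv6 header stores DSCP(6 bits) | ECN(2 bits), while the hc06 encoding wants ECN | DSCP, i.e. a rotate-left by six. A standalone sketch (helper name invented):

    #include <stdio.h>

    static unsigned char tc_to_hc06(unsigned char tc)
    {
        /* move the two ECN bits to the top, the six DSCP bits down */
        return (unsigned char)(((tc & 0x03) << 6) | (tc >> 2));
    }

    int main(void)
    {
        /* DSCP 0x2e (EF) with ECN 01: (0x2e << 2) | 1 == 0xb9 */
        unsigned char tc = 0xb9;

        printf("0x%02x -> 0x%02x\n", tc, tc_to_hc06(tc));  /* 0x6e */
        return 0;
    }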
+static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
+{
+ struct sk_buff *new;
+ struct lowpan_dev_record *entry;
+ int stat = NET_RX_SUCCESS;
+
+ new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
+ GFP_ATOMIC);
+ kfree_skb(skb);
+
+ if (!new)
+ return -ENOMEM;
+
+ skb_push(new, sizeof(struct ipv6hdr));
+ skb_reset_network_header(new);
+ skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));
+
+ new->protocol = htons(ETH_P_IPV6);
+ new->pkt_type = PACKET_HOST;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &lowpan_devices, list)
+ if (lowpan_dev_info(entry->ldev)->real_dev == new->dev) {
+ skb = skb_copy(new, GFP_ATOMIC);
+ if (!skb) {
+ stat = -ENOMEM;
+ break;
+ }
+
+ skb->dev = entry->ldev;
+ stat = netif_rx(skb);
+ }
+ rcu_read_unlock();
+
+ kfree_skb(new);
+
+ return stat;
+}
+
+static int
+lowpan_process_data(struct sk_buff *skb)
+{
+ struct ipv6hdr hdr;
+ u8 tmp, iphc0, iphc1, num_context = 0;
+ u8 *_saddr, *_daddr;
+ int err;
+
+ lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
+ skb->len);
+ /* at least two bytes will be used for the encoding */
+ if (skb->len < 2)
+ goto drop;
+ iphc0 = lowpan_fetch_skb_u8(skb);
+ iphc1 = lowpan_fetch_skb_u8(skb);
+
+ _saddr = mac_cb(skb)->sa.hwaddr;
+ _daddr = mac_cb(skb)->da.hwaddr;
+
+ pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
+
+	/* one more byte is used if the CID flag is set */
+	if (iphc1 & LOWPAN_IPHC_CID) {
+		pr_debug("(%s): CID flag is set, the header is one byte longer\n",
+			 __func__);
+ if (!skb->len)
+ goto drop;
+ num_context = lowpan_fetch_skb_u8(skb);
+ }
+
+ hdr.version = 6;
+
+ /* Traffic Class and Flow Label */
+ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+ /*
+	 * Traffic Class and Flow Label carried in-line
+ * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
+ */
+ case 0: /* 00b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ memcpy(&hdr.flow_lbl, &skb->data[0], 3);
+ skb_pull(skb, 3);
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
+ (hdr.flow_lbl[0] & 0x0f);
+ break;
+ /*
+ * Traffic class carried in-line
+ * ECN + DSCP (1 byte), Flow Label is elided
+ */
+ case 1: /* 10b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ hdr.priority = ((tmp >> 2) & 0x0f);
+ hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
+ hdr.flow_lbl[1] = 0;
+ hdr.flow_lbl[2] = 0;
+ break;
+ /*
+ * Flow Label carried in-line
+ * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
+ */
+ case 2: /* 01b */
+ if (!skb->len)
+ goto drop;
+ tmp = lowpan_fetch_skb_u8(skb);
+ hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+ memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
+ skb_pull(skb, 2);
+ break;
+ /* Traffic Class and Flow Label are elided */
+ case 3: /* 11b */
+ hdr.priority = 0;
+ hdr.flow_lbl[0] = 0;
+ hdr.flow_lbl[1] = 0;
+ hdr.flow_lbl[2] = 0;
+ break;
+ default:
+ break;
+ }
+
+ /* Next Header */
+ if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
+ /* Next header is carried inline */
+ if (!skb->len)
+ goto drop;
+ hdr.nexthdr = lowpan_fetch_skb_u8(skb);
+ pr_debug("(%s): NH flag is set, next header is carried "
+ "inline: %02x\n", __func__, hdr.nexthdr);
+ }
+
+ /* Hop Limit */
+ if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
+ hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
+ else {
+ if (!skb->len)
+ goto drop;
+ hdr.hop_limit = lowpan_fetch_skb_u8(skb);
+ }
+
+ /* Extract SAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
+
+ /* Source address uncompression */
+ pr_debug("(%s): source address stateless compression\n", __func__);
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
+ lowpan_unc_llconf[tmp], skb->data);
+ if (err)
+ goto drop;
+
+ /* Extract DAM to the tmp variable */
+ tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
+
+ /* check for Multicast Compression */
+ if (iphc1 & LOWPAN_IPHC_M) {
+ if (iphc1 & LOWPAN_IPHC_DAC) {
+ pr_debug("(%s): destination address context-based "
+ "multicast compression\n", __func__);
+ /* TODO: implement this */
+ } else {
+ u8 prefix[] = {0xff, 0x02};
+
+ pr_debug("(%s): destination address non-context-based"
+ " multicast compression\n", __func__);
+ if (0 < tmp && tmp < 3) {
+ if (!skb->len)
+ goto drop;
+ else
+ prefix[1] = lowpan_fetch_skb_u8(skb);
+ }
+
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
+ lowpan_unc_mxconf[tmp], NULL);
+ if (err)
+ goto drop;
+ }
+ } else {
+ pr_debug("(%s): destination address stateless compression\n",
+ __func__);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
+ lowpan_unc_llconf[tmp], skb->data);
+ if (err)
+ goto drop;
+ }
+
+ /* TODO: UDP header parse */
+
+	/* Not a fragmented packet */
+ hdr.payload_len = htons(skb->len);
+
+ pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
+ skb_headroom(skb), skb->len);
+
+ pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
+ "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
+ ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
+
+ lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
+ sizeof(hdr));
+ return lowpan_skb_deliver(skb, &hdr);
+drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
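The hop-limit handling above mirrors the compression side in lowpan_header_create(): 1, 64 and 255 compress to the 2-bit codes 01, 10 and 11, anything else travels in-line (code 00), and lowpan_ttl_values[] reverses the mapping. A compact round trip (standalone, names invented):

    #include <stdio.h>

    static const unsigned char ttl_values[] = { 0, 1, 64, 255 };

    static int ttl_to_code(unsigned char ttl)
    {
        switch (ttl) {
        case 1:   return 1;
        case 64:  return 2;
        case 255: return 3;
        default:  return 0;  /* carried in-line */
        }
    }

    int main(void)
    {
        int ttl = 64;
        int code = ttl_to_code(ttl);

        if (code)
            printf("ttl %d -> code %d -> %d\n",
                   ttl, code, ttl_values[code]);
        return 0;
    }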
+static int lowpan_set_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *sa = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* TODO: validate addr */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
+static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int err = 0;
+
+	pr_debug("(%s): packet xmit\n", __func__);
+
+ skb->dev = lowpan_dev_info(dev)->real_dev;
+ if (skb->dev == NULL) {
+ pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
+ dev_kfree_skb(skb);
+ } else
+ err = dev_queue_xmit(skb);
+
+ return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
+}
+
+static void lowpan_dev_free(struct net_device *dev)
+{
+ dev_put(lowpan_dev_info(dev)->real_dev);
+ free_netdev(dev);
+}
+
+static struct header_ops lowpan_header_ops = {
+ .create = lowpan_header_create,
+};
+
+static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_start_xmit = lowpan_xmit,
+ .ndo_set_mac_address = lowpan_set_address,
+};
+
+static void lowpan_setup(struct net_device *dev)
+{
+ pr_debug("(%s)\n", __func__);
+
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+ dev->type = ARPHRD_IEEE802154;
+ dev->features = NETIF_F_NO_CSUM;
+ /* Frame Control + Sequence Number + Address fields + Security Header */
+ dev->hard_header_len = 2 + 1 + 20 + 14;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = 1281;
+ dev->tx_queue_len = 0;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->netdev_ops = &lowpan_netdev_ops;
+ dev->header_ops = &lowpan_header_ops;
+ dev->destructor = lowpan_dev_free;
+}
+
+static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ pr_debug("(%s)\n", __func__);
+
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ if (!netif_running(dev))
+ goto drop;
+
+ if (dev->type != ARPHRD_IEEE802154)
+ goto drop;
+
+ /* check that it's our buffer */
+ if ((skb->data[0] & 0xe0) == 0x60)
+ lowpan_process_data(skb);
+
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
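The `(skb->data[0] & 0xe0) == 0x60` test matches the 011xxxxx IPHC dispatch pattern (LOWPAN_DISPATCH_IPHC in the header file below). The same check in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned char dispatch = 0x78;  /* some 011xxxxx byte */

        printf("%s\n", (dispatch & 0xe0) == 0x60 ? "IPHC" : "other");
        return 0;
    }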
+static int lowpan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_device *real_dev;
+ struct lowpan_dev_record *entry;
+
+ pr_debug("(%s)\n", __func__);
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+ /* find and hold real wpan device */
+ real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+
+ lowpan_dev_info(dev)->real_dev = real_dev;
+ mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
+ if (!entry) {
+ dev_put(real_dev);
+ lowpan_dev_info(dev)->real_dev = NULL;
+ return -ENOMEM;
+ }
+
+ entry->ldev = dev;
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ INIT_LIST_HEAD(&entry->list);
+ list_add_tail(&entry->list, &lowpan_devices);
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ register_netdevice(dev);
+
+ return 0;
+}
+
+static void lowpan_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
+ struct net_device *real_dev = lowpan_dev->real_dev;
+ struct lowpan_dev_record *entry;
+ struct lowpan_dev_record *tmp;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
+ list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
+ if (entry->ldev == dev) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+ mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
+
+ unregister_netdevice_queue(dev, head);
+
+ dev_put(real_dev);
+}
+
+static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
+ .kind = "lowpan",
+ .priv_size = sizeof(struct lowpan_dev_info),
+ .setup = lowpan_setup,
+ .newlink = lowpan_newlink,
+ .dellink = lowpan_dellink,
+ .validate = lowpan_validate,
+};
+
+static inline int __init lowpan_netlink_init(void)
+{
+ return rtnl_link_register(&lowpan_link_ops);
+}
+
+static inline void __init lowpan_netlink_fini(void)
+{
+ rtnl_link_unregister(&lowpan_link_ops);
+}
+
+static struct packet_type lowpan_packet_type = {
+ .type = __constant_htons(ETH_P_IEEE802154),
+ .func = lowpan_rcv,
+};
+
+static int __init lowpan_init_module(void)
+{
+ int err = 0;
+
+ pr_debug("(%s)\n", __func__);
+
+ err = lowpan_netlink_init();
+ if (err < 0)
+ goto out;
+
+ dev_add_pack(&lowpan_packet_type);
+out:
+ return err;
+}
+
+static void __exit lowpan_cleanup_module(void)
+{
+ pr_debug("(%s)\n", __func__);
+
+ lowpan_netlink_fini();
+
+ dev_remove_pack(&lowpan_packet_type);
+}
+
+module_init(lowpan_init_module);
+module_exit(lowpan_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("lowpan");
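Since the module registers the rtnl link kind "lowpan" and requires IFLA_LINK, a 6lowpan interface is presumably created on top of a WPAN device with iproute2, along the lines of `ip link add link wpan0 name lowpan0 type lowpan` (the interface names here are only examples).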
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
new file mode 100644
index 0000000..5d8cf80
--- /dev/null
+++ b/net/ieee802154/6lowpan.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2011, Siemens AG
+ * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+/*
+ * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* Jon's code is based on 6lowpan implementation for Contiki which is:
+ * Copyright (c) 2008, Swedish Institute of Computer Science.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __6LOWPAN_H__
+#define __6LOWPAN_H__
+
+/* we need to know the address length to manipulate it */
+#define IEEE802154_ALEN 8
+
+#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
+#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
+#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
+#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
+
+/*
+ * ipv6 address based on mac
+ * the second bit (Universal/Local) is flipped according to RFC 2464
+ */
+#define is_addr_mac_addr_based(a, m) \
+ ((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
+ (((a)->s6_addr[9]) == (m)[1]) && \
+ (((a)->s6_addr[10]) == (m)[2]) && \
+ (((a)->s6_addr[11]) == (m)[3]) && \
+ (((a)->s6_addr[12]) == (m)[4]) && \
+ (((a)->s6_addr[13]) == (m)[5]) && \
+ (((a)->s6_addr[14]) == (m)[6]) && \
+ (((a)->s6_addr[15]) == (m)[7]))
+
+/* ipv6 address is unspecified */
+#define is_addr_unspecified(a) \
+ ((((a)->s6_addr32[0]) == 0) && \
+ (((a)->s6_addr32[1]) == 0) && \
+ (((a)->s6_addr32[2]) == 0) && \
+ (((a)->s6_addr32[3]) == 0))
+
+/* compare ipv6 addresses prefixes */
+#define ipaddr_prefixcmp(addr1, addr2, length) \
+ (memcmp(addr1, addr2, length >> 3) == 0)
+
+/* link-local, i.e. FE80::/10 */
+#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE)
+
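The constant 0x80FE above is the fe80 prefix read back as a host-order 16-bit word on a little-endian machine; as written, the comparison therefore assumes such a host. A self-contained demonstration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char prefix[2] = { 0xfe, 0x80 };
        unsigned short w;

        memcpy(&w, prefix, sizeof(w));
        printf("0x%04x\n", w);  /* 0x80fe on little-endian hosts */
        return 0;
    }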
+/*
+ * check whether we can compress the IID to 16 bits;
+ * this is possible only for unicast addresses whose first 49 IID bits
+ * are zero.
+ */
+#define lowpan_is_iid_16_bit_compressable(a) \
+ ((((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ ((((a)->s6_addr[14]) & 0x80) == 0))
+
+/* multicast address */
+#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
+
+/* check whether the 112-bit gid of the multicast address is mappable to: */
+
+/* 9 bits, for FF02::1 (all nodes) and FF02::2 (all routers) addresses only. */
+#define lowpan_is_mcast_addr_compressable(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0) && \
+ ((((a)->s6_addr[15]) == 1) || (((a)->s6_addr[15]) == 2)))
+
+/* 48 bits, FFXX::00XX:XXXX:XXXX */
+#define lowpan_is_mcast_addr_compressable48(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr[10]) == 0))
+
+/* 32 bits, FFXX::00XX:XXXX */
+#define lowpan_is_mcast_addr_compressable32(a) \
+ ((((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr[12]) == 0))
+
+/* 8 bits, FF02::00XX */
+#define lowpan_is_mcast_addr_compressable8(a) \
+ ((((a)->s6_addr[1]) == 2) && \
+ (((a)->s6_addr16[1]) == 0) && \
+ (((a)->s6_addr16[2]) == 0) && \
+ (((a)->s6_addr16[3]) == 0) && \
+ (((a)->s6_addr16[4]) == 0) && \
+ (((a)->s6_addr16[5]) == 0) && \
+ (((a)->s6_addr16[6]) == 0) && \
+ (((a)->s6_addr[14]) == 0))
+
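Combined with is_addr_mcast(), the 8-bit rule above admits exactly the ff02::00XX groups. A standalone check for ff02::1 (a local stand-in replaces struct in6_addr; logic transcribed from the macros):

    #include <stdio.h>
    #include <string.h>

    struct my_in6 { unsigned char s6_addr[16]; };

    static int mcast_compressable8(const struct my_in6 *a)
    {
        static const unsigned char zeros[12];

        return a->s6_addr[0] == 0xff && a->s6_addr[1] == 2 &&
               !memcmp(&a->s6_addr[2], zeros, sizeof(zeros)) &&
               a->s6_addr[14] == 0;
    }

    int main(void)
    {
        struct my_in6 all_nodes = { { 0xff, 0x02 } };  /* ff02::1 */

        all_nodes.s6_addr[15] = 1;
        printf("%d\n", mcast_compressable8(&all_nodes));  /* 1 */
        return 0;
    }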
+#define lowpan_is_addr_broadcast(a) \
+ ((((a)[0]) == 0xFF) && \
+ (((a)[1]) == 0xFF) && \
+ (((a)[2]) == 0xFF) && \
+ (((a)[3]) == 0xFF) && \
+ (((a)[4]) == 0xFF) && \
+ (((a)[5]) == 0xFF) && \
+ (((a)[6]) == 0xFF) && \
+ (((a)[7]) == 0xFF))
+
+#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
+#define LOWPAN_DISPATCH_HC1 0x42 /* 01000010 = 66 */
+#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
+#define LOWPAN_DISPATCH_FRAG1 0xc0 /* 11000xxx */
+#define LOWPAN_DISPATCH_FRAGN 0xe0 /* 11100xxx */
+
+/*
+ * Values of fields within the IPHC encoding first byte
+ * (C stands for compressed and I for inline)
+ */
+#define LOWPAN_IPHC_TF 0x18
+
+#define LOWPAN_IPHC_FL_C 0x10
+#define LOWPAN_IPHC_TC_C 0x08
+#define LOWPAN_IPHC_NH_C 0x04
+#define LOWPAN_IPHC_TTL_1 0x01
+#define LOWPAN_IPHC_TTL_64 0x02
+#define LOWPAN_IPHC_TTL_255 0x03
+#define LOWPAN_IPHC_TTL_I 0x00
+
+
+/* Values of fields within the IPHC encoding second byte */
+#define LOWPAN_IPHC_CID 0x80
+
+#define LOWPAN_IPHC_SAC 0x40
+#define LOWPAN_IPHC_SAM_00 0x00
+#define LOWPAN_IPHC_SAM_01 0x10
+#define LOWPAN_IPHC_SAM_10 0x20
+#define LOWPAN_IPHC_SAM 0x30
+
+#define LOWPAN_IPHC_SAM_BIT 4
+
+#define LOWPAN_IPHC_M 0x08
+#define LOWPAN_IPHC_DAC 0x04
+#define LOWPAN_IPHC_DAM_00 0x00
+#define LOWPAN_IPHC_DAM_01 0x01
+#define LOWPAN_IPHC_DAM_10 0x02
+#define LOWPAN_IPHC_DAM_11 0x03
+
+#define LOWPAN_IPHC_DAM_BIT 0
+/*
+ * LOWPAN_UDP encoding (works together with IPHC)
+ */
+#define LOWPAN_NHC_UDP_MASK 0xF8
+#define LOWPAN_NHC_UDP_ID 0xF0
+#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
+#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
+
+/* values for port compression, _with checksum_, i.e. bit 5 set to 0 */
+#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
+#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
+ dest = 0xF0 + 8 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
+ dest = 16 bit inline */
+#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+
+#endif /* __6LOWPAN_H__ */
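For the _11 case above, both ports must fall in the 0xf0b0..0xf0bf range; the single inline byte then carries the source port's low nibble in its high half and the destination's in its low half. A worked round trip:

    #include <stdio.h>

    int main(void)
    {
        unsigned short src = 0xf0b4, dst = 0xf0ba;
        unsigned char nibbles =
            (unsigned char)(((src & 0x0f) << 4) | (dst & 0x0f));

        printf("encoded byte: 0x%02x\n", nibbles);  /* 0x4a */
        printf("decoded: 0x%04x 0x%04x\n",
               0xf0b0 | (nibbles >> 4), 0xf0b0 | (nibbles & 0x0f));
        return 0;
    }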
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 1c1de97..7dee650 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -10,3 +10,9 @@ config IEEE802154
Say Y here to compile LR-WPAN support into the kernel or say M to
compile it as modules.
+
+config IEEE802154_6LOWPAN
+ tristate "6lowpan support over IEEE 802.15.4"
+ depends on IEEE802154 && IPV6
+ ---help---
+ IPv6 compression over IEEE 802.15.4.
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index 5761185..d7716d6 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,3 +1,5 @@
-obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
-ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
-af_802154-y := af_ieee802154.o raw.o dgram.o
+obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
+obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
+
+ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
+af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index 71ee110..adaf462 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -30,6 +30,7 @@
#include <net/genetlink.h>
#include <net/sock.h>
#include <linux/nl802154.h>
+#include <linux/export.h>
#include <net/af_ieee802154.h>
#include <net/nl802154.h>
#include <net/ieee802154.h>
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index dd2b947..1b5096a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -893,7 +893,7 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(inet_ioctl);
#ifdef CONFIG_COMPAT
-int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err = -ENOIOCTLCMD;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2c2a98e..86f3b88 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -476,7 +476,7 @@ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
doi = doi_def->doi;
doi_type = doi_def->type;
- if (doi_def == NULL || doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
+ if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
goto doi_add_return;
for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
switch (doi_def->tags[iter]) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index bc19bd0..c6b5092 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -258,7 +258,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
ip_mc_up(in_dev);
/* we can receive as soon as ip_ptr is set -- do this last */
- rcu_assign_pointer(dev->ip_ptr, in_dev);
+ RCU_INIT_POINTER(dev->ip_ptr, in_dev);
out:
return in_dev;
out_kfree:
@@ -291,7 +291,7 @@ static void inetdev_destroy(struct in_device *in_dev)
inet_free_ifa(ifa);
}
- rcu_assign_pointer(dev->ip_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip_ptr, NULL);
devinet_sysctl_unregister(in_dev);
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1175,7 +1175,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
printk(KERN_DEBUG "inetdev_event: bug\n");
- rcu_assign_pointer(dev->ip_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index a53bb1b..46339ba 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp.h>
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index de9e297..37b6711 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -73,6 +73,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -204,7 +205,7 @@ static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
return (struct tnode *)(parent & ~NODE_TYPE_MASK);
}
-/* Same as rcu_assign_pointer
+/* Same as RCU_INIT_POINTER
* but that macro() assumes that value is a pointer.
*/
static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
@@ -528,7 +529,7 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *
if (n)
node_set_parent(n, tn);
- rcu_assign_pointer(tn->child[i], n);
+ RCU_INIT_POINTER(tn->child[i], n);
}
#define MAX_WORK 10
@@ -1014,7 +1015,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
tp = node_parent((struct rt_trie_node *) tn);
if (!tp)
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
if (!tp)
@@ -1026,7 +1027,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
if (IS_TNODE(tn))
tn = (struct tnode *)resize(t, (struct tnode *)tn);
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
}
@@ -1163,7 +1164,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
put_child(t, (struct tnode *)tp, cindex,
(struct rt_trie_node *)tn);
} else {
- rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
+ RCU_INIT_POINTER(t->trie, (struct rt_trie_node *)tn);
tp = tn;
}
}
@@ -1621,7 +1622,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
put_child(t, (struct tnode *)tp, cindex, NULL);
trie_rebalance(t, tp);
} else
- rcu_assign_pointer(t->trie, NULL);
+ RCU_INIT_POINTER(t->trie, NULL);
free_leaf(l);
}
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index dbfc21d..8cb1ebb 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -34,7 +34,7 @@ int gre_add_protocol(const struct gre_protocol *proto, u8 version)
if (gre_proto[version])
goto err_out_unlock;
- rcu_assign_pointer(gre_proto[version], proto);
+ RCU_INIT_POINTER(gre_proto[version], proto);
spin_unlock(&gre_proto_lock);
return 0;
@@ -54,7 +54,7 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
if (rcu_dereference_protected(gre_proto[version],
lockdep_is_held(&gre_proto_lock)) != proto)
goto err_out_unlock;
- rcu_assign_pointer(gre_proto[version], NULL);
+ RCU_INIT_POINTER(gre_proto[version], NULL);
spin_unlock(&gre_proto_lock);
synchronize_rcu();
return 0;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 23ef31b..ab188ae 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1152,10 +1152,9 @@ static int __net_init icmp_sk_init(struct net *net)
net->ipv4.icmp_sk[i] = sk;
/* Enough space for 2 64K ICMP packets, including
- * sk_buff struct overhead.
+ * sk_buff/skb_shared_info struct overhead.
*/
- sk->sk_sndbuf =
- (2 * ((64 * 1024) + sizeof(struct sk_buff)));
+ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
/*
* Speedup sock_wfree()
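At the time of this series, SKB_TRUESIZE(X) expands to roughly X + SKB_DATA_ALIGN(sizeof(struct sk_buff)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), so the new expression also charges the shared-info block that the old `2 * ((64 * 1024) + sizeof(struct sk_buff))` formula left out.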
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d577199..c7472ef 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1009,7 +1009,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
We will get multicast token leakage, when IFF_MULTICAST
- is changed. This check should be done in dev->set_multicast_list
+ is changed. This check should be done in ndo_set_rx_mode
routine. Something sort of:
if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
--ANK
@@ -1242,7 +1242,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
- rcu_assign_pointer(in_dev->mc_list, im);
+ RCU_INIT_POINTER(in_dev->mc_list, im);
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im->multiaddr);
@@ -1813,7 +1813,7 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
- rcu_assign_pointer(inet->mc_list, iml);
+ RCU_INIT_POINTER(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
@@ -1835,7 +1835,7 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, psf->sl_count, psf->sl_addr, 0);
- rcu_assign_pointer(iml->sflist, NULL);
+ RCU_INIT_POINTER(iml->sflist, NULL);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psf, rcu);
@@ -2000,7 +2000,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
}
- rcu_assign_pointer(pmc->sflist, newpsl);
+ RCU_INIT_POINTER(pmc->sflist, newpsl);
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
@@ -2103,7 +2103,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
- rcu_assign_pointer(pmc->sflist, newpsl);
+ RCU_INIT_POINTER(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 389a2e6..f5e2bda 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,9 @@ static int inet_csk_diag_fill(struct sock *sk,
icsk->icsk_ca_ops->name);
}
+ if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -130,6 +133,8 @@ static int inet_csk_diag_fill(struct sock *sk,
&np->rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&np->daddr);
+ if (ext & (1 << (INET_DIAG_TOS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, np->tclass);
}
#endif
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index ef7ae60..cc280a3 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -244,11 +244,11 @@ static void lro_add_frags(struct net_lro_desc *lro_desc,
skb->truesize += truesize;
skb_frags[0].page_offset += hlen;
- skb_frags[0].size -= hlen;
+ skb_frag_size_sub(&skb_frags[0], hlen);
while (tcp_data_len > 0) {
*(lro_desc->next_frag) = *skb_frags;
- tcp_data_len -= skb_frags->size;
+ tcp_data_len -= skb_frag_size(skb_frags);
lro_desc->next_frag++;
skb_frags++;
skb_shinfo(skb)->nr_frags++;
@@ -400,14 +400,14 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
skb_frags = skb_shinfo(skb)->frags;
while (data_len > 0) {
*skb_frags = *frags;
- data_len -= frags->size;
+ data_len -= skb_frag_size(frags);
skb_frags++;
frags++;
skb_shinfo(skb)->nr_frags++;
}
skb_shinfo(skb)->frags[0].page_offset += hdr_len;
- skb_shinfo(skb)->frags[0].size -= hdr_len;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
skb->ip_summed = ip_summed;
skb->csum = sum;
@@ -433,7 +433,7 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
if (!lro_mgr->get_frag_header ||
lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
(void *)&tcph, &flags, priv)) {
- mac_hdr = page_address(frags->page) + frags->page_offset;
+ mac_hdr = skb_frag_address(frags);
goto out1;
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 3c8dfa1..89168c6 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
@@ -183,6 +184,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
tw->tw_daddr = inet->inet_daddr;
tw->tw_rcv_saddr = inet->inet_rcv_saddr;
tw->tw_bound_dev_if = sk->sk_bound_dev_if;
+ tw->tw_tos = inet->tos;
tw->tw_num = inet->inet_num;
tw->tw_state = TCP_TIME_WAIT;
tw->tw_substate = state;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 0e0ab98..fdaabf2 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,8 +599,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
@@ -682,6 +682,42 @@ int ip_defrag(struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_defrag);
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+ const struct iphdr *iph;
+ u32 len;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return skb;
+
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return skb;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return skb;
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ return skb;
+ iph = ip_hdr(skb);
+ len = ntohs(iph->tot_len);
+ if (skb->len < len || len < (iph->ihl * 4))
+ return skb;
+
+ if (ip_is_fragment(ip_hdr(skb))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb) {
+ if (pskb_trim_rcsum(skb, len))
+ return skb;
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (ip_defrag(skb, user))
+ return NULL;
+ skb->rxhash = 0;
+ }
+ }
+ return skb;
+}
+EXPORT_SYMBOL(ip_check_defrag);
+
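The helper's contract, as the code reads: non-IP and non-fragment skbs come back unchanged, a fragment absorbed into the reassembly queue yields NULL, and the arrival of the last fragment yields the complete datagram. A hedged caller sketch (kernel context assumed; deliver() and the IP_DEFRAG_AF_PACKET user id are illustrative):

    static void rx_one(struct sk_buff *skb)
    {
        /* NULL means: fragment queued, datagram still incomplete */
        skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
        if (!skb)
            return;

        /* skb is a non-fragment or a fully reassembled datagram */
        deliver(skb);
    }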
#ifdef CONFIG_SYSCTL
static int zero;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7bb94c..d55110e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -835,8 +835,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
- if (max_headroom > dev->needed_headroom)
- dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
dev->stats.tx_dropped++;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8c65633..0bc95f3 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -989,13 +989,13 @@ alloc_new_skb:
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
- if (page != frag->page) {
+ if (page != skb_frag_page(frag)) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
- get_page(page);
skb_fill_page_desc(skb, i, page, off, 0);
+ skb_frag_ref(skb, i);
frag = &skb_shinfo(skb)->frags[i];
}
} else if (i < MAX_SKB_FRAGS) {
@@ -1015,12 +1015,13 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
- if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+ if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag),
+ offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
cork->off += copy;
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
@@ -1229,7 +1230,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
- skb_shinfo(skb)->frags[i-1].size += len;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
@@ -1465,7 +1466,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
* structure to pass arguments.
*/
void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- struct ip_reply_arg *arg, unsigned int len)
+ const struct ip_reply_arg *arg, unsigned int len)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_options_data replyopts;
@@ -1488,7 +1489,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
}
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
- RT_TOS(ip_hdr(skb)->tos),
+ RT_TOS(arg->tos),
RT_SCOPE_UNIVERSE, sk->sk_protocol,
ip_reply_arg_flowi_flags(arg),
daddr, rt->rt_spec_dst,
@@ -1505,7 +1506,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
with locally disabled BH and that sk cannot be already spinlocked.
*/
bh_lock_sock(sk);
- inet->tos = ip_hdr(skb)->tos;
+ inet->tos = arg->tos;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8905e92..09ff51b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -33,6 +33,7 @@
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
@@ -578,8 +579,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
- val &= ~3;
- val |= inet->tos & 3;
+ val &= ~INET_ECN_MASK;
+ val |= inet->tos & INET_ECN_MASK;
}
if (inet->tos != val) {
inet->tos = val;
@@ -961,7 +962,7 @@ mc_msf_out:
break;
case IP_TRANSPARENT:
- if (!capable(CAP_NET_ADMIN)) {
+ if (!!val && !capable(CAP_NET_RAW) && !capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 472a8c4..0da2afc 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -54,6 +54,7 @@
#include <linux/delay.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 378b20b..065effd 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -231,7 +231,7 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -241,8 +241,8 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -301,7 +301,7 @@ static void ipip_tunnel_uninit(struct net_device *dev)
struct ipip_net *ipn = net_generic(net, ipip_net_id);
if (dev == ipn->fb_tunnel_dev)
- rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+ RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
else
ipip_tunnel_unlink(ipn, netdev_priv(dev));
dev_put(dev);
@@ -791,7 +791,7 @@ static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
return -ENOMEM;
dev_hold(dev);
- rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ RCU_INIT_POINTER(ipn->tunnels_wc[0], tunnel);
return 0;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 58e8791..76a7f07 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -61,6 +61,7 @@
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
+#include <linux/export.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>
@@ -1176,7 +1177,7 @@ static void mrtsock_destruct(struct sock *sk)
ipmr_for_each_table(mrt, net) {
if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
- rcu_assign_pointer(mrt->mroute_sk, NULL);
+ RCU_INIT_POINTER(mrt->mroute_sk, NULL);
mroute_clean_tables(mrt);
}
}
@@ -1203,7 +1204,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
+ if (sk != rcu_access_pointer(mrt->mroute_sk) &&
!capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -1224,13 +1225,13 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
- rcu_assign_pointer(mrt->mroute_sk, sk);
+ RCU_INIT_POINTER(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != rcu_dereference_raw(mrt->mroute_sk))
+ if (sk != rcu_access_pointer(mrt->mroute_sk))
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 929b27b..9899619 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -5,6 +5,7 @@
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
+#include <linux/export.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index db8d22d..a639967 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -395,7 +395,6 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
config = clusterip_config_init(cipinfo,
e->ip.dst.s_addr, dev);
if (!config) {
- pr_info("cannot allocate config\n");
dev_put(dev);
return -ENOMEM;
}
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 446e0f4..b550815 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -135,10 +135,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
* due to slab allocator restrictions */
n = max(size, nlbufsiz);
- skb = alloc_skb(n, GFP_ATOMIC);
+ skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
if (!skb) {
- pr_debug("cannot alloc whole buffer %ub!\n", n);
-
if (n > size) {
/* try to allocate only as much as we need for
* current packet */
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 5585980..9682b36 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -21,6 +21,7 @@
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <linux/rculist_nulls.h>
+#include <linux/export.h>
struct ct_iter_state {
struct seq_net_private p;
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 703f366f..7b22382 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -70,14 +70,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_amanda_fini(void)
{
- rcu_assign_pointer(nf_nat_amanda_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_amanda_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_amanda_init(void)
{
BUG_ON(nf_nat_amanda_hook != NULL);
- rcu_assign_pointer(nf_nat_amanda_hook, help);
+ RCU_INIT_POINTER(nf_nat_amanda_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3346de5..447bc5c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -514,7 +514,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
ret = -EBUSY;
goto out;
}
- rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
+ RCU_INIT_POINTER(nf_nat_protos[proto->protonum], proto);
out:
spin_unlock_bh(&nf_nat_lock);
return ret;
@@ -525,7 +525,7 @@ EXPORT_SYMBOL(nf_nat_protocol_register);
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
spin_lock_bh(&nf_nat_lock);
- rcu_assign_pointer(nf_nat_protos[proto->protonum],
+ RCU_INIT_POINTER(nf_nat_protos[proto->protonum],
&nf_nat_unknown_protocol);
spin_unlock_bh(&nf_nat_lock);
synchronize_rcu();
@@ -736,10 +736,10 @@ static int __init nf_nat_init(void)
/* Sew in builtin protocols. */
spin_lock_bh(&nf_nat_lock);
for (i = 0; i < MAX_IP_NAT_PROTO; i++)
- rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
- rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
+ RCU_INIT_POINTER(nf_nat_protos[i], &nf_nat_unknown_protocol);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
+ RCU_INIT_POINTER(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
spin_unlock_bh(&nf_nat_lock);
/* Initialize fake conntrack so that NAT will skip it */
@@ -748,12 +748,12 @@ static int __init nf_nat_init(void)
l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
BUG_ON(nf_nat_seq_adjust_hook != NULL);
- rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
+ RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
- rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
+ RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
BUG_ON(nf_ct_nat_offset != NULL);
- rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
+ RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
return 0;
cleanup_extend:
@@ -766,9 +766,9 @@ static void __exit nf_nat_cleanup(void)
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_l3proto_put(l3proto);
nf_ct_extend_unregister(&nat_extend);
- rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
- rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
- rcu_assign_pointer(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
+ RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
+ RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index dc73abb..e462a95 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -113,14 +113,14 @@ out:
static void __exit nf_nat_ftp_fini(void)
{
- rcu_assign_pointer(nf_nat_ftp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_ftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_ftp_init(void)
{
BUG_ON(nf_nat_ftp_hook != NULL);
- rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
+ RCU_INIT_POINTER(nf_nat_ftp_hook, nf_nat_ftp);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 790f316..b9a1136 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -581,30 +581,30 @@ static int __init init(void)
BUG_ON(nat_callforwarding_hook != NULL);
BUG_ON(nat_q931_hook != NULL);
- rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
- rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
- rcu_assign_pointer(set_sig_addr_hook, set_sig_addr);
- rcu_assign_pointer(set_ras_addr_hook, set_ras_addr);
- rcu_assign_pointer(nat_rtp_rtcp_hook, nat_rtp_rtcp);
- rcu_assign_pointer(nat_t120_hook, nat_t120);
- rcu_assign_pointer(nat_h245_hook, nat_h245);
- rcu_assign_pointer(nat_callforwarding_hook, nat_callforwarding);
- rcu_assign_pointer(nat_q931_hook, nat_q931);
+ RCU_INIT_POINTER(set_h245_addr_hook, set_h245_addr);
+ RCU_INIT_POINTER(set_h225_addr_hook, set_h225_addr);
+ RCU_INIT_POINTER(set_sig_addr_hook, set_sig_addr);
+ RCU_INIT_POINTER(set_ras_addr_hook, set_ras_addr);
+ RCU_INIT_POINTER(nat_rtp_rtcp_hook, nat_rtp_rtcp);
+ RCU_INIT_POINTER(nat_t120_hook, nat_t120);
+ RCU_INIT_POINTER(nat_h245_hook, nat_h245);
+ RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding);
+ RCU_INIT_POINTER(nat_q931_hook, nat_q931);
return 0;
}
/****************************************************************************/
static void __exit fini(void)
{
- rcu_assign_pointer(set_h245_addr_hook, NULL);
- rcu_assign_pointer(set_h225_addr_hook, NULL);
- rcu_assign_pointer(set_sig_addr_hook, NULL);
- rcu_assign_pointer(set_ras_addr_hook, NULL);
- rcu_assign_pointer(nat_rtp_rtcp_hook, NULL);
- rcu_assign_pointer(nat_t120_hook, NULL);
- rcu_assign_pointer(nat_h245_hook, NULL);
- rcu_assign_pointer(nat_callforwarding_hook, NULL);
- rcu_assign_pointer(nat_q931_hook, NULL);
+ RCU_INIT_POINTER(set_h245_addr_hook, NULL);
+ RCU_INIT_POINTER(set_h225_addr_hook, NULL);
+ RCU_INIT_POINTER(set_sig_addr_hook, NULL);
+ RCU_INIT_POINTER(set_ras_addr_hook, NULL);
+ RCU_INIT_POINTER(nat_rtp_rtcp_hook, NULL);
+ RCU_INIT_POINTER(nat_t120_hook, NULL);
+ RCU_INIT_POINTER(nat_h245_hook, NULL);
+ RCU_INIT_POINTER(nat_callforwarding_hook, NULL);
+ RCU_INIT_POINTER(nat_q931_hook, NULL);
synchronize_rcu();
}
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 535e1a8..979ae16 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -75,14 +75,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_irc_fini(void)
{
- rcu_assign_pointer(nf_nat_irc_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_irc_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_irc_init(void)
{
BUG_ON(nf_nat_irc_hook != NULL);
- rcu_assign_pointer(nf_nat_irc_hook, help);
+ RCU_INIT_POINTER(nf_nat_irc_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 4c06003..3e8284b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -282,25 +282,25 @@ static int __init nf_nat_helper_pptp_init(void)
nf_nat_need_gre();
BUG_ON(nf_nat_pptp_hook_outbound != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
BUG_ON(nf_nat_pptp_hook_inbound != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
return 0;
}
static void __exit nf_nat_helper_pptp_fini(void)
{
- rcu_assign_pointer(nf_nat_pptp_hook_expectfn, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_inbound, NULL);
- rcu_assign_pointer(nf_nat_pptp_hook_outbound, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_expectfn, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_exp_gre, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_inbound, NULL);
+ RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, NULL);
synchronize_rcu();
}
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index f52d41e..a3d9976 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -12,6 +12,7 @@
#include <linux/ip.h>
#include <linux/netfilter.h>
+#include <linux/export.h>
#include <net/secure_seq.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 5744c3e..9f4dc12 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/ip.h>
#include <linux/icmp.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 756331d..bd5a80a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/sctp.h>
+#include <linux/module.h>
#include <net/sctp/checksum.h>
#include <net/netfilter/nf_nat_protocol.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index aa460a5..0d67bb8 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <linux/ip.h>
#include <linux/tcp.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index dfe65c7..0b1b860 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -7,6 +7,7 @@
*/
#include <linux/types.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index 3cc8c8a..f83ef23 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -13,6 +13,7 @@
#include <linux/udp.h>
#include <linux/netfilter.h>
+#include <linux/module.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index e40cf78..78844d9 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -528,13 +528,13 @@ err1:
static void __exit nf_nat_sip_fini(void)
{
- rcu_assign_pointer(nf_nat_sip_hook, NULL);
- rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, NULL);
- rcu_assign_pointer(nf_nat_sip_expect_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_addr_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_port_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_session_hook, NULL);
- rcu_assign_pointer(nf_nat_sdp_media_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
synchronize_rcu();
}
@@ -547,13 +547,13 @@ static int __init nf_nat_sip_init(void)
BUG_ON(nf_nat_sdp_port_hook != NULL);
BUG_ON(nf_nat_sdp_session_hook != NULL);
BUG_ON(nf_nat_sdp_media_hook != NULL);
- rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
- rcu_assign_pointer(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
- rcu_assign_pointer(nf_nat_sip_expect_hook, ip_nat_sip_expect);
- rcu_assign_pointer(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
- rcu_assign_pointer(nf_nat_sdp_port_hook, ip_nat_sdp_port);
- rcu_assign_pointer(nf_nat_sdp_session_hook, ip_nat_sdp_session);
- rcu_assign_pointer(nf_nat_sdp_media_hook, ip_nat_sdp_media);
+ RCU_INIT_POINTER(nf_nat_sip_hook, ip_nat_sip);
+ RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, ip_nat_sip_seq_adjust);
+ RCU_INIT_POINTER(nf_nat_sip_expect_hook, ip_nat_sip_expect);
+ RCU_INIT_POINTER(nf_nat_sdp_addr_hook, ip_nat_sdp_addr);
+ RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port);
+ RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session);
+ RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 076b7c8..2133c30 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -400,11 +400,8 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
*len = 0;
*octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
- if (*octets == NULL) {
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
+ if (*octets == NULL)
return 0;
- }
ptr = *octets;
while (ctx->pointer < eoc) {
@@ -451,11 +448,8 @@ static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
return 0;
*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
- if (*oid == NULL) {
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
+ if (*oid == NULL)
return 0;
- }
optr = *oid;
@@ -728,8 +722,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
if (*obj == NULL) {
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
(*obj)->syntax.l[0] = l;
@@ -744,8 +736,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(p);
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.c, p, len);
@@ -759,8 +749,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
*obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
if (*obj == NULL) {
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
if (!asn1_null_decode(ctx, end)) {
@@ -780,8 +768,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(lp);
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.ul, lp, len);
@@ -801,8 +787,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
if (*obj == NULL) {
kfree(p);
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
memcpy((*obj)->syntax.uc, p, len);
@@ -819,8 +803,6 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
*obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
if (*obj == NULL) {
kfree(id);
- if (net_ratelimit())
- pr_notice("OOM in bsalg (%d)\n", __LINE__);
return 0;
}
(*obj)->syntax.ul[0] = ul;
@@ -1310,7 +1292,7 @@ static int __init nf_nat_snmp_basic_init(void)
int ret = 0;
BUG_ON(nf_nat_snmp_hook != NULL);
- rcu_assign_pointer(nf_nat_snmp_hook, help);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, help);
ret = nf_conntrack_helper_register(&snmp_trap_helper);
if (ret < 0) {
@@ -1322,7 +1304,7 @@ static int __init nf_nat_snmp_basic_init(void)
static void __exit nf_nat_snmp_basic_fini(void)
{
- rcu_assign_pointer(nf_nat_snmp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
nf_conntrack_helper_unregister(&snmp_trap_helper);
}
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index a6e606e..9290048 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -284,7 +284,7 @@ static int __init nf_nat_standalone_init(void)
#ifdef CONFIG_XFRM
BUG_ON(ip_nat_decode_session != NULL);
- rcu_assign_pointer(ip_nat_decode_session, nat_decode_session);
+ RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
#endif
ret = nf_nat_rule_init();
if (ret < 0) {
@@ -302,7 +302,7 @@ static int __init nf_nat_standalone_init(void)
nf_nat_rule_cleanup();
cleanup_decode_session:
#ifdef CONFIG_XFRM
- rcu_assign_pointer(ip_nat_decode_session, NULL);
+ RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
return ret;
@@ -313,7 +313,7 @@ static void __exit nf_nat_standalone_fini(void)
nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
nf_nat_rule_cleanup();
#ifdef CONFIG_XFRM
- rcu_assign_pointer(ip_nat_decode_session, NULL);
+ RCU_INIT_POINTER(ip_nat_decode_session, NULL);
synchronize_net();
#endif
/* Conntrack caches are unregistered in nf_conntrack_cleanup */
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 7274a43..a2901bf 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -36,14 +36,14 @@ static unsigned int help(struct sk_buff *skb,
static void __exit nf_nat_tftp_fini(void)
{
- rcu_assign_pointer(nf_nat_tftp_hook, NULL);
+ RCU_INIT_POINTER(nf_nat_tftp_hook, NULL);
synchronize_rcu();
}
static int __init nf_nat_tftp_init(void)
{
BUG_ON(nf_nat_tftp_hook != NULL);
- rcu_assign_pointer(nf_nat_tftp_hook, help);
+ RCU_INIT_POINTER(nf_nat_tftp_hook, help);
return 0;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 39b403f..a06f73f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -39,6 +39,7 @@
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/ping.h>
#include <net/udp.h>
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4bfad5d..466ea8b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -42,6 +42,7 @@
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/raw.h>
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 61714bd..007e2eb 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -48,6 +48,7 @@
#include <linux/errno.h>
#include <linux/aio.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/sockios.h>
#include <linux/socket.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 075212e..155138d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -120,7 +120,6 @@
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
-static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
@@ -324,7 +323,7 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
struct rtable *r = NULL;
for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rcu_dereference_raw(rt_hash_table[st->bucket].chain))
+ if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
continue;
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
@@ -350,7 +349,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
do {
if (--st->bucket < 0)
return NULL;
- } while (!rcu_dereference_raw(rt_hash_table[st->bucket].chain));
+ } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
rcu_read_lock_bh();
r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
}
@@ -761,7 +760,7 @@ static void rt_do_flush(struct net *net, int process_context)
if (process_context && need_resched())
cond_resched();
- rth = rcu_dereference_raw(rt_hash_table[i].chain);
+ rth = rcu_access_pointer(rt_hash_table[i].chain);
if (!rth)
continue;
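
These hunks change only the NULL tests: rcu_access_pointer() fetches the pointer's value for comparison without the lockdep machinery of a full dereference, and needs no read-side critical section because the pointee is never touched. A sketch under those assumptions (chain/scan/process are stand-in names):

	static struct rtable __rcu *chain;

	static void scan(void)
	{
		/* Legal outside rcu_read_lock(): the value is only
		 * compared against NULL, never followed. */
		if (!rcu_access_pointer(chain))
			return;

		rcu_read_lock_bh();
		/* Actually following the pointer still requires the full
		 * primitive inside a read-side critical section. */
		process(rcu_dereference_bh(chain));
		rcu_read_unlock_bh();
	}
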
@@ -1309,7 +1308,12 @@ static void rt_del(unsigned hash, struct rtable *rt)
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
__be32 saddr, struct net_device *dev)
{
+ int s, i;
struct in_device *in_dev = __in_dev_get_rcu(dev);
+ struct rtable *rt;
+ __be32 skeys[2] = { saddr, 0 };
+ int ikeys[2] = { dev->ifindex, 0 };
+ struct flowi4 fl4;
struct inet_peer *peer;
struct net *net;
@@ -1332,13 +1336,34 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- peer = inet_getpeer_v4(daddr, 1);
- if (peer) {
- peer->redirect_learned.a4 = new_gw;
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = daddr;
+ for (s = 0; s < 2; s++) {
+ for (i = 0; i < 2; i++) {
+ fl4.flowi4_oif = ikeys[i];
+ fl4.saddr = skeys[s];
+ rt = __ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt))
+ continue;
- inet_putpeer(peer);
+ if (rt->dst.error || rt->dst.dev != dev ||
+ rt->rt_gateway != old_gw) {
+ ip_rt_put(rt);
+ continue;
+ }
+
+ if (!rt->peer)
+ rt_bind_peer(rt, rt->rt_dst, 1);
+
+ peer = rt->peer;
+ if (peer) {
+ peer->redirect_learned.a4 = new_gw;
+ atomic_inc(&__rt_peer_genid);
+ }
- atomic_inc(&__rt_peer_genid);
+ ip_rt_put(rt);
+ return;
+ }
}
return;
@@ -1568,11 +1593,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
est_mtu = mtu;
peer->pmtu_learned = mtu;
peer->pmtu_expires = pmtu_expires;
+ atomic_inc(&__rt_peer_genid);
}
inet_putpeer(peer);
-
- atomic_inc(&__rt_peer_genid);
}
return est_mtu ? : new_mtu;
}
@@ -3121,13 +3145,6 @@ static ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
{
- .procname = "gc_interval",
- .data = &ip_rt_gc_interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
.procname = "redirect_load",
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 3bc5c8f..90f6544 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -15,6 +15,7 @@
#include <linux/random.h>
#include <linux/cryptohash.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <net/tcp.h>
#include <net/route.h>
@@ -265,7 +266,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt)
{
struct tcp_options_received tcp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_request_sock *ireq;
struct tcp_request_sock *treq;
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 46febca..34f5db1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -374,7 +374,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask;
struct sock *sk = sock->sk;
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == TCP_LISTEN)
@@ -524,11 +524,11 @@ EXPORT_SYMBOL(tcp_ioctl);
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
}
-static inline int forced_push(struct tcp_sock *tp)
+static inline int forced_push(const struct tcp_sock *tp)
{
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
@@ -540,7 +540,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
skb->csum = 0;
tcb->seq = tcb->end_seq = tp->write_seq;
- tcb->flags = TCPHDR_ACK;
+ tcb->tcp_flags = TCPHDR_ACK;
tcb->sacked = 0;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
@@ -813,7 +813,7 @@ new_segment:
goto wait_for_memory;
if (can_coalesce) {
- skb_shinfo(skb)->frags[i - 1].size += copy;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, copy);
@@ -830,7 +830,7 @@ new_segment:
skb_shinfo(skb)->gso_segs = 0;
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
copied += copy;
poffset += copy;
@@ -891,9 +891,9 @@ EXPORT_SYMBOL(tcp_sendpage);
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk, int sg)
+static inline int select_size(const struct sock *sk, int sg)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
if (sg) {
@@ -1058,8 +1058,7 @@ new_segment:
/* Update the skb. */
if (merge) {
- skb_shinfo(skb)->frags[i - 1].size +=
- copy;
+ skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
skb_fill_page_desc(skb, i, page, off, copy);
if (TCP_PAGE(sk)) {
@@ -1074,7 +1073,7 @@ new_segment:
}
if (!copied)
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
@@ -1194,13 +1193,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
struct tcp_sock *tp = tcp_sk(sk);
int time_to_ack = 0;
-#if TCP_DEBUG
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
"cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
-#endif
if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2409,7 +2406,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen)
{
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
if (level != SOL_TCP)
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
@@ -2431,9 +2428,9 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif
/* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
+void tcp_get_info(const struct sock *sk, struct tcp_info *info)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 now = tcp_time_stamp;
@@ -2455,8 +2452,10 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
}
- if (tp->ecn_flags&TCP_ECN_OK)
+ if (tp->ecn_flags & TCP_ECN_OK)
info->tcpi_options |= TCPI_OPT_ECN;
+ if (tp->ecn_flags & TCP_ECN_SEEN)
+ info->tcpi_options |= TCPI_OPT_ECN_SEEN;
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
@@ -2857,26 +2856,25 @@ EXPORT_SYMBOL(tcp_gro_complete);
#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
-static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
+static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
{
int cpu;
+
for_each_possible_cpu(cpu) {
- struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
- if (p) {
- if (p->md5_desc.tfm)
- crypto_free_hash(p->md5_desc.tfm);
- kfree(p);
- }
+ struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
+
+ if (p->md5_desc.tfm)
+ crypto_free_hash(p->md5_desc.tfm);
}
free_percpu(pool);
}
void tcp_free_md5sig_pool(void)
{
- struct tcp_md5sig_pool * __percpu *pool = NULL;
+ struct tcp_md5sig_pool __percpu *pool = NULL;
spin_lock_bh(&tcp_md5sig_pool_lock);
if (--tcp_md5sig_users == 0) {
@@ -2889,30 +2887,24 @@ void tcp_free_md5sig_pool(void)
}
EXPORT_SYMBOL(tcp_free_md5sig_pool);
-static struct tcp_md5sig_pool * __percpu *
+static struct tcp_md5sig_pool __percpu *
__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
- struct tcp_md5sig_pool * __percpu *pool;
+ struct tcp_md5sig_pool __percpu *pool;
- pool = alloc_percpu(struct tcp_md5sig_pool *);
+ pool = alloc_percpu(struct tcp_md5sig_pool);
if (!pool)
return NULL;
for_each_possible_cpu(cpu) {
- struct tcp_md5sig_pool *p;
struct crypto_hash *hash;
- p = kzalloc(sizeof(*p), sk->sk_allocation);
- if (!p)
- goto out_free;
- *per_cpu_ptr(pool, cpu) = p;
-
hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (!hash || IS_ERR(hash))
goto out_free;
- p->md5_desc.tfm = hash;
+ per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
}
return pool;
out_free:
@@ -2920,9 +2912,9 @@ out_free:
return NULL;
}
-struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
+struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
{
- struct tcp_md5sig_pool * __percpu *pool;
+ struct tcp_md5sig_pool __percpu *pool;
int alloc = 0;
retry:
@@ -2941,7 +2933,7 @@ retry:
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
- struct tcp_md5sig_pool * __percpu *p;
+ struct tcp_md5sig_pool __percpu *p;
p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
@@ -2974,7 +2966,7 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
*/
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
{
- struct tcp_md5sig_pool * __percpu *p;
+ struct tcp_md5sig_pool __percpu *p;
local_bh_disable();
@@ -2985,7 +2977,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
spin_unlock(&tcp_md5sig_pool_lock);
if (p)
- return *this_cpu_ptr(p);
+ return this_cpu_ptr(p);
local_bh_enable();
return NULL;
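
The hunks above flatten `struct tcp_md5sig_pool * __percpu *` (a percpu array of pointers to separately kzalloc()ed structs) into `struct tcp_md5sig_pool __percpu *` (the structs themselves allocated percpu), saving one allocation and one pointer chase per CPU. A minimal sketch of the pattern with the generic percpu API (pool/pools are hypothetical names):

	#include <linux/percpu.h>

	struct pool { struct crypto_hash *tfm; };

	static struct pool __percpu *pools;

	static int pools_alloc(void)
	{
		/* One zeroed struct per possible CPU; no per-CPU kzalloc. */
		pools = alloc_percpu(struct pool);
		return pools ? 0 : -ENOMEM;
	}

	static struct pool *pools_get_local(void)
	{
		/* Was: *this_cpu_ptr(pp) — load the percpu slot, then load
		 * again through the stored pointer. Now a single step. */
		return this_cpu_ptr(pools);
	}
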
@@ -3000,23 +2992,25 @@ void tcp_put_md5sig_pool(void)
EXPORT_SYMBOL(tcp_put_md5sig_pool);
int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
- struct tcphdr *th)
+ const struct tcphdr *th)
{
struct scatterlist sg;
+ struct tcphdr hdr;
int err;
- __sum16 old_checksum = th->check;
- th->check = 0;
+ /* We are not allowed to change tcphdr, make a local copy */
+ memcpy(&hdr, th, sizeof(hdr));
+ hdr.check = 0;
+
/* options aren't included in the hash */
- sg_init_one(&sg, th, sizeof(struct tcphdr));
- err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
- th->check = old_checksum;
+ sg_init_one(&sg, &hdr, sizeof(hdr));
+ err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
return err;
}
EXPORT_SYMBOL(tcp_md5_hash_header);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
- struct sk_buff *skb, unsigned header_len)
+ const struct sk_buff *skb, unsigned int header_len)
{
struct scatterlist sg;
const struct tcphdr *tp = tcp_hdr(skb);
@@ -3035,8 +3029,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
for (i = 0; i < shi->nr_frags; ++i) {
const struct skb_frag_struct *f = &shi->frags[i];
- sg_set_page(&sg, f->page, f->size, f->page_offset);
- if (crypto_hash_update(desc, &sg, f->size))
+ struct page *page = skb_frag_page(f);
+ sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
+ if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
return 1;
}
@@ -3048,7 +3043,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);
-int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
struct scatterlist sg;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d73aab3..52b5c2d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -206,7 +206,7 @@ static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
if (tcp_hdr(skb)->cwr)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
@@ -217,32 +217,41 @@ static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
- if (tp->ecn_flags & TCP_ECN_OK) {
- if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ if (!(tp->ecn_flags & TCP_ECN_OK))
+ return;
+
+ switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
+ case INET_ECN_NOT_ECT:
/* Funny extension: if ECT is not set on a segment,
- * it is surely retransmit. It is not in ECN RFC,
- * but Linux follows this rule. */
- else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
+ * and we have already seen ECT on a previous segment,
+ * it is probably a retransmit.
+ */
+ if (tp->ecn_flags & TCP_ECN_SEEN)
tcp_enter_quickack_mode((struct sock *)tp);
+ break;
+ case INET_ECN_CE:
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ /* fall through */
+ default:
+ tp->ecn_flags |= TCP_ECN_SEEN;
}
}
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
+static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
+static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
return 1;
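
TCP_ECN_check_ce() now dispatches on the ECN bits of the DS field captured at receive time (TCP_SKB_CB(skb)->ip_dsfield, filled in by the tcp_ipv4.c hunk further down) and latches TCP_ECN_SEEN once any ECT codepoint appears, so a later not-ECT segment can be treated as a probable retransmit. For reference, the two-bit codepoints being switched on, assuming the standard RFC 3168 encoding of include/net/inet_ecn.h:

	/* Low two bits of the IPv4 TOS / IPv6 traffic-class octet: */
	enum {
		INET_ECN_NOT_ECT = 0,	/* transport not ECN-capable	*/
		INET_ECN_ECT_1   = 1,	/* ECN-capable, ECT(1)		*/
		INET_ECN_ECT_0   = 2,	/* ECN-capable, ECT(0)		*/
		INET_ECN_CE      = 3,	/* congestion experienced	*/
		INET_ECN_MASK    = 3,	/* mask used by the switch	*/
	};
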
@@ -256,14 +265,11 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
static void tcp_fixup_sndbuf(struct sock *sk)
{
- int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
- sizeof(struct sk_buff);
+ int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
- if (sk->sk_sndbuf < 3 * sndmem) {
- sk->sk_sndbuf = 3 * sndmem;
- if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
- sk->sk_sndbuf = sysctl_tcp_wmem[2];
- }
+ sndmem *= TCP_INIT_CWND;
+ if (sk->sk_sndbuf < sndmem)
+ sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
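
tcp_fixup_sndbuf() (and the receive-side paths below) drop the hand-rolled `+ 16 + sizeof(struct sk_buff)` overhead guess in favor of SKB_TRUESIZE(). A sketch of what that macro is assumed to expand to, per the skbuff.h helper this series builds on:

	/* Assumed definition: payload plus the aligned cost of the skb
	 * head and the shared-info block — much closer to what the
	 * allocator actually charges than the old "+ 16" estimate. */
	#define SKB_TRUESIZE(X) ((X) +						\
				 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
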
@@ -309,7 +315,7 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
return 0;
}
-static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
+static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -339,17 +345,24 @@ static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
static void tcp_fixup_rcvbuf(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
- int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+ u32 mss = tcp_sk(sk)->advmss;
+ u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
+ int rcvmem;
- /* Try to select rcvbuf so that 4 mss-sized segments
- * will fit to window and corresponding skbs will fit to our rcvbuf.
- * (was 3; 4 is minimum to allow fast retransmit to work.)
+ /* Limit to 10 segments if mss <= 1460,
+ * or 14600/mss segments, with a minimum of two segments.
*/
- while (tcp_win_from_space(rcvmem) < tp->advmss)
+ if (mss > 1460)
+ icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
+
+ rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
+ while (tcp_win_from_space(rcvmem) < mss)
rcvmem += 128;
- if (sk->sk_rcvbuf < 4 * rcvmem)
- sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
+
+ rcvmem *= icwnd;
+
+ if (sk->sk_rcvbuf < rcvmem)
+ sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}
/* 4. Try to fixup all. It is made immediately after connection enters
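
tcp_fixup_rcvbuf() now provisions for a full initial congestion window rather than a fixed four segments. A worked instance of the clamp, assuming TCP_DEFAULT_INIT_RCVWND is 10 (as the 1460/14600 constants suggest):

	u32 mss = 9000;		/* jumbo-frame MSS */
	u32 icwnd = max_t(u32, (1460 * 10) / mss, 2);	/* = max(1, 2) = 2 */
	/* mss = 1460 would leave icwnd at 10, ~14600 bytes of payload;
	 * with jumbo frames at least two segments always fit, so the
	 * payload budget stays near 14600 bytes either way, and rcvmem
	 * then scales it by the SKB_TRUESIZE() overhead per segment. */
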
@@ -416,7 +429,7 @@ static void tcp_clamp_window(struct sock *sk)
*/
void tcp_initialize_rcv_mss(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
hint = min(hint, tp->rcv_wnd / 2);
@@ -531,8 +544,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
space /= tp->advmss;
if (!space)
space = 1;
- rcvmem = (tp->advmss + MAX_TCP_HEADER +
- 16 + sizeof(struct sk_buff));
+ rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
space *= rcvmem;
@@ -812,7 +824,7 @@ void tcp_update_metrics(struct sock *sk)
}
}
-__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -1204,7 +1216,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
tp->lost_retrans_low = new_low_seq;
}
-static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
+static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
{
@@ -1298,7 +1310,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
return in_sack;
}
-static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
+static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
struct tcp_sacktag_state *state,
int dup_sack, int pcount)
{
@@ -1438,7 +1450,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tp->lost_cnt_hint -= tcp_skb_pcount(prev);
}
- TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
+ TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
@@ -1453,13 +1465,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
/* I wish gso_size would have a bit more sane initialization than
* something-or-zero which complicates things
*/
-static int tcp_skb_seglen(struct sk_buff *skb)
+static int tcp_skb_seglen(const struct sk_buff *skb)
{
return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}
/* Shifting pages past head area doesn't work */
-static int skb_can_shift(struct sk_buff *skb)
+static int skb_can_shift(const struct sk_buff *skb)
{
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}
@@ -1708,19 +1720,19 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
return skb;
}
-static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}
static int
-tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
+tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
u32 prior_snd_una)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- unsigned char *ptr = (skb_transport_header(ack_skb) +
- TCP_SKB_CB(ack_skb)->sacked);
+ const unsigned char *ptr = (skb_transport_header(ack_skb) +
+ TCP_SKB_CB(ack_skb)->sacked);
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[TCP_NUM_SACKS];
struct tcp_sack_block *cache;
@@ -2284,7 +2296,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
return 0;
}
-static inline int tcp_fackets_out(struct tcp_sock *tp)
+static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}
@@ -2304,19 +2316,20 @@ static inline int tcp_fackets_out(struct tcp_sock *tp)
* they differ. Since neither occurs due to loss, TCP should really
* ignore them.
*/
-static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
+static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}
-static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
+static inline int tcp_skb_timedout(const struct sock *sk,
+ const struct sk_buff *skb)
{
return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}
-static inline int tcp_head_timedout(struct sock *sk)
+static inline int tcp_head_timedout(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
return tp->packets_out &&
tcp_skb_timedout(sk, tcp_write_queue_head(sk));
@@ -2627,7 +2640,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
-static inline int tcp_packet_delayed(struct tcp_sock *tp)
+static inline int tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2688,7 +2701,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static inline int tcp_may_undo(struct tcp_sock *tp)
+static inline int tcp_may_undo(const struct tcp_sock *tp)
{
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
@@ -2752,9 +2765,9 @@ static void tcp_try_undo_dsack(struct sock *sk)
* that successive retransmissions of a segment must not advance
* retrans_stamp under any conditions.
*/
-static int tcp_any_retrans_done(struct sock *sk)
+static int tcp_any_retrans_done(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (tp->retrans_out)
@@ -2828,9 +2841,13 @@ static int tcp_try_undo_loss(struct sock *sk)
static inline void tcp_complete_cwr(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- /* Do not moderate cwnd if it's already undone in cwr or recovery */
- if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
- tp->snd_cwnd = tp->snd_ssthresh;
+
+ /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+ if (tp->undo_marker) {
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+ else /* PRR */
+ tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_time_stamp;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2948,6 +2965,38 @@ void tcp_simple_retransmit(struct sock *sk)
}
EXPORT_SYMBOL(tcp_simple_retransmit);
+/* This function implements the PRR algorithm, specifically the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ * 1) If the number of packets in flight is larger than ssthresh, PRR
+ * spreads the cwnd reductions across a full RTT.
+ * 2) If the number of packets in flight is lower than ssthresh (such as
+ * due to excess losses and/or application stalls), do not perform any
+ * further cwnd reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+ int fast_rexmit, int flag)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int sndcnt = 0;
+ int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+ if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+ u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+ tp->prior_cwnd - 1;
+ sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+ } else {
+ sndcnt = min_t(int, delta,
+ max_t(int, tp->prr_delivered - tp->prr_out,
+ newly_acked_sacked) + 1);
+ }
+
+ sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+ tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
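
A worked instance of the proportional branch of tcp_update_cwnd_in_recovery() above, with hypothetical numbers (div_u64() as in <linux/math64.h>):

	/* ssthresh = 10, prior_cwnd = 20 (a halving is the target),
	 * prr_delivered = 4, prr_out = 1: */
	u64 dividend = (u64)10 * 4 + 20 - 1;		/* = 59		*/
	int sndcnt = div_u64(dividend, 20) - 1;		/* = 2 - 1 = 1	*/
	/* Roughly one packet sent per two delivered, so in-flight glides
	 * down toward ssthresh across one RTT instead of dropping all at
	 * once; the fast_rexmit clamp still guarantees sndcnt >= 1. */
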
@@ -2959,7 +3008,8 @@ EXPORT_SYMBOL(tcp_simple_retransmit);
* It does _not_ decide what to send, it is made in function
* tcp_xmit_retransmit_queue().
*/
-static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
+static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+ int newly_acked_sacked, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3109,13 +3159,17 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
tp->bytes_acked = 0;
tp->snd_cwnd_cnt = 0;
+ tp->prior_cwnd = tp->snd_cwnd;
+ tp->prr_delivered = 0;
+ tp->prr_out = 0;
tcp_set_ca_state(sk, TCP_CA_Recovery);
fast_rexmit = 1;
}
if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
tcp_update_scoreboard(sk, fast_rexmit);
- tcp_cwnd_down(sk, flag);
+ tp->prr_delivered += newly_acked_sacked;
+ tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
tcp_xmit_retransmit_queue(sk);
}
@@ -3192,7 +3246,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*/
static void tcp_rearm_rto(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
@@ -3296,7 +3350,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
- if (!(scb->flags & TCPHDR_SYN)) {
+ if (!(scb->tcp_flags & TCPHDR_SYN)) {
flag |= FLAG_DATA_ACKED;
} else {
flag |= FLAG_SYN_ACKED;
@@ -3444,7 +3498,7 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
* Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
* and in FreeBSD. NetBSD's one is even worse.) is wrong.
*/
-static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
u32 ack_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3620,7 +3674,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
}
/* This routine deals with incoming acks, but not outgoing ones. */
-static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
+static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -3630,6 +3684,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
u32 prior_in_flight;
u32 prior_fackets;
int prior_packets;
+ int prior_sacked = tp->sacked_out;
+ int newly_acked_sacked = 0;
int frto_cwnd = 0;
/* If the ack is older than previous acks
@@ -3701,6 +3757,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ newly_acked_sacked = (prior_packets - prior_sacked) -
+ (tp->packets_out - tp->sacked_out);
+
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
@@ -3713,7 +3772,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
- flag);
+ newly_acked_sacked, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3752,14 +3811,14 @@ old_ack:
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
-void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
- u8 **hvpp, int estab)
+void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
+ const u8 **hvpp, int estab)
{
- unsigned char *ptr;
- struct tcphdr *th = tcp_hdr(skb);
+ const unsigned char *ptr;
+ const struct tcphdr *th = tcp_hdr(skb);
int length = (th->doff * 4) - sizeof(struct tcphdr);
- ptr = (unsigned char *)(th + 1);
+ ptr = (const unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
while (length > 0) {
@@ -3870,9 +3929,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
}
EXPORT_SYMBOL(tcp_parse_options);
-static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
+static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
- __be32 *ptr = (__be32 *)(th + 1);
+ const __be32 *ptr = (const __be32 *)(th + 1);
if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
@@ -3889,8 +3948,9 @@ static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
-static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
- struct tcp_sock *tp, u8 **hvpp)
+static int tcp_fast_parse_options(const struct sk_buff *skb,
+ const struct tcphdr *th,
+ struct tcp_sock *tp, const u8 **hvpp)
{
/* In the spirit of fast parsing, compare doff directly to constant
* values. Because equality is used, short doff can be ignored here.
@@ -3911,10 +3971,10 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
/*
* Parse MD5 Signature option
*/
-u8 *tcp_parse_md5sig_option(struct tcphdr *th)
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
{
- int length = (th->doff << 2) - sizeof (*th);
- u8 *ptr = (u8*)(th + 1);
+ int length = (th->doff << 2) - sizeof(*th);
+ const u8 *ptr = (const u8 *)(th + 1);
/* If the TCP option is too short, we can short cut */
if (length < TCPOLEN_MD5SIG)
@@ -3991,8 +4051,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcphdr *th = tcp_hdr(skb);
u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
@@ -4031,7 +4091,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
* (borrowed from freebsd)
*/
-static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
return !before(end_seq, tp->rcv_wup) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4076,7 +4136,7 @@ static void tcp_reset(struct sock *sk)
*
* If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
*/
-static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
+static void tcp_fin(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4188,7 +4248,7 @@ static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
-static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
+static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -4347,7 +4407,7 @@ static void tcp_ofo_queue(struct sock *sk)
__skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tcp_hdr(skb)->fin)
- tcp_fin(skb, sk, tcp_hdr(skb));
+ tcp_fin(sk);
}
}
@@ -4375,7 +4435,7 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
int eaten = -1;
@@ -4429,7 +4489,7 @@ queue_and_out:
if (skb->len)
tcp_event_data_recv(sk, skb);
if (th->fin)
- tcp_fin(skb, sk, th);
+ tcp_fin(sk);
if (!skb_queue_empty(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
@@ -4859,9 +4919,9 @@ void tcp_cwnd_application_limited(struct sock *sk)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static int tcp_should_expand_sndbuf(struct sock *sk)
+static int tcp_should_expand_sndbuf(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* If the user specified a specific send buffer setting, do
* not modify it.
@@ -4895,8 +4955,10 @@ static void tcp_new_space(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_should_expand_sndbuf(sk)) {
- int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
- MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
+ int sndmem = SKB_TRUESIZE(max_t(u32,
+ tp->rx_opt.mss_clamp,
+ tp->mss_cache) +
+ MAX_TCP_HEADER);
int demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1);
sndmem *= 2 * demanded;
@@ -4968,7 +5030,7 @@ static inline void tcp_ack_snd_check(struct sock *sk)
* either form (or just set the sysctl tcp_stdurg).
*/
-static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
+static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 ptr = ntohs(th->urg_ptr);
@@ -5034,7 +5096,7 @@ static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
}
/* This is the 'fast' part of urgent handling. */
-static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
+static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5155,9 +5217,9 @@ out:
* play significant role here.
*/
static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, int syn_inerr)
+ const struct tcphdr *th, int syn_inerr)
{
- u8 *hash_location;
+ const u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
/* RFC1323: H1. Apply PAWS check first. */
@@ -5238,7 +5300,7 @@ discard:
* tcp_data_queue when everything is OK.
*/
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
int res;
@@ -5449,9 +5511,9 @@ discard:
EXPORT_SYMBOL(tcp_rcv_established);
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
@@ -5726,7 +5788,7 @@ reset_and_undo:
*/
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned len)
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7963e03..a744315 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency);
static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
__be32 addr);
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
- __be32 daddr, __be32 saddr, struct tcphdr *th);
+ __be32 daddr, __be32 saddr, const struct tcphdr *th);
#else
static inline
struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
@@ -104,7 +104,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
-static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
+static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
@@ -552,7 +552,7 @@ static void __tcp_v4_send_check(struct sk_buff *skb,
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
@@ -590,7 +590,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
@@ -652,6 +652,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
&arg, arg.iov[0].iov_len);
@@ -666,9 +667,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts, int oif,
struct tcp_md5sig_key *key,
- int reply_flags)
+ int reply_flags, u8 tos)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
@@ -726,7 +727,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
if (oif)
arg.bound_dev_if = oif;
-
+ arg.tos = tos;
ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
&arg, arg.iov[0].iov_len);
@@ -743,7 +744,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
- tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0
+ tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
+ tw->tw_tos
);
inet_twsk_put(tw);
@@ -757,7 +759,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
req->ts_recent,
0,
tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr),
- inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0);
+ inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+ ip_hdr(skb)->tos);
}
/*
@@ -1090,7 +1093,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
- __be32 daddr, __be32 saddr, struct tcphdr *th)
+ __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
@@ -1122,12 +1125,12 @@ clear_hash_noput:
}
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb)
+ const struct sock *sk, const struct request_sock *req,
+ const struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
if (sk) {
@@ -1172,7 +1175,7 @@ clear_hash_noput:
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
-static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
+static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
/*
* This gets called for each TCP segment that arrives
@@ -1182,10 +1185,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
* o MD5 hash and we're not expecting one.
* o MD5 hash and its wrong.
*/
- __u8 *hash_location = NULL;
+ const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct iphdr *iph = ip_hdr(skb);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
int genhash;
unsigned char newhash[16];
@@ -1248,7 +1251,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct request_sock *req;
struct inet_request_sock *ireq;
struct tcp_sock *tp = tcp_sk(sk);
@@ -1507,6 +1510,7 @@ exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
+ bh_unlock_sock(newsk);
sock_put(newsk);
goto exit;
}
@@ -1588,7 +1592,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
#endif
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1605,7 +1609,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- sock_rps_save_rxhash(nsk, skb->rxhash);
+ sock_rps_save_rxhash(nsk, skb);
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
@@ -1613,7 +1617,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
} else
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
@@ -1645,7 +1649,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_rcv(struct sk_buff *skb)
{
const struct iphdr *iph;
- struct tcphdr *th;
+ const struct tcphdr *th;
struct sock *sk;
int ret;
struct net *net = dev_net(skb->dev);
@@ -1680,7 +1684,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = iph->tos;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1809,7 +1813,7 @@ EXPORT_SYMBOL(tcp_v4_get_peer);
void *tcp_v4_tw_get_peer(struct sock *sk)
{
- struct inet_timewait_sock *tw = inet_twsk(sk);
+ const struct inet_timewait_sock *tw = inet_twsk(sk);
return inet_getpeer_v4(tw->tw_daddr, 1);
}
@@ -2336,7 +2340,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
}
}
-static int tcp_seq_open(struct inode *inode, struct file *file)
+int tcp_seq_open(struct inode *inode, struct file *file)
{
struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
struct tcp_iter_state *s;
@@ -2352,23 +2356,19 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
s->last_pos = 0;
return 0;
}
+EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
int rc = 0;
struct proc_dir_entry *p;
- afinfo->seq_fops.open = tcp_seq_open;
- afinfo->seq_fops.read = seq_read;
- afinfo->seq_fops.llseek = seq_lseek;
- afinfo->seq_fops.release = seq_release_net;
-
afinfo->seq_ops.start = tcp_seq_start;
afinfo->seq_ops.next = tcp_seq_next;
afinfo->seq_ops.stop = tcp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
- &afinfo->seq_fops, afinfo);
+ afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
@@ -2381,7 +2381,7 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
}
EXPORT_SYMBOL(tcp_proc_unregister);
-static void get_openreq4(struct sock *sk, struct request_sock *req,
+static void get_openreq4(const struct sock *sk, const struct request_sock *req,
struct seq_file *f, int i, int uid, int *len)
{
const struct inet_request_sock *ireq = inet_rsk(req);
@@ -2411,9 +2411,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
int timer_active;
unsigned long timer_expires;
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct inet_sock *inet = inet_sk(sk);
+ const struct inet_sock *inet = inet_sk(sk);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
@@ -2462,7 +2462,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
len);
}
-static void get_timewait4_sock(struct inet_timewait_sock *tw,
+static void get_timewait4_sock(const struct inet_timewait_sock *tw,
struct seq_file *f, int i, int *len)
{
__be32 dest, src;
@@ -2517,12 +2517,18 @@ out:
return 0;
}
+static const struct file_operations tcp_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = tcp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
.name = "tcp",
.family = AF_INET,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &tcp_afinfo_seq_fops,
.seq_ops = {
.show = tcp4_seq_show,
},
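
The seq_fops change above replaces a per-afinfo file_operations copy, filled in at registration time, with one shared const table (tcp_afinfo_seq_fops) referenced by pointer; the afinfo structs become read-mostly and tcp_seq_open() is exported so each protocol's table can reuse it. A user-space sketch of the shared-vtable pattern, with invented names:

#include <stdio.h>

struct seq_ops {
    int (*open)(const char *name);
};

static int generic_open(const char *name)
{
    printf("open %s\n", name);
    return 0;
}

/* One read-only table, shared by every registration. */
static const struct seq_ops shared_ops = { .open = generic_open };

struct afinfo {
    const char *name;
    const struct seq_ops *ops;  /* pointer, not an embedded mutable copy */
};

int main(void)
{
    struct afinfo tcp4 = { "tcp", &shared_ops };
    struct afinfo udp4 = { "udp", &shared_ops };
    tcp4.ops->open(tcp4.name);
    udp4.ops->open(udp4.name);
    return 0;
}

Sharing one const table also lets the compiler place it in read-only data, which per-instance copies filled at runtime cannot be.
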
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0ce3d06..66363b6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -141,7 +141,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
const struct tcphdr *th)
{
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
int paws_reject = 0;
@@ -345,6 +345,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw6 = inet6_twsk((struct sock *)tw);
ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
+ tw->tw_tclass = np->tclass;
tw->tw_ipv6only = np->ipv6only;
}
#endif
@@ -567,7 +568,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
struct request_sock **prev)
{
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct sock *child;
const struct tcphdr *th = tcp_hdr(skb);
__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 882e0b0..980b98f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
/* Account for new data that has been sent to the network. */
-static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
+static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned int prior_packets = tp->packets_out;
@@ -89,9 +89,9 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
* Anything in between SND.UNA...SND.UNA+SND.WND also can be already
* invalid. OK, let's make this for now:
*/
-static inline __u32 tcp_acceptable_seq(struct sock *sk)
+static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
if (!before(tcp_wnd_end(tp), tp->snd_nxt))
return tp->snd_nxt;
@@ -116,7 +116,7 @@ static inline __u32 tcp_acceptable_seq(struct sock *sk)
static __u16 tcp_advertise_mss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
if (dst) {
@@ -133,7 +133,7 @@ static __u16 tcp_advertise_mss(struct sock *sk)
/* RFC2861. Reset CWND after idle period longer than RTO to "restart window".
* This is the first part of the cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
+static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
s32 delta = tcp_time_stamp - tp->lsndtime;
@@ -154,7 +154,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
- struct sk_buff *skb, struct sock *sk)
+ struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
@@ -295,11 +295,11 @@ static u16 tcp_select_window(struct sock *sk)
}
/* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
+static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
- TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
+ TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
}
/* Packet ECN state for a SYN. */
@@ -309,13 +309,13 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
tp->ecn_flags = 0;
if (sysctl_tcp_ecn == 1) {
- TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
}
static __inline__ void
-TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
+TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
if (inet_rsk(req)->ecn_ok)
th->ece = 1;
@@ -356,7 +356,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- TCP_SKB_CB(skb)->flags = flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags;
TCP_SKB_CB(skb)->sacked = 0;
skb_shinfo(skb)->gso_segs = 1;
@@ -565,7 +565,8 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
*/
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5) {
+ struct tcp_md5sig_key **md5)
+{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
unsigned remaining = MAX_TCP_OPTION_SPACE;
@@ -743,7 +744,8 @@ static unsigned tcp_synack_options(struct sock *sk,
*/
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5) {
+ struct tcp_md5sig_key **md5)
+{
struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
unsigned size = 0;
@@ -826,7 +828,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
- if (unlikely(tcb->flags & TCPHDR_SYN))
+ if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
else
tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -850,9 +852,9 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->seq = htonl(tcb->seq);
th->ack_seq = htonl(tp->rcv_nxt);
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
- tcb->flags);
+ tcb->tcp_flags);
- if (unlikely(tcb->flags & TCPHDR_SYN)) {
+ if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
/* RFC1323: The window in SYN & SYN/ACK segments
* is never scaled.
*/
@@ -875,7 +877,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
}
tcp_options_write((__be32 *)(th + 1), tp, &opts);
- if (likely((tcb->flags & TCPHDR_SYN) == 0))
+ if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
TCP_ECN_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
@@ -889,11 +891,11 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
- if (likely(tcb->flags & TCPHDR_ACK))
+ if (likely(tcb->tcp_flags & TCPHDR_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
if (skb->len != tcp_header_size)
- tcp_event_data_sent(tp, skb, sk);
+ tcp_event_data_sent(tp, sk);
if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
@@ -926,7 +928,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
}
/* Initialize TSO segments for a packet. */
-static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
+static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
if (skb->len <= mss_now || !sk_can_gso(sk) ||
@@ -947,7 +949,7 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
/* When a modification to fackets out becomes necessary, we need to check
* skb is counted to fackets_out or not.
*/
-static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
+static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -962,7 +964,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
/* Pcount in the middle of the write queue got changed, we need to do various
* tweaks to fix counters
*/
-static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
+static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1032,9 +1034,9 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
/* PSH and FIN should only be set in the second packet. */
- flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
- TCP_SKB_CB(buff)->flags = flags;
+ flags = TCP_SKB_CB(skb)->tcp_flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+ TCP_SKB_CB(buff)->tcp_flags = flags;
TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -1094,14 +1096,16 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
eat = len;
k = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- if (skb_shinfo(skb)->frags[i].size <= eat) {
- put_page(skb_shinfo(skb)->frags[i].page);
- eat -= skb_shinfo(skb)->frags[i].size;
+ int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (size <= eat) {
+ skb_frag_unref(skb, i);
+ eat -= size;
} else {
skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
if (eat) {
skb_shinfo(skb)->frags[k].page_offset += eat;
- skb_shinfo(skb)->frags[k].size -= eat;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
eat = 0;
}
k++;
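
The __pskb_trim_head() hunk converts direct frags[i].size pokes into the skb_frag_size()/skb_frag_size_sub()/skb_frag_unref() accessors, decoupling callers from the fragment representation. A toy sketch of the accessor idea, with types invented purely for illustration:

#include <stddef.h>
#include <stdio.h>

struct frag { size_t size; };

static size_t frag_size(const struct frag *f)         { return f->size; }
static void   frag_size_sub(struct frag *f, size_t d) { f->size -= d; }

int main(void)
{
    struct frag f = { .size = 4096 };
    frag_size_sub(&f, 1024);  /* "eat" 1 KiB from the front of the fragment */
    printf("remaining=%zu\n", frag_size(&f));
    return 0;
}
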
@@ -1144,10 +1148,10 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
}
/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
int mss_now;
/* Calculate base mss without TCP options:
@@ -1173,10 +1177,10 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
}
/* Inverse of above */
-int tcp_mss_to_mtu(struct sock *sk, int mss)
+int tcp_mss_to_mtu(const struct sock *sk, int mss)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
int mtu;
mtu = mss +
@@ -1250,8 +1254,8 @@ EXPORT_SYMBOL(tcp_sync_mss);
*/
unsigned int tcp_current_mss(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
u32 mss_now;
unsigned header_len;
struct tcp_out_options opts;
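
The constified helpers above, tcp_mtu_to_mss(), tcp_mss_to_mtu() and tcp_current_mss(), remain exact inverses of each other. A simplified sketch of the arithmetic, ignoring IP options, TCP options and the extension-header accounting that the kernel versions include:

#include <stdio.h>

#define IP_HDR_LEN  20  /* minimum IPv4 header */
#define TCP_HDR_LEN 20  /* minimum TCP header, no options */

static int mtu_to_mss(int mtu) { return mtu - IP_HDR_LEN - TCP_HDR_LEN; }
static int mss_to_mtu(int mss) { return mss + IP_HDR_LEN + TCP_HDR_LEN; }

int main(void)
{
    int mtu = 1500;
    int mss = mtu_to_mss(mtu);
    printf("mtu=%d -> mss=%d -> mtu=%d\n", mtu, mss, mss_to_mtu(mss));
    return 0;
}
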
@@ -1311,10 +1315,10 @@ static void tcp_cwnd_validate(struct sock *sk)
* modulo only when the receiver window alone is the limiting factor or
* when we would be allowed to send the split-due-to-Nagle skb fully.
*/
-static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
unsigned int mss_now, unsigned int cwnd)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
u32 needed, window, cwnd_len;
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
@@ -1334,13 +1338,14 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
/* Can at least one segment of SKB be sent right now, according to the
* congestion window rules? If so, return how many segments are allowed.
*/
-static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
- struct sk_buff *skb)
+static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
+ const struct sk_buff *skb)
{
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
+ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
+ tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
@@ -1355,7 +1360,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
* This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
-static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
+static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
@@ -1393,7 +1398,7 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
/* Return non-zero if the Nagle test allows this packet to be
* sent now.
*/
-static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
/* Nagle rule does not apply to frames, which sit in the middle of the
@@ -1409,7 +1414,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
* Nagle can be ignored during F-RTO too (see RFC4138).
*/
if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
- (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
+ (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
return 1;
if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1419,7 +1424,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
}
/* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
+static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -1434,10 +1439,10 @@ static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
* should be put on the wire right now. If so, it returns the number of
* packets allowed by the congestion window.
*/
-static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
+static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
unsigned int cwnd_quota;
tcp_init_tso_segs(sk, skb, cur_mss);
@@ -1455,7 +1460,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
return skb &&
@@ -1497,9 +1502,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
/* PSH and FIN should only be set in the second packet. */
- flags = TCP_SKB_CB(skb)->flags;
- TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
- TCP_SKB_CB(buff)->flags = flags;
+ flags = TCP_SKB_CB(skb)->tcp_flags;
+ TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
+ TCP_SKB_CB(buff)->tcp_flags = flags;
/* This packet was never sent out yet, so no SACK bits. */
TCP_SKB_CB(buff)->sacked = 0;
@@ -1530,7 +1535,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
u32 send_win, cong_win, limit, in_flight;
int win_divisor;
- if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto send_now;
if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1657,7 +1662,7 @@ static int tcp_mtu_probe(struct sock *sk)
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
- TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
+ TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
TCP_SKB_CB(nskb)->sacked = 0;
nskb->csum = 0;
nskb->ip_summed = skb->ip_summed;
@@ -1677,11 +1682,11 @@ static int tcp_mtu_probe(struct sock *sk)
if (skb->len <= copy) {
/* We've eaten all the data from this skb.
* Throw it away. */
- TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
+ TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
} else {
- TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
+ TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
~(TCPHDR_FIN|TCPHDR_PSH);
if (!skb_shinfo(skb)->nr_frags) {
skb_pull(skb, copy);
@@ -1796,11 +1801,13 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
tcp_event_new_data_sent(sk, skb);
tcp_minshall_update(tp, mss_now, skb);
- sent_pkts++;
+ sent_pkts += tcp_skb_pcount(skb);
if (push_one)
break;
}
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+ tp->prr_out += sent_pkts;
if (likely(sent_pkts)) {
tcp_cwnd_validate(sk);
@@ -1985,7 +1992,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
/* Merge over control information. This moves PSH/FIN etc. over */
- TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
+ TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
/* All done, get rid of second SKB and account for it so
* packet counting does not break.
@@ -2003,7 +2010,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
}
/* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
+static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
{
if (tcp_skb_pcount(skb) > 1)
return 0;
@@ -2033,7 +2040,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
if (!sysctl_tcp_retrans_collapse)
return;
- if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
return;
tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2125,12 +2132,12 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
* since it is cheap to do so and saves bytes on the network.
*/
if (skb->len > 0 &&
- (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
+ (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
if (!pskb_trim(skb, 0)) {
/* Reuse, even though it does some unnecessary work */
tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
- TCP_SKB_CB(skb)->flags);
+ TCP_SKB_CB(skb)->tcp_flags);
skb->ip_summed = CHECKSUM_NONE;
}
}
@@ -2179,7 +2186,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
static int tcp_can_forward_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
+ const struct tcp_sock *tp = tcp_sk(sk);
/* Forward retransmissions are possible only during Recovery. */
if (icsk->icsk_ca_state != TCP_CA_Recovery)
@@ -2294,6 +2301,9 @@ begin_fwd:
return;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery)
+ tp->prr_out += tcp_skb_pcount(skb);
+
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
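
Both transmit paths above, tcp_write_xmit() and the retransmit loop, now add every segment that leaves the host while the connection sits in TCP_CA_Recovery to tp->prr_out, the counter that Proportional Rate Reduction balances against delivered data when pacing out the window reduction. A compact model of that bookkeeping; the enum and struct are stand-ins, not kernel types:

#include <stdio.h>

enum ca_state { CA_OPEN, CA_RECOVERY };

struct tcp_state {
    enum ca_state ca_state;
    unsigned int prr_out;  /* segments sent while in Recovery */
};

static void account_sent(struct tcp_state *tp, unsigned int pcount)
{
    if (tp->ca_state == CA_RECOVERY)
        tp->prr_out += pcount;
}

int main(void)
{
    struct tcp_state tp = { CA_RECOVERY, 0 };
    account_sent(&tp, 3);  /* tcp_write_xmit: sent_pkts */
    account_sent(&tp, 1);  /* retransmit path: tcp_skb_pcount(skb) */
    printf("prr_out=%u\n", tp.prr_out);
    return 0;
}
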
@@ -2317,7 +2327,7 @@ void tcp_send_fin(struct sock *sk)
mss_now = tcp_current_mss(sk);
if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
TCP_SKB_CB(skb)->end_seq++;
tp->write_seq++;
} else {
@@ -2379,11 +2389,11 @@ int tcp_send_synack(struct sock *sk)
struct sk_buff *skb;
skb = tcp_write_queue_head(sk);
- if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
+ if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
return -EFAULT;
}
- if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
+ if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
if (skb_cloned(skb)) {
struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
if (nskb == NULL)
@@ -2397,7 +2407,7 @@ int tcp_send_synack(struct sock *sk)
skb = nskb;
}
- TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
TCP_ECN_send_synack(tcp_sk(sk), skb);
}
TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2542,7 +2552,7 @@ EXPORT_SYMBOL(tcp_make_synack);
/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
- struct dst_entry *dst = __sk_dst_get(sk);
+ const struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
__u8 rcv_wscale;
@@ -2794,13 +2804,13 @@ int tcp_write_wakeup(struct sock *sk)
if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
skb->len > mss) {
seg_size = min(seg_size, mss);
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
if (tcp_fragment(sk, skb, seg_size, mss))
return -1;
} else if (!tcp_skb_pcount(skb))
tcp_set_skb_tso_segs(sk, skb, mss);
- TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
+ TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
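
The rename from TCP_SKB_CB(skb)->flags to ->tcp_flags runs through this whole file; the values are the on-wire TCP control bits, and the splitting logic in tcp_fragment() and tso_fragment() keeps PSH and FIN on the second half only. A self-contained sketch of that masking, using the same bit values as the TCP header:

#include <stdint.h>
#include <stdio.h>

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10

int main(void)
{
    uint8_t flags  = TCPHDR_ACK | TCPHDR_PSH | TCPHDR_FIN;
    uint8_t first  = flags & ~(TCPHDR_FIN | TCPHDR_PSH); /* first fragment */
    uint8_t second = flags;                              /* second fragment */
    printf("first=%#x second=%#x\n", first, second);
    return 0;
}
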
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index ecd44b0..2e0f0af 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -334,7 +334,6 @@ void tcp_retransmit_timer(struct sock *sk)
* connection. If the socket is an orphan, time it out,
* we cannot allow such beasts to hang infinitely.
*/
-#ifdef TCP_DEBUG
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
@@ -349,7 +348,6 @@ void tcp_retransmit_timer(struct sock *sk)
inet->inet_num, tp->snd_una, tp->snd_nxt);
}
#endif
-#endif
if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
tcp_write_err(sk);
goto out;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b5a193..ab0966d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1267,7 +1267,7 @@ int udp_disconnect(struct sock *sk, int flags)
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
- sock_rps_save_rxhash(sk, 0);
+ sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
@@ -1355,7 +1355,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
int rc;
if (inet_sk(sk)->inet_daddr)
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
rc = ip_queue_rcv_skb(sk, skb);
if (rc < 0) {
@@ -1397,6 +1397,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
nf_reset(skb);
if (up->encap_type) {
+ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
@@ -1409,11 +1411,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
*/
/* if we're overly short, let UDP handle it */
- if (skb->len > sizeof(struct udphdr) &&
- up->encap_rcv != NULL) {
+ encap_rcv = ACCESS_ONCE(up->encap_rcv);
+ if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
int ret;
- ret = (*up->encap_rcv)(sk, skb);
+ ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
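
The udp_queue_rcv_skb() hunk above loads up->encap_rcv exactly once into a local before the NULL test and the call, since the pointer can be cleared by another CPU between the two reads; the rcu_access_pointer() conversion just below applies the same discipline where only a NULL check is needed. A user-space sketch of the load-once idiom, with C11 atomics standing in for the kernel's ACCESS_ONCE():

#include <stdatomic.h>
#include <stdio.h>

struct sock_up {
    _Atomic(int (*)(int)) encap_rcv;  /* may be cleared concurrently */
};

static int demo_rcv(int pkt)
{
    return pkt;
}

static int deliver(struct sock_up *up, int pkt)
{
    /* Load the pointer once; test and call the same value. */
    int (*encap_rcv)(int) = atomic_load(&up->encap_rcv);
    if (encap_rcv != NULL)
        return encap_rcv(pkt);
    return -1;  /* fall back to plain delivery */
}

int main(void)
{
    struct sock_up up;
    atomic_init(&up.encap_rcv, demo_rcv);
    printf("%d\n", deliver(&up, 42));
    return 0;
}
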
@@ -1461,10 +1463,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
}
- if (rcu_dereference_raw(sk->sk_filter)) {
- if (udp_lib_checksum_complete(skb))
- goto drop;
- }
+ if (rcu_access_pointer(sk->sk_filter) &&
+ udp_lib_checksum_complete(skb))
+ goto drop;
if (sk_rcvqueues_full(sk, skb))
@@ -2038,7 +2039,7 @@ static void udp_seq_stop(struct seq_file *seq, void *v)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
-static int udp_seq_open(struct inode *inode, struct file *file)
+int udp_seq_open(struct inode *inode, struct file *file)
{
struct udp_seq_afinfo *afinfo = PDE(inode)->data;
struct udp_iter_state *s;
@@ -2054,6 +2055,7 @@ static int udp_seq_open(struct inode *inode, struct file *file)
s->udp_table = afinfo->udp_table;
return err;
}
+EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
@@ -2061,17 +2063,12 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
struct proc_dir_entry *p;
int rc = 0;
- afinfo->seq_fops.open = udp_seq_open;
- afinfo->seq_fops.read = seq_read;
- afinfo->seq_fops.llseek = seq_lseek;
- afinfo->seq_fops.release = seq_release_net;
-
afinfo->seq_ops.start = udp_seq_start;
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
- &afinfo->seq_fops, afinfo);
+ afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
@@ -2121,14 +2118,20 @@ int udp4_seq_show(struct seq_file *seq, void *v)
return 0;
}
+static const struct file_operations udp_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = udp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
.name = "udp",
.family = AF_INET,
.udp_table = &udp_table,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &udp_afinfo_seq_fops,
.seq_ops = {
.show = udp4_seq_show,
},
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index aee9963..12e9499 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -10,6 +10,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/export.h>
#include "udp_impl.h"
struct udp_table udplite_table __read_mostly;
@@ -71,13 +72,20 @@ static struct inet_protosw udplite4_protosw = {
};
#ifdef CONFIG_PROC_FS
+
+static const struct file_operations udplite_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = udp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
static struct udp_seq_afinfo udplite4_seq_afinfo = {
.name = "udplite",
.family = AF_INET,
.udp_table = &udplite_table,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &udplite_afinfo_seq_fops,
.seq_ops = {
.show = udp4_seq_show,
},
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fc5368a..a0b4c5d 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,13 +79,13 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
- rt->rt_key_dst = fl4->daddr;
- rt->rt_key_src = fl4->saddr;
- rt->rt_key_tos = fl4->flowi4_tos;
- rt->rt_route_iif = fl4->flowi4_iif;
- rt->rt_iif = fl4->flowi4_iif;
- rt->rt_oif = fl4->flowi4_oif;
- rt->rt_mark = fl4->flowi4_mark;
+ xdst->u.rt.rt_key_dst = fl4->daddr;
+ xdst->u.rt.rt_key_src = fl4->saddr;
+ xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
+ xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
+ xdst->u.rt.rt_iif = fl4->flowi4_iif;
+ xdst->u.rt.rt_oif = fl4->flowi4_oif;
+ xdst->u.rt.rt_mark = fl4->flowi4_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index d9ac0a0..9258e75 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -12,6 +12,7 @@
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/export.h>
static int xfrm4_init_flags(struct xfrm_state *x)
{
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12368c5..cf88df8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -87,6 +87,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
/* Set to 3 to get tracing... */
#define ACONF_DEBUG 2
@@ -428,7 +429,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
ndev->tstamp = jiffies;
addrconf_sysctl_register(ndev);
/* protected by rtnl_lock */
- rcu_assign_pointer(dev->ip6_ptr, ndev);
+ RCU_INIT_POINTER(dev->ip6_ptr, ndev);
/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
@@ -824,12 +825,13 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
{
struct inet6_dev *idev = ifp->idev;
struct in6_addr addr, *tmpaddr;
- unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_cstamp, tmp_tstamp, age;
+ unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
int max_addresses;
u32 addr_flags;
+ unsigned long now = jiffies;
write_lock(&idev->lock);
if (ift) {
@@ -874,7 +876,7 @@ retry:
goto out;
}
memcpy(&addr.s6_addr[8], idev->rndid, 8);
- age = (jiffies - ifp->tstamp) / HZ;
+ age = (now - ifp->tstamp) / HZ;
tmp_valid_lft = min_t(__u32,
ifp->valid_lft,
idev->cnf.temp_valid_lft + age);
@@ -884,7 +886,6 @@ retry:
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
max_addresses = idev->cnf.max_addresses;
- tmp_cstamp = ifp->cstamp;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
@@ -929,7 +930,7 @@ retry:
ift->ifpub = ifp;
ift->valid_lft = tmp_valid_lft;
ift->prefered_lft = tmp_prefered_lft;
- ift->cstamp = tmp_cstamp;
+ ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@@ -1713,6 +1714,40 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
ip6_route_add(&cfg);
}
+
+static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
+ int plen,
+ const struct net_device *dev,
+ u32 flags, u32 noflags)
+{
+ struct fib6_node *fn;
+ struct rt6_info *rt = NULL;
+ struct fib6_table *table;
+
+ table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
+ if (table == NULL)
+ return NULL;
+
+ write_lock_bh(&table->tb6_lock);
+ fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
+ if (!fn)
+ goto out;
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+ if (rt->rt6i_dev->ifindex != dev->ifindex)
+ continue;
+ if ((rt->rt6i_flags & flags) != flags)
+ continue;
+ if ((noflags != 0) && ((rt->rt6i_flags & noflags) != 0))
+ continue;
+ dst_hold(&rt->dst);
+ break;
+ }
+out:
+ write_unlock_bh(&table->tb6_lock);
+ return rt;
+}
+
+
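
addrconf_get_prefix_route() above selects a prefix route by interface plus a required-flags/forbidden-flags pair instead of the looser rt6_lookup(). A small sketch of the matching predicate; the flag constants mirror the kernel's rt6i_flags values but are used here purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define RTF_GATEWAY   0x000002
#define RTF_DEFAULT   0x010000
#define RTF_ADDRCONF  0x040000
#define RTF_PREFIX_RT 0x080000

static int rt_matches(uint32_t rt_flags, uint32_t flags, uint32_t noflags)
{
    if ((rt_flags & flags) != flags)   /* every required bit must be set */
        return 0;
    if ((rt_flags & noflags) != 0)     /* no forbidden bit may be set */
        return 0;
    return 1;
}

int main(void)
{
    uint32_t prefix_rt  = RTF_ADDRCONF | RTF_PREFIX_RT;
    uint32_t default_rt = RTF_ADDRCONF | RTF_GATEWAY | RTF_DEFAULT;
    uint32_t want = RTF_ADDRCONF | RTF_PREFIX_RT;
    uint32_t ban  = RTF_GATEWAY | RTF_DEFAULT;
    printf("%d %d\n", rt_matches(prefix_rt, want, ban),   /* 1: matches */
                      rt_matches(default_rt, want, ban)); /* 0: rejected */
    return 0;
}
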
/* Create "default" multicast route to the interface */
static void addrconf_add_mroute(struct net_device *dev)
@@ -1842,10 +1877,13 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
if (addrconf_finite_timeout(rt_expires))
rt_expires *= HZ;
- rt = rt6_lookup(net, &pinfo->prefix, NULL,
- dev->ifindex, 1);
+ rt = addrconf_get_prefix_route(&pinfo->prefix,
+ pinfo->prefix_len,
+ dev,
+ RTF_ADDRCONF | RTF_PREFIX_RT,
+ RTF_GATEWAY | RTF_DEFAULT);
- if (rt && addrconf_is_prefix_route(rt)) {
+ if (rt) {
/* Autoconf prefix route */
if (valid_lft == 0) {
ip6_del_rt(rt);
@@ -1999,25 +2037,50 @@ ok:
#ifdef CONFIG_IPV6_PRIVACY
read_lock_bh(&in6_dev->lock);
/* update all temporary addresses in the list */
- list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
- /*
- * When adjusting the lifetimes of an existing
- * temporary address, only lower the lifetimes.
- * Implementations must not increase the
- * lifetimes of an existing temporary address
- * when processing a Prefix Information Option.
- */
+ list_for_each_entry(ift, &in6_dev->tempaddr_list,
+ tmp_list) {
+ int age, max_valid, max_prefered;
+
if (ifp != ift->ifpub)
continue;
+ /*
+ * RFC 4941 section 3.3:
+ * If a received option will extend the lifetime
+ * of a public address, the lifetimes of
+ * temporary addresses should be extended,
+ * subject to the overall constraint that no
+ * temporary addresses should ever remain
+ * "valid" or "preferred" for a time longer than
+ * (TEMP_VALID_LIFETIME) or
+ * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR),
+ * respectively.
+ */
+ age = (now - ift->cstamp) / HZ;
+ max_valid = in6_dev->cnf.temp_valid_lft - age;
+ if (max_valid < 0)
+ max_valid = 0;
+
+ max_prefered = in6_dev->cnf.temp_prefered_lft -
+ in6_dev->cnf.max_desync_factor -
+ age;
+ if (max_prefered < 0)
+ max_prefered = 0;
+
+ if (valid_lft > max_valid)
+ valid_lft = max_valid;
+
+ if (prefered_lft > max_prefered)
+ prefered_lft = max_prefered;
+
spin_lock(&ift->lock);
flags = ift->flags;
- if (ift->valid_lft > valid_lft &&
- ift->valid_lft - valid_lft > (jiffies - ift->tstamp) / HZ)
- ift->valid_lft = valid_lft + (jiffies - ift->tstamp) / HZ;
- if (ift->prefered_lft > prefered_lft &&
- ift->prefered_lft - prefered_lft > (jiffies - ift->tstamp) / HZ)
- ift->prefered_lft = prefered_lft + (jiffies - ift->tstamp) / HZ;
+ ift->valid_lft = valid_lft;
+ ift->prefered_lft = prefered_lft;
+ ift->tstamp = now;
+ if (prefered_lft > 0)
+ ift->flags &= ~IFA_F_DEPRECATED;
+
spin_unlock(&ift->lock);
if (!(flags&IFA_F_TENTATIVE))
ipv6_ifa_notify(0, ift);
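
The rewritten loop above implements the RFC 4941 section 3.3 rule: a received Prefix Information Option may extend a temporary address's lifetimes, but never beyond TEMP_VALID_LIFETIME for the valid lifetime, or TEMP_PREFERRED_LIFETIME minus DESYNC_FACTOR for the preferred one, both measured from the address's creation. A sketch of the clamp using the default 7-day/1-day limits; the times are illustrative:

#include <stdio.h>

static long clamp_lft(long received, long cfg_max, long desync, long age)
{
    long max = cfg_max - desync - age;  /* budget left since creation */
    if (max < 0)
        max = 0;
    return received < max ? received : max;
}

int main(void)
{
    /* An RA advertising a huge lifetime is cut down to the budget. */
    long valid    = clamp_lft(1000000, 604800 /* 7 days */, 0,   3600);
    long prefered = clamp_lft(1000000, 86400  /* 1 day  */, 600, 3600);
    printf("valid=%ld prefered=%ld\n", valid, prefered);
    return 0;
}
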
@@ -2025,9 +2088,11 @@ ok:
if ((create || list_empty(&in6_dev->tempaddr_list)) && in6_dev->cnf.use_tempaddr > 0) {
/*
- * When a new public address is created as described in [ADDRCONF],
- * also create a new temporary address. Also create a temporary
- * address if it's enabled but no temporary address currently exists.
+ * When a new public address is created as
+ * described in [ADDRCONF], also create a new
+ * temporary address. Also create a temporary
+ * address if it's enabled but no temporary
+ * address currently exists.
*/
read_unlock_bh(&in6_dev->lock);
ipv6_create_tempaddr(ifp, NULL);
@@ -2706,7 +2771,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
idev->dead = 1;
/* protected by rtnl_lock */
- rcu_assign_pointer(dev->ip6_ptr, NULL);
+ RCU_INIT_POINTER(dev->ip6_ptr, NULL);
/* Step 1.5: remove snmp6 entry */
snmp6_unregister_dev(idev);
@@ -2969,12 +3034,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
ipv6_ifa_notify(RTM_NEWADDR, ifp);
- /* If added prefix is link local and forwarding is off,
- start sending router solicitations.
+ /* If added prefix is link local and we are prepared to process
+ router advertisements, start sending router solicitations.
*/
- if ((ifp->idev->cnf.forwarding == 0 ||
- ifp->idev->cnf.forwarding == 2) &&
+ if (((ifp->idev->cnf.accept_ra == 1 && !ifp->idev->cnf.forwarding) ||
+ ifp->idev->cnf.accept_ra == 2) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
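
The DAD-completion hunk above switches the router-solicitation condition from forwarding state to accept_ra: solicit only when received router advertisements will actually be processed. The predicate, modeled directly:

#include <stdbool.h>
#include <stdio.h>

static bool should_solicit(int accept_ra, bool forwarding)
{
    return (accept_ra == 1 && !forwarding) || accept_ra == 2;
}

int main(void)
{
    printf("%d %d %d\n",
           should_solicit(1, false),  /* 1: host that processes RAs */
           should_solicit(1, true),   /* 0: router, RAs are ignored */
           should_solicit(2, true));  /* 1: RA processing forced on */
    return 0;
}
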
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 6b03826..399287e 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -3,6 +3,7 @@
* not configured or static.
*/
+#include <linux/export.h>
#include <net/ipv6.h>
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b46e9f8..e248069 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -297,10 +297,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
ipv6_addr_copy(&iph->daddr, &fl6->daddr);
mtu_info = IP6CBMTU(skb);
- if (!mtu_info) {
- kfree_skb(skb);
- return;
- }
mtu_info->ip6m_mtu = mtu;
mtu_info->ip6m_addr.sin6_family = AF_INET6;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 79a485e..bf22a22 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -30,6 +30,7 @@
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/dst.h>
#include <net/sock.h>
@@ -273,12 +274,12 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
__u16 dstbuf;
#endif
- struct dst_entry *dst;
+ struct dst_entry *dst = skb_dst(skb);
if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
!pskb_may_pull(skb, (skb_transport_offset(skb) +
((skb_transport_header(skb)[1] + 1) << 3)))) {
- IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+ IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -1;
@@ -289,9 +290,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
dstbuf = opt->dst1;
#endif
- dst = dst_clone(skb_dst(skb));
if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
- dst_release(dst);
skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
@@ -304,7 +303,6 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
IP6_INC_STATS_BH(dev_net(dst->dev),
ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
- dst_release(dst);
return -1;
}
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 14ed0a9..37f548b 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -2,6 +2,7 @@
* IPv6 library code, needed by static components when full IPv6 support is
* not configured or static.
*/
+#include <linux/export.h>
#include <net/ipv6.h>
/*
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 34d244d..2955715 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -14,6 +14,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <net/fib_rules.h>
#include <net/ipv6.h>
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1190041..90868fb 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -490,7 +490,8 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
goto out_dst_release;
}
- idev = in6_dev_get(skb->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(skb->dev);
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
@@ -500,19 +501,16 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
- goto out_put;
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ len + sizeof(struct icmp6hdr));
}
- err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, len + sizeof(struct icmp6hdr));
-
-out_put:
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
out_dst_release:
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
}
-
EXPORT_SYMBOL(icmpv6_send);
static void icmpv6_echo_reply(struct sk_buff *skb)
@@ -569,7 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
msg.skb = skb;
msg.offset = 0;
@@ -583,13 +581,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (err) {
ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
- goto out_put;
+ } else {
+ err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
+ skb->len + sizeof(struct icmp6hdr));
}
- err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
-
-out_put:
- if (likely(idev != NULL))
- in6_dev_put(idev);
dst_release(dst);
out:
icmpv6_xmit_unlock(sk);
@@ -840,8 +835,7 @@ static int __net_init icmpv6_sk_init(struct net *net)
/* Enough space for 2 64K ICMP packets, including
* sk_buff struct overhead.
*/
- sk->sk_sndbuf =
- (2 * ((64 * 1024) + sizeof(struct sk_buff)));
+ sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
}
return 0;
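
icmp.c here trades the in6_dev_get()/in6_dev_put() reference dance for __in6_dev_get() inside an RCU read-side section; the same conversion repeats in ndisc.c below. A rough user-space analogue of the pattern, with a rwlock standing in for RCU (real RCU readers take no lock at all, which is the point of the change):

#include <pthread.h>
#include <stdio.h>

struct idev { int mib_out_msgs; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct idev *global_idev;

static void send_one(void)
{
    pthread_rwlock_rdlock(&lock);     /* rcu_read_lock() */
    struct idev *idev = global_idev;  /* __in6_dev_get(): no refcount */
    if (idev)
        idev->mib_out_msgs++;         /* use without a get/put pair */
    pthread_rwlock_unlock(&lock);     /* rcu_read_unlock() */
}

int main(void)
{
    struct idev dev = { 0 };
    global_idev = &dev;
    send_one();
    printf("out=%d\n", dev.mib_out_msgs);
    return 0;
}
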
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a58e8c..fee46d5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -211,6 +211,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
struct flowi6 fl6;
struct dst_entry *dst;
struct in6_addr *final_p, final;
+ int res;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = sk->sk_protocol;
@@ -241,12 +242,14 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
__inet6_csk_dst_store(sk, dst, NULL, NULL);
}
- skb_dst_set(skb, dst_clone(dst));
+ rcu_read_lock();
+ skb_dst_set_noref(skb, dst);
/* Restore final destination back after routing done */
ipv6_addr_copy(&fl6.daddr, &np->daddr);
- return ip6_xmit(sk, skb, &fl6, np->opt);
+ res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
+ rcu_read_unlock();
+ return res;
}
-
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 320d91d..93718f3 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -28,10 +28,6 @@
#include <linux/list.h>
#include <linux/slab.h>
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 5430394..4566dbd 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -21,6 +21,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4c882cf..84d0bd5 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -180,7 +180,7 @@ int ip6_output(struct sk_buff *skb)
*/
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt)
+ struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -190,7 +190,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
int hlimit = -1;
- int tclass = 0;
u32 mtu;
if (opt) {
@@ -228,10 +227,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
/*
* Fill in the IPv6 header
*/
- if (np) {
- tclass = np->tclass;
+ if (np)
hlimit = np->hop_limit;
- }
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
@@ -1126,7 +1123,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
hh_len + fragheaderlen + transhdrlen + 20,
(flags & MSG_DONTWAIT), &err);
if (skb == NULL)
- return -ENOMEM;
+ return err;
/* reserve space for Hardware header */
skb_reserve(skb, hh_len);
@@ -1193,6 +1190,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct sk_buff *skb;
unsigned int maxfraglen, fragheaderlen;
int exthdrlen;
+ int dst_exthdrlen;
int hh_len;
int mtu;
int copy;
@@ -1248,7 +1246,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
np->cork.hop_limit = hlimit;
np->cork.tclass = tclass;
mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+ rt->dst.dev->mtu : dst_mtu(&rt->dst);
if (np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
@@ -1259,16 +1257,17 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
cork->length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
- rt->rt6i_nfheader_len;
+ exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
length += exthdrlen;
transhdrlen += exthdrlen;
+ dst_exthdrlen = rt->dst.header_len;
} else {
rt = (struct rt6_info *)cork->dst;
fl6 = &inet->cork.fl.u.ip6;
opt = np->cork.opt;
transhdrlen = 0;
exthdrlen = 0;
+ dst_exthdrlen = 0;
mtu = cork->fragsize;
}
@@ -1368,6 +1367,8 @@ alloc_new_skb:
else
alloclen = datalen + fragheaderlen;
+ alloclen += dst_exthdrlen;
+
/*
* The last fragment gets additional space at tail.
* Note: we overallocate on fragments with MSG_MORE
@@ -1419,9 +1420,9 @@ alloc_new_skb:
/*
* Find where to start putting bytes
*/
- data = skb_put(skb, fraglen);
- skb_set_network_header(skb, exthdrlen);
- data += fragheaderlen;
+ data = skb_put(skb, fraglen + dst_exthdrlen);
+ skb_set_network_header(skb, exthdrlen + dst_exthdrlen);
+ data += fragheaderlen + dst_exthdrlen;
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
@@ -1434,6 +1435,7 @@ alloc_new_skb:
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
+
if (copy < 0) {
err = -EINVAL;
kfree_skb(skb);
@@ -1448,6 +1450,7 @@ alloc_new_skb:
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
+ dst_exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
@@ -1480,13 +1483,13 @@ alloc_new_skb:
if (page && (left = PAGE_SIZE - off) > 0) {
if (copy >= left)
copy = left;
- if (page != frag->page) {
+ if (page != skb_frag_page(frag)) {
if (i == MAX_SKB_FRAGS) {
err = -EMSGSIZE;
goto error;
}
- get_page(page);
skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
+ skb_frag_ref(skb, i);
frag = &skb_shinfo(skb)->frags[i];
}
} else if(i < MAX_SKB_FRAGS) {
@@ -1506,12 +1509,14 @@ alloc_new_skb:
err = -EMSGSIZE;
goto error;
}
- if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
+ if (getfrag(from,
+ skb_frag_address(frag) + skb_frag_size(frag),
+ offset, copy, skb->len, skb) < 0) {
err = -EFAULT;
goto error;
}
sk->sk_sndmsg_off += copy;
- frag->size += copy;
+ skb_frag_size_add(frag, copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0bc9888..bdc15c9 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -218,8 +218,8 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
- rcu_assign_pointer(t->next , rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
/**
@@ -237,7 +237,7 @@ ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -350,7 +350,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
- rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
else
ip6_tnl_unlink(ip6n, t);
ip6_tnl_dst_reset(t);
@@ -889,7 +889,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
struct net_device_stats *stats = &t->dev->stats;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ipv6_tel_txoption opt;
- struct dst_entry *dst;
+ struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
int mtu;
unsigned int max_headroom = sizeof(struct ipv6hdr);
@@ -897,19 +897,20 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
int err = -1;
int pkt_len;
- if ((dst = ip6_tnl_dst_check(t)) != NULL)
- dst_hold(dst);
- else {
- dst = ip6_route_output(net, NULL, fl6);
+ if (!fl6->flowi6_mark)
+ dst = ip6_tnl_dst_check(t);
+ if (!dst) {
+ ndst = ip6_route_output(net, NULL, fl6);
- if (dst->error)
+ if (ndst->error)
goto tx_err_link_failure;
- dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
- dst = NULL;
+ ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(fl6), NULL, 0);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
+ ndst = NULL;
goto tx_err_link_failure;
}
+ dst = ndst;
}
tdev = dst->dev;
@@ -955,8 +956,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
skb = new_skb;
}
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(dst));
-
+ if (fl6->flowi6_mark) {
+ skb_dst_set(skb, dst);
+ ndst = NULL;
+ } else {
+ skb_dst_set_noref(skb, dst);
+ }
skb->transport_header = skb->network_header;
proto = fl6->flowi6_proto;
@@ -987,13 +992,14 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
stats->tx_errors++;
stats->tx_aborted_errors++;
}
- ip6_tnl_dst_store(t, dst);
+ if (ndst)
+ ip6_tnl_dst_store(t, ndst);
return 0;
tx_err_link_failure:
stats->tx_carrier_errors++;
dst_link_failure(skb);
tx_err_dst_release:
- dst_release(dst);
+ dst_release(ndst);
return err;
}
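
ip6_tnl_xmit2() above keeps the tunnel's cached dst only for unmarked packets: once IP6_TNL_F_USE_ORIG_FWMARK copies skb->mark into the flow, routing becomes per-packet, so marked traffic bypasses the cache and its lookup result is never stored back. A sketch of that policy; the structures are invented for illustration:

#include <stdio.h>

struct route { int key_mark; };

static struct route cache;  /* plays the role of t->dst */
static int cache_valid;

static struct route lookup(int mark)
{
    struct route r = { .key_mark = mark };
    return r;
}

static struct route tunnel_route(int mark)
{
    if (mark == 0 && cache_valid)
        return cache;             /* fast path: reuse the cached route */
    struct route r = lookup(mark);
    if (mark == 0) {              /* only unmarked results are cacheable */
        cache = r;
        cache_valid = 1;
    }
    return r;
}

int main(void)
{
    printf("%d\n", tunnel_route(0).key_mark); /* cached for next time */
    printf("%d\n", tunnel_route(7).key_mark); /* routed fresh, not cached */
    return 0;
}
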
@@ -1020,9 +1026,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
dsfield = ipv4_get_dsfield(iph);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
& IPV6_TCLASS_MASK;
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
@@ -1069,10 +1077,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowi6_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
- if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);
+ if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
+ fl6.flowi6_mark = skb->mark;
err = ip6_tnl_xmit2(skb, dev, dsfield, &fl6, encap_limit, &mtu);
if (err != 0) {
@@ -1439,7 +1449,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
- rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ RCU_INIT_POINTER(ip6n->tnls_wc[0], t);
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index def0538..449a918 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,7 @@
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/export.h>
#include <net/ip6_checksum.h>
struct mr6_table {
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 2fbda5f..c99e3ee 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -343,7 +343,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
break;
case IPV6_TRANSPARENT:
- if (!capable(CAP_NET_ADMIN)) {
+ if (valbool && !capable(CAP_NET_ADMIN) && !capable(CAP_NET_RAW)) {
retv = -EPERM;
break;
}
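
The IPV6_TRANSPARENT hunk relaxes the permission check: privilege is demanded only when the option is being turned on, and CAP_NET_RAW now works as an alternative to CAP_NET_ADMIN; clearing the flag is always allowed. Modeled minimally:

#include <stdbool.h>
#include <stdio.h>

static int set_transparent(bool valbool, bool net_admin, bool net_raw,
                           bool *flag)
{
    if (valbool && !net_admin && !net_raw)
        return -1;  /* EPERM: enabling needs a capability */
    *flag = valbool;
    return 0;
}

int main(void)
{
    bool flag = true;
    printf("%d\n", set_transparent(false, false, false, &flag)); /* 0 */
    printf("%d\n", set_transparent(true,  false, false, &flag)); /* -1 */
    return 0;
}
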
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9da6e02..44e5b7f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -370,17 +370,14 @@ static int ndisc_constructor(struct neighbour *neigh)
struct neigh_parms *parms;
int is_multicast = ipv6_addr_is_multicast(addr);
- rcu_read_lock();
in6_dev = in6_dev_get(dev);
if (in6_dev == NULL) {
- rcu_read_unlock();
return -EINVAL;
}
parms = in6_dev->nd_parms;
__neigh_parms_put(neigh->parms);
neigh->parms = neigh_parms_clone(parms);
- rcu_read_unlock();
neigh->type = is_multicast ? RTN_MULTICAST : RTN_UNICAST;
if (!dev->header_ops) {
@@ -533,7 +530,8 @@ void ndisc_send_skb(struct sk_buff *skb,
skb_dst_set(skb, dst);
- idev = in6_dev_get(dst->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, dst->dev,
@@ -543,8 +541,7 @@ void ndisc_send_skb(struct sk_buff *skb,
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
}
EXPORT_SYMBOL(ndisc_send_skb);
@@ -1039,7 +1036,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
if (skb->len < sizeof(*rs_msg))
return;
- idev = in6_dev_get(skb->dev);
+ idev = __in6_dev_get(skb->dev);
if (!idev) {
if (net_ratelimit())
ND_PRINTK1("ICMP6 RS: can't find in6 device\n");
@@ -1080,7 +1077,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
neigh_release(neigh);
}
out:
- in6_dev_put(idev);
+ return;
}
static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
@@ -1179,7 +1176,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
* set the RA_RECV flag in the interface
*/
- in6_dev = in6_dev_get(skb->dev);
+ in6_dev = __in6_dev_get(skb->dev);
if (in6_dev == NULL) {
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: can't find inet6 device for %s.\n",
@@ -1188,7 +1185,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
}
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
- in6_dev_put(in6_dev);
ND_PRINTK2(KERN_WARNING
"ICMP6 RA: invalid ND options\n");
return;
@@ -1225,6 +1221,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
if (!in6_dev->cnf.accept_ra_defrtr)
goto skip_defrtr;
+ if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+ goto skip_defrtr;
+
lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
#ifdef CONFIG_IPV6_ROUTER_PREF
@@ -1255,7 +1254,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
ND_PRINTK0(KERN_ERR
"ICMPv6 RA: %s() failed to add default route.\n",
__func__);
- in6_dev_put(in6_dev);
return;
}
@@ -1265,7 +1263,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
"ICMPv6 RA: %s() got default router without neighbour.\n",
__func__);
dst_release(&rt->dst);
- in6_dev_put(in6_dev);
return;
}
neigh->flags |= NTF_ROUTER;
@@ -1349,6 +1346,9 @@ skip_linkparms:
goto out;
#ifdef CONFIG_IPV6_ROUTE_INFO
+ if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+ goto skip_routeinfo;
+
if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
for (p = ndopts.nd_opts_ri;
@@ -1366,6 +1366,8 @@ skip_linkparms:
&ipv6_hdr(skb)->saddr);
}
}
+
+skip_routeinfo:
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1422,7 +1424,6 @@ out:
dst_release(&rt->dst);
else if (neigh)
neigh_release(neigh);
- in6_dev_put(in6_dev);
}
static void ndisc_redirect_rcv(struct sk_buff *skb)
@@ -1481,13 +1482,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
}
- in6_dev = in6_dev_get(skb->dev);
+ in6_dev = __in6_dev_get(skb->dev);
if (!in6_dev)
return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) {
- in6_dev_put(in6_dev);
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
return;
- }
/* RFC2461 8.1:
* The IP source address of the Redirect MUST be the same as the current
@@ -1497,7 +1496,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
ND_PRINTK2(KERN_WARNING
"ICMPv6 Redirect: invalid ND options\n");
- in6_dev_put(in6_dev);
return;
}
if (ndopts.nd_opts_tgt_lladdr) {
@@ -1506,7 +1504,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!lladdr) {
ND_PRINTK2(KERN_WARNING
"ICMPv6 Redirect: invalid link-layer address length\n");
- in6_dev_put(in6_dev);
return;
}
}
@@ -1518,7 +1515,6 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
on_link);
neigh_release(neigh);
}
- in6_dev_put(in6_dev);
}
void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
@@ -1651,7 +1647,8 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
csum_partial(icmph, len, 0));
skb_dst_set(buff, dst);
- idev = in6_dev_get(dst->dev);
+ rcu_read_lock();
+ idev = __in6_dev_get(dst->dev);
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, buff, NULL, dst->dev,
dst_output);
@@ -1660,8 +1657,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
}
- if (likely(idev != NULL))
- in6_dev_put(idev);
+ rcu_read_unlock();
return;
release:
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 30fcee4..db31561 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -3,6 +3,7 @@
#include <linux/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/export.h>
#include <net/dst.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
@@ -100,9 +101,16 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
};
const void *sk = strict ? &fake_sk : NULL;
-
- *dst = ip6_route_output(net, sk, &fl->u.ip6);
- return (*dst)->error;
+ struct dst_entry *result;
+ int err;
+
+ result = ip6_route_output(net, sk, &fl->u.ip6);
+ err = result->error;
+ if (err)
+ dst_release(result);
+ else
+ *dst = result;
+ return err;
}
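
nf_ip6_route() above is fixed to drop the dst reference when the lookup itself reports an error, publishing the route to the caller only on success. The ownership rule, sketched in user space with invented types:

#include <stdio.h>
#include <stdlib.h>

struct dst { int error; int refs; };

static struct dst *route_output(int fail)
{
    struct dst *d = calloc(1, sizeof(*d));
    d->refs = 1;
    d->error = fail ? -113 /* EHOSTUNREACH */ : 0;
    return d;
}

static void dst_release(struct dst *d)
{
    if (--d->refs == 0)
        free(d);
}

static int do_route(struct dst **out, int fail)
{
    struct dst *result = route_output(fail);
    int err = result->error;
    if (err)
        dst_release(result);  /* drop the reference on failure */
    else
        *out = result;        /* caller now owns the reference */
    return err;
}

int main(void)
{
    struct dst *d = NULL;
    printf("err=%d\n", do_route(&d, 0));
    if (d)
        dst_release(d);
    return 0;
}
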
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0857272..38f00b0 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -182,7 +182,6 @@ fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst)
return container_of(q, struct nf_ct_frag6_queue, q);
oom:
- pr_debug("Can't alloc new queue\n");
return NULL;
}
@@ -370,16 +369,16 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
struct sk_buff *clone;
int i, plen = 0;
- if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
- pr_debug("Can't alloc skb\n");
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (clone == NULL)
goto out_oom;
- }
+
clone->next = head->next;
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
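Both reassembly hunks (here and in reassembly.c below) stop poking skb_shinfo(head)->frags[i].size directly and go through the skb_frag_size() accessor, so the fragment size representation can later change without touching every caller. A trivial compilable sketch of the accessor pattern, with made-up types:

	#include <stdio.h>

	struct frag {
		unsigned int size;	/* representation may change later */
	};

	static inline unsigned int frag_size(const struct frag *f)
	{
		return f->size;		/* single point of truth for callers */
	}

	int main(void)
	{
		struct frag frags[3] = { {100}, {200}, {52} };
		unsigned int i, plen = 0;

		for (i = 0; i < 3; i++)
			plen += frag_size(&frags[i]);
		printf("payload in frags: %u\n", plen);	/* 352 */
		return 0;
	}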
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 18ff5df..1008ce9 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -21,6 +21,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 343852e..331af3b8 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
@@ -130,14 +131,14 @@ static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
- rcu_assign_pointer(mh_filter, filter);
+ RCU_INIT_POINTER(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
- rcu_assign_pointer(mh_filter, NULL);
+ RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
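The rcu_assign_pointer() to RCU_INIT_POINTER() conversions in this file (and in sit.c below) drop the write memory barrier. That is safe when the stored value is NULL, or when no initialization of the pointed-to object needs to be ordered before publication. A userspace analogy using C11 atomics, not the kernel implementation:

	#include <stdatomic.h>
	#include <stdio.h>

	struct filter { int id; };

	static _Atomic(struct filter *) g_filter;

	/* rcu_assign_pointer() analogue: the release store orders the
	 * object's initialization before the pointer becomes visible. */
	static void publish(struct filter *f)
	{
		atomic_store_explicit(&g_filter, f, memory_order_release);
	}

	/* RCU_INIT_POINTER() analogue: plain store, fine for NULL since
	 * there is no pointed-to data whose init must be ordered. */
	static void unpublish(void)
	{
		atomic_store_explicit(&g_filter, NULL, memory_order_relaxed);
	}

	int main(void)
	{
		static struct filter f = { .id = 1 };

		publish(&f);
		unpublish();
		printf("filter now %p\n", (void *)atomic_load(&g_filter));
		return 0;
	}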
@@ -372,9 +373,9 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
read_unlock(&raw_v6_hashinfo.lock);
}
-static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
+static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
- if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
+ if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
@@ -542,8 +543,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
goto out;
offset = rp->offset;
- total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
- skb->data);
+ total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7b954e2..dfb164e 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -42,6 +42,7 @@
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/snmp.h>
@@ -464,8 +465,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
- for (i=0; i<skb_shinfo(head)->nr_frags; i++)
- plen += skb_shinfo(head)->frags[i].size;
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
head->data_len -= clone->len;
head->len -= clone->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb545ed..8473016 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -26,6 +26,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
@@ -1086,11 +1087,10 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->dst.output = ip6_output;
dst_set_neighbour(&rt->dst, neigh);
atomic_set(&rt->dst.__refcnt, 1);
- dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
-
ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
rt->rt6i_dst.plen = 128;
rt->rt6i_idev = idev;
+ dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 00b15ac..a7a1860 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -182,7 +182,7 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
(iter = rtnl_dereference(*tp)) != NULL;
tp = &iter->next) {
if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
+ RCU_INIT_POINTER(*tp, t->next);
break;
}
}
@@ -192,8 +192,8 @@ static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
+ RCU_INIT_POINTER(t->next, rtnl_dereference(*tp));
+ RCU_INIT_POINTER(*tp, t);
}
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -391,7 +391,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
p->addr = a->addr;
p->flags = a->flags;
t->prl_count++;
- rcu_assign_pointer(t->prl, p);
+ RCU_INIT_POINTER(t->prl, p);
out:
return err;
}
@@ -474,7 +474,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
struct sit_net *sitn = net_generic(net, sit_net_id);
if (dev == sitn->fb_tunnel_dev) {
- rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
+ RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
} else {
ipip6_tunnel_unlink(sitn, netdev_priv(dev));
ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
@@ -1176,7 +1176,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
if (!dev->tstats)
return -ENOMEM;
dev_hold(dev);
- rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+ RCU_INIT_POINTER(sitn->tunnels_wc[0], tunnel);
return 0;
}
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index ac83896..5a0d664 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -115,7 +115,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
& COOKIEMASK;
}
-__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
@@ -137,7 +137,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
jiffies / (HZ * 60), mssind);
}
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
@@ -152,7 +152,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tcp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct inet_request_sock *ireq;
struct inet6_request_sock *ireq6;
struct tcp_request_sock *treq;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 6dcf5e7..166a57c 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -10,6 +10,7 @@
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7b8fc57..36131d1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -114,7 +114,7 @@ static __inline__ __sum16 tcp_v6_check(int len,
return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
-static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
+static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
@@ -513,7 +513,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
- err = ip6_xmit(sk, skb, &fl6, opt);
+ err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
err = net_xmit_eval(err);
}
@@ -761,7 +761,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
const struct in6_addr *daddr, struct in6_addr *saddr,
- struct tcphdr *th)
+ const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
@@ -793,13 +793,14 @@ clear_hash_noput:
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
- struct sock *sk, struct request_sock *req,
- struct sk_buff *skb)
+ const struct sock *sk,
+ const struct request_sock *req,
+ const struct sk_buff *skb)
{
const struct in6_addr *saddr, *daddr;
struct tcp_md5sig_pool *hp;
struct hash_desc *desc;
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
if (sk) {
saddr = &inet6_sk(sk)->saddr;
@@ -842,12 +843,12 @@ clear_hash_noput:
return 1;
}
-static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
- __u8 *hash_location = NULL;
+ const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
int genhash;
u8 newhash[16];
@@ -978,9 +979,10 @@ static int tcp6_gro_complete(struct sk_buff *skb)
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
- u32 ts, struct tcp_md5sig_key *key, int rst)
+ u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
- struct tcphdr *th = tcp_hdr(skb), *t1;
+ const struct tcphdr *th = tcp_hdr(skb);
+ struct tcphdr *t1;
struct sk_buff *buff;
struct flowi6 fl6;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -1058,7 +1060,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl6, NULL);
+ ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1070,7 +1072,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
- struct tcphdr *th = tcp_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
u32 seq = 0, ack_seq = 0;
struct tcp_md5sig_key *key = NULL;
@@ -1091,13 +1093,13 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
(th->doff << 2);
- tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
+ tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
- struct tcp_md5sig_key *key)
+ struct tcp_md5sig_key *key, u8 tclass)
{
- tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
+ tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1107,7 +1109,8 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));
+ tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
+ tw->tw_tclass);
inet_twsk_put(tw);
}
@@ -1116,7 +1119,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
+ tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
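The tcp_v6_send_response()/tcp_v6_send_ack() hunks above thread a new tclass argument down to ip6_xmit(), so ACKs sent on behalf of timewait sockets keep the connection's IPv6 traffic class (tw->tw_tclass), while resets and reqsk ACKs pass 0. A toy sketch of the plumbing, with illustrative names:

	#include <stdio.h>

	static void ip6_xmit_sketch(const char *what, unsigned char tclass)
	{
		/* stands in for ip6_xmit(sk, skb, fl6, opt, tclass) */
		printf("%s sent with tclass 0x%02x\n", what, tclass);
	}

	static void send_response(const char *what, unsigned char tclass)
	{
		/* stands in for tcp_v6_send_response(): tclass is a parameter */
		ip6_xmit_sketch(what, tclass);
	}

	int main(void)
	{
		send_response("timewait ACK", 0x28);	/* tw->tw_tclass analogue */
		send_response("RST", 0);		/* reset path passes 0 above */
		return 0;
	}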
@@ -1160,7 +1163,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_extend_values tmp_ext;
struct tcp_options_received tmp_opt;
- u8 *hash_location;
+ const u8 *hash_location;
struct request_sock *req;
struct inet6_request_sock *treq;
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -1608,7 +1611,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
opt_skb = skb_clone(skb, GFP_ATOMIC);
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
if (opt_skb)
@@ -1630,7 +1633,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
* the new socket..
*/
if(nsk != sk) {
- sock_rps_save_rxhash(nsk, skb->rxhash);
+ sock_rps_save_rxhash(nsk, skb);
if (tcp_child_process(sk, nsk, skb))
goto reset;
if (opt_skb)
@@ -1638,7 +1641,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
} else
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
goto reset;
@@ -1688,7 +1691,7 @@ ipv6_pktoptions:
static int tcp_v6_rcv(struct sk_buff *skb)
{
- struct tcphdr *th;
+ const struct tcphdr *th;
const struct ipv6hdr *hdr;
struct sock *sk;
int ret;
@@ -1722,7 +1725,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->when = 0;
- TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
@@ -1856,8 +1859,8 @@ static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
static void *tcp_v6_tw_get_peer(struct sock *sk)
{
- struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
- struct inet_timewait_sock *tw = inet_twsk(sk);
+ const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+ const struct inet_timewait_sock *tw = inet_twsk(sk);
if (tw->tw_family == AF_INET)
return tcp_v4_tw_get_peer(sk);
@@ -2012,7 +2015,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
- struct sock *sk, struct request_sock *req, int i, int uid)
+ const struct sock *sk, struct request_sock *req, int i, int uid)
{
int ttd = req->expires - jiffies;
const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
@@ -2048,10 +2051,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
__u16 destp, srcp;
int timer_active;
unsigned long timer_expires;
- struct inet_sock *inet = inet_sk(sp);
- struct tcp_sock *tp = tcp_sk(sp);
+ const struct inet_sock *inet = inet_sk(sp);
+ const struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
- struct ipv6_pinfo *np = inet6_sk(sp);
+ const struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
src = &np->rcv_saddr;
@@ -2103,7 +2106,7 @@ static void get_timewait6_sock(struct seq_file *seq,
{
const struct in6_addr *dest, *src;
__u16 destp, srcp;
- struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
+ const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
int ttd = tw->tw_ttd - jiffies;
if (ttd < 0)
@@ -2158,12 +2161,18 @@ out:
return 0;
}
+static const struct file_operations tcp6_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = tcp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
.name = "tcp6",
.family = AF_INET6,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &tcp6_afinfo_seq_fops,
.seq_ops = {
.show = tcp6_seq_show,
},
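The /proc registration change above (repeated for udp6 and udplite6 below) stops embedding a half-initialized struct file_operations inside each afinfo and instead points every instance at one shared, fully initialized const table. A sketch with illustrative types:

	#include <stdio.h>

	struct file_ops {
		int (*open)(void);
		/* in the kernel: .owner, .read, .llseek, .release, ... */
	};

	static int tcp6_open(void) { return 0; }

	/* one shared, read-only ops table instead of a per-instance copy */
	static const struct file_ops tcp6_fops = { .open = tcp6_open };

	struct seq_afinfo {
		const char *name;
		const struct file_ops *fops;	/* pointer, not embedded */
	};

	static struct seq_afinfo tcp6_afinfo = {
		.name = "tcp6",
		.fops = &tcp6_fops,
	};

	int main(void)
	{
		printf("%s open -> %d\n", tcp6_afinfo.name,
		       tcp6_afinfo.fops->open());
		return 0;
	}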
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bb95e8e..846f4757 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -509,7 +509,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
int is_udplite = IS_UDPLITE(sk);
if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
- sock_rps_save_rxhash(sk, skb->rxhash);
+ sock_rps_save_rxhash(sk, skb);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
@@ -533,7 +533,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
}
}
- if (rcu_dereference_raw(sk->sk_filter)) {
+ if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto drop;
}
@@ -1424,13 +1424,19 @@ int udp6_seq_show(struct seq_file *seq, void *v)
return 0;
}
+static const struct file_operations udp6_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = udp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 986c4de..1d08e21 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -11,6 +11,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/export.h>
#include "udp_impl.h"
static int udplitev6_rcv(struct sk_buff *skb)
@@ -93,13 +94,20 @@ void udplitev6_exit(void)
}
#ifdef CONFIG_PROC_FS
+
+static const struct file_operations udplite6_afinfo_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = udp_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net
+};
+
static struct udp_seq_afinfo udplite6_seq_afinfo = {
.name = "udplite6",
.family = AF_INET6,
.udp_table = &udplite_table,
- .seq_fops = {
- .owner = THIS_MODULE,
- },
+ .seq_fops = &udplite6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 49a91c5f..faae417 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -28,6 +28,43 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
EXPORT_SYMBOL(xfrm6_find_1stfragopt);
+static int xfrm6_local_dontfrag(struct sk_buff *skb)
+{
+ int proto;
+ struct sock *sk = skb->sk;
+
+ if (sk) {
+ proto = sk->sk_protocol;
+
+ if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
+ return inet6_sk(sk)->dontfrag;
+ }
+
+ return 0;
+}
+
+static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct sock *sk = skb->sk;
+
+ fl6.flowi6_oif = sk->sk_bound_dev_if;
+ ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+ ipv6_local_rxpmtu(sk, &fl6, mtu);
+}
+
+static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct sock *sk = skb->sk;
+
+ fl6.fl6_dport = inet_sk(sk)->inet_dport;
+ ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->daddr);
+
+ ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
+}
+
static int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
@@ -39,7 +76,13 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (!skb->local_df && skb->len > mtu) {
skb->dev = dst->dev;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+
+ if (xfrm6_local_dontfrag(skb))
+ xfrm6_local_rxpmtu(skb, mtu);
+ else if (skb->sk)
+ xfrm6_local_error(skb, mtu);
+ else
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
}
@@ -93,9 +136,18 @@ static int __xfrm6_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
+ int mtu = ip6_skb_dst_mtu(skb);
+
+ if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
+ xfrm6_local_rxpmtu(skb, mtu);
+ return -EMSGSIZE;
+ } else if (!skb->local_df && skb->len > mtu && skb->sk) {
+ xfrm6_local_error(skb, mtu);
+ return -EMSGSIZE;
+ }
if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
- ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+ ((skb->len > mtu && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)))) {
return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
}
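The xfrm6_output hunks implement a three-way policy for oversized packets: IPV6_DONTFRAG sockets get a deferred RXPMTU notification, other locally generated packets get EMSGSIZE signalled on the socket, and forwarded traffic still triggers ICMPV6_PKT_TOOBIG. A compilable sketch of the decision; it omits the kernel's extra local_df check and uses illustrative names:

	#include <stdio.h>

	enum verdict { NOTIFY_RXPMTU, LOCAL_EMSGSIZE, SEND_ICMP_TOOBIG, XMIT_OK };

	struct pkt {
		unsigned int len;
		int has_socket;	/* locally generated? */
		int dontfrag;	/* socket set IPV6_DONTFRAG? */
	};

	static enum verdict check_size(const struct pkt *p, unsigned int mtu)
	{
		if (p->len <= mtu)
			return XMIT_OK;
		if (p->has_socket && p->dontfrag)
			return NOTIFY_RXPMTU;
		if (p->has_socket)
			return LOCAL_EMSGSIZE;
		return SEND_ICMP_TOOBIG;
	}

	int main(void)
	{
		struct pkt local_df = { 1500, 1, 1 };
		struct pkt local    = { 1500, 1, 0 };
		struct pkt forward  = { 1500, 0, 0 };

		printf("%d %d %d\n", check_size(&local_df, 1400),
		       check_size(&local, 1400), check_size(&forward, 1400));
		return 0;
	}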
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 248f0b2..f2d72b8 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -15,6 +15,7 @@
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/export.h>
#include <net/dsfield.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 26b5bfc..f8ba30d 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -9,6 +9,7 @@
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/tcp_states.h>
#include <net/ipx.h>
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index 36c3f03..b0b56a3 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -35,6 +35,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/irda/irda.h>
#include <net/irda/irlmp.h>
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index b3cc8b3..253695d 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -551,7 +551,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
*/
tty->closing = 1;
if (self->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, self->closing_wait);
+ tty_wait_until_sent_from_close(tty, self->closing_wait);
ircomm_tty_shutdown(self);
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 3eca35f..14653b8 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -42,6 +42,7 @@
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/ioctls.h>
#include <asm/uaccess.h>
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index e8d5f44..d14152e 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -50,7 +50,7 @@ static const struct net_device_ops irlan_eth_netdev_ops = {
.ndo_open = irlan_eth_open,
.ndo_stop = irlan_eth_close,
.ndo_start_xmit = irlan_eth_xmit,
- .ndo_set_multicast_list = irlan_eth_set_multicast_list,
+ .ndo_set_rx_mode = irlan_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 285ccd6..32e3bb0 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -29,6 +29,7 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 4369f7f..798ffd9 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -30,6 +30,8 @@
*
********************************************************************/
+#include <linux/export.h>
+
#include <asm/byteorder.h>
#include <net/irda/irda.h>
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
index 16ce9cd..497fbe7 100644
--- a/net/iucv/Kconfig
+++ b/net/iucv/Kconfig
@@ -1,15 +1,17 @@
config IUCV
- tristate "IUCV support (S390 - z/VM only)"
depends on S390
+ def_tristate y if S390
+ prompt "IUCV support (S390 - z/VM only)"
help
Select this option if you want to use inter-user communication
under VM or VIF. If you run on z/VM, say "Y" to enable a fast
communication link between VM guests.
config AFIUCV
- tristate "AF_IUCV support (S390 - z/VM only)"
- depends on IUCV
+ depends on S390
+ def_tristate m if QETH_L3 || IUCV
+ prompt "AF_IUCV Socket support (S390 - z/VM and HiperSockets transport)"
help
- Select this option if you want to use inter-user communication under
- VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
- communication link between VM guests.
+ Select this option if you want to use AF_IUCV socket applications
+ based on the z/VM inter-user communication vehicle (IUCV) or on
+ HiperSockets.
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index e2013e4..274d150 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -27,10 +27,9 @@
#include <asm/cpcmd.h>
#include <linux/kmod.h>
-#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
-#define VERSION "1.1"
+#define VERSION "1.2"
static char iucv_userid[80];
@@ -42,6 +41,8 @@ static struct proto iucv_proto = {
.obj_size = sizeof(struct iucv_sock),
};
+static struct iucv_interface *pr_iucv;
+
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
@@ -90,6 +91,12 @@ do { \
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags);
+static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
+
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
@@ -165,7 +172,7 @@ static int afiucv_pm_freeze(struct device *dev)
case IUCV_CLOSING:
case IUCV_CONNECTED:
if (iucv->path) {
- err = iucv_path_sever(iucv->path, NULL);
+ err = pr_iucv->path_sever(iucv->path, NULL);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -229,7 +236,7 @@ static const struct dev_pm_ops afiucv_pm_ops = {
static struct device_driver af_iucv_driver = {
.owner = THIS_MODULE,
.name = "afiucv",
- .bus = &iucv_bus,
+ .bus = NULL,
.pm = &afiucv_pm_ops,
};
@@ -294,7 +301,11 @@ static inline int iucv_below_msglim(struct sock *sk)
if (sk->sk_state != IUCV_CONNECTED)
return 1;
- return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ if (iucv->transport == AF_IUCV_TRANS_IUCV)
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ else
+ return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
+ (atomic_read(&iucv->pendings) <= 0));
}
/**
@@ -312,6 +323,79 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_unlock();
}
+/**
+ * afiucv_hs_send() - send a message through HiperSockets transport
+ */
+static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ struct sk_buff *skb, u8 flags)
+{
+ struct net *net = sock_net(sock);
+ struct iucv_sock *iucv = iucv_sk(sock);
+ struct af_iucv_trans_hdr *phs_hdr;
+ struct sk_buff *nskb;
+ int err, confirm_recv = 0;
+
+ memset(skb->head, 0, ETH_HLEN);
+ phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
+ sizeof(struct af_iucv_trans_hdr));
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
+
+ phs_hdr->magic = ETH_P_AF_IUCV;
+ phs_hdr->version = 1;
+ phs_hdr->flags = flags;
+ if (flags == AF_IUCV_FLAG_SYN)
+ phs_hdr->window = iucv->msglimit;
+ else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
+ confirm_recv = atomic_read(&iucv->msg_recv);
+ phs_hdr->window = confirm_recv;
+ if (confirm_recv)
+ phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
+ }
+ memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
+ memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
+ memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
+ memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
+ ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
+ ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
+ ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
+ ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
+ if (imsg)
+ memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+ rcu_read_lock();
+ skb->dev = dev_get_by_index_rcu(net, sock->sk_bound_dev_if);
+ rcu_read_unlock();
+ if (!skb->dev)
+ return -ENODEV;
+ if (!(skb->dev->flags & IFF_UP))
+ return -ENETDOWN;
+ if (skb->len > skb->dev->mtu) {
+ if (sock->sk_type == SOCK_SEQPACKET)
+ return -EMSGSIZE;
+ else
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ skb_queue_tail(&iucv->send_skb_q, nskb);
+ err = dev_queue_xmit(skb);
+ if (err) {
+ skb_unlink(nskb, &iucv->send_skb_q);
+ kfree_skb(nskb);
+ } else {
+ atomic_sub(confirm_recv, &iucv->msg_recv);
+ WARN_ON(atomic_read(&iucv->msg_recv) < 0);
+ }
+ return err;
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -380,6 +464,8 @@ static void iucv_sock_close(struct sock *sk)
unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
+ int err, blen;
+ struct sk_buff *skb;
iucv_sock_clear_timer(sk);
lock_sock(sk);
@@ -390,6 +476,20 @@ static void iucv_sock_close(struct sock *sk)
break;
case IUCV_CONNECTED:
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ /* send fin */
+ blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (skb) {
+ skb_reserve(skb,
+ sizeof(struct af_iucv_trans_hdr) +
+ ETH_HLEN);
+ err = afiucv_hs_send(NULL, sk, skb,
+ AF_IUCV_FLAG_FIN);
+ }
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
@@ -412,7 +512,7 @@ static void iucv_sock_close(struct sock *sk)
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
- iucv_path_sever(iucv->path, user_data);
+ pr_iucv->path_sever(iucv->path, user_data);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -444,23 +544,33 @@ static void iucv_sock_init(struct sock *sk, struct sock *parent)
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
struct sock *sk;
+ struct iucv_sock *iucv;
sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
if (!sk)
return NULL;
+ iucv = iucv_sk(sk);
sock_init_data(sock, sk);
- INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
- spin_lock_init(&iucv_sk(sk)->accept_q_lock);
- skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
- INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
- spin_lock_init(&iucv_sk(sk)->message_q.lock);
- skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
- iucv_sk(sk)->send_tag = 0;
- iucv_sk(sk)->flags = 0;
- iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
- iucv_sk(sk)->path = NULL;
- memset(&iucv_sk(sk)->src_user_id , 0, 32);
+ INIT_LIST_HEAD(&iucv->accept_q);
+ spin_lock_init(&iucv->accept_q_lock);
+ skb_queue_head_init(&iucv->send_skb_q);
+ INIT_LIST_HEAD(&iucv->message_q.list);
+ spin_lock_init(&iucv->message_q.lock);
+ skb_queue_head_init(&iucv->backlog_skb_q);
+ iucv->send_tag = 0;
+ atomic_set(&iucv->pendings, 0);
+ iucv->flags = 0;
+ iucv->msglimit = 0;
+ atomic_set(&iucv->msg_sent, 0);
+ atomic_set(&iucv->msg_recv, 0);
+ iucv->path = NULL;
+ iucv->sk_txnotify = afiucv_hs_callback_txnotify;
+ memset(&iucv->src_user_id , 0, 32);
+ if (pr_iucv)
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ else
+ iucv->transport = AF_IUCV_TRANS_HIPER;
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@@ -591,7 +701,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
- int err;
+ int err = 0;
+ struct net_device *dev;
+ char uid[9];
/* Verify the input sockaddr */
if (!addr || addr->sa_family != AF_IUCV)
@@ -610,19 +722,46 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
err = -EADDRINUSE;
goto done_unlock;
}
- if (iucv->path) {
- err = 0;
+ if (iucv->path)
goto done_unlock;
- }
/* Bind the socket */
- memcpy(iucv->src_name, sa->siucv_name, 8);
- /* Copy the user id */
- memcpy(iucv->src_user_id, iucv_userid, 8);
- sk->sk_state = IUCV_BOUND;
- err = 0;
+ if (pr_iucv)
+ if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
+ goto vm_bind; /* VM IUCV transport */
+ /* try hiper transport */
+ memcpy(uid, sa->siucv_user_id, 8); /* copy the 8-byte id, not sizeof(uid) */
+ ASCEBC(uid, 8);
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ if (!memcmp(dev->perm_addr, uid, 8)) {
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
+ sock->sk->sk_bound_dev_if = dev->ifindex;
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_HIPER;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
+ rcu_read_unlock();
+ goto done_unlock;
+ }
+ }
+ rcu_read_unlock();
+vm_bind:
+ if (pr_iucv) {
+ /* use local userid for backward compat */
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+ memcpy(iucv->src_user_id, iucv_userid, 8);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+ goto done_unlock;
+ }
+ /* found no dev to bind */
+ err = -ENODEV;
done_unlock:
/* Release the socket list lock */
write_unlock_bh(&iucv_sk_list.lock);
@@ -658,45 +797,44 @@ static int iucv_sock_autobind(struct sock *sk)
memcpy(&iucv->src_name, name, 8);
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+
return err;
}
-/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
- int alen, int flags)
+static int afiucv_hs_connect(struct socket *sock)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
- struct iucv_sock *iucv;
- unsigned char user_data[16];
- int err;
-
- if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
- return -EINVAL;
-
- if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
+ struct sk_buff *skb;
+ int blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ int err = 0;
- if (sk->sk_state == IUCV_OPEN) {
- err = iucv_sock_autobind(sk);
- if (unlikely(err))
- return err;
+ /* send syn */
+ skb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (!skb) {
+ err = -ENOMEM;
+ goto done;
}
+ skb->dev = NULL;
+ skb_reserve(skb, blen);
+ err = afiucv_hs_send(NULL, sk, skb, AF_IUCV_FLAG_SYN);
+done:
+ return err;
+}
- lock_sock(sk);
-
- /* Set the destination information */
- memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
- memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
+static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ unsigned char user_data[16];
+ int err;
high_nmcpy(user_data, sa->siucv_name);
- low_nmcpy(user_data, iucv_sk(sk)->src_name);
+ low_nmcpy(user_data, iucv->src_name);
ASCEBC(user_data, sizeof(user_data));
- iucv = iucv_sk(sk);
/* Create path. */
iucv->path = iucv_path_alloc(iucv->msglimit,
IUCV_IPRMDATA, GFP_KERNEL);
@@ -704,8 +842,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ENOMEM;
goto done;
}
- err = iucv_path_connect(iucv->path, &af_iucv_handler,
- sa->siucv_user_id, NULL, user_data, sk);
+ err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
+ sa->siucv_user_id, NULL, user_data,
+ sk);
if (err) {
iucv_path_free(iucv->path);
iucv->path = NULL;
@@ -724,21 +863,62 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
err = -ECONNREFUSED;
break;
}
- goto done;
}
+done:
+ return err;
+}
- if (sk->sk_state != IUCV_CONNECTED) {
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int err;
+
+ if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+ return -EINVAL;
+
+ if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+ return -EBADFD;
+
+ if (sk->sk_state == IUCV_OPEN &&
+ iucv->transport == AF_IUCV_TRANS_HIPER)
+ return -EBADFD; /* explicit bind required */
+
+ if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
+ return -EINVAL;
+
+ if (sk->sk_state == IUCV_OPEN) {
+ err = iucv_sock_autobind(sk);
+ if (unlikely(err))
+ return err;
+ }
+
+ lock_sock(sk);
+
+ /* Set the destination information */
+ memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
+ memcpy(iucv->dst_name, sa->siucv_name, 8);
+
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ err = afiucv_hs_connect(sock);
+ else
+ err = afiucv_path_connect(sock, addr);
+ if (err)
+ goto done;
+
+ if (sk->sk_state != IUCV_CONNECTED)
err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
IUCV_DISCONN),
sock_sndtimeo(sk, flags & O_NONBLOCK));
- }
- if (sk->sk_state == IUCV_DISCONN) {
+ if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
err = -ECONNREFUSED;
- }
- if (err) {
- iucv_path_sever(iucv->path, NULL);
+ if (err && iucv->transport == AF_IUCV_TRANS_IUCV) {
+ pr_iucv->path_sever(iucv->path, NULL);
iucv_path_free(iucv->path);
iucv->path = NULL;
}
@@ -833,20 +1013,21 @@ static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
{
struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
addr->sa_family = AF_IUCV;
*len = sizeof(struct sockaddr_iucv);
if (peer) {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
- memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->dst_name, 8);
} else {
- memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
- memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
+ memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
+ memcpy(siucv->siucv_name, iucv->src_name, 8);
}
memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
- memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
+ memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
return 0;
}
@@ -871,7 +1052,7 @@ static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
memcpy(prmdata, (void *) skb->data, skb->len);
prmdata[7] = 0xff - (u8) skb->len;
- return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
+ return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
(void *) prmdata, 8);
}
@@ -960,9 +1141,16 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
- skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb = sock_alloc_send_skb(sk,
+ len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
+ noblock, &err);
+ else
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
if (!skb)
goto out;
+ if (iucv->transport == AF_IUCV_TRANS_HIPER)
+ skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto fail;
@@ -983,6 +1171,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* increment and save iucv message tag for msg_completion cbk */
txmsg.tag = iucv->send_tag++;
memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ atomic_inc(&iucv->msg_sent);
+ err = afiucv_hs_send(&txmsg, sk, skb, 0);
+ if (err) {
+ atomic_dec(&iucv->msg_sent);
+ goto fail;
+ }
+ goto release;
+ }
skb_queue_tail(&iucv->send_skb_q, skb);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
@@ -999,13 +1196,13 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
/* this error should never happen since the
* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
+ pr_iucv->path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
} else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
(void *) skb->data, skb->len);
if (err) {
if (err == 3) {
@@ -1023,6 +1220,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto fail;
}
+release:
release_sock(sk);
return len;
@@ -1095,8 +1293,9 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
skb->len = 0;
}
} else {
- rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
- skb->data, len, NULL);
+ rc = pr_iucv->message_receive(path, msg,
+ msg->flags & IUCV_IPRMDATA,
+ skb->data, len, NULL);
if (rc) {
kfree_skb(skb);
return;
@@ -1110,7 +1309,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
kfree_skb(skb);
skb = NULL;
if (rc) {
- iucv_path_sever(path, NULL);
+ pr_iucv->path_sever(path, NULL);
return;
}
skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
@@ -1154,7 +1353,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
- struct sk_buff *skb, *rskb, *cskb;
+ struct sk_buff *skb, *rskb, *cskb, *sskb;
+ int blen;
int err = 0;
if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
@@ -1179,7 +1379,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
copied = min_t(unsigned int, rlen, len);
cskb = skb;
- if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
+ if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) {
if (!(flags & MSG_PEEK))
skb_queue_head(&sk->sk_receive_queue, skb);
return -EFAULT;
@@ -1217,6 +1417,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
kfree_skb(skb);
+ atomic_inc(&iucv->msg_recv);
/* Queue backlog skbs */
spin_lock_bh(&iucv->message_q.lock);
@@ -1233,6 +1434,24 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (skb_queue_empty(&iucv->backlog_skb_q)) {
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
+ if (atomic_read(&iucv->msg_recv) >=
+ iucv->msglimit / 2) {
+ /* send WIN to peer */
+ blen = sizeof(struct af_iucv_trans_hdr) +
+ ETH_HLEN;
+ sskb = sock_alloc_send_skb(sk, blen, 1, &err);
+ if (sskb) {
+ skb_reserve(sskb,
+ sizeof(struct af_iucv_trans_hdr)
+ + ETH_HLEN);
+ err = afiucv_hs_send(NULL, sk, sskb,
+ AF_IUCV_FLAG_WIN);
+ }
+ if (err) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ }
+ }
}
spin_unlock_bh(&iucv->message_q.lock);
}
@@ -1327,8 +1546,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
txmsg.class = 0;
txmsg.tag = 0;
- err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
- (void *) iprm_shutdown, 8);
+ err = pr_iucv->message_send(iucv->path, &txmsg, IUCV_IPRMDATA,
+ 0, (void *) iprm_shutdown, 8);
if (err) {
switch (err) {
case 1:
@@ -1345,7 +1564,7 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
}
if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
- err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
+ err = pr_iucv->path_quiesce(iucv->path, NULL);
if (err)
err = -ENOTCONN;
@@ -1372,7 +1591,7 @@ static int iucv_sock_release(struct socket *sock)
/* Unregister with IUCV base support */
if (iucv_sk(sk)->path) {
- iucv_path_sever(iucv_sk(sk)->path, NULL);
+ pr_iucv->path_sever(iucv_sk(sk)->path, NULL);
iucv_path_free(iucv_sk(sk)->path);
iucv_sk(sk)->path = NULL;
}
@@ -1514,14 +1733,14 @@ static int iucv_callback_connreq(struct iucv_path *path,
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
if (sk->sk_state != IUCV_LISTEN) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
/* Check for backlog size */
if (sk_acceptq_is_full(sk)) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1529,7 +1748,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* Create the new socket */
nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
if (!nsk) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
goto fail;
}
@@ -1553,9 +1772,9 @@ static int iucv_callback_connreq(struct iucv_path *path,
/* set message limit for path based on msglimit of accepting socket */
niucv->msglimit = iucv->msglimit;
path->msglim = iucv->msglimit;
- err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
if (err) {
- err = iucv_path_sever(path, user_data);
+ err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
iucv_sock_kill(nsk);
goto fail;
@@ -1589,7 +1808,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN) {
- iucv_message_reject(path, msg);
+ pr_iucv->message_reject(path, msg);
return;
}
@@ -1600,7 +1819,7 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
goto save_message;
len = atomic_read(&sk->sk_rmem_alloc);
- len += iucv_msg_length(msg) + sizeof(struct sk_buff);
+ len += SKB_TRUESIZE(iucv_msg_length(msg));
if (len > sk->sk_rcvbuf)
goto save_message;
@@ -1692,6 +1911,389 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
+/***************** HiperSockets transport callbacks ********************/
+static void afiucv_swap_src_dest(struct sk_buff *skb)
+{
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+ char tmpID[8];
+ char tmpName[8];
+
+ ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ memcpy(tmpID, trans_hdr->srcUserID, 8);
+ memcpy(tmpName, trans_hdr->srcAppName, 8);
+ memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
+ memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
+ memcpy(trans_hdr->destUserID, tmpID, 8);
+ memcpy(trans_hdr->destAppName, tmpName, 8);
+ skb_push(skb, ETH_HLEN);
+ memset(skb->data, 0, ETH_HLEN);
+}
+
+/**
+ * afiucv_hs_callback_syn() - react to a received SYN
+ **/
+static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
+{
+ struct sock *nsk;
+ struct iucv_sock *iucv, *niucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ int err;
+
+ iucv = iucv_sk(sk);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ if (!iucv) {
+ /* no sock - connection refused */
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ goto out;
+ }
+
+ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
+ bh_lock_sock(sk);
+ if ((sk->sk_state != IUCV_LISTEN) ||
+ sk_acceptq_is_full(sk) ||
+ !nsk) {
+ /* error on server socket - connection refused */
+ if (nsk)
+ sk_free(nsk);
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
+ err = dev_queue_xmit(skb);
+ bh_unlock_sock(sk);
+ goto out;
+ }
+
+ niucv = iucv_sk(nsk);
+ iucv_sock_init(nsk, sk);
+ niucv->transport = AF_IUCV_TRANS_HIPER;
+ niucv->msglimit = iucv->msglimit;
+ if (!trans_hdr->window)
+ niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
+ else
+ niucv->msglimit_peer = trans_hdr->window;
+ memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
+ memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
+ memcpy(niucv->src_name, iucv->src_name, 8);
+ memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+ nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ afiucv_swap_src_dest(skb);
+ trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
+ trans_hdr->window = niucv->msglimit;
+ /* if receiver acks the xmit connection is established */
+ err = dev_queue_xmit(skb);
+ if (!err) {
+ iucv_accept_enqueue(sk, nsk);
+ nsk->sk_state = IUCV_CONNECTED;
+ sk->sk_data_ready(sk, 1);
+ } else
+ iucv_sock_kill(nsk);
+ bh_unlock_sock(sk);
+
+out:
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synack() - react to a received SYN-ACK
+ **/
+static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ iucv->msglimit_peer = trans_hdr->window;
+ sk->sk_state = IUCV_CONNECTED;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_synfin() - react to a received SYN_FIN
+ **/
+static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv)
+ goto out;
+ if (sk->sk_state != IUCV_BOUND)
+ goto out;
+ bh_lock_sock(sk);
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+out:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_fin() - react to a received FIN
+ **/
+static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ /* other end of connection closed */
+ if (iucv) {
+ bh_lock_sock(sk);
+ if (!list_empty(&iucv->accept_q))
+ sk->sk_state = IUCV_SEVERED;
+ else
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+ }
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_win() - react to a received WIN
+ **/
+static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct af_iucv_trans_hdr *trans_hdr =
+ (struct af_iucv_trans_hdr *)skb->data;
+
+ if (!iucv)
+ return NET_RX_SUCCESS;
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return NET_RX_SUCCESS;
+
+ atomic_sub(trans_hdr->window, &iucv->msg_sent);
+ iucv_sock_wake_msglim(sk);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_callback_rx() - react to received data
+ **/
+static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (!iucv) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ if (sk->sk_state != IUCV_CONNECTED) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+ /* write stuff from iucv_msg to skb cb */
+ if (skb->len <= sizeof(struct af_iucv_trans_hdr)) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+ skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ spin_lock(&iucv->message_q.lock);
+ if (skb_queue_empty(&iucv->backlog_skb_q)) {
+ if (sock_queue_rcv_skb(sk, skb)) {
+ /* handle rcv queue full */
+ skb_queue_tail(&iucv->backlog_skb_q, skb);
+ }
+ } else
+ skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
+ spin_unlock(&iucv->message_q.lock);
+ return NET_RX_SUCCESS;
+}
+
+/**
+ * afiucv_hs_rcv() - base function for data arriving through the
+ * HiperSockets transport
+ * called from the netif RX softirq
+ **/
+static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct hlist_node *node;
+ struct sock *sk;
+ struct iucv_sock *iucv;
+ struct af_iucv_trans_hdr *trans_hdr;
+ char nullstring[8];
+ int err = 0;
+
+ skb_pull(skb, ETH_HLEN);
+ trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
+ EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
+ EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
+ EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
+ memset(nullstring, 0, sizeof(nullstring));
+ iucv = NULL;
+ sk = NULL;
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, node, &iucv_sk_list.head) {
+ if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ nullstring, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ } else {
+ if ((!memcmp(&iucv_sk(sk)->src_name,
+ trans_hdr->destAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->src_user_id,
+ trans_hdr->destUserID, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_name,
+ trans_hdr->srcAppName, 8)) &&
+ (!memcmp(&iucv_sk(sk)->dst_user_id,
+ trans_hdr->srcUserID, 8))) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ }
+ }
+ read_unlock(&iucv_sk_list.lock);
+ if (!iucv)
+ sk = NULL;
+
+ /* no sock:
+ * how should we send with no sock?
+ * 1) send without a sock, with no send rc checking?
+ * 2) introduce a default sock to handle these cases
+ *
+ * SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
+ * data -> send FIN
+ * SYN|ACK, SYN|FIN, FIN -> no action? */
+
+ switch (trans_hdr->flags) {
+ case AF_IUCV_FLAG_SYN:
+ /* connect request */
+ err = afiucv_hs_callback_syn(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
+ /* connect request confirmed */
+ err = afiucv_hs_callback_synack(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
+ /* connect request refused */
+ err = afiucv_hs_callback_synfin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_FIN):
+ /* close request */
+ err = afiucv_hs_callback_fin(sk, skb);
+ break;
+ case (AF_IUCV_FLAG_WIN):
+ err = afiucv_hs_callback_win(sk, skb);
+ if (skb->len > sizeof(struct af_iucv_trans_hdr))
+ err = afiucv_hs_callback_rx(sk, skb);
+ else
+ kfree_skb(skb); /* skbs are freed with kfree_skb(), not kfree() */
+ break;
+ case 0:
+ /* plain data frame */
+ err = afiucv_hs_callback_rx(sk, skb);
+ break;
+ default:
+ ;
+ }
+
+ return err;
+}
+
+/**
+ * afiucv_hs_callback_txnotify() - handle send notifications from the
+ * HiperSockets transport
+ **/
+static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
+ enum iucv_tx_notify n)
+{
+ struct sock *isk = skb->sk;
+ struct sock *sk = NULL;
+ struct iucv_sock *iucv = NULL;
+ struct sk_buff_head *list;
+ struct sk_buff *list_skb;
+ struct sk_buff *this = NULL;
+ unsigned long flags;
+ struct hlist_node *node;
+
+ read_lock(&iucv_sk_list.lock);
+ sk_for_each(sk, node, &iucv_sk_list.head)
+ if (sk == isk) {
+ iucv = iucv_sk(sk);
+ break;
+ }
+ read_unlock(&iucv_sk_list.lock);
+
+ if (!iucv)
+ return;
+
+ bh_lock_sock(sk);
+ list = &iucv->send_skb_q;
+ list_skb = list->next;
+ if (skb_queue_empty(list))
+ goto out_unlock;
+
+ spin_lock_irqsave(&list->lock, flags);
+ while (list_skb != (struct sk_buff *)list) {
+ if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
+ this = list_skb;
+ switch (n) {
+ case TX_NOTIFY_OK:
+ __skb_unlink(this, list);
+ iucv_sock_wake_msglim(sk);
+ kfree_skb(this);
+ break;
+ case TX_NOTIFY_PENDING:
+ atomic_inc(&iucv->pendings);
+ break;
+ case TX_NOTIFY_DELAYED_OK:
+ __skb_unlink(this, list);
+ atomic_dec(&iucv->pendings);
+ if (atomic_read(&iucv->pendings) <= 0)
+ iucv_sock_wake_msglim(sk);
+ kfree_skb(this);
+ break;
+ case TX_NOTIFY_UNREACHABLE:
+ case TX_NOTIFY_DELAYED_UNREACHABLE:
+ case TX_NOTIFY_TPQFULL: /* not yet used */
+ case TX_NOTIFY_GENERALERROR:
+ case TX_NOTIFY_DELAYED_GENERALERROR:
+ __skb_unlink(this, list);
+ kfree_skb(this);
+ if (!list_empty(&iucv->accept_q))
+ sk->sk_state = IUCV_SEVERED;
+ else
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
+ break;
+ }
+ break;
+ }
+ list_skb = list_skb->next;
+ }
+ spin_unlock_irqrestore(&list->lock, flags);
+
+out_unlock:
+ bh_unlock_sock(sk);
+}
static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
@@ -1718,71 +2320,104 @@ static const struct net_proto_family iucv_sock_family_ops = {
.create = iucv_sock_create,
};
-static int __init afiucv_init(void)
+static struct packet_type iucv_packet_type = {
+ .type = cpu_to_be16(ETH_P_AF_IUCV),
+ .func = afiucv_hs_rcv,
+};
+
+static int afiucv_iucv_init(void)
{
int err;
- if (!MACHINE_IS_VM) {
- pr_err("The af_iucv module cannot be loaded"
- " without z/VM\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
- cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
- if (unlikely(err)) {
- WARN_ON(err);
- err = -EPROTONOSUPPORT;
- goto out;
- }
-
- err = iucv_register(&af_iucv_handler, 0);
+ err = pr_iucv->iucv_register(&af_iucv_handler, 0);
if (err)
goto out;
- err = proto_register(&iucv_proto, 0);
- if (err)
- goto out_iucv;
- err = sock_register(&iucv_sock_family_ops);
- if (err)
- goto out_proto;
/* establish dummy device */
+ af_iucv_driver.bus = pr_iucv->bus;
err = driver_register(&af_iucv_driver);
if (err)
- goto out_sock;
+ goto out_iucv;
af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
if (!af_iucv_dev) {
err = -ENOMEM;
goto out_driver;
}
dev_set_name(af_iucv_dev, "af_iucv");
- af_iucv_dev->bus = &iucv_bus;
- af_iucv_dev->parent = iucv_root;
+ af_iucv_dev->bus = pr_iucv->bus;
+ af_iucv_dev->parent = pr_iucv->root;
af_iucv_dev->release = (void (*)(struct device *))kfree;
af_iucv_dev->driver = &af_iucv_driver;
err = device_register(af_iucv_dev);
if (err)
goto out_driver;
-
return 0;
out_driver:
driver_unregister(&af_iucv_driver);
+out_iucv:
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+out:
+ return err;
+}
+
+static int __init afiucv_init(void)
+{
+ int err;
+
+ if (MACHINE_IS_VM) {
+ cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
+ if (unlikely(err)) {
+ WARN_ON(err);
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
+ if (!pr_iucv) {
+ printk(KERN_WARNING "iucv_if lookup failed\n");
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ }
+ } else {
+ memset(&iucv_userid, 0, sizeof(iucv_userid));
+ pr_iucv = NULL;
+ }
+
+ err = proto_register(&iucv_proto, 0);
+ if (err)
+ goto out;
+ err = sock_register(&iucv_sock_family_ops);
+ if (err)
+ goto out_proto;
+
+ if (pr_iucv) {
+ err = afiucv_iucv_init();
+ if (err)
+ goto out_sock;
+ }
+ dev_add_pack(&iucv_packet_type);
+ return 0;
+
out_sock:
sock_unregister(PF_IUCV);
out_proto:
proto_unregister(&iucv_proto);
-out_iucv:
- iucv_unregister(&af_iucv_handler, 0);
out:
+ if (pr_iucv)
+ symbol_put(iucv_if);
return err;
}
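
After this restructuring the module loads even without z/VM: classic IUCV is resolved at runtime into pr_iucv, and the HiperSockets packet handler is registered unconditionally. Any path that still needs classic IUCV must therefore test pr_iucv first; a sketch, assuming the classic iucv_message_send() signature:

	/* sketch only: use the classic transport when present */
	if (pr_iucv)
		err = pr_iucv->message_send(path, &msg, 0, 0, buf, len);
	else
		err = -EPROTONOSUPPORT;	/* only the HiperSockets transport exists */
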
static void __exit afiucv_exit(void)
{
- device_unregister(af_iucv_dev);
- driver_unregister(&af_iucv_driver);
+ if (pr_iucv) {
+ device_unregister(af_iucv_dev);
+ driver_unregister(&af_iucv_driver);
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
+ symbol_put(iucv_if);
+ }
+ dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);
- iucv_unregister(&af_iucv_handler, 0);
}
module_init(afiucv_init);
@@ -1793,3 +2428,4 @@ MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
+
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 075a380..403be43 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1974,6 +1974,27 @@ out:
return rc;
}
+struct iucv_interface iucv_if = {
+ .message_receive = iucv_message_receive,
+ .__message_receive = __iucv_message_receive,
+ .message_reply = iucv_message_reply,
+ .message_reject = iucv_message_reject,
+ .message_send = iucv_message_send,
+ .__message_send = __iucv_message_send,
+ .message_send2way = iucv_message_send2way,
+ .message_purge = iucv_message_purge,
+ .path_accept = iucv_path_accept,
+ .path_connect = iucv_path_connect,
+ .path_quiesce = iucv_path_quiesce,
+ .path_resume = iucv_path_resume,
+ .path_sever = iucv_path_sever,
+ .iucv_register = iucv_register,
+ .iucv_unregister = iucv_unregister,
+ .bus = NULL,
+ .root = NULL,
+};
+EXPORT_SYMBOL(iucv_if);
+
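Exporting one interface structure instead of fifteen separate functions lets af_iucv bind to the whole classic API with a single symbol_get() and release it with symbol_put(), avoiding any link-time dependency on iucv.ko. A consumer sketch (my_handler is hypothetical):

	struct iucv_interface *iface;

	iface = try_then_request_module(symbol_get(iucv_if), "iucv");
	if (iface) {
		iface->iucv_register(&my_handler, 0);
		/* ... iface->message_send(), iface->path_connect(), ... */
		symbol_put(iucv_if);
	}
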
/**
* iucv_init
*
@@ -2038,6 +2059,8 @@ static int __init iucv_init(void)
rc = bus_register(&iucv_bus);
if (rc)
goto out_reboot;
+ iucv_if.root = iucv_root;
+ iucv_if.bus = &iucv_bus;
return 0;
out_reboot:
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 34b2dde..bf8d50c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -397,6 +397,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
* expect to send up next, dequeue it and any other
* in-sequence packets behind it.
*/
+start:
spin_lock_bh(&session->reorder_q.lock);
skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
@@ -433,7 +434,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
*/
spin_unlock_bh(&session->reorder_q.lock);
l2tp_recv_dequeue_skb(session, skb);
- spin_lock_bh(&session->reorder_q.lock);
+ goto start;
}
out:
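
The goto replaces re-taking the lock and continuing with an iterator that may have been invalidated while the lock was dropped. The general pattern, sketched with hypothetical helpers:

	struct sk_buff *skb, *tmp;
restart:
	spin_lock_bh(&q->lock);			/* q: struct sk_buff_head * */
	skb_queue_walk_safe(q, skb, tmp) {
		if (!in_sequence(skb))		/* in_sequence(): hypothetical */
			continue;
		__skb_unlink(skb, q);
		spin_unlock_bh(&q->lock);
		deliver(skb);			/* may modify the queue */
		goto restart;			/* iterator is stale after unlock */
	}
	spin_unlock_bh(&q->lock);
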
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f42cd09..8a90d75 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -395,6 +395,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
struct pppol2tp_session *ps;
int old_headroom;
int new_headroom;
+ int uhlen, headroom;
if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
goto abort;
@@ -413,7 +414,13 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
goto abort_put_sess;
old_headroom = skb_headroom(skb);
- if (skb_cow_head(skb, sizeof(ppph)))
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
+ headroom = NET_SKB_PAD +
+ sizeof(struct iphdr) + /* IP header */
+ uhlen + /* UDP header (if L2TP_ENCAPTYPE_UDP) */
+ session->hdr_len + /* L2TP header */
+ sizeof(ppph); /* PPP header */
+ if (skb_cow_head(skb, headroom))
goto abort_put_sess_tun;
new_headroom = skb_headroom(skb);
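
The old call reserved headroom only for the 2-byte PPP address/control field, leaving the IP, UDP and L2TP headers to force a reallocation later. As a rough worked example, with UDP encapsulation and a typical L2TPv2 data header this now requests about NET_SKB_PAD (commonly 32 or 64) + 20 (IPv4) + 8 (UDP) + 6-10 (session->hdr_len) + 2 (sizeof(ppph)) ≈ 70-100 bytes up front; the exact figures depend on configuration and are illustrative only.
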
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 956b7e4..8d0324b 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -139,7 +139,8 @@ out:
return lapb;
}
-int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks)
+int lapb_register(struct net_device *dev,
+ const struct lapb_register_struct *callbacks)
{
struct lapb_cb *lapb;
int rc = LAPB_BADTOKEN;
@@ -158,7 +159,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
goto out;
lapb->dev = dev;
- lapb->callbacks = *callbacks;
+ lapb->callbacks = callbacks;
__lapb_insert_cb(lapb);
@@ -380,32 +381,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.connect_confirmation)
- lapb->callbacks.connect_confirmation(lapb->dev, reason);
+ if (lapb->callbacks->connect_confirmation)
+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
}
void lapb_connect_indication(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.connect_indication)
- lapb->callbacks.connect_indication(lapb->dev, reason);
+ if (lapb->callbacks->connect_indication)
+ lapb->callbacks->connect_indication(lapb->dev, reason);
}
void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.disconnect_confirmation)
- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
+ if (lapb->callbacks->disconnect_confirmation)
+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
}
void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
{
- if (lapb->callbacks.disconnect_indication)
- lapb->callbacks.disconnect_indication(lapb->dev, reason);
+ if (lapb->callbacks->disconnect_indication)
+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
}
int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
{
- if (lapb->callbacks.data_indication)
- return lapb->callbacks.data_indication(lapb->dev, skb);
+ if (lapb->callbacks->data_indication)
+ return lapb->callbacks->data_indication(lapb->dev, skb);
kfree_skb(skb);
return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
@@ -415,8 +416,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
{
int used = 0;
- if (lapb->callbacks.data_transmit) {
- lapb->callbacks.data_transmit(lapb->dev, skb);
+ if (lapb->callbacks->data_transmit) {
+ lapb->callbacks->data_transmit(lapb->dev, skb);
used = 1;
}
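
Keeping a pointer to a const table, rather than copying the structure into every control block, lets callers share one read-only callback table. Caller-side sketch (handler names hypothetical):

static const struct lapb_register_struct my_lapb_callbacks = {
	.data_indication = my_data_indication,
	.data_transmit	 = my_data_transmit,
};

	err = lapb_register(dev, &my_lapb_callbacks);
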
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 9032421..e32cab4 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -13,6 +13,7 @@
*/
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index b38a107..b658cba 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 7af1ff2..a1839c0 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -17,6 +17,7 @@
#include <linux/proc_fs.h>
#include <linux/errno.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/llc.h>
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f5fdfcbf..7d3b438 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -199,6 +199,19 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_MPATH_DEBUG
+ bool "Verbose mesh path debugging"
+ depends on MAC80211_DEBUG_MENU
+ depends on MAC80211_MESH
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose mesh path selection debugging messages (when mac80211
+ is taking part in a mesh network).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_VERBOSE_MHWMP_DEBUG
bool "Verbose mesh HWMP routing debugging"
depends on MAC80211_DEBUG_MENU
@@ -212,6 +225,18 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
+config MAC80211_VERBOSE_TDLS_DEBUG
+ bool "Verbose TDLS debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ Selecting this option causes mac80211 to print out very
+ verbose TDLS selection debugging messages (when mac80211
+ is a TDLS STA).
+ It should not be selected on production systems as those
+ messages are remotely triggerable.
+
+ Do not select this option.
+
config MAC80211_DEBUG_COUNTERS
bool "Extra statistics for TX/RX debugging"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index fd1aaf2..93b2434 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -38,6 +38,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -69,7 +70,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
if (!tid_rx)
return;
- rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
+ RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -167,12 +168,8 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
u16 capab;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer "
- "for addba resp frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -227,7 +224,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
"Denying ADDBA request\n");
@@ -279,14 +276,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
/* prepare A-MPDU MLME for Rx aggregation */
tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
- if (!tid_agg_rx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
- tid);
-#endif
+ if (!tid_agg_rx)
goto end;
- }
spin_lock_init(&tid_agg_rx->reorder_lock);
@@ -306,11 +297,6 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
tid_agg_rx->reorder_time =
kcalloc(buf_size, sizeof(unsigned long), GFP_KERNEL);
if (!tid_agg_rx->reorder_buf || !tid_agg_rx->reorder_time) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "can not allocate reordering buffer "
- "to tid %d\n", tid);
-#endif
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
kfree(tid_agg_rx);
@@ -340,7 +326,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
- rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
+ RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
if (timeout)
mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
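
RCU_INIT_POINTER() is rcu_assign_pointer() without the publish barrier. That is always correct when storing NULL, and for a non-NULL store only when readers provably cannot dereference the pointer before its contents are visible. A rule-of-thumb sketch (setup() hypothetical):

	RCU_INIT_POINTER(p, NULL);	/* NULL store: no barrier needed */

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return -ENOMEM;
	setup(q);
	rcu_assign_pointer(p, q);	/* publishing live data: keep the barrier */
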
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c8be8ef..b3f6552 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -15,6 +15,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -68,11 +69,9 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for addba request frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
@@ -106,19 +105,18 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
ieee80211_tx_skb(sdata, skb);
}
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn)
+void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_bar *bar;
u16 bar_control = 0;
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "bar frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
bar = (struct ieee80211_bar *)skb_put(skb, sizeof(*bar));
memset(bar, 0, sizeof(*bar));
@@ -128,13 +126,14 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
- bar_control |= (u16)(tid << 12);
+ bar_control |= (u16)(tid << IEEE80211_BAR_CTRL_TID_INFO_SHIFT);
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
+EXPORT_SYMBOL(ieee80211_send_bar);
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
struct tid_ampdu_tx *tid_tx)
@@ -364,7 +363,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if ((tid >= STA_TID_NUM) ||
- !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ !(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) ||
+ (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -383,7 +383,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "BA sessions blocked. "
"Denying BA session request\n");
@@ -413,11 +413,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
/* prepare A-MPDU MLME for Tx aggregation */
tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
if (!tid_tx) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
- tid);
-#endif
ret = -ENOMEM;
goto err_unlock_sta;
}
@@ -574,14 +569,9 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping start BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -727,14 +717,9 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
struct ieee80211_ra_tid *ra_tid;
struct sk_buff *skb = dev_alloc_skb(0);
- if (unlikely(!skb)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- printk(KERN_WARNING "%s: Not enough memory, "
- "dropping stop BA session", sdata->name);
-#endif
+ if (unlikely(!skb))
return;
- }
+
ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
memcpy(&ra_tid->ra, ra, ETH_ALEN);
ra_tid->tid = tid;
@@ -777,18 +762,14 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
-
+ /*
+ * IEEE 802.11-2007 7.3.1.14:
+ * In an ADDBA Response frame, when the Status Code field
+ * is set to 0, the Buffer Size subfield is set to a value
+ * of at least 1.
+ */
if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
- == WLAN_STATUS_SUCCESS) {
- /*
- * IEEE 802.11-2007 7.3.1.14:
- * In an ADDBA Response frame, when the Status Code field
- * is set to 0, the Buffer Size subfield is set to a value
- * of at least 1.
- */
- if (!buf_size)
- goto out;
-
+ == WLAN_STATUS_SUCCESS && buf_size) {
if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
&tid_tx->state)) {
/* ignore duplicate response */
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 3d1b091..d06c65f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <linux/rcupdate.h>
+#include <linux/if_ether.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -62,7 +63,7 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
if (type == NL80211_IFTYPE_AP_VLAN &&
params && params->use_4addr == 0)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
else if (type == NL80211_IFTYPE_STATION &&
params && params->use_4addr >= 0)
sdata->u.mgd.use_4addr = params->use_4addr;
@@ -343,7 +344,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
STATION_INFO_RX_BITRATE |
STATION_INFO_RX_DROP_MISC |
STATION_INFO_BSS_PARAM |
- STATION_INFO_CONNECTED_TIME;
+ STATION_INFO_CONNECTED_TIME |
+ STATION_INFO_STA_FLAGS;
do_posix_clock_monotonic_gettime(&uptime);
sinfo->connected_time = uptime.tv_sec - sta->last_connected;
@@ -403,6 +405,23 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
+
+ sinfo->sta_flags.set = 0;
+ sinfo->sta_flags.mask = BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
+ BIT(NL80211_STA_FLAG_WME) |
+ BIT(NL80211_STA_FLAG_MFP) |
+ BIT(NL80211_STA_FLAG_AUTHENTICATED);
+ if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+ if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
+ if (test_sta_flag(sta, WLAN_STA_WME))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
+ if (test_sta_flag(sta, WLAN_STA_MFP))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
+ if (test_sta_flag(sta, WLAN_STA_AUTH))
+ sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHENTICATED);
}
@@ -455,6 +474,20 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
+static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
+ struct beacon_parameters *params)
+{
+ struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+ bss_conf->ssid_len = params->ssid_len;
+
+ if (params->ssid_len)
+ memcpy(bss_conf->ssid, params->ssid, params->ssid_len);
+
+ bss_conf->hidden_ssid =
+ (params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
+}
+
/*
* This handles both adding a beacon and setting new beacon info
*/
@@ -542,14 +575,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.dtim_period = new->dtim_period;
- rcu_assign_pointer(sdata->u.ap.beacon, new);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, new);
synchronize_rcu();
kfree(old);
+ ieee80211_config_ap_ssid(sdata, params);
+
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
- BSS_CHANGED_BEACON);
+ BSS_CHANGED_BEACON |
+ BSS_CHANGED_SSID);
return 0;
}
@@ -594,7 +630,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
if (!old)
return -ENOENT;
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old);
@@ -650,7 +686,6 @@ static void sta_apply_parameters(struct ieee80211_local *local,
struct sta_info *sta,
struct station_parameters *params)
{
- unsigned long flags;
u32 rates;
int i, j;
struct ieee80211_supported_band *sband;
@@ -659,43 +694,58 @@ static void sta_apply_parameters(struct ieee80211_local *local,
sband = local->hw.wiphy->bands[local->oper_channel->band];
- spin_lock_irqsave(&sta->flaglock, flags);
mask = params->sta_flags_mask;
set = params->sta_flags_set;
if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
- sta->flags &= ~WLAN_STA_AUTHORIZED;
if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- sta->flags |= WLAN_STA_AUTHORIZED;
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTHORIZED);
}
if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) {
- sta->flags &= ~WLAN_STA_SHORT_PREAMBLE;
if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE))
- sta->flags |= WLAN_STA_SHORT_PREAMBLE;
+ set_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
+ else
+ clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
}
if (mask & BIT(NL80211_STA_FLAG_WME)) {
- sta->flags &= ~WLAN_STA_WME;
- sta->sta.wme = false;
if (set & BIT(NL80211_STA_FLAG_WME)) {
- sta->flags |= WLAN_STA_WME;
+ set_sta_flag(sta, WLAN_STA_WME);
sta->sta.wme = true;
+ } else {
+ clear_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = false;
}
}
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
- sta->flags &= ~WLAN_STA_MFP;
if (set & BIT(NL80211_STA_FLAG_MFP))
- sta->flags |= WLAN_STA_MFP;
+ set_sta_flag(sta, WLAN_STA_MFP);
+ else
+ clear_sta_flag(sta, WLAN_STA_MFP);
}
if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
- sta->flags &= ~WLAN_STA_AUTH;
if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
- sta->flags |= WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ else
+ clear_sta_flag(sta, WLAN_STA_AUTH);
+ }
+
+ if (mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) {
+ if (set & BIT(NL80211_STA_FLAG_TDLS_PEER))
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ else
+ clear_sta_flag(sta, WLAN_STA_TDLS_PEER);
+ }
+
+ if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
+ sta->sta.uapsd_queues = params->uapsd_queues;
+ sta->sta.max_sp = params->max_sp;
}
- spin_unlock_irqrestore(&sta->flaglock, flags);
/*
* cfg80211 validates this (1-2007) and allows setting the AID
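
The flaglock/bitmask pair is gone: each station flag is now a separate atomic bit, so set, clear and test need no spinlock and no IRQ disabling. Before and after, sketched:

	/* old: all flags in one word under sta->flaglock */
	spin_lock_irqsave(&sta->flaglock, flags);
	sta->flags |= WLAN_STA_AUTHORIZED;
	spin_unlock_irqrestore(&sta->flaglock, flags);

	/* new: one atomic bit per flag */
	set_sta_flag(sta, WLAN_STA_AUTHORIZED);
	if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
		/* ... */;
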
@@ -782,11 +832,18 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (is_multicast_ether_addr(mac))
return -EINVAL;
+ /* Only TDLS-supporting stations can add TDLS peers */
+ if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION))
+ return -ENOTSUPP;
+
sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
if (!sta)
return -ENOMEM;
- sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
sta_apply_parameters(local, sta, params);
@@ -842,6 +899,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -ENOENT;
}
+ /* The TDLS bit cannot be toggled after the STA was added */
+ if ((params->sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ !!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) !=
+ !!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
if (params->vlan && params->vlan != sta->sdata->dev) {
vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -857,7 +922,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
return -EBUSY;
}
- rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
+ RCU_INIT_POINTER(vlansdata->u.vlan.sta, sta);
}
sta->sdata = vlansdata;
@@ -918,7 +983,7 @@ static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev,
if (dst)
return mesh_path_del(dst, sdata);
- mesh_path_flush(sdata);
+ mesh_path_flush_by_iface(sdata);
return 0;
}
@@ -1137,6 +1202,22 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode;
ieee80211_mesh_root_setup(ifmsh);
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_GATE_ANNOUNCEMENTS, mask)) {
+ /* our current gate announcement implementation rides on root
+ * announcements, so require this ifmsh to also be a root node
+ */
+ if (nconf->dot11MeshGateAnnouncementProtocol &&
+ !conf->dot11MeshHWMPRootMode) {
+ conf->dot11MeshHWMPRootMode = 1;
+ ieee80211_mesh_root_setup(ifmsh);
+ }
+ conf->dot11MeshGateAnnouncementProtocol =
+ nconf->dot11MeshGateAnnouncementProtocol;
+ }
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+ conf->dot11MeshHWMPRannInterval =
+ nconf->dot11MeshHWMPRannInterval;
+ }
return 0;
}
@@ -1235,9 +1316,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
static int ieee80211_set_txq_params(struct wiphy *wiphy,
+ struct net_device *dev,
struct ieee80211_txq_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_tx_queue_params p;
if (!local->ops->conf_tx)
@@ -1258,8 +1341,8 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
if (params->queue >= local->hw.queues)
return -EINVAL;
- local->tx_conf[params->queue] = p;
- if (drv_conf_tx(local, params->queue, &p)) {
+ sdata->tx_conf[params->queue] = p;
+ if (drv_conf_tx(local, sdata, params->queue, &p)) {
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
params->queue);
@@ -1821,7 +1904,7 @@ ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
* so in that case userspace will have to deal with it.
*/
- if (wk->offchan_tx.wait && wk->offchan_tx.frame)
+ if (wk->offchan_tx.wait && !wk->offchan_tx.status)
cfg80211_mgmt_tx_status(wk->sdata->dev,
(unsigned long) wk->offchan_tx.frame,
wk->ie, wk->ie_len, false, GFP_KERNEL);
@@ -1833,7 +1916,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
@@ -1860,6 +1944,9 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
}
+ if (no_cck)
+ flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
+
if (is_offchan && !offchan)
return -EBUSY;
@@ -1898,33 +1985,6 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
*cookie = (unsigned long) skb;
- if (is_offchan && local->ops->offchannel_tx) {
- int ret;
-
- IEEE80211_SKB_CB(skb)->band = chan->band;
-
- mutex_lock(&local->mtx);
-
- if (local->hw_offchan_tx_cookie) {
- mutex_unlock(&local->mtx);
- return -EBUSY;
- }
-
- /* TODO: bitrate control, TX processing? */
- ret = drv_offchannel_tx(local, skb, chan, channel_type, wait);
-
- if (ret == 0)
- local->hw_offchan_tx_cookie = *cookie;
- mutex_unlock(&local->mtx);
-
- /*
- * Allow driver to return 1 to indicate it wants to have the
- * frame transmitted with a remain_on_channel + regular TX.
- */
- if (ret != 1)
- return ret;
- }
-
if (is_offchan && local->ops->remain_on_channel) {
unsigned int duration;
int ret;
@@ -2011,18 +2071,6 @@ static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
mutex_lock(&local->mtx);
- if (local->ops->offchannel_tx_cancel_wait &&
- local->hw_offchan_tx_cookie == cookie) {
- ret = drv_offchannel_tx_cancel_wait(local);
-
- if (!ret)
- local->hw_offchan_tx_cookie = 0;
-
- mutex_unlock(&local->mtx);
-
- return ret;
- }
-
if (local->ops->cancel_remain_on_channel) {
cookie ^= 2;
ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
@@ -2123,6 +2171,323 @@ static int ieee80211_set_rekey_data(struct wiphy *wiphy,
return 0;
}
+static void ieee80211_tdls_add_ext_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 7);
+
+ *pos++ = WLAN_EID_EXT_CAPABILITY;
+ *pos++ = 5; /* len */
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = 0x0;
+ *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+}
+
+static u16 ieee80211_get_tdls_sta_capab(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ u16 capab;
+
+ capab = 0;
+ if (local->oper_channel->band != IEEE80211_BAND_2GHZ)
+ return capab;
+
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+
+ return capab;
+}
+
+static void ieee80211_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr,
+ u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) - 2;
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+static int
+ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_tdls_data *tf;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, sdata->vif.addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_mgmt *mgmt;
+
+ mgmt = (void *)skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
+
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, 1 + sizeof(mgmt->u.action.u.tdls_discover_resp));
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
+
+ ieee80211_add_srates_ie(&sdata->vif, skb);
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb);
+ ieee80211_tdls_add_ext_capab(skb);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb = NULL;
+ bool send_direct;
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in managed mode, and associated */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION ||
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
+#endif
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ 50 + /* supported rates */
+ 7 + /* ext capab */
+ extra_ies_len +
+ sizeof(struct ieee80211_tdls_lnkie));
+ if (!skb)
+ return -ENOMEM;
+
+ info = IEEE80211_SKB_CB(skb);
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = ieee80211_prep_tdls_encap_data(wiphy, dev, peer,
+ action_code, dialog_token,
+ status_code, skb);
+ send_direct = false;
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ ret = ieee80211_prep_tdls_direct(wiphy, dev, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ send_direct = true;
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret < 0)
+ goto fail;
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ /* we are the initiator */
+ ieee80211_tdls_add_link_ie(skb, sdata->vif.addr, peer,
+ sdata->u.mgd.bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ /* we are the responder */
+ ieee80211_tdls_add_link_ie(skb, peer, sdata->vif.addr,
+ sdata->u.mgd.bssid);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto fail;
+ }
+
+ if (send_direct) {
+ ieee80211_tx_skb(sdata, skb);
+ return 0;
+ }
+
+ /*
+ * According to 802.11z: Setup req/resp are sent in AC_BK, otherwise
+ * we should default to AC_VI.
+ */
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb_set_queue_mapping(skb, IEEE80211_AC_BK);
+ skb->priority = 2;
+ break;
+ default:
+ skb_set_queue_mapping(skb, IEEE80211_AC_VI);
+ skb->priority = 5;
+ break;
+ }
+
+ /* disable bottom halves when entering the Tx path */
+ local_bh_disable();
+ ret = ieee80211_subif_start_xmit(skb, dev);
+ local_bh_enable();
+
+ return ret;
+
+fail:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation oper)
+{
+ struct sta_info *sta;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
+ printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
+#endif
+
+ switch (oper) {
+ case NL80211_TDLS_ENABLE_LINK:
+ rcu_read_lock();
+ sta = sta_info_get(sdata, peer);
+ if (!sta) {
+ rcu_read_unlock();
+ return -ENOLINK;
+ }
+
+ set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
+ rcu_read_unlock();
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ return sta_info_destroy_addr(sdata, peer);
+ case NL80211_TDLS_TEARDOWN:
+ case NL80211_TDLS_SETUP:
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* We don't support in-driver setup/teardown/discovery */
+ return -ENOTSUPP;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -2186,4 +2551,6 @@ struct cfg80211_ops mac80211_config_ops = {
.set_ringparam = ieee80211_set_ringparam,
.get_ringparam = ieee80211_get_ringparam,
.set_rekey_data = ieee80211_set_rekey_data,
+ .tdls_oper = ieee80211_tdls_oper,
+ .tdls_mgmt = ieee80211_tdls_mgmt,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f..883996b 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -78,57 +78,6 @@ DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
-static ssize_t tsf_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- u64 tsf;
-
- tsf = drv_get_tsf(local);
-
- return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
- (unsigned long long) tsf);
-}
-
-static ssize_t tsf_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ieee80211_local *local = file->private_data;
- unsigned long long tsf;
- char buf[100];
- size_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- if (strncmp(buf, "reset", 5) == 0) {
- if (local->ops->reset_tsf) {
- drv_reset_tsf(local);
- wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
- }
- } else {
- tsf = simple_strtoul(buf, NULL, 0);
- if (local->ops->set_tsf) {
- drv_set_tsf(local, tsf);
- wiphy_info(local->hw.wiphy,
- "debugfs set TSF to %#018llx\n", tsf);
-
- }
- }
-
- return count;
-}
-
-static const struct file_operations tsf_ops = {
- .read = tsf_read,
- .write = tsf_write,
- .open = mac80211_open_file_generic,
- .llseek = default_llseek,
-};
-
static ssize_t reset_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -195,20 +144,12 @@ static ssize_t uapsd_queues_write(struct file *file,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
- unsigned long val;
- char buf[10];
- size_t len;
+ u8 val;
int ret;
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
- buf[len] = '\0';
-
- ret = strict_strtoul(buf, 0, &val);
-
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
if (ret)
- return -EINVAL;
+ return ret;
if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
return -ERANGE;
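
kstrtou8_from_user() folds the removed copy_from_user()/NUL-termination/strict_strtoul() sequence into one call that also rejects values that do not fit in a u8. A minimal write handler using it, as a sketch:

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	u8 val;
	int ret;

	ret = kstrtou8_from_user(ubuf, count, 0, &val);	/* base 0: auto-detect */
	if (ret)
		return ret;
	/* ... apply val ... */
	return count;
}
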
@@ -305,6 +246,9 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
char *buf = kzalloc(mxln, GFP_KERNEL);
int sf = 0; /* how many written so far */
+ if (!buf)
+ return 0;
+
sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
@@ -355,6 +299,8 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+ if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
+ sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
kfree(buf);
@@ -450,7 +396,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(frequency);
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
- DEBUGFS_ADD(tsf);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 9ea7c0d..9352819 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -21,6 +21,7 @@
#include "rate.h"
#include "debugfs.h"
#include "debugfs_netdev.h"
+#include "driver-ops.h"
static ssize_t ieee80211_if_read(
struct ieee80211_sub_if_data *sdata,
@@ -331,6 +332,46 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
}
__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
+/* IBSS attributes */
+static ssize_t ieee80211_if_fmt_tsf(
+ const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ u64 tsf;
+
+ tsf = drv_get_tsf(local, (struct ieee80211_sub_if_data *)sdata);
+
+ return scnprintf(buf, buflen, "0x%016llx\n", (unsigned long long) tsf);
+}
+
+static ssize_t ieee80211_if_parse_tsf(
+ struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
+{
+ struct ieee80211_local *local = sdata->local;
+ unsigned long long tsf;
+ int ret;
+
+ if (strncmp(buf, "reset", 5) == 0) {
+ if (local->ops->reset_tsf) {
+ drv_reset_tsf(local, sdata);
+ wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
+ }
+ } else {
+ ret = kstrtoull(buf, 10, &tsf);
+ if (ret < 0)
+ return -EINVAL;
+ if (local->ops->set_tsf) {
+ drv_set_tsf(local, sdata, tsf);
+ wiphy_info(local->hw.wiphy,
+ "debugfs set TSF to %#018llx\n", tsf);
+ }
+ }
+
+ return buflen;
+}
+__IEEE80211_IF_FILE_W(tsf);
+
+
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -340,6 +381,8 @@ IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
+IEEE80211_IF_FILE(dropped_frames_congestion,
+ u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
@@ -372,6 +415,10 @@ IEEE80211_IF_FILE(min_discovery_timeout,
u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
+ u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
+ u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
#endif
@@ -415,6 +462,11 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
}
+static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
+{
+ DEBUGFS_ADD_MODE(tsf, 0600);
+}
+
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted);
@@ -459,6 +511,7 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
+ MESHSTATS_ADD(dropped_frames_congestion);
MESHSTATS_ADD(estab_plinks);
#undef MESHSTATS_ADD
}
@@ -485,7 +538,9 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPmaxPREQretries);
MESHPARAMS_ADD(path_refresh_time);
MESHPARAMS_ADD(min_discovery_timeout);
-
+ MESHPARAMS_ADD(dot11MeshHWMPRootMode);
+ MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+ MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
#undef MESHPARAMS_ADD
}
#endif
@@ -506,7 +561,7 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
add_sta_files(sdata);
break;
case NL80211_IFTYPE_ADHOC:
- /* XXX */
+ add_ibss_files(sdata);
break;
case NL80211_IFTYPE_AP:
add_ap_files(sdata);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a01d213..c5f3417 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -56,19 +56,22 @@ STA_FILE(last_signal, last_signal, D);
static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[100];
+ char buf[121];
struct sta_info *sta = file->private_data;
- u32 staflags = get_sta_flags(sta);
- int res = scnprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s",
- staflags & WLAN_STA_AUTH ? "AUTH\n" : "",
- staflags & WLAN_STA_ASSOC ? "ASSOC\n" : "",
- staflags & WLAN_STA_PS_STA ? "PS (sta)\n" : "",
- staflags & WLAN_STA_PS_DRIVER ? "PS (driver)\n" : "",
- staflags & WLAN_STA_AUTHORIZED ? "AUTHORIZED\n" : "",
- staflags & WLAN_STA_SHORT_PREAMBLE ? "SHORT PREAMBLE\n" : "",
- staflags & WLAN_STA_WME ? "WME\n" : "",
- staflags & WLAN_STA_WDS ? "WDS\n" : "",
- staflags & WLAN_STA_MFP ? "MFP\n" : "");
+
+#define TEST(flg) \
+ test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
+
+ int res = scnprintf(buf, sizeof(buf),
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
+ TEST(PS_DRIVER), TEST(AUTHORIZED),
+ TEST(SHORT_PREAMBLE), TEST(ASSOC_AP),
+ TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
+ TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
+ TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
+ TEST(TDLS_PEER_AUTH));
+#undef TEST
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
STA_OPS(flags);
@@ -78,8 +81,14 @@ static ssize_t sta_num_ps_buf_frames_read(struct file *file,
size_t count, loff_t *ppos)
{
struct sta_info *sta = file->private_data;
- return mac80211_format_buffer(userbuf, count, ppos, "%u\n",
- skb_queue_len(&sta->ps_tx_buf));
+ char buf[17*IEEE80211_NUM_ACS], *p = buf;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ p += scnprintf(p, sizeof(buf)+buf-p, "AC%d: %d\n", ac,
+ skb_queue_len(&sta->ps_tx_buf[ac]) +
+ skb_queue_len(&sta->tx_filtered[ac]));
+ return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
STA_OPS(num_ps_buf_frames);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 1425380..5f165d7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -413,50 +413,56 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
trace_drv_return_void(local);
}
-static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
+static inline int drv_conf_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
might_sleep();
- trace_drv_conf_tx(local, queue, params);
+ trace_drv_conf_tx(local, sdata, queue, params);
if (local->ops->conf_tx)
- ret = local->ops->conf_tx(&local->hw, queue, params);
+ ret = local->ops->conf_tx(&local->hw, &sdata->vif,
+ queue, params);
trace_drv_return_int(local, ret);
return ret;
}
-static inline u64 drv_get_tsf(struct ieee80211_local *local)
+static inline u64 drv_get_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
u64 ret = -1ULL;
might_sleep();
- trace_drv_get_tsf(local);
+ trace_drv_get_tsf(local, sdata);
if (local->ops->get_tsf)
- ret = local->ops->get_tsf(&local->hw);
+ ret = local->ops->get_tsf(&local->hw, &sdata->vif);
trace_drv_return_u64(local, ret);
return ret;
}
-static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
+static inline void drv_set_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf)
{
might_sleep();
- trace_drv_set_tsf(local, tsf);
+ trace_drv_set_tsf(local, sdata, tsf);
if (local->ops->set_tsf)
- local->ops->set_tsf(&local->hw, tsf);
+ local->ops->set_tsf(&local->hw, &sdata->vif, tsf);
trace_drv_return_void(local);
}
-static inline void drv_reset_tsf(struct ieee80211_local *local)
+static inline void drv_reset_tsf(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
{
might_sleep();
- trace_drv_reset_tsf(local);
+ trace_drv_reset_tsf(local, sdata);
if (local->ops->reset_tsf)
- local->ops->reset_tsf(&local->hw);
+ local->ops->reset_tsf(&local->hw, &sdata->vif);
trace_drv_return_void(local);
}
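
conf_tx and the three TSF callbacks now take the interface as well, so drivers for multi-BSS hardware can keep this state per vif rather than per device. Driver-side sketch (mydrv_* names hypothetical):

static u64 mydrv_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mydrv_vif *mvif = (void *)vif->drv_priv;

	return mydrv_read_hw_tsf(mvif);	/* per-interface TSF counter */
}
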
@@ -590,37 +596,6 @@ static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local)
return ret;
}
-static inline int drv_offchannel_tx(struct ieee80211_local *local,
- struct sk_buff *skb,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int wait)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx(local, skb, chan, channel_type, wait);
- ret = local->ops->offchannel_tx(&local->hw, skb, chan,
- channel_type, wait);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
-static inline int drv_offchannel_tx_cancel_wait(struct ieee80211_local *local)
-{
- int ret;
-
- might_sleep();
-
- trace_drv_offchannel_tx_cancel_wait(local);
- ret = local->ops->offchannel_tx_cancel_wait(&local->hw);
- trace_drv_return_int(local, ret);
-
- return ret;
-}
-
static inline int drv_set_ringparam(struct ieee80211_local *local,
u32 tx, u32 rx)
{
@@ -696,4 +671,34 @@ static inline void drv_rssi_callback(struct ieee80211_local *local,
local->ops->rssi_callback(&local->hw, event);
trace_drv_return_void(local);
}
+
+static inline void
+drv_release_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->release_buffered_frames)
+ local->ops->release_buffered_frames(&local->hw, &sta->sta, tids,
+ num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
+
+static inline void
+drv_allow_buffered_frames(struct ieee80211_local *local,
+ struct sta_info *sta, u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames,
+ reason, more_data);
+ if (local->ops->allow_buffered_frames)
+ local->ops->allow_buffered_frames(&local->hw, &sta->sta,
+ tids, num_frames, reason,
+ more_data);
+ trace_drv_return_void(local);
+}
#endif /* __MAC80211_DRIVER_OPS */
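
These two hooks back U-APSD and PS-Poll delivery for drivers that buffer frames in hardware: release_buffered_frames asks the driver to transmit up to num_frames frames from the TIDs set in the tids bitmap, and allow_buffered_frames re-enables transmission of frames it was told to hold. A driver-side sketch (mydrv hypothetical):

static void mydrv_release_buffered_frames(struct ieee80211_hw *hw,
					  struct ieee80211_sta *sta, u16 tids,
					  int num_frames,
					  enum ieee80211_frame_release_type reason,
					  bool more_data)
{
	/* sketch: dequeue up to num_frames frames whose TID bit is set in
	 * tids, mark the last one as end-of-service-period, and set the
	 * MoreData bit according to more_data */
}
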
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index f47b00dc7..2af4fca 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -697,64 +697,76 @@ TRACE_EVENT(drv_sta_remove,
);
TRACE_EVENT(drv_conf_tx,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u16 queue,
const struct ieee80211_tx_queue_params *params),
- TP_ARGS(local, queue, params),
+ TP_ARGS(local, sdata, queue, params),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u16, queue)
__field(u16, txop)
__field(u16, cw_min)
__field(u16, cw_max)
__field(u8, aifs)
+ __field(bool, uapsd)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->queue = queue;
__entry->txop = params->txop;
__entry->cw_max = params->cw_max;
__entry->cw_min = params->cw_min;
__entry->aifs = params->aifs;
+ __entry->uapsd = params->uapsd;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d",
- LOCAL_PR_ARG, __entry->queue
+ LOCAL_PR_FMT VIF_PR_FMT " queue:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue
)
);
-DEFINE_EVENT(local_only_evt, drv_get_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_get_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
TRACE_EVENT(drv_set_tsf,
- TP_PROTO(struct ieee80211_local *local, u64 tsf),
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ u64 tsf),
- TP_ARGS(local, tsf),
+ TP_ARGS(local, sdata, tsf),
TP_STRUCT__entry(
LOCAL_ENTRY
+ VIF_ENTRY
__field(u64, tsf)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ VIF_ASSIGN;
__entry->tsf = tsf;
),
TP_printk(
- LOCAL_PR_FMT " tsf:%llu",
- LOCAL_PR_ARG, (unsigned long long)__entry->tsf
+ LOCAL_PR_FMT VIF_PR_FMT " tsf:%llu",
+ LOCAL_PR_ARG, VIF_PR_ARG, (unsigned long long)__entry->tsf
)
);
-DEFINE_EVENT(local_only_evt, drv_reset_tsf,
- TP_PROTO(struct ieee80211_local *local),
- TP_ARGS(local)
+DEFINE_EVENT(local_sdata_evt, drv_reset_tsf,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
);
DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
@@ -1117,6 +1129,61 @@ TRACE_EVENT(drv_rssi_callback,
)
);
+DECLARE_EVENT_CLASS(release_evt,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u16, tids)
+ __field(int, num_frames)
+ __field(int, reason)
+ __field(bool, more_data)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->tids = tids;
+ __entry->num_frames = num_frames;
+ __entry->reason = reason;
+ __entry->more_data = more_data;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT
+ " TIDs:0x%.4x frames:%d reason:%d more:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->tids, __entry->num_frames,
+ __entry->reason, __entry->more_data
+ )
+);
+
+DEFINE_EVENT(release_evt, drv_release_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
+DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta,
+ u16 tids, int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data),
+
+ TP_ARGS(local, sta, tids, num_frames, reason, more_data)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1431,6 +1498,28 @@ TRACE_EVENT(api_enable_rssi_reports,
)
);
+TRACE_EVENT(api_eosp,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, STA_PR_ARG
+ )
+);
+
/*
* Tracing for internal functions
* (which may also be called in response to driver calls)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 7cfc286..f0fb737 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -14,6 +14,7 @@
*/
#include <linux/ieee80211.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
@@ -130,7 +131,7 @@ void ieee80211_ba_session_work(struct work_struct *work)
* down by the code that set the flag, so this
* need not run.
*/
- if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+ if (test_sta_flag(sta, WLAN_STA_BLOCK_BA))
return;
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -186,12 +187,8 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
u16 params;
skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer "
- "for delba frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56c24ca..ede9a8b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -81,10 +81,10 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
lockdep_assert_held(&ifibss->mtx);
/* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local);
+ drv_reset_tsf(local, sdata);
skb = ifibss->skb;
- rcu_assign_pointer(ifibss->presp, NULL);
+ RCU_INIT_POINTER(ifibss->presp, NULL);
synchronize_rcu();
skb->data = skb->head;
skb->len = 0;
@@ -184,7 +184,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0; /* U-APSD not in use */
}
- rcu_assign_pointer(ifibss->presp, skb);
+ RCU_INIT_POINTER(ifibss->presp, skb);
sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -314,7 +314,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && elems->wmm_info)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
rcu_read_unlock();
}
@@ -382,7 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* second best option: get current TSF
* (will return -1 if not supported)
*/
- rx_timestamp = drv_get_tsf(local);
+ rx_timestamp = drv_get_tsf(local, sdata);
}
#ifdef CONFIG_MAC80211_IBSS_DEBUG
@@ -417,7 +417,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* must be callable in atomic context.
*/
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid,u8 *addr, u32 supp_rates,
+ u8 *bssid, u8 *addr, u32 supp_rates,
gfp_t gfp)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
@@ -452,7 +452,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
return NULL;
sta->last_rx = jiffies;
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
/* make sure mandatory rates are always added */
sta->sta.supp_rates[band] = supp_rates |
@@ -995,7 +995,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
kfree(sdata->u.ibss.ie);
skb = rcu_dereference_protected(sdata->u.ibss.presp,
lockdep_is_held(&sdata->u.ibss.mtx));
- rcu_assign_pointer(sdata->u.ibss.presp, NULL);
+ RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
sdata->vif.bss_conf.ibss_joined = false;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
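The rcu_assign_pointer(..., NULL) -> RCU_INIT_POINTER(..., NULL) conversions in this file (and in iface.c below) work because storing NULL publishes no new data, so the write barrier that rcu_assign_pointer() implies is unnecessary. A minimal sketch of when each primitive applies, using a hypothetical RCU-managed pointer rather than the presp field itself:

	/* sketch, not from this patch; presp_example is hypothetical */
	static struct sk_buff __rcu *presp_example;

	static void publish_presp(struct sk_buff *new)
	{
		/* readers may dereference right away, so the barrier in
		 * rcu_assign_pointer() must order *new's initialization
		 * before the pointer store */
		rcu_assign_pointer(presp_example, new);
	}

	static void retract_presp(void)
	{
		/* NULL carries no data to order against; a plain store
		 * via RCU_INIT_POINTER() is sufficient */
		RCU_INIT_POINTER(presp_example, NULL);
		synchronize_rcu();	/* let readers drain before freeing */
	}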
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 400c09b..ea10a51 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -136,7 +136,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
#define TX_DROP ((__force ieee80211_tx_result) 1u)
#define TX_QUEUED ((__force ieee80211_tx_result) 2u)
-#define IEEE80211_TX_FRAGMENTED BIT(0)
#define IEEE80211_TX_UNICAST BIT(1)
#define IEEE80211_TX_PS_BUFFERED BIT(2)
@@ -149,7 +148,6 @@ struct ieee80211_tx_data {
struct ieee80211_channel *channel;
- u16 ethertype;
unsigned int flags;
};
@@ -261,6 +259,7 @@ struct mesh_stats {
__u32 fwded_frames; /* Mesh total forwarded frames */
__u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
__u32 dropped_frames_no_route; /* Not transmitted, no route found */
+ __u32 dropped_frames_congestion;/* Not forwarded due to congestion */
atomic_t estab_plinks;
};
@@ -345,6 +344,7 @@ struct ieee80211_work {
struct {
struct sk_buff *frame;
u32 wait;
+ bool status;
} offchan_tx;
};
@@ -389,6 +389,7 @@ struct ieee80211_if_managed {
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
+ bool broken_ap; /* AP is broken -- turn off powersave */
enum ieee80211_smps_mode req_smps, /* requested smps mode */
ap_smps, /* smps mode AP thinks we're in */
driver_smps_mode; /* smps mode request */
@@ -514,6 +515,7 @@ struct ieee80211_if_mesh {
struct mesh_config mshcfg;
u32 mesh_seqnum;
bool accepting_plinks;
+ int num_gates;
const u8 *ie;
u8 ie_len;
enum {
@@ -607,6 +609,8 @@ struct ieee80211_sub_if_data {
__be16 control_port_protocol;
bool control_port_no_encrypt;
+ struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
+
struct work_struct work;
struct sk_buff_head skb_queue;
@@ -660,6 +664,11 @@ enum sdata_queue_type {
enum {
IEEE80211_RX_MSG = 1,
IEEE80211_TX_STATUS_MSG = 2,
+ IEEE80211_EOSP_MSG = 3,
+};
+
+struct skb_eosp_msg_data {
+ u8 sta[ETH_ALEN], iface[ETH_ALEN];
};
enum queue_stop_reason {
@@ -669,6 +678,7 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
};
#ifdef CONFIG_MAC80211_LEDS
@@ -748,7 +758,6 @@ struct ieee80211_local {
struct workqueue_struct *workqueue;
unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
- struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES];
 /* also used to protect ampdu_ac_queue and ampdu_ac_stop_refcnt */
spinlock_t queue_stop_reason_lock;
@@ -1002,7 +1011,6 @@ struct ieee80211_local {
unsigned int hw_roc_duration;
u32 hw_roc_cookie;
bool hw_roc_for_tx;
- unsigned long hw_offchan_tx_cookie;
/* dummy netdev for use w/ NAPI */
struct net_device napi_dev;
@@ -1022,69 +1030,6 @@ struct ieee80211_ra_tid {
u16 tid;
};
-/* Parsed Information Elements */
-struct ieee802_11_elems {
- u8 *ie_start;
- size_t total_len;
-
- /* pointers to IEs */
- u8 *ssid;
- u8 *supp_rates;
- u8 *fh_params;
- u8 *ds_params;
- u8 *cf_params;
- struct ieee80211_tim_ie *tim;
- u8 *ibss_params;
- u8 *challenge;
- u8 *wpa;
- u8 *rsn;
- u8 *erp_info;
- u8 *ext_supp_rates;
- u8 *wmm_info;
- u8 *wmm_param;
- struct ieee80211_ht_cap *ht_cap_elem;
- struct ieee80211_ht_info *ht_info_elem;
- struct ieee80211_meshconf_ie *mesh_config;
- u8 *mesh_id;
- u8 *peer_link;
- u8 *preq;
- u8 *prep;
- u8 *perr;
- struct ieee80211_rann_ie *rann;
- u8 *ch_switch_elem;
- u8 *country_elem;
- u8 *pwr_constr_elem;
- u8 *quiet_elem; /* first quite element */
- u8 *timeout_int;
-
- /* length of them, respectively */
- u8 ssid_len;
- u8 supp_rates_len;
- u8 fh_params_len;
- u8 ds_params_len;
- u8 cf_params_len;
- u8 tim_len;
- u8 ibss_params_len;
- u8 challenge_len;
- u8 wpa_len;
- u8 rsn_len;
- u8 erp_info_len;
- u8 ext_supp_rates_len;
- u8 wmm_info_len;
- u8 wmm_param_len;
- u8 mesh_id_len;
- u8 peer_link_len;
- u8 preq_len;
- u8 prep_len;
- u8 perr_len;
- u8 ch_switch_elem_len;
- u8 country_elem_len;
- u8 pwr_constr_elem_len;
- u8 quiet_elem_len;
- u8 num_of_quiet_elem; /* can be more the one */
- u8 timeout_int_len;
-};
-
static inline struct ieee80211_local *hw_to_local(
struct ieee80211_hw *hw)
{
@@ -1233,23 +1178,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev);
-/*
- * radiotap header for status frames
- */
-struct ieee80211_tx_status_rtap_hdr {
- struct ieee80211_radiotap_header hdr;
- u8 rate;
- u8 padding_for_rate;
- __le16 tx_flags;
- u8 data_retries;
-} __packed;
-
-
/* HT */
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
struct ieee80211_ht_cap *ht_cap_ie,
struct ieee80211_sta_ht_cap *ht_cap);
-void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u16 ssn);
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
@@ -1333,6 +1265,7 @@ void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int ke
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems);
@@ -1364,11 +1297,11 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs);
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data);
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs);
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data);
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
@@ -1386,7 +1319,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed);
+ u32 ratemask, bool directed, bool no_cck);
void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 556e7e6..30d7355 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -299,8 +299,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
goto err_del_interface;
}
- /* no locking required since STA is not live yet */
- sta->flags |= WLAN_STA_AUTHORIZED;
+ /* no atomic bitop required since STA is not live yet */
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
res = sta_info_insert(sta);
if (res) {
@@ -456,21 +456,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
- rcu_assign_pointer(sdata->u.ap.beacon, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
synchronize_rcu();
kfree(old_beacon);
- /* free all potentially still buffered bcast frames */
- while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) {
- local->total_ps_buffered--;
- dev_kfree_skb(skb);
- }
-
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
u.vlan.list)
dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
+
+ /* free all potentially still buffered bcast frames */
+ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf);
+ skb_queue_purge(&sdata->u.ap.ps_bc_buf);
}
if (going_down)
@@ -645,7 +643,7 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_subif_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
@@ -689,7 +687,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_stop = ieee80211_stop,
.ndo_uninit = ieee80211_teardown_sdata,
.ndo_start_xmit = ieee80211_monitor_start_xmit,
- .ndo_set_multicast_list = ieee80211_set_multicast_list,
+ .ndo_set_rx_mode = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_select_queue = ieee80211_monitor_select_queue,
@@ -1214,6 +1212,9 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
list_del_rcu(&sdata->list);
mutex_unlock(&sdata->local->iflist_mtx);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
synchronize_rcu();
unregister_netdevice(sdata->dev);
}
@@ -1233,6 +1234,9 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ mesh_path_flush_by_iface(sdata);
+
unregister_netdevice_queue(sdata->dev, &unreg_list);
}
mutex_unlock(&local->iflist_mtx);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5150c6d..fb02ea5 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -464,7 +465,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* some hardware cannot handle TKIP with QoS, so
* we indicate whether QoS could be in use.
*/
- if (test_sta_flags(sta, WLAN_STA_WME))
+ if (test_sta_flag(sta, WLAN_STA_WME))
key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
} else {
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -478,7 +479,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
/* same here, the AP could be using QoS */
ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
if (ap) {
- if (test_sta_flags(ap, WLAN_STA_WME))
+ if (test_sta_flag(ap, WLAN_STA_WME))
key->conf.flags |=
IEEE80211_KEY_FLAG_WMM_STA;
}
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index 1459033..1bf7903 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -9,6 +9,7 @@
/* just for IFNAMSIZ */
#include <linux/if.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "led.h"
void ieee80211_led_rx(struct ieee80211_local *local)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index acb4423..d999bf3 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -19,7 +19,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
@@ -325,6 +325,8 @@ u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct sta_info *sta, *tmp;
+ struct skb_eosp_msg_data *eosp_data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -340,6 +342,18 @@ static void ieee80211_tasklet_handler(unsigned long data)
skb->pkt_type = 0;
ieee80211_tx_status(local_to_hw(local), skb);
break;
+ case IEEE80211_EOSP_MSG:
+ eosp_data = (void *)skb->cb;
+ for_each_sta_info(local, eosp_data->sta, sta, tmp) {
+ /* skip wrong virtual interface */
+ if (memcmp(eosp_data->iface,
+ sta->sdata->vif.addr, ETH_ALEN))
+ continue;
+ clear_sta_flag(sta, WLAN_STA_SP);
+ break;
+ }
+ dev_kfree_skb(skb);
+ break;
default:
WARN(1, "mac80211: Packet is of unknown type %d\n",
skb->pkt_type);
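The IEEE80211_EOSP_MSG branch above consumes zero-length skbs whose cb area carries a struct skb_eosp_msg_data identifying the station and interface. The producer side is not shown in this patch; a sketch of what it plausibly looks like (the entry-point name ieee80211_sta_eosp_irqsafe and its exact body are assumptions):

	/* sketch of the out-of-band EOSP producer; details assumed */
	void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
	{
		struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
		struct ieee80211_local *local = sta->local;
		struct skb_eosp_msg_data *data;
		struct sk_buff *skb;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb) {
			/* clearing the flag directly races, but beats
			 * losing the end-of-service-period event */
			clear_sta_flag(sta, WLAN_STA_SP);
			return;
		}

		data = (void *)skb->cb;
		memcpy(data->sta, pubsta->addr, ETH_ALEN);
		memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
		skb->pkt_type = IEEE80211_EOSP_MSG;
		skb_queue_tail(&local->skb_queue, skb);
		tasklet_schedule(&local->tasklet);
	}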
@@ -608,6 +622,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.max_rates = 1;
local->hw.max_report_rates = 0;
local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
@@ -862,6 +877,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (local->ops->sched_scan_start)
local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ /* mac80211 based drivers don't support internal TDLS setup */
+ if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
+ local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
result = wiphy_register(local->hw.wiphy);
if (result < 0)
goto fail_wiphy_register;
@@ -885,12 +904,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
* and we need some headroom for passing the frame to monitor
* interfaces, but never both at the same time.
*/
-#ifndef __CHECKER__
- BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
- sizeof(struct ieee80211_tx_status_rtap_hdr));
-#endif
 local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
- sizeof(struct ieee80211_tx_status_rtap_hdr));
+ IEEE80211_TX_STATUS_HEADROOM);
debugfs_hw_add(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980..a7078fd 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
-#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
-#define IEEE80211_MESH_RANN_INTERVAL (1 * HZ)
-
#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
#define MESHCONF_CAPAB_FORWARDING 0x08
@@ -27,6 +23,17 @@
int mesh_allocated;
static struct kmem_cache *rm_cache;
+#ifdef CONFIG_MAC80211_MESH
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{
+ return (mgmt->u.action.u.mesh_action.action_code ==
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION);
+}
+#else
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt)
+{ return false; }
+#endif
+
void ieee80211s_init(void)
{
mesh_pathtbl_init();
@@ -193,10 +200,9 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
}
p = kmem_cache_alloc(rm_cache, GFP_ATOMIC);
- if (!p) {
- printk(KERN_DEBUG "o11s: could not allocate RMC entry\n");
+ if (!p)
return 0;
- }
+
p->seqnum = seqnum;
p->exp_time = jiffies + RMC_TIMEOUT;
memcpy(p->sa, sa, ETH_ALEN);
@@ -204,89 +210,136 @@ int mesh_rmc_check(u8 *sa, struct ieee80211s_hdr *mesh_hdr,
return 0;
}
-void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+int
+mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
- u8 *pos;
- int len, i, rate;
- u8 neighbors;
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
- len = sband->n_bitrates;
- if (len > 8)
- len = 8;
- pos = skb_put(skb, len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = len;
- for (i = 0; i < len; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
-
- if (sband->n_bitrates > len) {
- pos = skb_put(skb, sband->n_bitrates - len + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - len;
- for (i = len; i < sband->n_bitrates; i++) {
- rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
-
- if (sband->band == IEEE80211_BAND_2GHZ) {
- pos = skb_put(skb, 2 + 1);
- *pos++ = WLAN_EID_DS_PARAMS;
- *pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
- }
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos, neighbors;
+ u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
- pos = skb_put(skb, 2 + sdata->u.mesh.mesh_id_len);
- *pos++ = WLAN_EID_MESH_ID;
- *pos++ = sdata->u.mesh.mesh_id_len;
- if (sdata->u.mesh.mesh_id_len)
- memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
+ if (skb_tailroom(skb) < 2 + meshconf_len)
+ return -ENOMEM;
- pos = skb_put(skb, 2 + sizeof(struct ieee80211_meshconf_ie));
+ pos = skb_put(skb, 2 + meshconf_len);
*pos++ = WLAN_EID_MESH_CONFIG;
- *pos++ = sizeof(struct ieee80211_meshconf_ie);
+ *pos++ = meshconf_len;
/* Active path selection protocol ID */
- *pos++ = sdata->u.mesh.mesh_pp_id;
-
+ *pos++ = ifmsh->mesh_pp_id;
/* Active path selection metric ID */
- *pos++ = sdata->u.mesh.mesh_pm_id;
-
+ *pos++ = ifmsh->mesh_pm_id;
/* Congestion control mode identifier */
- *pos++ = sdata->u.mesh.mesh_cc_id;
-
+ *pos++ = ifmsh->mesh_cc_id;
/* Synchronization protocol identifier */
- *pos++ = sdata->u.mesh.mesh_sp_id;
-
+ *pos++ = ifmsh->mesh_sp_id;
/* Authentication Protocol identifier */
- *pos++ = sdata->u.mesh.mesh_auth_id;
-
+ *pos++ = ifmsh->mesh_auth_id;
/* Mesh Formation Info - number of neighbors */
- neighbors = atomic_read(&sdata->u.mesh.mshstats.estab_plinks);
+ neighbors = atomic_read(&ifmsh->mshstats.estab_plinks);
 /* Number of neighbor mesh STAs, or 15, whichever is smaller */
neighbors = (neighbors > 15) ? 15 : neighbors;
*pos++ = neighbors << 1;
-
/* Mesh capability */
- sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
+ ifmsh->accepting_plinks = mesh_plink_availables(sdata);
*pos = MESHCONF_CAPAB_FORWARDING;
- *pos++ |= sdata->u.mesh.accepting_plinks ?
+ *pos++ |= ifmsh->accepting_plinks ?
MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
- if (sdata->u.mesh.ie) {
- int len = sdata->u.mesh.ie_len;
- const u8 *data = sdata->u.mesh.ie;
- if (skb_tailroom(skb) > len)
- memcpy(skb_put(skb, len), data, len);
+ return 0;
+}
+
+int
+mesh_add_meshid_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 2 + ifmsh->mesh_id_len)
+ return -ENOMEM;
+
+ pos = skb_put(skb, 2 + ifmsh->mesh_id_len);
+ *pos++ = WLAN_EID_MESH_ID;
+ *pos++ = ifmsh->mesh_id_len;
+ if (ifmsh->mesh_id_len)
+ memcpy(pos, ifmsh->mesh_id, ifmsh->mesh_id_len);
+
+ return 0;
+}
+
+int
+mesh_add_vendor_ies(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 offset, len;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* fast-forward to vendor IEs */
+ offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
+
+ if (offset) {
+ len = ifmsh->ie_len - offset;
+ data = ifmsh->ie + offset;
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
}
+
+ return 0;
}
+int
+mesh_add_rsn_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u8 len = 0;
+ const u8 *data;
+
+ if (!ifmsh->ie || !ifmsh->ie_len)
+ return 0;
+
+ /* find RSN IE */
+ data = ifmsh->ie;
+ while (data < ifmsh->ie + ifmsh->ie_len) {
+ if (*data == WLAN_EID_RSN) {
+ len = data[1] + 2;
+ break;
+ }
+ data++;
+ }
+
+ if (len) {
+ if (skb_tailroom(skb) < len)
+ return -ENOMEM;
+ memcpy(skb_put(skb, len), data, len);
+ }
+
+ return 0;
+}
+
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ u8 *pos;
+
+ if (skb_tailroom(skb) < 3)
+ return -ENOMEM;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ pos = skb_put(skb, 2 + 1);
+ *pos++ = WLAN_EID_DS_PARAMS;
+ *pos++ = 1;
+ *pos++ = ieee80211_frequency_to_channel(local->hw.conf.channel->center_freq);
+ }
+
+ return 0;
+}
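Each of the new mesh_add_*_ie() helpers checks skb_tailroom() and returns -ENOMEM instead of silently truncating the frame, so callers can compose beacon or peering frames and fail cleanly. A hypothetical caller (build_mesh_ies() is not a function from this patch) might chain them as:

	/* sketch: composing a mesh frame tail from the new helpers */
	static int build_mesh_ies(struct sk_buff *skb,
				  struct ieee80211_sub_if_data *sdata)
	{
		if (mesh_add_meshid_ie(skb, sdata) ||
		    mesh_add_meshconf_ie(skb, sdata) ||
		    mesh_add_rsn_ie(skb, sdata) ||
		    mesh_add_ds_params_ie(skb, sdata) ||
		    mesh_add_vendor_ies(skb, sdata))
			return -ENOMEM;	/* ran out of tailroom */

		return 0;
	}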
static void ieee80211_mesh_path_timer(unsigned long data)
{
@@ -352,8 +405,7 @@ int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
memcpy(hdr->addr3, meshsa, ETH_ALEN);
return 24;
} else {
- *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
- IEEE80211_FCTL_TODS);
+ *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
memcpy(hdr->addr2, meshsa, ETH_ALEN);
@@ -425,7 +477,8 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
mesh_path_tx_root_frame(sdata);
mod_timer(&ifmsh->mesh_path_root_timer,
- round_jiffies(jiffies + IEEE80211_MESH_RANN_INTERVAL));
+ round_jiffies(TU_TO_EXP_TIME(
+ ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
}
#ifdef CONFIG_PM
@@ -433,7 +486,7 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- /* use atomic bitops in case both timers fire at the same time */
+ /* use atomic bitops in case all timers fire at the same time */
if (del_timer_sync(&ifmsh->housekeeping_timer))
set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
@@ -557,11 +610,18 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
struct ieee80211_rx_status *rx_status)
{
switch (mgmt->u.action.category) {
- case WLAN_CATEGORY_MESH_ACTION:
- mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
+ break;
+ }
break;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- mesh_rx_path_sel_frame(sdata, mgmt, len);
+ case WLAN_CATEGORY_MESH_ACTION:
+ if (mesh_action_is_path_sel(mgmt))
+ mesh_rx_path_sel_frame(sdata, mgmt, len);
break;
}
}
@@ -633,6 +693,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
ifmsh->accepting_plinks = true;
ifmsh->preq_id = 0;
ifmsh->sn = 0;
+ ifmsh->num_gates = 0;
atomic_set(&ifmsh->mpaths, 0);
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 249e733..8c00e2d 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -80,7 +80,10 @@ enum mesh_deferred_task_flags {
* retry
* @discovery_retries: number of discovery retries
* @flags: mesh path flags, as specified on &enum mesh_path_flags
- * @state_lock: mesh path state lock
+ * @state_lock: mesh path state lock used to protect changes to the
+ * mpath itself. There is no need to take this lock when adding or
+ * removing an mpath to/from a hash bucket in a path table.
+ * @is_gate: the destination station of this path is a mesh gate
*
*
* The combination of dst and sdata is unique in the mesh path table. Since the
@@ -104,6 +107,7 @@ struct mesh_path {
u8 discovery_retries;
enum mesh_path_flags flags;
spinlock_t state_lock;
+ bool is_gate;
};
/**
@@ -120,6 +124,9 @@ struct mesh_path {
* buckets
* @mean_chain_len: maximum average length for the hash buckets' list, if it is
* reached, the table will grow
+ * @known_gates: list of mesh gates known to this station, together with
+ * their mpaths. A gate's mpath may or may not be resolved and active.
+ *
 * @rcu_head: RCU head to free the table
*/
struct mesh_table {
@@ -133,6 +140,8 @@ struct mesh_table {
int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
int size_order;
int mean_chain_len;
+ struct hlist_head *known_gates;
+ spinlock_t gates_lock;
struct rcu_head rcu_head;
};
@@ -166,6 +175,8 @@ struct mesh_rmc {
u32 idx_mask;
};
+#define IEEE80211_MESH_PEER_INACTIVITY_LIMIT (1800 * HZ)
+#define IEEE80211_MESH_HOUSEKEEPING_INTERVAL (60 * HZ)
#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
@@ -177,14 +188,6 @@ struct mesh_rmc {
/* Maximum number of paths per interface */
#define MESH_MAX_MPATHS 1024
-/* Pending ANA approval */
-#define MESH_PATH_SEL_ACTION 0
-
-/* PERR reason codes */
-#define PEER_RCODE_UNSPECIFIED 11
-#define PERR_RCODE_NO_ROUTE 12
-#define PERR_RCODE_DEST_UNREACH 13
-
/* Public interfaces */
/* Various */
int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
@@ -199,6 +202,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie,
void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
void mesh_mgmt_ies_add(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshconf_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_meshid_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_rsn_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_vendor_ies(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
+int mesh_add_ds_params_ie(struct sk_buff *skb,
+ struct ieee80211_sub_if_data *sdata);
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
@@ -223,10 +236,13 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx,
struct ieee80211_sub_if_data *sdata);
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop);
void mesh_path_expire(struct ieee80211_sub_if_data *sdata);
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata);
void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt, size_t len);
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
+
+int mesh_path_add_gate(struct mesh_path *mpath);
+int mesh_path_send_to_gates(struct mesh_path *mpath);
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(u8 *hw_addr, u32 rates,
struct ieee80211_sub_if_data *sdata,
@@ -256,12 +272,14 @@ void mesh_pathtbl_unregister(void);
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
void mesh_path_timer(unsigned long data);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_sub_if_data *sdata);
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
+bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
extern int mesh_paths_generation;
#ifdef CONFIG_MAC80211_MESH
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 3460108..174040a 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -8,10 +8,12 @@
*/
#include <linux/slab.h>
+#include "wme.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) printk(KERN_DEBUG "Mesh HWMP: " fmt, ##args)
+#define mhwmp_dbg(fmt, args...) \
+ printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
#else
#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
@@ -68,12 +70,12 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define PREP_IE_FLAGS(x) PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x) PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x) PREQ_IE_TTL(x)
-#define PREP_IE_ORIG_ADDR(x) (x + 3)
-#define PREP_IE_ORIG_SN(x) u32_field_get(x, 9, 0)
+#define PREP_IE_ORIG_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
+#define PREP_IE_ORIG_SN(x) u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x) u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x) u32_field_get(x, 17, AE_F_SET(x))
-#define PREP_IE_TARGET_ADDR(x) (AE_F_SET(x) ? x + 27 : x + 21)
-#define PREP_IE_TARGET_SN(x) u32_field_get(x, 27, AE_F_SET(x))
+#define PREP_IE_TARGET_ADDR(x) (x + 3)
+#define PREP_IE_TARGET_SN(x) u32_field_get(x, 9, 0)
#define PERR_IE_TTL(x) (*(x))
#define PERR_IE_TARGET_FLAGS(x) (*(x + 2))
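The swapped PREP offsets above follow from the 802.11s element layout: the target (not the originator) fields come first, and the optional external address sits between the target sequence number and the lifetime. Reconstructed from the macros themselves, with offsets in bytes from the start of the element body:

	/* HWMP PREP body layout implied by the corrected macros:
	 *
	 *  offset  (AE set)  field
	 *    0       0       flags (1)
	 *    1       1       hop count (1)
	 *    2       2       TTL (1)
	 *    3       3       target address (6)
	 *    9       9       target sequence number (4)
	 *   --      13       external address (6, AE flag only)
	 *   13      19       lifetime (4)
	 *   17      23       metric (4)
	 *   21      27       originator address (6)
	 *   27      33       originator sequence number (4)
	 */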
@@ -132,24 +134,25 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
switch (action) {
case MPATH_PREQ:
- mhwmp_dbg("sending PREQ to %pM\n", target);
+ mhwmp_dbg("sending PREQ to %pM", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
- mhwmp_dbg("sending PREP to %pM\n", target);
+ mhwmp_dbg("sending PREP to %pM", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
- mhwmp_dbg("sending RANN from %pM\n", orig_addr);
+ mhwmp_dbg("sending RANN from %pM", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
@@ -163,35 +166,63 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
*pos++ = flags;
*pos++ = hop_count;
*pos++ = ttl;
- if (action == MPATH_PREQ) {
- memcpy(pos, &preq_id, 4);
+ if (action == MPATH_PREP) {
+ memcpy(pos, target, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &target_sn, 4);
pos += 4;
- }
- memcpy(pos, orig_addr, ETH_ALEN);
- pos += ETH_ALEN;
- memcpy(pos, &orig_sn, 4);
- pos += 4;
- if (action != MPATH_RANN) {
- memcpy(pos, &lifetime, 4);
+ } else {
+ if (action == MPATH_PREQ) {
+ memcpy(pos, &preq_id, 4);
+ pos += 4;
+ }
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
pos += 4;
}
+ memcpy(pos, &lifetime, 4); /* interval for RANN */
+ pos += 4;
memcpy(pos, &metric, 4);
pos += 4;
if (action == MPATH_PREQ) {
- /* destination count */
- *pos++ = 1;
+ *pos++ = 1; /* destination count */
*pos++ = target_flags;
- }
- if (action != MPATH_RANN) {
memcpy(pos, target, ETH_ALEN);
pos += ETH_ALEN;
memcpy(pos, &target_sn, 4);
+ pos += 4;
+ } else if (action == MPATH_PREP) {
+ memcpy(pos, orig_addr, ETH_ALEN);
+ pos += ETH_ALEN;
+ memcpy(pos, &orig_sn, 4);
+ pos += 4;
}
ieee80211_tx_skb(sdata, skb);
return 0;
}
+
+/* Headroom is not adjusted. Caller should ensure that skb has sufficient
+ * headroom in case the frame is encrypted. */
+static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, 0);
+
+ /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+ skb->priority = 7;
+
+ info->control.vif = &sdata->vif;
+ ieee80211_set_qos_hdr(sdata, skb);
+}
+
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
*
@@ -199,6 +230,10 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
* @target_sn: SN of the broken destination
* @target_rcode: reason code for this PERR
* @ra: node this frame is addressed to
+ *
+ * Note: This function may be called with driver locks taken that the driver
+ * also acquires in the TX path. To avoid a deadlock we don't transmit the
+ * frame directly but add it to the pending queue instead.
*/
int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
__le16 target_rcode, const u8 *ra,
@@ -212,7 +247,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
if (!skb)
return -1;
- skb_reserve(skb, local->hw.extra_tx_headroom);
+ skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
/* 25 is the size of the common mgmt part (24) plus the size of the
* common action part (1)
*/
@@ -224,9 +259,11 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
memcpy(mgmt->da, ra, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
- /* BSSID is left zeroed, wildcard value */
- mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
- mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
+ /* BSSID == SA */
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
+ mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
+ mgmt->u.action.u.mesh_action.action_code =
+ WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
ie_len = 15;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PERR;
@@ -251,7 +288,9 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
pos += 4;
memcpy(pos, &target_rcode, 2);
- ieee80211_tx_skb(sdata, skb);
+ /* see note in function header */
+ prepare_frame_for_deferred_tx(sdata, skb);
+ ieee80211_add_pending_skb(local, skb);
return 0;
}
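Queueing the PERR instead of transmitting it directly matters because mesh_path_error_tx() can run with driver TX locks already held (e.g. from mesh_plink_broken(), below in mesh_pathtbl.c); calling ieee80211_tx_skb() there could recurse into those same locks. Condensing the pattern used above into one sketch:

	/* sketch: stamp the frame, then let the TX machinery pick it up */
	static void mgmt_tx_deferred(struct ieee80211_sub_if_data *sdata,
				     struct sk_buff *skb)
	{
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
		skb->priority = 7;	/* internal mgmt frames go on VO */
		info->control.vif = &sdata->vif;
		ieee80211_set_qos_hdr(sdata, skb);
		/* actual TX happens later, outside the caller's locks */
		ieee80211_add_pending_skb(sdata->local, skb);
	}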
@@ -449,7 +488,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
if (fresh_info) {
mesh_path_assign_nexthop(mpath, sta);
- mpath->flags &= ~MESH_PATH_SN_VALID;
mpath->metric = last_hop_metric;
mpath->exp_time = time_after(mpath->exp_time, exp_time)
? mpath->exp_time : exp_time;
@@ -484,10 +522,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
- mhwmp_dbg("received PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("received PREQ from %pM", orig_addr);
if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
- mhwmp_dbg("PREQ is for us\n");
+ mhwmp_dbg("PREQ is for us");
forward = false;
reply = true;
metric = 0;
@@ -523,7 +561,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
- mhwmp_dbg("replying to the PREQ\n");
+ mhwmp_dbg("replying to the PREQ");
mesh_path_sel_frame_tx(MPATH_PREP, 0, target_addr,
cpu_to_le32(target_sn), 0, orig_addr,
cpu_to_le32(orig_sn), mgmt->sa, 0, ttl,
@@ -543,7 +581,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
- mhwmp_dbg("forwarding the PREQ from %pM\n", orig_addr);
+ mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
--ttl;
flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
@@ -578,7 +616,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
- mhwmp_dbg("received PREP from %pM\n", PREP_IE_ORIG_ADDR(prep_elem));
+ mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
 /* Note that we diverge from the draft nomenclature and call 'destination'
 * what the draft refers to as 'originator'. So in this
@@ -684,6 +722,8 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
u8 ttl, flags, hopcount;
u8 *orig_addr;
u32 orig_sn, metric;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ bool root_is_gate;
ttl = rann->rann_ttl;
if (ttl <= 1) {
@@ -692,12 +732,19 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
ttl--;
flags = rann->rann_flags;
+ root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
orig_sn = rann->rann_seq;
hopcount = rann->rann_hopcount;
hopcount++;
metric = rann->rann_metric;
- mhwmp_dbg("received RANN from %pM\n", orig_addr);
+
+ /* Ignore our own RANNs */
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0)
+ return;
+
+ mhwmp_dbg("received RANN from %pM (is_gate=%d)", orig_addr,
+ root_is_gate);
rcu_read_lock();
mpath = mesh_path_lookup(orig_addr, sdata);
@@ -709,18 +756,28 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.mshstats.dropped_frames_no_route++;
return;
}
- mesh_queue_preq(mpath,
- PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}
+
+ if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
+ time_after(jiffies, mpath->exp_time - 1*HZ)) &&
+ !(mpath->flags & MESH_PATH_FIXED)) {
+ mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
+ orig_addr);
+ mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ }
+
if (mpath->sn < orig_sn) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
- hopcount, ttl, 0,
+ hopcount, ttl, cpu_to_le32(interval),
cpu_to_le32(metric + mpath->metric),
0, sdata);
mpath->sn = orig_sn;
}
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+
rcu_read_unlock();
}
@@ -732,11 +789,20 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems elems;
size_t baselen;
u32 last_hop_metric;
+ struct sta_info *sta;
/* need action_code */
if (len < IEEE80211_MIN_ACTION_SIZE + 1)
return;
+ rcu_read_lock();
+ sta = sta_info_get(sdata, mgmt->sa);
+ if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+
baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
len - baselen, &elems);
@@ -788,16 +854,16 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- mhwmp_dbg("could not allocate PREQ node\n");
+ mhwmp_dbg("could not allocate PREQ node");
return;
}
- spin_lock(&ifmsh->mesh_preq_queue_lock);
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- mhwmp_dbg("PREQ node queue full\n");
+ mhwmp_dbg("PREQ node queue full");
return;
}
@@ -806,7 +872,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
++ifmsh->preq_queue_len;
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
@@ -982,35 +1048,46 @@ void mesh_path_timer(unsigned long data)
{
struct mesh_path *mpath = (void *) data;
struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ int ret;
if (sdata->local->quiescing)
return;
spin_lock_bh(&mpath->state_lock);
if (mpath->flags & MESH_PATH_RESOLVED ||
- (!(mpath->flags & MESH_PATH_RESOLVING)))
+ (!(mpath->flags & MESH_PATH_RESOLVING))) {
mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
- else if (mpath->discovery_retries < max_preq_retries(sdata)) {
+ spin_unlock_bh(&mpath->state_lock);
+ } else if (mpath->discovery_retries < max_preq_retries(sdata)) {
++mpath->discovery_retries;
mpath->discovery_timeout *= 2;
+ spin_unlock_bh(&mpath->state_lock);
mesh_queue_preq(mpath, 0);
} else {
mpath->flags = 0;
mpath->exp_time = jiffies;
- mesh_path_flush_pending(mpath);
+ spin_unlock_bh(&mpath->state_lock);
+ if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
+ ret = mesh_path_send_to_gates(mpath);
+ if (ret)
+ mhwmp_dbg("no gate was reachable");
+ } else
+ mesh_path_flush_pending(mpath);
}
-
- spin_unlock_bh(&mpath->state_lock);
}
void
mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ u8 flags;
- mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
+ flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
+ ? RANN_FLAG_IS_GATE : 0;
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
0, sdata->u.mesh.mshcfg.element_ttl,
- 0, 0, 0, sdata);
+ cpu_to_le32(interval), 0, 0, sdata);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 068ee651..7f54c50 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -14,9 +14,16 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
+#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
+#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
+#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
+#else
+#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
+#endif
+
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
@@ -42,8 +49,10 @@ static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
- * as readers. When reading the table (i.e. doing lookups) we are well protected
- * by RCU
+ * as readers. RCU provides sufficient protection only when reading the table
+ * (i.e. doing lookups). Adding or removing nodes requires that we take
+ * the read lock, or we risk operating on an old table. The write lock
+ * is only needed when modifying the number of buckets in a table.
*/
static DEFINE_RWLOCK(pathtbl_resize_lock);
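The discipline this comment describes is applied consistently in the rest of the patch (see mesh_path_flush_by_nexthop() and mesh_path_flush_by_iface() below): RCU alone for lookups; the resize lock as reader plus the per-bucket hashwlock for add/delete; the resize lock as writer only while rehashing. The delete side, as a sketch:

	/* sketch: removing a node under the documented locking rules */
	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);	/* pin the current table */
	tbl = resize_dereference_mesh_paths();
	spin_lock_bh(&tbl->hashwlock[i]);	/* serialize the bucket */
	__mesh_path_del(tbl, node);
	spin_unlock_bh(&tbl->hashwlock[i]);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();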
@@ -60,6 +69,8 @@ static inline struct mesh_table *resize_dereference_mpp_paths(void)
lockdep_is_held(&pathtbl_resize_lock));
}
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath);
+
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
@@ -103,6 +114,7 @@ static struct mesh_table *mesh_table_alloc(int size_order)
sizeof(newtbl->hash_rnd));
for (i = 0; i <= newtbl->hash_mask; i++)
spin_lock_init(&newtbl->hashwlock[i]);
+ spin_lock_init(&newtbl->gates_lock);
return newtbl;
}
@@ -118,6 +130,7 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
struct hlist_head *mesh_hash;
struct hlist_node *p, *q;
+ struct mpath_node *gate;
int i;
mesh_hash = tbl->hash_buckets;
@@ -129,6 +142,17 @@ static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
}
spin_unlock_bh(&tbl->hashwlock[i]);
}
+ if (free_leafs) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_for_each_entry_safe(gate, p, q,
+ tbl->known_gates, list) {
+ hlist_del(&gate->list);
+ kfree(gate);
+ }
+ kfree(tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ }
+
__mesh_table_free(tbl);
}
@@ -146,6 +170,7 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
newtbl->free_node = oldtbl->free_node;
newtbl->mean_chain_len = oldtbl->mean_chain_len;
newtbl->copy_node = oldtbl->copy_node;
+ newtbl->known_gates = oldtbl->known_gates;
atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
oldhash = oldtbl->hash_buckets;
@@ -188,6 +213,7 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
struct ieee80211_hdr *hdr;
struct sk_buff_head tmpq;
unsigned long flags;
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
rcu_assign_pointer(mpath->next_hop, sta);
@@ -198,6 +224,8 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
hdr = (struct ieee80211_hdr *) skb->data;
memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
+ ieee80211_set_qos_hdr(sdata, skb);
__skb_queue_tail(&tmpq, skb);
}
@@ -205,62 +233,128 @@ void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
+static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
+ struct mesh_path *gate_mpath)
+{
+ struct ieee80211_hdr *hdr;
+ struct ieee80211s_hdr *mshdr;
+ int mesh_hdrlen, hdrlen;
+ char *next_hop;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+
+ if (!(mshdr->flags & MESH_FLAGS_AE)) {
+ /* size of the fixed part of the mesh header */
+ mesh_hdrlen = 6;
+
+ /* make room for the two extended addresses */
+ skb_push(skb, 2 * ETH_ALEN);
+ memmove(skb->data, hdr, hdrlen + mesh_hdrlen);
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+
+ /* we preserve the previous mesh header and only add
+ * the new addresses */
+ mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ mshdr->flags = MESH_FLAGS_AE_A5_A6;
+ memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
+ memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
+ }
+
+ /* update next hop */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ rcu_read_lock();
+ next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
+ memcpy(hdr->addr1, next_hop, ETH_ALEN);
+ rcu_read_unlock();
+ memcpy(hdr->addr3, dst_addr, ETH_ALEN);
+}
/**
- * mesh_path_lookup - look up a path in the mesh path table
- * @dst: hardware address (ETH_ALEN length) of destination
- * @sdata: local subif
*
- * Returns: pointer to the mesh path structure, or NULL if not found
+ * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
*
- * Locking: must be called within a read rcu section.
+ * This function is used to transfer or copy frames from an unresolved mpath to
+ * a gate mpath. The function also adds the Address Extension field and
+ * updates the next hop.
+ *
+ * If a frame already has an Address Extension field, only the next hop and
+ * destination addresses are updated.
+ *
+ * The gate mpath must be an active mpath with a valid mpath->next_hop.
+ *
+ * @mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @from_mpath: The failed mpath
+ * @copy: When true, copy all the frames to the new mpath queue. When false,
+ * move them.
*/
-struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
+ struct mesh_path *from_mpath,
+ bool copy)
{
- struct mesh_path *mpath;
- struct hlist_node *n;
- struct hlist_head *bucket;
- struct mesh_table *tbl;
- struct mpath_node *node;
+ struct sk_buff *skb, *cp_skb = NULL;
+ struct sk_buff_head gateq, failq;
+ unsigned long flags;
+ int num_skbs;
- tbl = rcu_dereference(mesh_paths);
+ BUG_ON(gate_mpath == from_mpath);
+ BUG_ON(!gate_mpath->next_hop);
- bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
- hlist_for_each_entry_rcu(node, n, bucket, list) {
- mpath = node->mpath;
- if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
- if (MPATH_EXPIRED(mpath)) {
- spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
- spin_unlock_bh(&mpath->state_lock);
- }
- return mpath;
+ __skb_queue_head_init(&gateq);
+ __skb_queue_head_init(&failq);
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice_init(&from_mpath->frame_queue, &failq);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
+
+ num_skbs = skb_queue_len(&failq);
+
+ while (num_skbs--) {
+ skb = __skb_dequeue(&failq);
+ if (copy) {
+ cp_skb = skb_copy(skb, GFP_ATOMIC);
+ if (cp_skb)
+ __skb_queue_tail(&failq, cp_skb);
}
+
+ prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
+ __skb_queue_tail(&gateq, skb);
}
- return NULL;
+
+ spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&gateq, &gate_mpath->frame_queue);
+ mpath_dbg("Mpath queue for gate %pM has %d frames\n",
+ gate_mpath->dst,
+ skb_queue_len(&gate_mpath->frame_queue));
+ spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
+
+ if (!copy)
+ return;
+
+ spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
+ skb_queue_splice(&failq, &from_mpath->frame_queue);
+ spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
-struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+
+static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
+ struct ieee80211_sub_if_data *sdata)
{
struct mesh_path *mpath;
struct hlist_node *n;
struct hlist_head *bucket;
- struct mesh_table *tbl;
struct mpath_node *node;
- tbl = rcu_dereference(mpp_paths);
-
bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
hlist_for_each_entry_rcu(node, n, bucket, list) {
mpath = node->mpath;
if (mpath->sdata == sdata &&
- memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
+ memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
if (MPATH_EXPIRED(mpath)) {
spin_lock_bh(&mpath->state_lock);
- if (MPATH_EXPIRED(mpath))
- mpath->flags &= ~MESH_PATH_ACTIVE;
+ mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&mpath->state_lock);
}
return mpath;
@@ -269,6 +363,25 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
return NULL;
}
+/**
+ * mesh_path_lookup - look up a path in the mesh path table
+ * @dst: hardware address (ETH_ALEN length) of destination
+ * @sdata: local subif
+ *
+ * Returns: pointer to the mesh path structure, or NULL if not found
+ *
+ * Locking: must be called within an RCU read-side section.
+ */
+struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
+}
+
+struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
+{
+ return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
+}
+
/**
* mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
@@ -293,8 +406,7 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
if (j++ == idx) {
if (MPATH_EXPIRED(node->mpath)) {
spin_lock_bh(&node->mpath->state_lock);
- if (MPATH_EXPIRED(node->mpath))
- node->mpath->flags &= ~MESH_PATH_ACTIVE;
+ node->mpath->flags &= ~MESH_PATH_ACTIVE;
spin_unlock_bh(&node->mpath->state_lock);
}
return node->mpath;
@@ -304,6 +416,109 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
return NULL;
}
+static void mesh_gate_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ kfree(node);
+}
+
+/**
+ * mesh_gate_add - mark mpath as path to a mesh gate and add to known_gates
+ * @tbl: table which contains the known_gates list
+ * @mpath: mpath to known mesh gate
+ *
+ * Returns: 0 on success
+ *
+ */
+static int mesh_gate_add(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate, *new_gate;
+ struct hlist_node *n;
+ int err;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ err = -EEXIST;
+ goto err_rcu;
+ }
+
+ new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
+ if (!new_gate) {
+ err = -ENOMEM;
+ goto err_rcu;
+ }
+
+ mpath->is_gate = true;
+ mpath->sdata->u.mesh.num_gates++;
+ new_gate->mpath = mpath;
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
+ spin_unlock_bh(&tbl->gates_lock);
+ rcu_read_unlock();
+ mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
+ mpath->sdata->name, mpath->dst,
+ mpath->sdata->u.mesh.num_gates);
+ return 0;
+err_rcu:
+ rcu_read_unlock();
+ return err;
+}
+
+/**
+ * mesh_gate_del - remove a mesh gate from the list of known gates
+ * @tbl: table which holds our list of known gates
+ * @mpath: gate mpath
+ *
+ * Returns: 0 on success
+ *
+ * Locking: must be called inside rcu_read_lock() section
+ */
+static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
+{
+ struct mpath_node *gate;
+ struct hlist_node *p, *q;
+
+ tbl = rcu_dereference(tbl);
+
+ hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
+ if (gate->mpath == mpath) {
+ spin_lock_bh(&tbl->gates_lock);
+ hlist_del_rcu(&gate->list);
+ call_rcu(&gate->rcu, mesh_gate_node_reclaim);
+ spin_unlock_bh(&tbl->gates_lock);
+ mpath->sdata->u.mesh.num_gates--;
+ mpath->is_gate = false;
+ mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
+ "%d known gates\n", mpath->sdata->name,
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ *
+ * mesh_path_add_gate - record the given mpath as a path to a mesh gate
+ * @mpath: gate path to add to table
+ */
+int mesh_path_add_gate(struct mesh_path *mpath)
+{
+ return mesh_gate_add(mesh_paths, mpath);
+}
+
+/**
+ * mesh_gate_num - number of gates known to this interface
+ * @sdata: subif data
+ */
+int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
+{
+ return sdata->u.mesh.num_gates;
+}
+
/**
* mesh_path_add - allocate and add a new path to the mesh path table
* @addr: destination address of the path (ETH_ALEN length)
@@ -481,6 +696,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
new_mpath->flags = 0;
skb_queue_head_init(&new_mpath->frame_queue);
new_node->mpath = new_mpath;
+ init_timer(&new_mpath->timer);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
@@ -539,28 +755,53 @@ void mesh_plink_broken(struct sta_info *sta)
struct hlist_node *p;
struct ieee80211_sub_if_data *sdata = sta->sdata;
int i;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);
rcu_read_lock();
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if (rcu_dereference(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
+ spin_lock_bh(&mpath->state_lock);
mpath->flags &= ~MESH_PATH_ACTIVE;
++mpath->sn;
spin_unlock_bh(&mpath->state_lock);
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
mpath->dst, cpu_to_le32(mpath->sn),
- cpu_to_le16(PERR_RCODE_DEST_UNREACH),
- bcast, sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
+ reason, bcast, sdata);
+ }
}
rcu_read_unlock();
}
+static void mesh_path_node_reclaim(struct rcu_head *rp)
+{
+ struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
+ struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+
+ del_timer_sync(&node->mpath->timer);
+ atomic_dec(&sdata->u.mesh.mpaths);
+ kfree(node->mpath);
+ kfree(node);
+}
+
+/* needs to be called with the corresponding hashwlock taken */
+static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
+{
+ struct mesh_path *mpath;
+ mpath = node->mpath;
+ spin_lock(&mpath->state_lock);
+ mpath->flags |= MESH_PATH_RESOLVING;
+ if (mpath->is_gate)
+ mesh_gate_del(tbl, mpath);
+ hlist_del_rcu(&node->list);
+ call_rcu(&node->rcu, mesh_path_node_reclaim);
+ spin_unlock(&mpath->state_lock);
+ atomic_dec(&tbl->entries);
+}
+
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
@@ -581,42 +822,59 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
int i;
rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (rcu_dereference(mpath->next_hop) == sta)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (rcu_dereference(mpath->next_hop) == sta) {
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
+ }
}
+ read_unlock_bh(&pathtbl_resize_lock);
rcu_read_unlock();
}
-void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
+static void table_flush_by_iface(struct mesh_table *tbl,
+ struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl;
struct mesh_path *mpath;
struct mpath_node *node;
struct hlist_node *p;
int i;
- rcu_read_lock();
- tbl = rcu_dereference(mesh_paths);
+ WARN_ON(!rcu_read_lock_held());
for_each_mesh_entry(tbl, p, node, i) {
mpath = node->mpath;
- if (mpath->sdata == sdata)
- mesh_path_del(mpath->dst, mpath->sdata);
+ if (mpath->sdata != sdata)
+ continue;
+ spin_lock_bh(&tbl->hashwlock[i]);
+ __mesh_path_del(tbl, node);
+ spin_unlock_bh(&tbl->hashwlock[i]);
}
- rcu_read_unlock();
}
-static void mesh_path_node_reclaim(struct rcu_head *rp)
+/**
+ * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
+ *
+ * This function deletes both mesh paths and mesh portal paths.
+ *
+ * @sdata: interface data to match
+ *
+ */
+void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
- struct ieee80211_sub_if_data *sdata = node->mpath->sdata;
+ struct mesh_table *tbl;
- del_timer_sync(&node->mpath->timer);
- atomic_dec(&sdata->u.mesh.mpaths);
- kfree(node->mpath);
- kfree(node);
+ rcu_read_lock();
+ read_lock_bh(&pathtbl_resize_lock);
+ tbl = resize_dereference_mesh_paths();
+ table_flush_by_iface(tbl, sdata);
+ tbl = resize_dereference_mpp_paths();
+ table_flush_by_iface(tbl, sdata);
+ read_unlock_bh(&pathtbl_resize_lock);
+ rcu_read_unlock();
}
/**
@@ -647,12 +905,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
mpath = node->mpath;
if (mpath->sdata == sdata &&
memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
- spin_lock(&mpath->state_lock);
- mpath->flags |= MESH_PATH_RESOLVING;
- hlist_del_rcu(&node->list);
- call_rcu(&node->rcu, mesh_path_node_reclaim);
- atomic_dec(&tbl->entries);
- spin_unlock(&mpath->state_lock);
+ __mesh_path_del(tbl, node);
goto enddel;
}
}
@@ -681,6 +934,58 @@ void mesh_path_tx_pending(struct mesh_path *mpath)
}
/**
+ * mesh_path_send_to_gates - sends pending frames to all known mesh gates
+ *
+ * @mpath: mesh path whose queue will be emptied
+ *
+ * If there is only one gate, the frames are transferred from the failed mpath
+ * queue to that gate's queue. If there is more than one gate, the frames
+ * are copied from each gate to the next. After frames are copied, the
+ * mpath queues are emptied onto the transmission queue.
+ */
+int mesh_path_send_to_gates(struct mesh_path *mpath)
+{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct hlist_node *n;
+ struct mesh_table *tbl;
+ struct mesh_path *from_mpath = mpath;
+ struct mpath_node *gate = NULL;
+ bool copy = false;
+ struct hlist_head *known_gates;
+
+ rcu_read_lock();
+ tbl = rcu_dereference(mesh_paths);
+ known_gates = tbl->known_gates;
+ rcu_read_unlock();
+
+ if (!known_gates)
+ return -EHOSTUNREACH;
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list) {
+ if (gate->mpath->sdata != sdata)
+ continue;
+
+ if (gate->mpath->flags & MESH_PATH_ACTIVE) {
+ mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+ mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
+ from_mpath = gate->mpath;
+ copy = true;
+ } else {
+ mpath_dbg("Not forwarding %p\n", gate->mpath);
+ mpath_dbg("flags %x\n", gate->mpath->flags);
+ }
+ }
+
+ hlist_for_each_entry_rcu(gate, n, known_gates, list)
+ if (gate->mpath->sdata == sdata) {
+ mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+ mesh_path_tx_pending(gate->mpath);
+ }
+
+ return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
+}
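
The move-then-copy chain above is easy to misread: the failed mpath's queue is moved (emptied) into the first active gate, every later active gate receives a copy of the previous gate's queue, and only afterwards are all the gate queues flushed by mesh_path_tx_pending(). A standalone model of that chain, with a made-up queue type and every gate assumed active:

#include <stdbool.h>

struct frameq { int nframes; };

static void move_or_copy(struct frameq *dst, struct frameq *src, bool copy)
{
        dst->nframes += src->nframes;
        if (!copy)
                src->nframes = 0;       /* a move empties the source queue */
}

static int send_to_gates(struct frameq *failed, struct frameq *gate, int n)
{
        struct frameq *from = failed;
        bool copy = false;
        int i;

        for (i = 0; i < n; i++) {
                move_or_copy(&gate[i], from, copy);
                from = &gate[i];        /* next gate copies from this one */
                copy = true;            /* only the first transfer is a move */
        }
        return (from == failed) ? -1 : 0;  /* -1: no gate took the frames */
}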
+
+/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
@@ -699,18 +1004,23 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct mesh_path *mpath;
u32 sn = 0;
+ __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);
if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
ra = hdr->addr1;
+ rcu_read_lock();
mpath = mesh_path_lookup(da, sdata);
- if (mpath)
+ if (mpath) {
+ spin_lock_bh(&mpath->state_lock);
sn = ++mpath->sn;
+ spin_unlock_bh(&mpath->state_lock);
+ }
+ rcu_read_unlock();
mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
- cpu_to_le32(sn),
- cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
+ cpu_to_le32(sn), reason, ra, sdata);
}
kfree_skb(skb);
@@ -728,8 +1038,7 @@ void mesh_path_flush_pending(struct mesh_path *mpath)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mpath->frame_queue)) &&
- (mpath->flags & MESH_PATH_ACTIVE))
+ while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(skb, mpath->sdata);
}
@@ -790,6 +1099,7 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
int mesh_pathtbl_init(void)
{
struct mesh_table *tbl_path, *tbl_mpp;
+ int ret;
tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_path)
@@ -797,21 +1107,40 @@ int mesh_pathtbl_init(void)
tbl_path->free_node = &mesh_path_node_free;
tbl_path->copy_node = &mesh_path_node_copy;
tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_path->known_gates) {
+ ret = -ENOMEM;
+ goto free_path;
+ }
+ INIT_HLIST_HEAD(tbl_path->known_gates);
+
tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
if (!tbl_mpp) {
- mesh_table_free(tbl_path, true);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_path;
}
tbl_mpp->free_node = &mesh_path_node_free;
tbl_mpp->copy_node = &mesh_path_node_copy;
tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
+ tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
+ if (!tbl_mpp->known_gates) {
+ ret = -ENOMEM;
+ goto free_mpp;
+ }
+ INIT_HLIST_HEAD(tbl_mpp->known_gates);
/* Need no locking since this is during init */
RCU_INIT_POINTER(mesh_paths, tbl_path);
RCU_INIT_POINTER(mpp_paths, tbl_mpp);
return 0;
+
+free_mpp:
+ mesh_table_free(tbl_mpp, true);
+free_path:
+ mesh_table_free(tbl_path, true);
+ return ret;
}
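
The reworked init adopts the usual goto-unwind idiom: each allocation that fails jumps to a label that frees everything allocated so far, with the labels falling through in reverse allocation order. The same shape in plain C, illustrative only:

#include <stdlib.h>

int init_two(void **pa, void **pb)
{
        void *a, *b, *gates;
        int ret;

        a = malloc(16);
        if (!a)
                return -1;

        b = malloc(16);
        if (!b) {
                ret = -1;
                goto free_a;            /* only 'a' exists so far */
        }

        gates = malloc(16);             /* stands in for the known_gates alloc */
        if (!gates) {
                ret = -1;
                goto free_b;            /* unwind 'b', then 'a' */
        }

        *pa = a;
        *pb = b;
        free(gates);                    /* illustrative; the real code keeps it */
        return 0;

free_b:
        free(b);
free_a:                                 /* free_b falls through here */
        free(a);
        return ret;
}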
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
@@ -828,14 +1157,10 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
if (node->mpath->sdata != sdata)
continue;
mpath = node->mpath;
- spin_lock_bh(&mpath->state_lock);
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
- time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
- spin_unlock_bh(&mpath->state_lock);
+ time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
mesh_path_del(mpath->dst, mpath->sdata);
- } else
- spin_unlock_bh(&mpath->state_lock);
}
rcu_read_unlock();
}
@@ -843,6 +1168,6 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
void mesh_pathtbl_unregister(void)
{
/* no need for locking during exit path */
- mesh_table_free(rcu_dereference_raw(mesh_paths), true);
- mesh_table_free(rcu_dereference_raw(mpp_paths), true);
+ mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
+ mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f4adc09..7e57f5d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -19,35 +19,18 @@
#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
-#define PLINK_GET_LLID(p) (p + 4)
-#define PLINK_GET_PLID(p) (p + 6)
+#define PLINK_GET_LLID(p) (p + 2)
+#define PLINK_GET_PLID(p) (p + 4)
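
The offsets shrink by two because the element body changes: the old format began with the 4-byte OUI-based peering protocol (00:0F:AC:2A, removed from mesh_plink_frame_tx below), while the new one begins with a 2-byte peering protocol identifier. A sketch of the assumed new element body:

struct mesh_peering_ie_body {
        __le16 proto;   /* peering protocol, 0 = default        (offset 0) */
        __le16 llid;    /* local link ID; PLINK_GET_LLID == p+2 (offset 2) */
        __le16 plid;    /* peer link ID; PLINK_GET_PLID == p+4, present in
                         * CONFIRM and (optionally) CLOSE frames only */
        __le16 reason;  /* reason code, only in CLOSE frames */
} __attribute__((packed));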
#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
jiffies + HZ * t / 1000))
-/* Peer link cancel reasons, all subject to ANA approval */
-#define MESH_LINK_CANCELLED 2
-#define MESH_MAX_NEIGHBORS 3
-#define MESH_CAPABILITY_POLICY_VIOLATION 4
-#define MESH_CLOSE_RCVD 5
-#define MESH_MAX_RETRIES 6
-#define MESH_CONFIRM_TIMEOUT 7
-#define MESH_SECURITY_ROLE_NEGOTIATION_DIFFERS 8
-#define MESH_SECURITY_AUTHENTICATION_IMPOSSIBLE 9
-#define MESH_SECURITY_FAILED_VERIFICATION 10
-
#define dot11MeshMaxRetries(s) (s->u.mesh.mshcfg.dot11MeshMaxRetries)
#define dot11MeshRetryTimeout(s) (s->u.mesh.mshcfg.dot11MeshRetryTimeout)
#define dot11MeshConfirmTimeout(s) (s->u.mesh.mshcfg.dot11MeshConfirmTimeout)
#define dot11MeshHoldingTimeout(s) (s->u.mesh.mshcfg.dot11MeshHoldingTimeout)
#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
-enum plink_frame_type {
- PLINK_OPEN = 1,
- PLINK_CONFIRM,
- PLINK_CLOSE
-};
-
enum plink_event {
PLINK_UNDEFINED,
OPN_ACPT,
@@ -60,6 +43,10 @@ enum plink_event {
CLS_IGNR
};
+static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason);
+
static inline
void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
{
@@ -105,7 +92,9 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (!sta)
return NULL;
- sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_WME);
sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
rate_control_rate_init(sta);
@@ -150,6 +139,10 @@ void mesh_plink_deactivate(struct sta_info *sta)
spin_lock_bh(&sta->lock);
deactivated = __mesh_plink_deactivate(sta);
+ sta->reason = cpu_to_le16(WLAN_REASON_MESH_PEER_CANCELED);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, sta->llid, sta->plid,
+ sta->reason);
spin_unlock_bh(&sta->lock);
if (deactivated)
@@ -157,16 +150,16 @@ void mesh_plink_deactivate(struct sta_info *sta)
}
static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
- enum plink_frame_type action, u8 *da, __le16 llid, __le16 plid,
- __le16 reason) {
+ enum ieee80211_self_protected_actioncode action,
+ u8 *da, __le16 llid, __le16 plid, __le16 reason) {
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
sdata->u.mesh.ie_len);
struct ieee80211_mgmt *mgmt;
bool include_plid = false;
- static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
+ int ie_len = 4;
+ u16 peering_proto = 0;
u8 *pos;
- int ie_len;
if (!skb)
return -1;
@@ -175,63 +168,75 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
* common action part (1)
*/
mgmt = (struct ieee80211_mgmt *)
- skb_put(skb, 25 + sizeof(mgmt->u.action.u.plink_action));
- memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.plink_action));
+ skb_put(skb, 25 + sizeof(mgmt->u.action.u.self_prot));
+ memset(mgmt, 0, 25 + sizeof(mgmt->u.action.u.self_prot));
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
- mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
- mgmt->u.action.u.plink_action.action_code = action;
-
- if (action == PLINK_CLOSE)
- mgmt->u.action.u.plink_action.aux = reason;
- else {
- mgmt->u.action.u.plink_action.aux = cpu_to_le16(0x0);
- if (action == PLINK_CONFIRM) {
- pos = skb_put(skb, 4);
- /* two-byte status code followed by two-byte AID */
- memset(pos, 0, 2);
+ mgmt->u.action.category = WLAN_CATEGORY_SELF_PROTECTED;
+ mgmt->u.action.u.self_prot.action_code = action;
+
+ if (action != WLAN_SP_MESH_PEERING_CLOSE) {
+ /* capability info */
+ pos = skb_put(skb, 2);
+ memset(pos, 0, 2);
+ if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
+ /* AID */
+ pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata))
+ return -1;
+ } else { /* WLAN_SP_MESH_PEERING_CLOSE */
+ if (mesh_add_meshid_ie(skb, sdata))
+ return -1;
}
- /* Add Peer Link Management element */
+ /* Add Mesh Peering Management element */
switch (action) {
- case PLINK_OPEN:
- ie_len = 6;
+ case WLAN_SP_MESH_PEERING_OPEN:
break;
- case PLINK_CONFIRM:
- ie_len = 8;
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ ie_len += 2;
include_plid = true;
break;
- case PLINK_CLOSE:
- default:
- if (!plid)
- ie_len = 8;
- else {
- ie_len = 10;
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ if (plid) {
+ ie_len += 2;
include_plid = true;
}
+ ie_len += 2; /* reason code */
break;
+ default:
+ return -EINVAL;
}
+ if (WARN_ON(skb_tailroom(skb) < 2 + ie_len))
+ return -ENOMEM;
+
pos = skb_put(skb, 2 + ie_len);
- *pos++ = WLAN_EID_PEER_LINK;
+ *pos++ = WLAN_EID_PEER_MGMT;
*pos++ = ie_len;
- memcpy(pos, meshpeeringproto, sizeof(meshpeeringproto));
- pos += 4;
+ memcpy(pos, &peering_proto, 2);
+ pos += 2;
memcpy(pos, &llid, 2);
+ pos += 2;
if (include_plid) {
- pos += 2;
memcpy(pos, &plid, 2);
- }
- if (action == PLINK_CLOSE) {
pos += 2;
+ }
+ if (action == WLAN_SP_MESH_PEERING_CLOSE) {
memcpy(pos, &reason, 2);
+ pos += 2;
}
+ if (mesh_add_vendor_ies(skb, sdata))
+ return -1;
ieee80211_tx_skb(sdata, skb);
return 0;
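
The ie_len bookkeeping above encodes a simple rule, cross-checked by the rx-side length validation later in this patch (OPEN: 4, CONFIRM: 6, CLOSE: 6 or 8). A sketch, assuming the WLAN_SP_* enum used throughout this diff:

static int peering_ie_len(enum ieee80211_self_protected_actioncode action,
                          bool have_plid)
{
        int len = 4;                            /* proto (2) + llid (2) */

        if (action == WLAN_SP_MESH_PEERING_CONFIRM ||
            (action == WLAN_SP_MESH_PEERING_CLOSE && have_plid))
                len += 2;                       /* plid */
        if (action == WLAN_SP_MESH_PEERING_CLOSE)
                len += 2;                       /* reason code */
        return len;
}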
@@ -322,21 +327,21 @@ static void mesh_plink_timer(unsigned long data)
++sta->plink_retries;
mod_plink_timer(sta, sta->plink_timeout);
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
break;
}
- reason = cpu_to_le16(MESH_MAX_RETRIES);
+ reason = cpu_to_le16(WLAN_REASON_MESH_MAX_RETRIES);
/* fall through on else */
case NL80211_PLINK_CNF_RCVD:
/* confirm timer */
if (!reason)
- reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIRM_TIMEOUT);
sta->plink_state = NL80211_PLINK_HOLDING;
mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
- reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case NL80211_PLINK_HOLDING:
/* holding timer */
@@ -380,7 +385,7 @@ int mesh_plink_open(struct sta_info *sta)
__le16 llid;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- if (!test_sta_flags(sta, WLAN_STA_AUTH))
+ if (!test_sta_flag(sta, WLAN_STA_AUTH))
return -EPERM;
spin_lock_bh(&sta->lock);
@@ -396,7 +401,7 @@ int mesh_plink_open(struct sta_info *sta)
mpl_dbg("Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
- return mesh_plink_frame_tx(sdata, PLINK_OPEN,
+ return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
sta->sta.addr, llid, 0, 0);
}
@@ -422,7 +427,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
struct ieee802_11_elems elems;
struct sta_info *sta;
enum plink_event event;
- enum plink_frame_type ftype;
+ enum ieee80211_self_protected_actioncode ftype;
size_t baselen;
bool deactivated, matches_local = true;
u8 ie_len;
@@ -449,14 +454,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- baseaddr = mgmt->u.action.u.plink_action.variable;
- baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
- if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
+ baseaddr = mgmt->u.action.u.self_prot.variable;
+ baselen = (u8 *) mgmt->u.action.u.self_prot.variable - (u8 *) mgmt;
+ if (mgmt->u.action.u.self_prot.action_code ==
+ WLAN_SP_MESH_PEERING_CONFIRM) {
baseaddr += 4;
baselen += 4;
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
- if (!elems.peer_link) {
+ if (!elems.peering) {
mpl_dbg("Mesh plink: missing necessary peer link ie\n");
return;
}
@@ -466,37 +472,40 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
return;
}
- ftype = mgmt->u.action.u.plink_action.action_code;
- ie_len = elems.peer_link_len;
- if ((ftype == PLINK_OPEN && ie_len != 6) ||
- (ftype == PLINK_CONFIRM && ie_len != 8) ||
- (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
+ ftype = mgmt->u.action.u.self_prot.action_code;
+ ie_len = elems.peering_len;
+ if ((ftype == WLAN_SP_MESH_PEERING_OPEN && ie_len != 4) ||
+ (ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
+ && ie_len != 8)) {
mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
ftype, ie_len);
return;
}
- if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!elems.mesh_id || !elems.mesh_config)) {
mpl_dbg("Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
* from the point of view of this host.
*/
- memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
- if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
- memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);
+ memcpy(&plid, PLINK_GET_LLID(elems.peering), 2);
+ if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
+ memcpy(&llid, PLINK_GET_PLID(elems.peering), 2);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
- if (!sta && ftype != PLINK_OPEN) {
+ if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
return;
}
- if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
+ if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
return;
@@ -509,30 +518,30 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* Now we will figure out the appropriate event... */
event = PLINK_UNDEFINED;
- if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
+ if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
+ (!mesh_matches_local(&elems, sdata))) {
matches_local = false;
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
event = OPN_RJCT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
event = CNF_RJCT;
break;
- case PLINK_CLOSE:
- /* avoid warning */
+ default:
break;
}
}
if (!sta && !matches_local) {
rcu_read_unlock();
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
llid = 0;
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, mgmt->sa, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ mgmt->sa, llid, plid, reason);
return;
} else if (!sta) {
- /* ftype == PLINK_OPEN */
+ /* ftype == WLAN_SP_MESH_PEERING_OPEN */
u32 rates;
rcu_read_unlock();
@@ -557,21 +566,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (matches_local) {
spin_lock_bh(&sta->lock);
switch (ftype) {
- case PLINK_OPEN:
+ case WLAN_SP_MESH_PEERING_OPEN:
if (!mesh_plink_free_count(sdata) ||
(sta->plid && sta->plid != plid))
event = OPN_IGNR;
else
event = OPN_ACPT;
break;
- case PLINK_CONFIRM:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
if (!mesh_plink_free_count(sdata) ||
(sta->llid != llid || sta->plid != plid))
event = CNF_IGNR;
else
event = CNF_ACPT;
break;
- case PLINK_CLOSE:
+ case WLAN_SP_MESH_PEERING_CLOSE:
if (sta->plink_state == NL80211_PLINK_ESTAB)
/* Do not check for llid or plid. This does not
* follow the standard but since multiple plinks
@@ -620,10 +629,12 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->llid = llid;
mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
- 0, 0);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
- llid, plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_OPEN,
+ sta->sta.addr, llid, 0, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -635,10 +646,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -647,8 +658,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
/* retry timer is left untouched */
@@ -656,8 +668,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta->plid = plid;
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
sta->plink_state = NL80211_PLINK_CNF_RCVD;
@@ -677,10 +690,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -689,14 +702,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
case CNF_ACPT:
del_timer(&sta->plink_timer);
@@ -717,10 +731,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
switch (event) {
case OPN_RJCT:
case CNF_RJCT:
- reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CONFIG);
case CLS_ACPT:
if (!reason)
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
sta->plink_state = NL80211_PLINK_HOLDING;
if (!mod_plink_timer(sta,
@@ -729,8 +743,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
del_timer(&sta->plink_timer);
@@ -740,8 +755,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -752,7 +768,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
case NL80211_PLINK_ESTAB:
switch (event) {
case CLS_ACPT:
- reason = cpu_to_le16(MESH_CLOSE_RCVD);
+ reason = cpu_to_le16(WLAN_REASON_MESH_CLOSE);
sta->reason = reason;
deactivated = __mesh_plink_deactivate(sta);
sta->plink_state = NL80211_PLINK_HOLDING;
@@ -761,14 +777,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
spin_unlock_bh(&sta->lock);
if (deactivated)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
- plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
case OPN_ACPT:
llid = sta->llid;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
- plid, 0);
+ mesh_plink_frame_tx(sdata,
+ WLAN_SP_MESH_PEERING_CONFIRM,
+ sta->sta.addr, llid, plid, 0);
break;
default:
spin_unlock_bh(&sta->lock);
@@ -790,8 +807,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
llid = sta->llid;
reason = sta->reason;
spin_unlock_bh(&sta->lock);
- mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
- llid, plid, reason);
+ mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+ sta->sta.addr, llid, plid, reason);
break;
default:
spin_unlock_bh(&sta->lock);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d6470c7..72c8bea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -16,10 +16,12 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
@@ -160,7 +162,8 @@ static int ecw2cw(int ecw)
*/
static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_ht_info *hti,
- const u8 *bssid, u16 ap_ht_cap_flags)
+ const u8 *bssid, u16 ap_ht_cap_flags,
+ bool beacon_htcap_ie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -232,6 +235,21 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
}
+ if (beacon_htcap_ie && (prev_chantype != channel_type)) {
+ /*
+ * Whenever the AP announces an HT mode change (e.g. becoming
+ * 40 MHz intolerant), it is safer to stop the TX queues before
+ * doing the hw config, to avoid buffer overflow.
+ */
+ ieee80211_stop_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
+
+ /* flush out all packets */
+ synchronize_net();
+
+ drv_flush(local, false);
+ }
+
/* channel_type change automatically detected */
ieee80211_hw_config(local, 0);
@@ -243,6 +261,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
IEEE80211_RC_HT_CHANGED,
channel_type);
rcu_read_unlock();
+
+ if (beacon_htcap_ie)
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
}
ht_opmode = le16_to_cpu(hti->operation_mode);
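
The quiesce added above only triggers when a beacon changes the channel type, and the ordering is the important part. A condensed sketch using only calls that appear in this hunk (the real code also notifies rate control between the config and the wake):

static void chtype_change_quiesce(struct ieee80211_local *local)
{
        /* stop new tx first, drain in-flight tx paths, flush the
         * hw queues, then reconfigure and wake the queues again */
        ieee80211_stop_queues_by_reason(&local->hw,
                        IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
        synchronize_net();
        drv_flush(local, false);
        ieee80211_hw_config(local, 0);
        ieee80211_wake_queues_by_reason(&local->hw,
                        IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
}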
@@ -271,11 +293,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "deauth/disassoc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
@@ -330,6 +350,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
if (!skb)
@@ -340,6 +361,10 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
+
ieee80211_tx_skb(sdata, skb);
}
@@ -354,11 +379,9 @@ static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
return;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
- "nullfunc frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
@@ -394,6 +417,9 @@ static void ieee80211_chswitch_work(struct work_struct *work)
/* call "hw_config" only if doing sw channel switch */
ieee80211_hw_config(sdata->local,
IEEE80211_CONF_CHANGE_CHANNEL);
+ } else {
+ /* update the device channel directly */
+ sdata->local->hw.conf.channel = sdata->local->oper_channel;
}
/* XXX: shouldn't really modify cfg80211-owned data! */
@@ -608,11 +634,14 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *mgd = &sdata->u.mgd;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool authorized = false;
if (!mgd->powersave)
return false;
+ if (mgd->broken_ap)
+ return false;
+
if (!mgd->associated)
return false;
@@ -626,13 +655,10 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
sta = sta_info_get(sdata, mgd->bssid);
if (sta)
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
rcu_read_unlock();
- if (!(sta_flags & WLAN_STA_AUTHORIZED))
- return false;
-
- return true;
+ return authorized;
}
/* need to hold RTNL or interface lock */
@@ -917,8 +943,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.aifs, params.cw_min, params.cw_max,
params.txop, params.uapsd);
#endif
- local->tx_conf[queue] = params;
- if (drv_conf_tx(local, queue, &params))
+ sdata->tx_conf[queue] = params;
+ if (drv_conf_tx(local, sdata, queue, &params))
wiphy_debug(local->hw.wiphy,
"failed to set TX queue parameters for queue %d\n",
queue);
@@ -1076,7 +1102,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
if (sta) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, tx);
}
mutex_unlock(&local->sta_mtx);
@@ -1118,8 +1144,9 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
ieee80211_bss_info_change_notify(sdata, changed);
+ /* remove AP and TDLS peers */
if (remove_sta)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(local, sdata);
del_timer_sync(&sdata->u.mgd.conn_mon_timer);
del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
@@ -1220,7 +1247,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
} else {
ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
- (u32) -1, true);
+ (u32) -1, true, false);
}
ifmgd->probe_send_count++;
@@ -1467,10 +1494,21 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
- printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
- "set\n", sdata->name, aid);
+ printk(KERN_DEBUG
+ "%s: invalid AID value 0x%x; bits 15:14 not set\n",
+ sdata->name, aid);
aid &= ~(BIT(15) | BIT(14));
+ ifmgd->broken_ap = false;
+
+ if (aid == 0 || aid > IEEE80211_MAX_AID) {
+ printk(KERN_DEBUG
+ "%s: invalid AID value %d (out of range), turn off PS\n",
+ sdata->name, aid);
+ aid = 0;
+ ifmgd->broken_ap = true;
+ }
+
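The association response carries the AID with bits 15:14 set per the spec; after masking them off, a value of 0 or above the spec maximum marks the AP as broken, and powersave is then refused (see the broken_ap check in ieee80211_powersave_allowed() earlier in this diff). A userspace sketch, assuming IEEE80211_MAX_AID is 2007 as in the kernel headers:

#include <stdbool.h>
#include <stdint.h>

#define BIT(n)                  (1u << (n))
#define IEEE80211_MAX_AID       2007

static uint16_t sanitize_aid(uint16_t aid, bool *broken_ap)
{
        aid &= ~(BIT(15) | BIT(14));    /* bits 15:14 are always set */
        *broken_ap = (aid == 0 || aid > IEEE80211_MAX_AID);
        if (*broken_ap)
                aid = 0;                /* powersave will be refused */
        return aid;
}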
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -1482,17 +1520,22 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
ifmgd->aid = aid;
- sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
- if (!sta) {
- printk(KERN_DEBUG "%s: failed to alloc STA entry for"
- " the AP\n", sdata->name);
+ mutex_lock(&sdata->local->sta_mtx);
+ /*
+ * station info was already allocated and inserted before
+ * the association and should be available to us
+ */
+ sta = sta_info_get_rx(sdata, cbss->bssid);
+ if (WARN_ON(!sta)) {
+ mutex_unlock(&sdata->local->sta_mtx);
return false;
}
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
- WLAN_STA_ASSOC_AP);
+ set_sta_flag(sta, WLAN_STA_AUTH);
+ set_sta_flag(sta, WLAN_STA_ASSOC);
+ set_sta_flag(sta, WLAN_STA_ASSOC_AP);
if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ set_sta_flag(sta, WLAN_STA_AUTHORIZED);
rates = 0;
basic_rates = 0;
@@ -1551,12 +1594,13 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
rate_control_rate_init(sta);
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
- set_sta_flags(sta, WLAN_STA_MFP);
+ set_sta_flag(sta, WLAN_STA_MFP);
if (elems.wmm_param)
- set_sta_flags(sta, WLAN_STA_WME);
+ set_sta_flag(sta, WLAN_STA_WME);
- err = sta_info_insert(sta);
+ /* sta_info_reinsert() will also unlock the sta_mtx mutex */
+ err = sta_info_reinsert(sta);
sta = NULL;
if (err) {
printk(KERN_DEBUG "%s: failed to insert STA entry for"
@@ -1584,7 +1628,8 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
(sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- cbss->bssid, ap_ht_cap_flags);
+ cbss->bssid, ap_ht_cap_flags,
+ false);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
@@ -1918,7 +1963,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- bssid, ap_ht_cap_flags);
+ bssid, ap_ht_cap_flags, true);
}
/* Note: country IE parsing is done for us by cfg80211 */
@@ -2429,6 +2474,29 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
return 0;
}
+/* create and insert a dummy station entry */
+static int ieee80211_pre_assoc(struct ieee80211_sub_if_data *sdata,
+ u8 *bssid) {
+ struct sta_info *sta;
+ int err;
+
+ sta = sta_info_alloc(sdata, bssid, GFP_KERNEL);
+ if (!sta)
+ return -ENOMEM;
+
+ sta->dummy = true;
+
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG "%s: failed to insert Dummy STA entry for"
+ " the AP (error %d)\n", sdata->name, err);
+ return err;
+ }
+
+ return 0;
+}
+
static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
struct sk_buff *skb)
{
@@ -2436,9 +2504,11 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
struct ieee80211_mgmt *mgmt;
struct ieee80211_rx_status *rx_status;
struct ieee802_11_elems elems;
+ struct cfg80211_bss *cbss = wk->assoc.bss;
u16 status;
if (!skb) {
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
goto destroy;
}
@@ -2468,12 +2538,16 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
mutex_unlock(&wk->sdata->u.mgd.mtx);
/* oops -- internal error -- send timeout for now */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
cfg80211_send_assoc_timeout(wk->sdata->dev,
wk->filter_ta);
return WORK_DONE_DESTROY;
}
mutex_unlock(&wk->sdata->u.mgd.mtx);
+ } else {
+ /* assoc failed - destroy the dummy station entry */
+ sta_info_destroy_addr(wk->sdata, cbss->bssid);
}
cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
@@ -2492,7 +2566,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)req->bss->priv;
struct ieee80211_work *wk;
const u8 *ssid;
- int i;
+ int i, err;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
@@ -2517,6 +2591,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
if (!wk)
return -ENOMEM;
+ /*
+ * create a dummy station info entry in order to start
+ * accepting incoming EAPOL packets from the AP
+ */
+ err = ieee80211_pre_assoc(sdata, req->bss->bssid);
+ if (err) {
+ kfree(wk);
+ return err;
+ }
+
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
@@ -2674,7 +2758,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
req->reason_code, cookie,
!req->local_state_change);
if (assoc_bss)
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
@@ -2714,7 +2798,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
IEEE80211_STYPE_DISASSOC, req->reason_code,
cookie, !req->local_state_change);
- sta_info_destroy_addr(sdata, bssid);
+ sta_info_flush(sdata->local, sdata);
mutex_lock(&sdata->local->mtx);
ieee80211_recalc_idle(sdata->local);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 13427b1..3d41441 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -12,6 +12,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "driver-trace.h"
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 6326d34..9ee7164 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -42,7 +42,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
}
mutex_unlock(&local->sta_mtx);
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3d5a2cb..5a5a776 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
@@ -199,7 +200,7 @@ static void rate_control_release(struct kref *kref)
kfree(ctrl_ref);
}
-static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
+static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -208,7 +209,9 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
fc = hdr->frame_control;
- return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
+ return (info->flags & (IEEE80211_TX_CTL_NO_ACK |
+ IEEE80211_TX_CTL_USE_MINRATE)) ||
+ !ieee80211_is_data(fc);
}
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
@@ -233,6 +236,27 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
+static inline s8
+rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *srate = &sband->bitrates[i];
+ if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
+ (srate->bitrate == 55) || (srate->bitrate == 110))
+ continue;
+
+ if (rate_supported(sta, sband->band, i))
+ return i;
+ }
+
+ /* No matching rate found */
+ return 0;
+}
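
struct ieee80211_rate keeps bitrates in units of 100 kbps, so the literals 10/20/55/110 above are exactly the 2.4 GHz CCK rates 1, 2, 5.5 and 11 Mbit/s; skipping them yields the lowest usable non-CCK rate when IEEE80211_TX_CTL_NO_CCK_RATE is set (see rate_control_send_low below). The same filter in plain C:

static int lowest_non_cck_index(const int *bitrate, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                int r = bitrate[i];     /* in 100 kbps units */

                if (r == 10 || r == 20 || r == 55 || r == 110)
                        continue;       /* 1, 2, 5.5, 11 Mbit/s CCK */
                return i;
        }
        return 0;                       /* fall back to the first rate */
}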
+
+
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
@@ -241,8 +265,14 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband = txrc->sband;
int mcast_rate;
- if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
- info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
+ if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+ if ((sband->band != IEEE80211_BAND_2GHZ) ||
+ !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ info->control.rates[0].idx =
+ rate_lowest_index(txrc->sband, sta);
+ else
+ info->control.rates[0].idx =
+ rate_lowest_non_cck_index(txrc->sband, sta);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index a290ad23..d5a5622 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -50,6 +50,7 @@
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 66a1eeb..cdb2853 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -281,6 +281,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
if (cur_tp < mr->cur_tp) {
+ mi->max_tp_rate2 = mi->max_tp_rate;
+ cur_tp2 = cur_tp;
mi->max_tp_rate = mg->max_tp_rate;
cur_tp = mr->cur_tp;
}
@@ -452,7 +454,8 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
minstrel_ht_update_stats(mp, mi);
- minstrel_aggr_check(mp, sta, skb);
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ minstrel_aggr_check(mp, sta, skb);
}
}
@@ -608,7 +611,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
info->flags |= mi->tx_flags;
- sample_idx = minstrel_get_sample_rate(mp, mi);
+
+ /* Don't use EAPOL frames for sampling on non-mrr hw */
+ if (mp->hw->max_rates == 1 &&
+ txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+ sample_idx = -1;
+ else
+ sample_idx = minstrel_get_sample_rate(mp, mi);
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
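
The guard above exists because, with max_rates == 1, the hardware retries only the single programmed rate with no fallback chain, so a bad sampling pick could stall an EAPOL handshake. A boiled-down sketch (ETH_P_PAE is 0x888E):

#include <stdbool.h>
#include <stdint.h>

static bool may_sample(int hw_max_rates, uint16_t ethertype)
{
        /* never burn an EAPOL frame on rate sampling without mrr */
        return !(hw_max_rates == 1 && ethertype == 0x888E /* ETH_P_PAE */);
}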
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index cefcb5d..e788f76 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -10,6 +10,7 @@
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index 4851e9e..c97a065 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "rate.h"
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index fe2c2a7..bb53726 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
@@ -476,7 +477,6 @@ static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
char *dev_addr = rx->sdata->vif.addr;
if (ieee80211_is_data(hdr->frame_control)) {
@@ -524,14 +524,6 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
}
-#define msh_h_get(h, l) ((struct ieee80211s_hdr *) ((u8 *)h + l))
-
- if (ieee80211_is_data(hdr->frame_control) &&
- is_multicast_ether_addr(hdr->addr1) &&
- mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
- return RX_DROP_MONITOR;
-#undef msh_h_get
-
return RX_CONTINUE;
}
@@ -850,8 +842,21 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
ieee80211_is_pspoll(hdr->frame_control)) &&
rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
- (!rx->sta || !test_sta_flags(rx->sta, WLAN_STA_ASSOC))))
+ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
+ if (rx->sta && rx->sta->dummy &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+ u16 ethertype;
+ u8 *payload;
+
+ payload = rx->skb->data +
+ ieee80211_hdrlen(hdr->frame_control);
+ ethertype = (payload[6] << 8) | payload[7];
+ if (cpu_to_be16(ethertype) ==
+ rx->sdata->control_port_protocol)
+ return RX_CONTINUE;
+ }
return RX_DROP_MONITOR;
+ }
return RX_CONTINUE;
}
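
The dummy-station exception above lets EAPOL through before association completes: data frames carry an LLC/SNAP header (AA AA 03 00 00 00) right after the 802.11 header, so the EtherType sits at payload[6..7] in network byte order, which is what the payload[6] << 8 | payload[7] expression extracts; sdata->control_port_protocol is normally EAPOL (ETH_P_PAE, 0x888E). A sketch of the extraction:

#include <stdint.h>

/* EtherType from an LLC/SNAP-encapsulated 802.11 data payload */
static uint16_t snap_ethertype(const uint8_t *payload)
{
        return (uint16_t)((payload[6] << 8) | payload[7]);
}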
@@ -1106,7 +1111,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
atomic_inc(&sdata->bss->num_sta_ps);
- set_sta_flags(sta, WLAN_STA_PS_STA);
+ set_sta_flag(sta, WLAN_STA_PS_STA);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
@@ -1126,7 +1131,7 @@ static void ap_sta_ps_end(struct sta_info *sta)
sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
sdata->name, sta->sta.addr, sta->sta.aid);
@@ -1145,7 +1150,7 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
/* Don't let the same PS state be set twice */
- in_ps = test_sta_flags(sta_inf, WLAN_STA_PS_STA);
+ in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
if ((start && in_ps) || (!start && !in_ps))
return -EINVAL;
@@ -1159,6 +1164,81 @@ int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
EXPORT_SYMBOL(ieee80211_sta_ps_transition);
static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ int tid, ac;
+
+ if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP &&
+ sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
+ return RX_CONTINUE;
+
+ /*
+ * The device handles station powersave, so don't do anything about
+ * uAPSD and PS-Poll frames (the latter shouldn't even be passed
+ * up to mac80211, since the device handles them).
+ */
+ if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return RX_CONTINUE;
+
+ /*
+ * Don't do anything if the station isn't already asleep. In
+ * the uAPSD case, the station will probably be marked asleep,
+ * in the PS-Poll case the station must be confused ...
+ */
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
+ return RX_CONTINUE;
+
+ if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_poll_response(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
+ }
+
+ /* Free PS Poll skb here instead of returning RX_DROP that would
+ * count as a dropped frame. */
+ dev_kfree_skb(rx->skb);
+
+ return RX_QUEUED;
+ } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
+ ieee80211_has_pm(hdr->frame_control) &&
+ (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))) {
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ ac = ieee802_1d_to_ac[tid & 7];
+
+ /*
+ * If this AC is not trigger-enabled do nothing.
+ *
+ * NB: This could/should check a separate bitmap of trigger-
+ * enabled queues, but for now we only implement uAPSD w/o
+ * TSPEC changes to the ACs, so they're always the same.
+ */
+ if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
+ return RX_CONTINUE;
+
+ /* if we are in a service period, do nothing */
+ if (test_sta_flag(rx->sta, WLAN_STA_SP))
+ return RX_CONTINUE;
+
+ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
+ ieee80211_sta_ps_deliver_uapsd(rx->sta);
+ else
+ set_sta_flag(rx->sta, WLAN_STA_UAPSD);
+ }
+
+ return RX_CONTINUE;
+}
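
For the QoS-data trigger path above, the TID from the QoS control field is mapped to an access category through the 802.1d table and checked against the station's negotiated uAPSD bitmap. A userspace sketch of that test, using the same values as mac80211's ieee802_1d_to_ac table (AC numbering: 0=VO, 1=VI, 2=BE, 3=BK):

#include <stdbool.h>
#include <stdint.h>

static const int ieee802_1d_to_ac_sketch[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static bool is_trigger_enabled(uint8_t tid, uint8_t uapsd_queues)
{
        int ac = ieee802_1d_to_ac_sketch[tid & 7];

        return uapsd_queues & (1u << ac);
}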
+
+static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
struct sta_info *sta = rx->sta;
@@ -1216,7 +1296,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
- if (test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* Ignore doze->wake transitions that are
* indicated by non-data frames, the standard
@@ -1469,33 +1549,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
}
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- __le16 fc = ((struct ieee80211_hdr *)rx->skb->data)->frame_control;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
-
- if (likely(!rx->sta || !ieee80211_is_pspoll(fc) ||
- !(status->rx_flags & IEEE80211_RX_RA_MATCH)))
- return RX_CONTINUE;
-
- if ((sdata->vif.type != NL80211_IFTYPE_AP) &&
- (sdata->vif.type != NL80211_IFTYPE_AP_VLAN))
- return RX_DROP_UNUSABLE;
-
- if (!test_sta_flags(rx->sta, WLAN_STA_PS_DRIVER))
- ieee80211_sta_ps_deliver_poll_response(rx->sta);
- else
- set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
-
- /* Free PS Poll skb here instead of returning RX_DROP that would
- * count as an dropped frame. */
- dev_kfree_skb(rx->skb);
-
- return RX_QUEUED;
-}
-
-static ieee80211_rx_result debug_noinline
ieee80211_rx_h_remove_qos_control(struct ieee80211_rx_data *rx)
{
u8 *data = rx->skb->data;
@@ -1518,7 +1571,7 @@ static int
ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
{
if (unlikely(!rx->sta ||
- !test_sta_flags(rx->sta, WLAN_STA_AUTHORIZED)))
+ !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
return -EACCES;
return 0;
@@ -1561,7 +1614,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
- if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
+ if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
if (unlikely(!ieee80211_has_protected(fc) &&
ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
rx->key)) {
@@ -1827,6 +1880,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ /* frame is in RMC, don't forward */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata))
+ return RX_DROP_MONITOR;
+
if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
@@ -1834,6 +1893,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* illegal frame */
return RX_DROP_MONITOR;
+ if (ieee80211_queue_stopped(&local->hw, skb_get_queue_mapping(skb))) {
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ dropped_frames_congestion);
+ return RX_DROP_MONITOR;
+ }
+
if (mesh_hdr->flags & MESH_FLAGS_AE) {
struct mesh_path *mppath;
char *proxied_addr;
@@ -1889,13 +1954,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- skb_set_queue_mapping(skb,
- ieee80211_select_queue(rx->sdata, fwd_skb));
- ieee80211_set_qos_hdr(local, skb);
- if (is_multicast_ether_addr(fwd_hdr->addr1))
+ if (is_multicast_ether_addr(fwd_hdr->addr1)) {
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
- else {
+ skb_set_queue_mapping(fwd_skb,
+ ieee80211_select_queue(sdata, fwd_skb));
+ ieee80211_set_qos_hdr(sdata, fwd_skb);
+ } else {
int err;
/*
* Save TA to addr1 to send TA a path error if a
@@ -2220,12 +2285,29 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
goto handled;
}
break;
+ case WLAN_CATEGORY_SELF_PROTECTED:
+ switch (mgmt->u.action.u.self_prot.action_code) {
+ case WLAN_SP_MESH_PEERING_OPEN:
+ case WLAN_SP_MESH_PEERING_CLOSE:
+ case WLAN_SP_MESH_PEERING_CONFIRM:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
+ /* userspace handles this frame */
+ break;
+ goto queue;
+ case WLAN_SP_MGK_INFORM:
+ case WLAN_SP_MGK_ACK:
+ if (!ieee80211_vif_is_mesh(&sdata->vif))
+ goto invalid;
+ break;
+ }
+ break;
case WLAN_CATEGORY_MESH_ACTION:
if (!ieee80211_vif_is_mesh(&sdata->vif))
break;
- goto queue;
- case WLAN_CATEGORY_MESH_PATH_SEL:
- if (!mesh_path_sel_is_hwmp(sdata))
+ if (mesh_action_is_path_sel(mgmt) &&
+ (!mesh_path_sel_is_hwmp(sdata)))
break;
goto queue;
}
@@ -2534,17 +2616,17 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
+ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
CALL_RXH(ieee80211_rx_h_sta_process)
CALL_RXH(ieee80211_rx_h_defragment)
- CALL_RXH(ieee80211_rx_h_ps_poll)
CALL_RXH(ieee80211_rx_h_michael_mic_verify)
/* must be after MMIC verify so header is counted in MPDU mic */
- CALL_RXH(ieee80211_rx_h_remove_qos_control)
- CALL_RXH(ieee80211_rx_h_amsdu)
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&rx->sdata->vif))
CALL_RXH(ieee80211_rx_h_mesh_fwding);
#endif
+ CALL_RXH(ieee80211_rx_h_remove_qos_control)
+ CALL_RXH(ieee80211_rx_h_amsdu)
CALL_RXH(ieee80211_rx_h_data)
CALL_RXH(ieee80211_rx_h_ctrl);
CALL_RXH(ieee80211_rx_h_mgmt_check)
@@ -2686,7 +2768,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
} else if (!ieee80211_bssid_match(bssid,
sdata->vif.addr)) {
if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ !ieee80211_is_beacon(hdr->frame_control) &&
+ !(ieee80211_is_action(hdr->frame_control) &&
+ sdata->vif.p2p))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2791,7 +2875,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
- for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ for_each_sta_info_rx(local, hdr->addr2, sta, tmp) {
if (!prev_sta) {
prev_sta = sta;
continue;
@@ -2835,7 +2919,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
continue;
}
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
ieee80211_prepare_and_rx_handle(&rx, skb, false);
@@ -2843,7 +2927,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
if (prev) {
- rx.sta = sta_info_get_bss(prev, hdr->addr2);
+ rx.sta = sta_info_get_bss_rx(prev, hdr->addr2);
rx.sdata = prev;
if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 6f09eca..105436d 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,9 +14,10 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
#include <net/sch_generic.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
@@ -254,6 +255,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
req->ie, req->ie_len, band,
req->rates[band], 0);
local->hw_scan_req->ie_len = ielen;
+ local->hw_scan_req->no_cck = req->no_cck;
return true;
}
@@ -660,7 +662,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->scan_req->ssids[i].ssid,
local->scan_req->ssids[i].ssid_len,
local->scan_req->ie, local->scan_req->ie_len,
- local->scan_req->rates[band], false);
+ local->scan_req->rates[band], false,
+ local->scan_req->no_cck);
/*
* After sending probe requests, wait for probe responses
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 7733f66..578eea3 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -32,12 +32,8 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
skb = dev_alloc_skb(sizeof(*msr_report) + local->hw.extra_tx_headroom +
sizeof(struct ieee80211_msrment_ie));
-
- if (!skb) {
- printk(KERN_ERR "%s: failed to allocate buffer for "
- "measurement report frame\n", sdata->name);
+ if (!skb)
return;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 21070e9..ce962d2 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -24,6 +24,7 @@
#include "sta_info.h"
#include "debugfs_sta.h"
#include "mesh.h"
+#include "wme.h"
/**
* DOC: STA information lifetime rules
@@ -72,7 +73,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
if (!s)
return -ENOENT;
if (s == sta) {
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)],
+ RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)],
s->hnext);
return 0;
}
@@ -82,7 +83,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
s = rcu_dereference_protected(s->hnext,
lockdep_is_held(&local->sta_lock));
if (rcu_access_pointer(s->hnext)) {
- rcu_assign_pointer(s->hnext, sta->hnext);
+ RCU_INIT_POINTER(s->hnext, sta->hnext);
return 0;
}
@@ -100,6 +101,27 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
lockdep_is_held(&local->sta_lock) ||
lockdep_is_held(&local->sta_mtx));
while (sta) {
+ if (sta->sdata == sdata && !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/* get a station info entry even if it is a dummy station */
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
if (sta->sdata == sdata &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
@@ -126,6 +148,32 @@ struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
while (sta) {
if ((sta->sdata == sdata ||
(sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
+ !sta->dummy &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference_check(sta->hnext,
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ }
+ return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans (including dummy stations)
+ */
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)],
+ lockdep_is_held(&local->sta_lock) ||
+ lockdep_is_held(&local->sta_mtx));
+ while (sta) {
+ if ((sta->sdata == sdata ||
+ (sta->sdata->bss && sta->sdata->bss == sdata->bss)) &&
memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference_check(sta->hnext,
@@ -184,7 +232,7 @@ static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
{
sta->hnext = local->sta_hash[STA_HASH(sta->sta.addr)];
- rcu_assign_pointer(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
+ RCU_INIT_POINTER(local->sta_hash[STA_HASH(sta->sta.addr)], sta);
}
static void sta_unblock(struct work_struct *wk)
@@ -196,13 +244,22 @@ static void sta_unblock(struct work_struct *wk)
if (sta->dead)
return;
- if (!test_sta_flags(sta, WLAN_STA_PS_STA))
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA))
ieee80211_sta_ps_deliver_wakeup(sta);
- else if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
ieee80211_sta_ps_deliver_poll_response(sta);
+ local_bh_enable();
+ } else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD)) {
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
+ local_bh_disable();
+ ieee80211_sta_ps_deliver_uapsd(sta);
+ local_bh_enable();
} else
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
}
static int sta_prepare_rate_control(struct ieee80211_local *local,
@@ -235,7 +292,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return NULL;
spin_lock_init(&sta->lock);
- spin_lock_init(&sta->flaglock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -262,8 +318,10 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
*/
sta->timer_to_tid[i] = i;
}
- skb_queue_head_init(&sta->ps_tx_buf);
- skb_queue_head_init(&sta->tx_filtered);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ skb_queue_head_init(&sta->ps_tx_buf[i]);
+ skb_queue_head_init(&sta->tx_filtered[i]);
+ }
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
@@ -280,7 +338,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-static int sta_info_finish_insert(struct sta_info *sta, bool async)
+static int sta_info_finish_insert(struct sta_info *sta,
+ bool async, bool dummy_reinsert)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -290,50 +349,58 @@ static int sta_info_finish_insert(struct sta_info *sta, bool async)
lockdep_assert_held(&local->sta_mtx);
- /* notify driver */
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- err = drv_sta_add(local, sdata, &sta->sta);
- if (err) {
- if (!async)
- return err;
- printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
- " - keeping it anyway.\n",
- sdata->name, sta->sta.addr, err);
- } else {
- sta->uploaded = true;
+ if (!sta->dummy || dummy_reinsert) {
+ /* notify driver */
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (!async)
+ return err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to "
+ "driver (%d) - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else {
+ sta->uploaded = true;
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (async)
- wiphy_debug(local->hw.wiphy,
- "Finished adding IBSS STA %pM\n",
- sta->sta.addr);
+ if (async)
+ wiphy_debug(local->hw.wiphy,
+ "Finished adding IBSS STA %pM\n",
+ sta->sta.addr);
#endif
+ }
+
+ sdata = sta->sdata;
}
- sdata = sta->sdata;
+ if (!dummy_reinsert) {
+ if (!async) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
- if (!async) {
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
+ /* make the station visible */
+ spin_lock_irqsave(&local->sta_lock, flags);
+ sta_info_hash_add(local, sta);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ }
- /* make the station visible */
- spin_lock_irqsave(&local->sta_lock, flags);
- sta_info_hash_add(local, sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ list_add(&sta->list, &local->sta_list);
+ } else {
+ sta->dummy = false;
}
- list_add(&sta->list, &local->sta_list);
-
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
-
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ if (!sta->dummy) {
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+ }
return 0;
}
@@ -350,7 +417,7 @@ static void sta_info_finish_pending(struct ieee80211_local *local)
list_del(&sta->list);
spin_unlock_irqrestore(&local->sta_lock, flags);
- sta_info_finish_insert(sta, true);
+ sta_info_finish_insert(sta, true, false);
spin_lock_irqsave(&local->sta_lock, flags);
}
@@ -367,106 +434,117 @@ static void sta_info_finish_work(struct work_struct *work)
mutex_unlock(&local->sta_mtx);
}
-int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+static int sta_info_insert_check(struct sta_info *sta)
{
- struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- unsigned long flags;
- int err = 0;
/*
* Can't be a WARN_ON because it can be triggered through a race:
* something inserts a STA (on one CPU) without holding the RTNL
* and another CPU turns off the net device.
*/
- if (unlikely(!ieee80211_sdata_running(sdata))) {
- err = -ENETDOWN;
- rcu_read_lock();
- goto out_free;
- }
+ if (unlikely(!ieee80211_sdata_running(sdata)))
+ return -ENETDOWN;
if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
- is_multicast_ether_addr(sta->sta.addr))) {
- err = -EINVAL;
+ is_multicast_ether_addr(sta->sta.addr)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sta_info_insert_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ /* check if STA exists already */
+ if (sta_info_get_bss_rx(sdata, sta->sta.addr)) {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
rcu_read_lock();
- goto out_free;
+ return -EEXIST;
}
- /*
- * In ad-hoc mode, we sometimes need to insert stations
- * from tasklet context from the RX path. To avoid races,
- * always do so in that case -- see the comment below.
- */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
- }
-
- local->num_sta++;
- local->sta_generation++;
- smp_mb();
- sta_info_hash_add(local, sta);
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+ sta_info_hash_add(local, sta);
- list_add_tail(&sta->list, &local->sta_pending_list);
+ list_add_tail(&sta->list, &local->sta_pending_list);
- rcu_read_lock();
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
+ spin_unlock_irqrestore(&local->sta_lock, flags);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
- sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Added IBSS STA %pM\n",
+ sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
- ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+ ieee80211_queue_work(&local->hw, &local->sta_finish_work);
- return 0;
- }
+ return 0;
+}
+
+/*
+ * Must be called with sta_mtx held; on return this function has
+ * replaced the mutex lock with an RCU read lock.
+ */
+static int sta_info_insert_non_ibss(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+ struct sta_info *exist_sta;
+ bool dummy_reinsert = false;
+ int err = 0;
+
+ lockdep_assert_held(&local->sta_mtx);
/*
* On first glance, this will look racy, because the code
- * below this point, which inserts a station with sleeping,
+ * in this function, which inserts a station with sleeping,
* unlocks the sta_lock between checking existence in the
* hash table and inserting into it.
*
* However, it is not racy against itself because it keeps
- * the mutex locked. It still seems to race against the
- * above code that atomically inserts the station... That,
- * however, is not true because the above code can only
- * be invoked for IBSS interfaces, and the below code will
- * not be -- and the two do not race against each other as
- * the hash table also keys off the interface.
+ * the mutex locked.
*/
- might_sleep();
-
- mutex_lock(&local->sta_mtx);
-
spin_lock_irqsave(&local->sta_lock, flags);
- /* check if STA exists already */
- if (sta_info_get_bss(sdata, sta->sta.addr)) {
- spin_unlock_irqrestore(&local->sta_lock, flags);
- mutex_unlock(&local->sta_mtx);
- rcu_read_lock();
- err = -EEXIST;
- goto out_free;
+ /*
+ * Check whether the STA already exists. The only scenario we
+ * accept is a second call to sta_info_insert_non_ibss with a
+ * dummy station entry that was inserted earlier; in that case,
+ * assume the dummy station flag should be removed.
+ */
+ exist_sta = sta_info_get_bss_rx(sdata, sta->sta.addr);
+ if (exist_sta) {
+ if (exist_sta == sta && sta->dummy) {
+ dummy_reinsert = true;
+ } else {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ return -EEXIST;
+ }
}
spin_unlock_irqrestore(&local->sta_lock, flags);
- err = sta_info_finish_insert(sta, false);
+ err = sta_info_finish_insert(sta, false, dummy_reinsert);
if (err) {
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
- goto out_free;
+ return err;
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
+ wiphy_debug(local->hw.wiphy, "Inserted %sSTA %pM\n",
+ sta->dummy ? "dummy " : "", sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
/* move reference to rcu-protected */
@@ -477,6 +555,51 @@ int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
mesh_accept_plinks_update(sdata);
return 0;
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ rcu_read_lock();
+ goto out_free;
+ }
+
+ /*
+ * In ad-hoc mode, we sometimes need to insert stations
+ * from tasklet context from the RX path. To avoid races,
+ * always do so in that case -- see the comment below.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ err = sta_info_insert_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
+ }
+
+ /*
+ * It might seem that the function called below races against
+ * the function call above that atomically inserts the station... That,
+ * however, is not true because the above code can only
+ * be invoked for IBSS interfaces, and the below code will
+ * not be -- and the two do not race against each other as
+ * the hash table also keys off the interface.
+ */
+
+ might_sleep();
+
+ mutex_lock(&local->sta_mtx);
+
+ err = sta_info_insert_non_ibss(sta);
+ if (err)
+ goto out_free;
+
+ return 0;
out_free:
BUG_ON(!err);
__sta_info_free(local, sta);
@@ -492,6 +615,25 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
+/* Caller must hold sta->local->sta_mtx */
+int sta_info_reinsert(struct sta_info *sta)
+{
+ struct ieee80211_local *local = sta->local;
+ int err = 0;
+
+ err = sta_info_insert_check(sta);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ return err;
+ }
+
+ might_sleep();
+
+ err = sta_info_insert_non_ibss(sta);
+ rcu_read_unlock();
+ return err;
+}
+
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -510,64 +652,93 @@ static inline void __bss_tim_clear(struct ieee80211_if_ap *bss, u16 aid)
bss->tim[aid / 8] &= ~(1 << (aid % 8));
}
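
For reference, the surviving __bss_tim_set()/__bss_tim_clear() helpers treat the TIM as a plain byte array indexed by association ID: octet aid / 8, bit aid % 8. A stand-alone model of that arithmetic (user-space sketch; tim_set()/tim_clear() are hypothetical names, and the 251-octet size assumes the maximum of 2007 AIDs):

#include <stdint.h>
#include <stdio.h>

static void tim_set(uint8_t *tim, unsigned int aid)
{
	tim[aid / 8] |= 1 << (aid % 8);		/* octet aid/8, bit aid%8 */
}

static void tim_clear(uint8_t *tim, unsigned int aid)
{
	tim[aid / 8] &= ~(1 << (aid % 8));
}

int main(void)
{
	uint8_t tim[251] = { 0 };	/* 2008 bits cover AIDs 1..2007 */

	tim_set(tim, 42);		/* 42 / 8 = 5, 42 % 8 = 2 */
	printf("octet 5 = 0x%02x\n", tim[5]);	/* prints 0x04 */
	tim_clear(tim, 42);
	return 0;
}
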
-static void __sta_info_set_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
+static unsigned long ieee80211_tids_for_ac(int ac)
{
- BUG_ON(!bss);
-
- __bss_tim_set(bss, sta->sta.aid);
-
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, true);
- sta->local->tim_in_locked_section = false;
+ /* If we ever support TIDs > 7, this obviously needs to be adjusted */
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ return BIT(6) | BIT(7);
+ case IEEE80211_AC_VI:
+ return BIT(4) | BIT(5);
+ case IEEE80211_AC_BE:
+ return BIT(0) | BIT(3);
+ case IEEE80211_AC_BK:
+ return BIT(1) | BIT(2);
+ default:
+ WARN_ON(1);
+ return 0;
}
}
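
The new ieee80211_tids_for_ac() encodes the standard 802.1D grouping: TIDs 6-7 map to voice, 4-5 to video, 0 and 3 to best effort, and 1-2 to background. A stand-alone mirror of the table (assuming the mac80211 ordering AC_VO=0, AC_VI=1, AC_BE=2, AC_BK=3) that also checks the four bitmaps partition TIDs 0-7 exactly:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

static unsigned long tids_for_ac(int ac)
{
	switch (ac) {
	case AC_VO: return (1 << 6) | (1 << 7);
	case AC_VI: return (1 << 4) | (1 << 5);
	case AC_BE: return (1 << 0) | (1 << 3);
	case AC_BK: return (1 << 1) | (1 << 2);
	}
	return 0;
}

int main(void)
{
	unsigned long all = 0;
	int ac;

	for (ac = 0; ac < NUM_ACS; ac++)
		all |= tids_for_ac(ac);
	printf("covered TIDs: 0x%02lx\n", all);	/* prints 0xff */
	return 0;
}
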
-void sta_info_set_tim_bit(struct sta_info *sta)
+void sta_info_recalc_tim(struct sta_info *sta)
{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_if_ap *bss = sta->sdata->bss;
unsigned long flags;
+ bool indicate_tim = false;
+ u8 ignore_for_tim = sta->sta.uapsd_queues;
+ int ac;
- BUG_ON(!sta->sdata->bss);
+ if (WARN_ON_ONCE(!sta->sdata->bss))
+ return;
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_set_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
-}
+ /* No need to do anything if the driver does all */
+ if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
+ return;
-static void __sta_info_clear_tim_bit(struct ieee80211_if_ap *bss,
- struct sta_info *sta)
-{
- BUG_ON(!bss);
+ if (sta->dead)
+ goto done;
+
+ /*
+ * If all ACs are delivery-enabled then we should build
+ * the TIM bit for all ACs anyway; if only some are then
+ * we ignore those and build the TIM bit using only the
+ * non-enabled ones.
+ */
+ if (ignore_for_tim == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_tim = 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
- __bss_tim_clear(bss, sta->sta.aid);
+ if (ignore_for_tim & BIT(ac))
+ continue;
+
+ indicate_tim |= !skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac]);
+ if (indicate_tim)
+ break;
- if (sta->local->ops->set_tim) {
- sta->local->tim_in_locked_section = true;
- drv_set_tim(sta->local, &sta->sta, false);
- sta->local->tim_in_locked_section = false;
+ tids = ieee80211_tids_for_ac(ac);
+
+ indicate_tim |=
+ sta->driver_buffered_tids & tids;
}
-}
-void sta_info_clear_tim_bit(struct sta_info *sta)
-{
- unsigned long flags;
+ done:
+ spin_lock_irqsave(&local->sta_lock, flags);
- BUG_ON(!sta->sdata->bss);
+ if (indicate_tim)
+ __bss_tim_set(bss, sta->sta.aid);
+ else
+ __bss_tim_clear(bss, sta->sta.aid);
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- __sta_info_clear_tim_bit(sta->sdata->bss, sta);
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+ if (local->ops->set_tim) {
+ local->tim_in_locked_section = true;
+ drv_set_tim(local, &sta->sta, indicate_tim);
+ local->tim_in_locked_section = false;
+ }
+
+ spin_unlock_irqrestore(&local->sta_lock, flags);
}
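
The ignore_for_tim handling above implements the uAPSD rule that delivery-enabled ACs do not feed the TIM bit -- unless every AC is delivery-enabled, in which case none are ignored. That rule in isolation (ps_ignore_mask() is a hypothetical name; user-space sketch):

#include <stdio.h>

static unsigned char ps_ignore_mask(unsigned char uapsd_queues,
				    unsigned int num_acs)
{
	if (uapsd_queues == (1u << num_acs) - 1)
		return 0;	/* all delivery-enabled: consider them all */
	return uapsd_queues;
}

int main(void)
{
	/* partially enabled: ignore those ACs; fully enabled: ignore none */
	printf("0x%x 0x%x\n", ps_ignore_mask(0x3, 4), ps_ignore_mask(0xf, 4));
	return 0;
}

The same rule reappears below in ieee80211_sta_ps_deliver_poll_response() for choosing which ACs may answer a PS-Poll.
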
-static int sta_info_buffer_expired(struct sta_info *sta,
- struct sk_buff *skb)
+static bool sta_info_buffer_expired(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
int timeout;
if (!skb)
- return 0;
+ return false;
info = IEEE80211_SKB_CB(skb);
@@ -581,24 +752,59 @@ static int sta_info_buffer_expired(struct sta_info *sta,
}
-static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
- struct sta_info *sta)
+static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
+ struct sta_info *sta, int ac)
{
unsigned long flags;
struct sk_buff *skb;
- if (skb_queue_empty(&sta->ps_tx_buf))
- return false;
+ /*
+ * First check for frames that should expire on the filtered
+ * queue. Frames here were rejected by the driver and are on
+ * a separate queue to avoid reordering with normal PS-buffered
+ * frames. They also aren't accounted for right now in the
+ * total_ps_buffered counter.
+ */
+ for (;;) {
+ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags);
+ skb = skb_peek(&sta->tx_filtered[ac]);
+ if (sta_info_buffer_expired(sta, skb))
+ skb = __skb_dequeue(&sta->tx_filtered[ac]);
+ else
+ skb = NULL;
+ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags);
+ /*
+ * Frames are queued in order, so if this one
+ * hasn't expired yet we can stop testing. If
+ * we actually reached the end of the queue we
+ * also need to stop, of course.
+ */
+ if (!skb)
+ break;
+ dev_kfree_skb(skb);
+ }
+
+ /*
+ * Now also check the normal PS-buffered queue, this will
+ * only find something if the filtered queue was emptied
+ * since the filtered frames are all before the normal PS
+ * buffered frames.
+ */
for (;;) {
- spin_lock_irqsave(&sta->ps_tx_buf.lock, flags);
- skb = skb_peek(&sta->ps_tx_buf);
+ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags);
+ skb = skb_peek(&sta->ps_tx_buf[ac]);
if (sta_info_buffer_expired(sta, skb))
- skb = __skb_dequeue(&sta->ps_tx_buf);
+ skb = __skb_dequeue(&sta->ps_tx_buf[ac]);
else
skb = NULL;
- spin_unlock_irqrestore(&sta->ps_tx_buf.lock, flags);
+ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags);
+ /*
+ * frames are queued in order, so if this one
+ * hasn't expired yet (or we reached the end of
+ * the queue) we can stop testing
+ */
if (!skb)
break;
@@ -608,22 +814,47 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
sta->sta.addr);
#endif
dev_kfree_skb(skb);
-
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
- sta_info_clear_tim_bit(sta);
}
- return true;
+ /*
+ * Finally, recalculate the TIM bit for this station -- it might
+ * now be clear because the station was too slow to retrieve its
+ * frames.
+ */
+ sta_info_recalc_tim(sta);
+
+ /*
+ * Return whether there are any frames still buffered, this is
+ * used to check whether the cleanup timer still needs to run,
+ * if there are no frames we don't need to rearm the timer.
+ */
+ return !(skb_queue_empty(&sta->ps_tx_buf[ac]) &&
+ skb_queue_empty(&sta->tx_filtered[ac]));
+}
+
+static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
+ struct sta_info *sta)
+{
+ bool have_buffered = false;
+ int ac;
+
+ /* This is only necessary for stations on BSS interfaces */
+ if (!sta->sdata->bss)
+ return false;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ have_buffered |=
+ sta_info_cleanup_expire_buffered_ac(local, sta, ac);
+
+ return have_buffered;
}
static int __must_check __sta_info_destroy(struct sta_info *sta)
{
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
- struct sk_buff *skb;
unsigned long flags;
- int ret, i;
+ int ret, i, ac;
might_sleep();
@@ -639,7 +870,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
* sessions -- block that to make sure the tear-down
* will be sufficient.
*/
- set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, true);
spin_lock_irqsave(&local->sta_lock, flags);
@@ -660,19 +891,22 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
sta->dead = true;
- if (test_and_clear_sta_flags(sta,
- WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
BUG_ON(!sdata->bss);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+
atomic_dec(&sdata->bss->num_sta_ps);
- sta_info_clear_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
local->num_sta--;
local->sta_generation++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+ RCU_INIT_POINTER(sdata->u.vlan.sta, NULL);
if (sta->uploaded) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -691,6 +925,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
*/
synchronize_rcu();
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ local->total_ps_buffered -= skb_queue_len(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->ps_tx_buf[ac]);
+ __skb_queue_purge(&sta->tx_filtered[ac]);
+ }
+
#ifdef CONFIG_MAC80211_MESH
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
@@ -713,14 +953,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
}
#endif
- while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
- local->total_ps_buffered--;
- dev_kfree_skb_any(skb);
- }
-
- while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
- dev_kfree_skb_any(skb);
-
__sta_info_free(local, sta);
return 0;
@@ -732,7 +964,7 @@ int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get(sdata, addr);
+ sta = sta_info_get_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -746,7 +978,7 @@ int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
int ret;
mutex_lock(&sdata->local->sta_mtx);
- sta = sta_info_get_bss(sdata, addr);
+ sta = sta_info_get_bss_rx(sdata, addr);
ret = __sta_info_destroy(sta);
mutex_unlock(&sdata->local->sta_mtx);
@@ -886,7 +1118,8 @@ static void clear_sta_ps_flags(void *_sta)
{
struct sta_info *sta = _sta;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER | WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
}
/* powersave support code */
@@ -894,88 +1127,341 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- int sent, buffered;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
+
+ clear_sta_flag(sta, WLAN_STA_SP);
+
+ BUILD_BUG_ON(BITS_TO_LONGS(STA_TID_NUM) > 1);
+ sta->driver_buffered_tids = 0;
- clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
- if (!skb_queue_empty(&sta->ps_tx_buf))
- sta_info_clear_tim_bit(sta);
+ skb_queue_head_init(&pending);
/* Send all buffered frames to the station */
- sent = ieee80211_add_pending_skbs(local, &sta->tx_filtered);
- buffered = ieee80211_add_pending_skbs_fn(local, &sta->ps_tx_buf,
- clear_sta_ps_flags, sta);
- sent += buffered;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ int count = skb_queue_len(&pending), tmp;
+
+ skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending);
+ tmp = skb_queue_len(&pending);
+ filtered += tmp - count;
+ count = tmp;
+
+ skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending);
+ tmp = skb_queue_len(&pending);
+ buffered += tmp - count;
+ }
+
+ ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+
local->total_ps_buffered -= buffered;
+ sta_info_recalc_tim(sta);
+
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
"since STA not sleeping anymore\n", sdata->name,
- sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
+ sta->sta.addr, sta->sta.aid, filtered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
-void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, int tid,
+ enum ieee80211_frame_release_type reason)
{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_qos_hdr *nullfunc;
struct sk_buff *skb;
- int no_pending_pkts;
+ int size = sizeof(*nullfunc);
+ __le16 fc;
+ bool qos = test_sta_flag(sta, WLAN_STA_WME);
+ struct ieee80211_tx_info *info;
- skb = skb_dequeue(&sta->tx_filtered);
- if (!skb) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb)
- local->total_ps_buffered--;
+ if (qos) {
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ } else {
+ size -= 2;
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_FROMDS);
+ }
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + size);
+ if (!skb)
+ return;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (void *) skb_put(skb, size);
+ nullfunc->frame_control = fc;
+ nullfunc->duration_id = 0;
+ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+
+ skb->priority = tid;
+ skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
+ if (qos) {
+ nullfunc->qos_ctrl = cpu_to_le16(tid);
+
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+ nullfunc->qos_ctrl |=
+ cpu_to_le16(IEEE80211_QOS_CTL_EOSP);
}
- no_pending_pkts = skb_queue_empty(&sta->tx_filtered) &&
- skb_queue_empty(&sta->ps_tx_buf);
- if (skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) skb->data;
+ info = IEEE80211_SKB_CB(skb);
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain in PS mode after this frame
+ * exchange. Also set EOSP to indicate this packet
+ * ends the poll/service period.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE |
+ IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ drv_allow_buffered_frames(local, sta, BIT(tid), 1, reason, false);
+
+ ieee80211_xmit(sdata, skb);
+}
+
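
The size arithmetic in ieee80211_send_null_response() relies on a QoS Null frame being the 24-byte three-address header plus the 2-byte QoS Control field, with plain Null Data simply dropping the trailing field -- hence "size -= 2". A stand-alone size check mirroring the kernel's packed struct ieee80211_qos_hdr (the local qos_hdr layout is an assumption):

#include <stdint.h>
#include <stdio.h>

struct qos_hdr {
	uint16_t frame_control;
	uint16_t duration_id;
	uint8_t  addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctrl;
	uint16_t qos_ctrl;
} __attribute__((packed));

int main(void)
{
	printf("QoS Null: %zu bytes, Null: %zu bytes\n",
	       sizeof(struct qos_hdr), sizeof(struct qos_hdr) - 2);
	/* prints: QoS Null: 26 bytes, Null: 24 bytes */
	return 0;
}
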
+static void
+ieee80211_sta_ps_deliver_response(struct sta_info *sta,
+ int n_frames, u8 ignored_acs,
+ enum ieee80211_frame_release_type reason)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ bool found = false;
+ bool more_data = false;
+ int ac;
+ unsigned long driver_release_tids = 0;
+ struct sk_buff_head frames;
+
+ /* Service or PS-Poll period starts */
+ set_sta_flag(sta, WLAN_STA_SP);
+
+ __skb_queue_head_init(&frames);
+
+ /*
+ * Get response frame(s) and more data bit for it.
+ */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ unsigned long tids;
+
+ if (ignored_acs & BIT(ac))
+ continue;
+
+ tids = ieee80211_tids_for_ac(ac);
+
+ if (!found) {
+ driver_release_tids = sta->driver_buffered_tids & tids;
+ if (driver_release_tids) {
+ found = true;
+ } else {
+ struct sk_buff *skb;
+
+ while (n_frames > 0) {
+ skb = skb_dequeue(&sta->tx_filtered[ac]);
+ if (!skb) {
+ skb = skb_dequeue(
+ &sta->ps_tx_buf[ac]);
+ if (skb)
+ local->total_ps_buffered--;
+ }
+ if (!skb)
+ break;
+ n_frames--;
+ found = true;
+ __skb_queue_tail(&frames, skb);
+ }
+ }
+
+ /*
+ * If the driver has data on more than one TID then
+ * certainly there's more data if we release just a
+ * single frame now (from a single TID).
+ */
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL &&
+ hweight16(driver_release_tids) > 1) {
+ more_data = true;
+ driver_release_tids =
+ BIT(ffs(driver_release_tids) - 1);
+ break;
+ }
+ }
+
+ if (!skb_queue_empty(&sta->tx_filtered[ac]) ||
+ !skb_queue_empty(&sta->ps_tx_buf[ac])) {
+ more_data = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ int tid;
/*
- * Tell TX path to send this frame even though the STA may
- * still remain is PS mode after this frame exchange.
+ * For PS-Poll, this can only happen due to a race condition
+ * when we set the TIM bit and the station notices it, but
+ * before it can poll for the frame we expire it.
+ *
+ * For uAPSD, this is said in the standard (11.2.1.5 h):
+ * At each unscheduled SP for a non-AP STA, the AP shall
+ * attempt to transmit at least one MSDU or MMPDU, but no
+ * more than the value specified in the Max SP Length field
+ * in the QoS Capability element from delivery-enabled ACs,
+ * that are destined for the non-AP STA.
+ *
+ * Since we have no other MSDU/MMPDU, transmit a QoS null frame.
*/
- info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ /* This will evaluate to 1, 3, 5 or 7. */
+ tid = 7 - ((ffs(~ignored_acs) - 1) << 1);
- /* Use MoreData flag to indicate whether there are more
- * buffered frames for this STA */
- if (no_pending_pkts)
- hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
- else
- hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ ieee80211_send_null_response(sdata, sta, tid, reason);
+ return;
+ }
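
The TID formula above works because ffs() returns the 1-based index of the lowest set bit, and lower AC numbers mean higher priority: the highest-priority non-ignored AC 0..3 yields TID 7, 5, 3 or 1, in each case a TID belonging to that AC per ieee80211_tids_for_ac(). A stand-alone check of the arithmetic (assumes at least one AC is not ignored, as the callers guarantee):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	int ignored;

	for (ignored = 0; ignored < 0xf; ignored++) {
		int ac = ffs(~ignored) - 1;	/* lowest non-ignored AC */
		int tid = 7 - (ac << 1);

		printf("ignored=0x%x -> ac=%d tid=%d\n", ignored, ac, tid);
	}
	return 0;
}
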
+
+ if (!driver_release_tids) {
+ struct sk_buff_head pending;
+ struct sk_buff *skb;
+ int num = 0;
+ u16 tids = 0;
+
+ skb_queue_head_init(&pending);
+
+ while ((skb = __skb_dequeue(&frames))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qoshdr = NULL;
+
+ num++;
+
+ /*
+ * Tell TX path to send this frame even though the
+ * STA may still remain in PS mode after this frame
+ * exchange.
+ */
+ info->flags |= IEEE80211_TX_CTL_POLL_RESPONSE;
+
+ /*
+ * Use MoreData flag to indicate whether there are
+ * more buffered frames for this STA
+ */
+ if (!more_data)
+ hdr->frame_control &=
+ cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
+ else
+ hdr->frame_control |=
+ cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
+ qoshdr = ieee80211_get_qos_ctl(hdr);
+
+ /* set EOSP for the frame */
+ if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
+ qoshdr && skb_queue_empty(&frames))
+ *qoshdr |= IEEE80211_QOS_CTL_EOSP;
+
+ info->flags |= IEEE80211_TX_STATUS_EOSP |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+ if (qoshdr)
+ tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
+ else
+ tids |= BIT(0);
+
+ __skb_queue_tail(&pending, skb);
+ }
- ieee80211_add_pending_skb(local, skb);
+ drv_allow_buffered_frames(local, sta, tids, num,
+ reason, more_data);
- if (no_pending_pkts)
- sta_info_clear_tim_bit(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
+ ieee80211_add_pending_skbs(local, &pending);
+
+ sta_info_recalc_tim(sta);
} else {
/*
- * FIXME: This can be the result of a race condition between
- * us expiring a frame and the station polling for it.
- * Should we send it a null-func frame indicating we
- * have nothing buffered for it?
+ * We need to release a frame that is buffered somewhere in the
+ * driver ... it'll have to handle that.
+ * Note that, as per the comment above, it'll also have to see
+ * if there is more than just one frame on the specific TID that
+ * we're releasing from, and it needs to set the more-data bit
+ * accordingly if we tell it that there's no more data. If we do
+ * tell it there's more data, then of course the more-data bit
+ * needs to be set anyway.
+ */
+ drv_release_buffered_frames(local, sta, driver_release_tids,
+ n_frames, reason, more_data);
+
+ /*
+ * Note that we don't recalculate the TIM bit here as it would
+ * most likely have no effect at all unless the driver told us
+ * that the TID became empty before returning here from the
+ * release function.
+ * Either way, however, when the driver tells us that the TID
+ * became empty we'll do the TIM recalculation.
*/
- printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
- "though there are no buffered frames for it\n",
- sdata->name, sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
}
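
To summarize the release path just closed: every frame in one response carries the same MoreData indication, computed once from whether anything else remains buffered, while EOSP is set only on the final frame of the service period, detected above by the release queue running empty. A toy model of the marking (user-space, sample values):

#include <stdio.h>

int main(void)
{
	int n_release = 3, more_data = 1, i;	/* sample values */

	for (i = 0; i < n_release; i++)
		printf("frame %d: more_data=%d eosp=%d\n",
		       i, more_data, i == n_release - 1);
	return 0;
}
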
+void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
+{
+ u8 ignore_for_response = sta->sta.uapsd_queues;
+
+ /*
+ * If all ACs are delivery-enabled then we should reply
+ * from any of them, if only some are enabled we reply
+ * only from the non-enabled ones.
+ */
+ if (ignore_for_response == BIT(IEEE80211_NUM_ACS) - 1)
+ ignore_for_response = 0;
+
+ ieee80211_sta_ps_deliver_response(sta, 1, ignore_for_response,
+ IEEE80211_FRAME_RELEASE_PSPOLL);
+}
+
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta)
+{
+ int n_frames = sta->sta.max_sp;
+ u8 delivery_enabled = sta->sta.uapsd_queues;
+
+ /*
+ * If we ever grow support for TSPEC this might happen if
+ * the TSPEC update from hostapd comes in between a trigger
+ * frame setting WLAN_STA_UAPSD in the RX path and this
+ * actually getting called.
+ */
+ if (!delivery_enabled)
+ return;
+
+ switch (sta->sta.max_sp) {
+ case 1:
+ n_frames = 2;
+ break;
+ case 2:
+ n_frames = 4;
+ break;
+ case 3:
+ n_frames = 6;
+ break;
+ case 0:
+ /* XXX: what is a good value? */
+ n_frames = 8;
+ break;
+ }
+
+ ieee80211_sta_ps_deliver_response(sta, n_frames, ~delivery_enabled,
+ IEEE80211_FRAME_RELEASE_UAPSD);
+}
+
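
The switch above decodes the Max SP Length subfield of the station's QoS Info: values 1-3 permit 2, 4 or 6 MSDUs per unscheduled service period, and 0 means "all buffered frames", which this patch caps at 8. A table-driven equivalent (uapsd_sp_frames() is a hypothetical name):

#include <stdio.h>

static int uapsd_sp_frames(unsigned int max_sp)
{
	/* index = Max SP Length field; 8 for "no limit" is the patch's cap */
	static const int frames[4] = { 8, 2, 4, 6 };

	return frames[max_sp & 3];
}

int main(void)
{
	unsigned int sp;

	for (sp = 0; sp < 4; sp++)
		printf("max_sp=%u -> %d frames\n", sp, uapsd_sp_frames(sp));
	return 0;
}
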
void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta, bool block)
{
@@ -984,17 +1470,50 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
trace_api_sta_block_awake(sta->local, pubsta, block);
if (block)
- set_sta_flags(sta, WLAN_STA_PS_DRIVER);
- else if (test_sta_flags(sta, WLAN_STA_PS_DRIVER))
+ set_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ else if (test_sta_flag(sta, WLAN_STA_PS_DRIVER))
ieee80211_queue_work(hw, &sta->drv_unblock_wk);
}
EXPORT_SYMBOL(ieee80211_sta_block_awake);
-void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
+void ieee80211_sta_eosp_irqsafe(struct ieee80211_sta *pubsta)
{
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+ struct ieee80211_local *local = sta->local;
+ struct sk_buff *skb;
+ struct skb_eosp_msg_data *data;
+
+ trace_api_eosp(local, pubsta);
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb) {
+ /* too bad ... but race is better than loss */
+ clear_sta_flag(sta, WLAN_STA_SP);
+ return;
+ }
+
+ data = (void *)skb->cb;
+ memcpy(data->sta, pubsta->addr, ETH_ALEN);
+ memcpy(data->iface, sta->sdata->vif.addr, ETH_ALEN);
+ skb->pkt_type = IEEE80211_EOSP_MSG;
+ skb_queue_tail(&local->skb_queue, skb);
+ tasklet_schedule(&local->tasklet);
+}
+EXPORT_SYMBOL(ieee80211_sta_eosp_irqsafe);
+
+void ieee80211_sta_set_buffered(struct ieee80211_sta *pubsta,
+ u8 tid, bool buffered)
+{
+ struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+
+ if (WARN_ON(tid >= STA_TID_NUM))
+ return;
+
+ if (buffered)
+ set_bit(tid, &sta->driver_buffered_tids);
+ else
+ clear_bit(tid, &sta->driver_buffered_tids);
- set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
- sta_info_set_tim_bit(sta);
+ sta_info_recalc_tim(sta);
}
-EXPORT_SYMBOL(ieee80211_sta_set_tim);
+EXPORT_SYMBOL(ieee80211_sta_set_buffered);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 28beb78..8c8ce05 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -19,7 +19,8 @@
/**
* enum ieee80211_sta_info_flags - Stations flags
*
- * These flags are used with &struct sta_info's @flags member.
+ * These flags are used with &struct sta_info's @flags member, but
+ * only indirectly with set_sta_flag() and friends.
*
* @WLAN_STA_AUTH: Station is authenticated.
* @WLAN_STA_ASSOC: Station is associated.
@@ -43,24 +44,33 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
- * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
- * buffers. Automatically cleared on station wake-up.
+ * @WLAN_STA_TDLS_PEER: Station is a TDLS peer.
+ * @WLAN_STA_TDLS_PEER_AUTH: This TDLS peer is authorized to send direct
+ * packets. This means the link is enabled.
+ * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
+ * keeping station in power-save mode, reply when the driver
+ * unblocks the station.
+ * @WLAN_STA_SP: Station is in a service period, so don't try to
+ * reply to other uAPSD trigger frames or PS-Poll.
*/
enum ieee80211_sta_info_flags {
- WLAN_STA_AUTH = 1<<0,
- WLAN_STA_ASSOC = 1<<1,
- WLAN_STA_PS_STA = 1<<2,
- WLAN_STA_AUTHORIZED = 1<<3,
- WLAN_STA_SHORT_PREAMBLE = 1<<4,
- WLAN_STA_ASSOC_AP = 1<<5,
- WLAN_STA_WME = 1<<6,
- WLAN_STA_WDS = 1<<7,
- WLAN_STA_CLEAR_PS_FILT = 1<<9,
- WLAN_STA_MFP = 1<<10,
- WLAN_STA_BLOCK_BA = 1<<11,
- WLAN_STA_PS_DRIVER = 1<<12,
- WLAN_STA_PSPOLL = 1<<13,
- WLAN_STA_PS_DRIVER_BUF = 1<<14,
+ WLAN_STA_AUTH,
+ WLAN_STA_ASSOC,
+ WLAN_STA_PS_STA,
+ WLAN_STA_AUTHORIZED,
+ WLAN_STA_SHORT_PREAMBLE,
+ WLAN_STA_ASSOC_AP,
+ WLAN_STA_WME,
+ WLAN_STA_WDS,
+ WLAN_STA_CLEAR_PS_FILT,
+ WLAN_STA_MFP,
+ WLAN_STA_BLOCK_BA,
+ WLAN_STA_PS_DRIVER,
+ WLAN_STA_PSPOLL,
+ WLAN_STA_TDLS_PEER,
+ WLAN_STA_TDLS_PEER_AUTH,
+ WLAN_STA_UAPSD,
+ WLAN_STA_SP,
};
#define STA_TID_NUM 16
@@ -86,6 +96,8 @@ enum ieee80211_sta_info_flags {
* @stop_initiator: initiator of a session stop
* @tx_stop: TX DelBA frame when stopping
* @buf_size: reorder buffer size at receiver
+ * @failed_bar_ssn: ssn of the last failed BAR tx attempt
+ * @bar_pending: BAR needs to be re-sent
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -106,6 +118,9 @@ struct tid_ampdu_tx {
u8 stop_initiator;
bool tx_stop;
u8 buf_size;
+
+ u16 failed_bar_ssn;
+ bool bar_pending;
};
/**
@@ -198,15 +213,16 @@ struct sta_ampdu_mlme {
* @last_rx_rate_flag: rx status flag of the last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
- * @flaglock: spinlock for flags accesses
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
- * @flags: STA flags, see &enum ieee80211_sta_info_flags
- * @ps_tx_buf: buffer of frames to transmit to this station
- * when it leaves power saving state
- * @tx_filtered: buffer of frames we already tried to transmit
- * but were filtered by hardware due to STA having entered
- * power saving state
+ * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_tx_buf: buffers (per AC) of frames to transmit to this station
+ * when it leaves power saving state or polls
+ * @tx_filtered: buffers (per AC) of frames we already tried to
+ * transmit but were filtered by hardware due to STA having
+ * entered power saving state, these are also delivered to
+ * the station when it leaves powersave or polls for frames
+ * @driver_buffered_tids: bitmap of TIDs the driver has data buffered on
* @rx_packets: Number of MSDUs received from this STA
* @rx_bytes: Number of bytes received from this STA
* @wep_weak_iv_count: number of weak WEP IVs received from this station
@@ -238,10 +254,12 @@ struct sta_ampdu_mlme {
* @plink_timer: peer link watch timer
* @plink_timer_was_running: used by suspend/resume to restore timers
* @debugfs: debug filesystem info
- * @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
* @uploaded: set to true when sta is uploaded to the driver
* @lost_packets: number of consecutive lost packets
+ * @dummy: indicate a dummy station created for receiving
+ * EAP frames before association
+ * @sta: station information we share with the driver
*/
struct sta_info {
/* General information, mostly static */
@@ -254,7 +272,6 @@ struct sta_info {
struct rate_control_ref *rate_ctrl;
void *rate_ctrl_priv;
spinlock_t lock;
- spinlock_t flaglock;
struct work_struct drv_unblock_wk;
@@ -264,18 +281,16 @@ struct sta_info {
bool uploaded;
- /*
- * frequently updated, locked with own spinlock (flaglock),
- * use the accessors defined below
- */
- u32 flags;
+ /* use the accessors defined below */
+ unsigned long _flags;
/*
* STA powersave frame queues, no more than the internal
* locking required.
*/
- struct sk_buff_head ps_tx_buf;
- struct sk_buff_head tx_filtered;
+ struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
+ struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
+ unsigned long driver_buffered_tids;
/* Updated from RX path only, no locking requirements */
unsigned long rx_packets, rx_bytes;
@@ -336,6 +351,9 @@ struct sta_info {
unsigned int lost_packets;
+ /* should be right in front of sta to be in the same cache line */
+ bool dummy;
+
/* keep last! */
struct ieee80211_sta sta;
};
@@ -348,60 +366,28 @@ static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
return NL80211_PLINK_LISTEN;
}
-static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void set_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags |= flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ set_bit(flag, &sta->_flags);
}
-static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
+static inline void clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
+ clear_bit(flag, &sta->_flags);
}
-static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
+static inline int test_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
-}
-
-static inline u32 test_and_clear_sta_flags(struct sta_info *sta,
- const u32 flags)
-{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags & flags;
- sta->flags &= ~flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_bit(flag, &sta->_flags);
}
-static inline u32 get_sta_flags(struct sta_info *sta)
+static inline int test_and_clear_sta_flag(struct sta_info *sta,
+ enum ieee80211_sta_info_flags flag)
{
- u32 ret;
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- ret = sta->flags;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-
- return ret;
+ return test_and_clear_bit(flag, &sta->_flags);
}
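
This accessor rewrite is what lets the patch delete flaglock: set_bit(), clear_bit() and test_and_clear_bit() are atomic per bit, so concurrent updates to different flags in the same word can no longer lose each other's stores, which is the only guarantee the old spinlock provided. A user-space model of that guarantee using GCC's builtin atomics (the kernel's bitops differ in detail; flag_set()/flag_test_and_clear() are illustrative names):

#include <stdio.h>

static void flag_set(unsigned long *flags, int bit)
{
	__atomic_fetch_or(flags, 1UL << bit, __ATOMIC_RELAXED);
}

static int flag_test_and_clear(unsigned long *flags, int bit)
{
	unsigned long old;

	old = __atomic_fetch_and(flags, ~(1UL << bit), __ATOMIC_RELAXED);
	return !!(old & (1UL << bit));
}

int main(void)
{
	unsigned long flags = 0;

	flag_set(&flags, 3);
	printf("%d %d\n", flag_test_and_clear(&flags, 3),
	       flag_test_and_clear(&flags, 3));	/* prints "1 0" */
	return 0;
}

Note the cost: multi-flag operations like the old test_and_clear_sta_flags(PS_STA | PS_DRIVER) are no longer atomic as a group, which is why call sites above were split into per-flag tests.
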
void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
@@ -419,8 +405,8 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
#define STA_HASH(sta) (sta[5])
-/* Maximum number of frames to buffer per power saving station */
-#define STA_MAX_TX_BUFFER 128
+/* Maximum number of frames to buffer per power saving station per AC */
+#define STA_MAX_TX_BUFFER 64
/* Minimum buffered frame expiry time. If STA uses listen interval that is
* smaller than this value, the minimum value here is used instead. */
@@ -436,9 +422,15 @@ rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
+struct sta_info *sta_info_get_bss_rx(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
static inline
void for_each_sta_info_type_check(struct ieee80211_local *local,
const u8 *addr,
@@ -459,6 +451,22 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
_sta = nxt, \
nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
) \
+ /* run code only if address matches and it's not a dummy sta */ \
+ if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0 && \
+ !_sta->dummy)
+
+#define for_each_sta_info_rx(local, _addr, _sta, nxt) \
+ for ( /* initialise loop */ \
+ _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
+ /* typecheck */ \
+ for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
+ /* continue condition */ \
+ _sta; \
+ /* advance loop */ \
+ _sta = nxt, \
+ nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
+ ) \
/* compare address and run code only if it matches */ \
if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
@@ -484,14 +492,14 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
int sta_info_insert(struct sta_info *sta);
int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
int sta_info_insert_atomic(struct sta_info *sta);
+int sta_info_reinsert(struct sta_info *sta);
int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
const u8 *addr);
-void sta_info_set_tim_bit(struct sta_info *sta);
-void sta_info_clear_tim_bit(struct sta_info *sta);
+void sta_info_recalc_tim(struct sta_info *sta);
void sta_info_init(struct ieee80211_local *local);
void sta_info_stop(struct ieee80211_local *local);
@@ -502,5 +510,6 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta);
void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta);
+void ieee80211_sta_ps_deliver_uapsd(struct sta_info *sta);
#endif /* STA_INFO_H */
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1658efa..80de436 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -9,11 +9,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "rate.h"
#include "mesh.h"
#include "led.h"
+#include "wme.h"
void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -43,6 +45,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ int ac;
/*
* This skb 'survived' a round-trip through the driver, and
@@ -63,11 +67,37 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
sta->tx_filtered_count++;
/*
+ * Clear the more-data bit on filtered frames; it might be set
+ * but later frames might time out, so it might have to be
+ * cleared again ... It's all rather unlikely (this frame
+ * should time out first, right?) but let's not confuse
+ * peers unnecessarily.
+ */
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA))
+ hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *p = ieee80211_get_qos_ctl(hdr);
+ int tid = *p & IEEE80211_QOS_CTL_TID_MASK;
+
+ /*
+ * Clear EOSP if set, this could happen e.g.
+ * if an absence period (us being a P2P GO)
+ * shortens the SP.
+ */
+ if (*p & IEEE80211_QOS_CTL_EOSP)
+ *p &= ~IEEE80211_QOS_CTL_EOSP;
+ ac = ieee802_1d_to_ac[tid & 7];
+ } else {
+ ac = IEEE80211_AC_BE;
+ }
+
+ /*
* Clear the TX filter mask for this STA when sending the next
* packet. If the STA went to power save mode, this will happen
* when it wakes up for the next time.
*/
- set_sta_flags(sta, WLAN_STA_CLEAR_PS_FILT);
+ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
/*
* This code races in the following way:
@@ -103,13 +133,19 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
* changes before calling TX status events if ordering can be
* unknown.
*/
- if (test_sta_flags(sta, WLAN_STA_PS_STA) &&
- skb_queue_len(&sta->tx_filtered) < STA_MAX_TX_BUFFER) {
- skb_queue_tail(&sta->tx_filtered, skb);
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ skb_queue_len(&sta->tx_filtered[ac]) < STA_MAX_TX_BUFFER) {
+ skb_queue_tail(&sta->tx_filtered[ac], skb);
+ sta_info_recalc_tim(sta);
+
+ if (!timer_pending(&local->sta_cleanup))
+ mod_timer(&local->sta_cleanup,
+ round_jiffies(jiffies +
+ STA_INFO_CLEANUP_INTERVAL));
return;
}
- if (!test_sta_flags(sta, WLAN_STA_PS_STA) &&
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
!(info->flags & IEEE80211_TX_INTFL_RETRIED)) {
/* Software retry the packet once */
info->flags |= IEEE80211_TX_INTFL_RETRIED;
@@ -121,18 +157,38 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
if (net_ratelimit())
wiphy_debug(local->hw.wiphy,
"dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
- skb_queue_len(&sta->tx_filtered),
- !!test_sta_flags(sta, WLAN_STA_PS_STA), jiffies);
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
#endif
dev_kfree_skb(skb);
}
+static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx || !tid_tx->bar_pending)
+ return;
+
+ tid_tx->bar_pending = false;
+ ieee80211_send_bar(&sta->sdata->vif, addr, tid, tid_tx->failed_bar_ssn);
+}
+
static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ if (ieee80211_is_data_qos(mgmt->frame_control)) {
+ struct ieee80211_hdr *hdr = (void *) skb->data;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u16 tid = qc[0] & 0xf;
+
+ ieee80211_check_pending_bar(sta, hdr->addr1, tid);
+ }
+
if (ieee80211_is_action(mgmt->frame_control) &&
sdata->vif.type == NL80211_IFTYPE_STATION &&
mgmt->u.action.category == WLAN_CATEGORY_HT &&
@@ -161,6 +217,114 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
}
}
+static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn)
+{
+ struct tid_ampdu_tx *tid_tx;
+
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (!tid_tx)
+ return;
+
+ tid_tx->failed_bar_ssn = ssn;
+ tid_tx->bar_pending = true;
+}
+
+static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
+{
+ int len = sizeof(struct ieee80211_radiotap_header);
+
+ /* IEEE80211_RADIOTAP_RATE rate */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ len += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ len += 1;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+
+ return len;
+}
+
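
The length computed here is variable because radiotap fields are optional: the 8-byte radiotap header is always present, a legacy rate adds a rate byte plus one pad byte (so the 16-bit TX-flags field stays aligned), TX flags and the retry count are always emitted, and an MCS rate instead appends the 3-byte known/flags/index triplet. A stand-alone mirror of the computation (RT_HDR_LEN = 8 is an assumption matching sizeof(struct ieee80211_radiotap_header)):

#include <stdbool.h>
#include <stdio.h>

#define RT_HDR_LEN 8

static int tx_radiotap_len(bool have_rate, bool is_mcs)
{
	int len = RT_HDR_LEN;

	if (have_rate && !is_mcs)
		len += 2;	/* 1-byte rate + 1 pad so TX flags align */
	len += 2;		/* IEEE80211_RADIOTAP_TX_FLAGS (le16) */
	len += 1;		/* IEEE80211_RADIOTAP_DATA_RETRIES */
	if (have_rate && is_mcs)
		len += 3;	/* MCS known/flags/index triplet */
	return len;
}

int main(void)
{
	/* legacy rate: 13, MCS rate: 14, no rate info: 11 */
	printf("%d %d %d\n", tx_radiotap_len(true, false),
	       tx_radiotap_len(true, true), tx_radiotap_len(false, false));
	return 0;
}
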
+static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
+ *sband, struct sk_buff *skb,
+ int retry_count, int rtap_len)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_radiotap_header *rthdr;
+ unsigned char *pos;
+ __le16 txflags;
+
+ rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
+
+ memset(rthdr, 0, rtap_len);
+ rthdr->it_len = cpu_to_le16(rtap_len);
+ rthdr->it_present =
+ cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
+ (1 << IEEE80211_RADIOTAP_DATA_RETRIES));
+ pos = (unsigned char *)(rthdr + 1);
+
+ /*
+ * XXX: Once radiotap gets the bitmap reset thing the vendor
+ * extensions proposal contains, we can actually report
+ * the whole set of tries we did.
+ */
+
+ /* IEEE80211_RADIOTAP_RATE */
+ if (info->status.rates[0].idx >= 0 &&
+ !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
+ *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ /* padding for tx flags */
+ pos += 2;
+ }
+
+ /* IEEE80211_RADIOTAP_TX_FLAGS */
+ txflags = 0;
+ if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
+ !is_multicast_ether_addr(hdr->addr1))
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+
+ put_unaligned_le16(txflags, pos);
+ pos += 2;
+
+ /* IEEE80211_RADIOTAP_DATA_RETRIES */
+ /* for now report the total retry_count */
+ *pos = retry_count;
+ pos++;
+
+ /* IEEE80211_TX_RC_MCS */
+ if (info->status.rates[0].idx >= 0 &&
+ info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
+ pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
+ IEEE80211_RADIOTAP_MCS_HAVE_GI |
+ IEEE80211_RADIOTAP_MCS_HAVE_BW;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_SGI;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_BW_40;
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_GREEN_FIELD)
+ pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ pos[2] = info->status.rates[0].idx;
+ pos += 3;
+ }
+
+}
+
/*
* Use a static threshold for now, best value to be determined
* by testing ...
@@ -179,7 +343,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
u16 frag, type;
__le16 fc;
struct ieee80211_supported_band *sband;
- struct ieee80211_tx_status_rtap_hdr *rthdr;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
struct sta_info *sta, *tmp;
@@ -187,6 +350,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
int rates_idx = -1;
bool send_to_cooked;
bool acked;
+ struct ieee80211_bar *bar;
+ u16 tid;
+ int rtap_len;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (info->status.rates[i].idx < 0) {
@@ -215,8 +381,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
continue;
+ if (info->flags & IEEE80211_TX_STATUS_EOSP)
+ clear_sta_flag(sta, WLAN_STA_SP);
+
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
- if (!acked && test_sta_flags(sta, WLAN_STA_PS_STA)) {
+ if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) {
/*
* The STA is in power save mode, so assume
* that this TX packet failed because of that.
@@ -239,10 +408,31 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
tid = qc[0] & 0xf;
ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10)
& IEEE80211_SCTL_SEQ);
- ieee80211_send_bar(sta->sdata, hdr->addr1,
+ ieee80211_send_bar(&sta->sdata->vif, hdr->addr1,
tid, ssn);
}
+ if (!acked && ieee80211_is_back_req(fc)) {
+ u16 control;
+
+ /*
+ * BAR failed, store the last SSN and retry sending
+ * the BAR when the next unicast transmission on the
+ * same TID succeeds.
+ */
+ bar = (struct ieee80211_bar *) skb->data;
+ control = le16_to_cpu(bar->control);
+ if (!(control & IEEE80211_BAR_CTRL_MULTI_TID)) {
+ u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+ tid = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+
+ ieee80211_set_bar_pending(sta, tid, ssn);
+ }
+ }
+
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
rcu_read_unlock();
@@ -336,7 +526,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
continue;
if (wk->offchan_tx.frame != skb)
continue;
- wk->offchan_tx.frame = NULL;
+ wk->offchan_tx.status = true;
break;
}
rcu_read_unlock();
@@ -345,9 +535,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
local->hw_roc_skb_for_status = NULL;
}
- if (cookie == local->hw_offchan_tx_cookie)
- local->hw_offchan_tx_cookie = 0;
-
cfg80211_mgmt_tx_status(
skb->dev, cookie, skb->data, skb->len,
!!(info->flags & IEEE80211_TX_STAT_ACK), GFP_ATOMIC);
@@ -370,44 +557,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* send frame to monitor interfaces now */
-
- if (skb_headroom(skb) < sizeof(*rthdr)) {
+ rtap_len = ieee80211_tx_radiotap_len(info);
+ if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
-
- rthdr = (struct ieee80211_tx_status_rtap_hdr *)
- skb_push(skb, sizeof(*rthdr));
-
- memset(rthdr, 0, sizeof(*rthdr));
- rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
- rthdr->hdr.it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_TX_FLAGS) |
- (1 << IEEE80211_RADIOTAP_DATA_RETRIES) |
- (1 << IEEE80211_RADIOTAP_RATE));
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !is_multicast_ether_addr(hdr->addr1))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
-
- /*
- * XXX: Once radiotap gets the bitmap reset thing the vendor
- * extensions proposal contains, we can actually report
- * the whole set of tries we did.
- */
- if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
- (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
- else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- rthdr->tx_flags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
- if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
- rthdr->rate = sband->bitrates[
- info->status.rates[0].idx].bitrate / 5;
-
- /* for now report the total retry_count */
- rthdr->data_retries = retry_count;
+ ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index f49d00a..51077a9 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8cb0d2d..1f8b120 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -18,6 +18,7 @@
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ieee80211_radiotap.h>
#include <net/cfg80211.h>
@@ -253,7 +254,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- u32 sta_flags;
+ bool assoc = false;
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
@@ -284,10 +285,11 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
if (likely(tx->flags & IEEE80211_TX_UNICAST)) {
- if (unlikely(!(sta_flags & WLAN_STA_ASSOC) &&
+ if (unlikely(!assoc &&
tx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
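
For context, the conversion running through this patch replaces the lock-protected u32 snapshot returned by get_sta_flags()/test_sta_flags() with per-bit atomic tests via test_sta_flag(). A rough userspace analogue of the new scheme, with made-up names, using C11 atomics in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>

enum sta_flag { STA_ASSOC, STA_WME, STA_MFP };

struct sta { _Atomic unsigned long flags; };

static bool test_sta_flag(struct sta *s, enum sta_flag f)
{
	return atomic_load(&s->flags) & (1UL << f);
}

static void set_sta_flag(struct sta *s, enum sta_flag f)
{
	atomic_fetch_or(&s->flags, 1UL << f);
}

static bool test_and_clear_sta_flag(struct sta *s, enum sta_flag f)
{
	return atomic_fetch_and(&s->flags, ~(1UL << f)) & (1UL << f);
}

Each flag is now read and written individually, so there is no combined snapshot to race against and no spinlock on the hot TX path.
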
@@ -343,13 +345,22 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
total += skb_queue_len(&ap->ps_bc_buf);
}
+ /*
+ * Drop one frame from each station from the lowest-priority
+ * AC that has frames at all.
+ */
list_for_each_entry_rcu(sta, &local->sta_list, list) {
- skb = skb_dequeue(&sta->ps_tx_buf);
- if (skb) {
- purged++;
- dev_kfree_skb(skb);
+ int ac;
+
+ for (ac = IEEE80211_AC_BK; ac >= IEEE80211_AC_VO; ac--) {
+ skb = skb_dequeue(&sta->ps_tx_buf[ac]);
+ total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ if (skb) {
+ purged++;
+ dev_kfree_skb(skb);
+ break;
+ }
}
- total += skb_queue_len(&sta->ps_tx_buf);
}
rcu_read_unlock();
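
The rewritten purge loop above walks the ACs from lowest priority to highest and frees at most one buffered frame per station. A small standalone sketch of that ordering, assuming the mac80211 AC numbering (VO = 0 ... BK = 3); the names are illustrative:

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

struct sta { int buffered[NUM_ACS]; };	/* frames queued per AC */

static int purge_one(struct sta *s)
{
	int ac;

	for (ac = AC_BK; ac >= AC_VO; ac--) {
		if (s->buffered[ac]) {
			s->buffered[ac]--;	/* drop this AC's oldest frame */
			return 1;		/* at most one frame per station */
		}
	}
	return 0;
}
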
@@ -418,7 +429,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
if (!ieee80211_is_mgmt(fc))
return 0;
- if (sta == NULL || !test_sta_flags(sta, WLAN_STA_MFP))
+ if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
@@ -435,7 +446,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
struct ieee80211_local *local = tx->local;
- u32 staflags;
if (unlikely(!sta ||
ieee80211_is_probe_resp(hdr->frame_control) ||
@@ -444,57 +454,52 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
ieee80211_is_reassoc_resp(hdr->frame_control)))
return TX_CONTINUE;
- staflags = get_sta_flags(sta);
+ if (unlikely((test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) &&
+ !(info->flags & IEEE80211_TX_CTL_POLL_RESPONSE))) {
+ int ac = skb_get_queue_mapping(tx->skb);
- if (unlikely((staflags & (WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) &&
- !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
- "before %d)\n",
- sta->sta.addr, sta->sta.aid,
- skb_queue_len(&sta->ps_tx_buf));
+ printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
+ sta->sta.addr, sta->sta.aid, ac);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
- if (skb_queue_len(&sta->ps_tx_buf) >= STA_MAX_TX_BUFFER) {
- struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf);
+ if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
+ struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: STA %pM TX "
- "buffer full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr);
- }
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: STA %pM TX buffer for "
+ "AC %d full - dropping oldest frame\n",
+ tx->sdata->name, sta->sta.addr, ac);
#endif
dev_kfree_skb(old);
} else
tx->local->total_ps_buffered++;
- /*
- * Queue frame to be sent after STA wakes up/polls,
- * but don't set the TIM bit if the driver is blocking
- * wakeup or poll response transmissions anyway.
- */
- if (skb_queue_empty(&sta->ps_tx_buf) &&
- !(staflags & WLAN_STA_PS_DRIVER))
- sta_info_set_tim_bit(sta);
-
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
- skb_queue_tail(&sta->ps_tx_buf, tx->skb);
+ skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
round_jiffies(jiffies +
STA_INFO_CLEANUP_INTERVAL));
+ /*
+ * We queued up some frames, so the TIM bit might
+ * need to be set, recalculate it.
+ */
+ sta_info_recalc_tim(sta);
+
return TX_QUEUED;
}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(staflags & WLAN_STA_PS_STA)) {
- printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
- "set -> send frame\n", tx->sdata->name,
- sta->sta.addr);
+ else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
+ printk(KERN_DEBUG
+ "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
+ tx->sdata->name, sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -552,7 +557,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(!ieee80211_is_robust_mgmt_frame(hdr) ||
(ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flags(tx->sta, WLAN_STA_MFP)))) {
+ tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
} else
@@ -611,7 +616,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
u32 len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
- u32 sta_flags;
+ bool assoc = false;
memset(&txrc, 0, sizeof(txrc));
@@ -647,17 +652,17 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
*/
if (tx->sdata->vif.bss_conf.use_short_preamble &&
(ieee80211_is_data(hdr->frame_control) ||
- (tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
+ (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = short_preamble = true;
- sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+ if (tx->sta)
+ assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
/*
* Let's not bother with rate control if we're associated and cannot
* talk to the sta. This should not happen.
*/
- if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
- (sta_flags & WLAN_STA_ASSOC) &&
+ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc &&
!rate_usable_index_exists(sband, &tx->sta->sta),
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
@@ -800,6 +805,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
if (ieee80211_hdrlen(hdr->frame_control) < 24)
return TX_CONTINUE;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ return TX_CONTINUE;
+
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
@@ -891,7 +899,10 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
int hdrlen;
int fragnum;
- if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
+ if (info->flags & IEEE80211_TX_CTL_DONTFRAG)
+ return TX_CONTINUE;
+
+ if (tx->local->ops->set_frag_threshold)
return TX_CONTINUE;
/*
@@ -904,7 +915,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- /* internal error, why is TX_FRAGMENTED set? */
+ /* internal error, why isn't DONTFRAG set? */
if (WARN_ON(skb->len + FCS_LEN <= frag_threshold))
return TX_DROP;
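
Note the inverted sense here: the old code flagged frames that should be fragmented (IEEE80211_TX_FRAGMENTED), the new code flags frames that must not be (IEEE80211_TX_CTL_DONTFRAG), which also folds in hardware fragmentation. A sketch of the decision, assuming the predicate below matches the conditions visible in the hunks above:

#include <stdbool.h>
#include <stddef.h>

#define FCS_LEN 4

static bool dont_frag(bool unicast, size_t frame_len,
		      size_t frag_threshold, bool ampdu, bool hw_frag)
{
	if (hw_frag)		/* driver implements set_frag_threshold */
		return true;
	if (!unicast || ampdu)	/* multicast and A-MPDU are never fragmented */
		return true;
	return frame_len + FCS_LEN <= frag_threshold;
}
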
@@ -1025,100 +1036,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
/* actual transmit path */
-/*
- * deal with packet injection down monitor interface
- * with Radiotap Header -- only called for monitor mode interface
- */
-static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
-{
- /*
- * this is the moment to interpret and discard the radiotap header that
- * must be at the start of the packet injected in Monitor mode
- *
- * Need to take some care with endian-ness since radiotap
- * args are little-endian
- */
-
- struct ieee80211_radiotap_iterator iterator;
- struct ieee80211_radiotap_header *rthdr =
- (struct ieee80211_radiotap_header *) skb->data;
- bool hw_frag;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
- NULL);
-
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
-
- /* packet is fragmented in HW if we have a non-NULL driver callback */
- hw_frag = (tx->local->ops->set_frag_threshold != NULL);
-
- /*
- * for every radiotap entry that is present
- * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
- * entries present, or -EINVAL on error)
- */
-
- while (!ret) {
- ret = ieee80211_radiotap_iterator_next(&iterator);
-
- if (ret)
- continue;
-
- /* see if this argument is something we can use */
- switch (iterator.this_arg_index) {
- /*
- * You must take care when dereferencing iterator.this_arg
- * for multibyte types... the pointer is not aligned. Use
- * get_unaligned((type *)iterator.this_arg) to dereference
- * iterator.this_arg for type "type" safely on all arches.
- */
- case IEEE80211_RADIOTAP_FLAGS:
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
- /*
- * this indicates that the skb we have been
- * handed has the 32-bit FCS CRC at the end...
- * we should react to that by snipping it off
- * because it will be recomputed and added
- * on transmission
- */
- if (skb->len < (iterator._max_length + FCS_LEN))
- return false;
-
- skb_trim(skb, skb->len - FCS_LEN);
- }
- if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
- info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
- if ((*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG) &&
- !hw_frag)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- break;
-
- /*
- * Please update the file
- * Documentation/networking/mac80211-injection.txt
- * when parsing new fields here.
- */
-
- default:
- break;
- }
- }
-
- if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
- return false;
-
- /*
- * remove the radiotap header
- * iterator->_max_length was sanity-checked against
- * skb->len by iterator init
- */
- skb_pull(skb, iterator._max_length);
-
- return true;
-}
-
static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
struct sk_buff *skb,
struct ieee80211_tx_info *info,
@@ -1183,7 +1100,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int hdrlen, tid;
+ int tid;
u8 *qc;
memset(tx, 0, sizeof(*tx));
@@ -1191,26 +1108,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
- /*
- * Set this flag (used below to indicate "automatic fragmentation"),
- * it will be cleared/left by radiotap as desired.
- * Only valid when fragmentation is done by the stack.
- */
- if (!local->ops->set_frag_threshold)
- tx->flags |= IEEE80211_TX_FRAGMENTED;
-
- /* process and remove the injection radiotap header */
- if (unlikely(info->flags & IEEE80211_TX_INTFL_HAS_RADIOTAP)) {
- if (!__ieee80211_parse_tx_radiotap(tx, skb))
- return TX_DROP;
-
- /*
- * __ieee80211_parse_tx_radiotap has now removed
- * the radiotap header that was present and pre-filled
- * 'tx' with tx control information.
- */
- info->flags &= ~IEEE80211_TX_INTFL_HAS_RADIOTAP;
- }
/*
* If this flag is set to true anywhere, and we get here,
@@ -1232,7 +1129,9 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
- (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION) &&
+ !(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)) {
struct tid_ampdu_tx *tid_tx;
qc = ieee80211_get_qos_ctl(hdr);
@@ -1257,29 +1156,25 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
tx->flags |= IEEE80211_TX_UNICAST;
if (unlikely(local->wifi_wme_noack_test))
info->flags |= IEEE80211_TX_CTL_NO_ACK;
- else
- info->flags &= ~IEEE80211_TX_CTL_NO_ACK;
+ /*
+ * Flags are initialized to 0. Hence, no need to
+ * explicitly unset IEEE80211_TX_CTL_NO_ACK since
+ * it might already be set for injected frames.
+ */
}
- if (tx->flags & IEEE80211_TX_FRAGMENTED) {
- if ((tx->flags & IEEE80211_TX_UNICAST) &&
- skb->len + FCS_LEN > local->hw.wiphy->frag_threshold &&
- !(info->flags & IEEE80211_TX_CTL_AMPDU))
- tx->flags |= IEEE80211_TX_FRAGMENTED;
- else
- tx->flags &= ~IEEE80211_TX_FRAGMENTED;
+ if (!(info->flags & IEEE80211_TX_CTL_DONTFRAG)) {
+ if (!(tx->flags & IEEE80211_TX_UNICAST) ||
+ skb->len + FCS_LEN <= local->hw.wiphy->frag_threshold ||
+ info->flags & IEEE80211_TX_CTL_AMPDU)
+ info->flags |= IEEE80211_TX_CTL_DONTFRAG;
}
if (!tx->sta)
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- else if (test_and_clear_sta_flags(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+ else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (skb->len > hdrlen + sizeof(rfc1042_header) + 2) {
- u8 *pos = &skb->data[hdrlen + sizeof(rfc1042_header)];
- tx->ethertype = (pos[0] << 8) | pos[1];
- }
info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
return TX_CONTINUE;
@@ -1490,11 +1385,6 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
tail_need = max_t(int, tail_need, 0);
}
- if (head_need || tail_need) {
- /* Sorry. Can't account for this any more */
- skb_orphan(skb);
- }
-
if (skb_cloned(skb))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
@@ -1508,67 +1398,19 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
}
- /* update truesize too */
- skb->truesize += head_need + tail_need;
-
return 0;
}
-static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb)
+void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_sub_if_data *tmp_sdata;
int headroom;
bool may_encrypt;
rcu_read_lock();
- if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
- int hdrlen;
- u16 len_rthdr;
-
- info->flags |= IEEE80211_TX_CTL_INJECTED |
- IEEE80211_TX_INTFL_HAS_RADIOTAP;
-
- len_rthdr = ieee80211_get_radiotap_len(skb->data);
- hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
-
- /* check the header is complete in the frame */
- if (likely(skb->len >= len_rthdr + hdrlen)) {
- /*
- * We process outgoing injected frames that have a
- * local address we handle as though they are our
- * own frames.
- * This code here isn't entirely correct, the local
- * MAC address is not necessarily enough to find
- * the interface to use; for that proper VLAN/WDS
- * support we will need a different mechanism.
- */
-
- list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
- list) {
- if (!ieee80211_sdata_running(tmp_sdata))
- continue;
- if (tmp_sdata->vif.type ==
- NL80211_IFTYPE_MONITOR ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_AP_VLAN ||
- tmp_sdata->vif.type ==
- NL80211_IFTYPE_WDS)
- continue;
- if (compare_ether_addr(tmp_sdata->vif.addr,
- hdr->addr2) == 0) {
- sdata = tmp_sdata;
- break;
- }
- }
- }
- }
-
may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
headroom = local->tx_headroom;
@@ -1595,11 +1437,94 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
- ieee80211_set_qos_hdr(local, skb);
+ ieee80211_set_qos_hdr(sdata, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
+static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+{
+ struct ieee80211_radiotap_iterator iterator;
+ struct ieee80211_radiotap_header *rthdr =
+ (struct ieee80211_radiotap_header *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ NULL);
+ u16 txflags;
+
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_DONTFRAG;
+
+ /*
+ * for every radiotap entry that is present
+ * (ieee80211_radiotap_iterator_next returns -ENOENT when no more
+ * entries present, or -EINVAL on error)
+ */
+
+ while (!ret) {
+ ret = ieee80211_radiotap_iterator_next(&iterator);
+
+ if (ret)
+ continue;
+
+ /* see if this argument is something we can use */
+ switch (iterator.this_arg_index) {
+ /*
+ * You must take care when dereferencing iterator.this_arg
+ * for multibyte types... the pointer is not aligned. Use
+ * get_unaligned((type *)iterator.this_arg) to dereference
+ * iterator.this_arg for type "type" safely on all arches.
+ */
+ case IEEE80211_RADIOTAP_FLAGS:
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FCS) {
+ /*
+ * this indicates that the skb we have been
+ * handed has the 32-bit FCS CRC at the end...
+ * we should react to that by snipping it off
+ * because it will be recomputed and added
+ * on transmission
+ */
+ if (skb->len < (iterator._max_length + FCS_LEN))
+ return false;
+
+ skb_trim(skb, skb->len - FCS_LEN);
+ }
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
+ info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
+ info->flags &= ~IEEE80211_TX_CTL_DONTFRAG;
+ break;
+
+ case IEEE80211_RADIOTAP_TX_FLAGS:
+ txflags = get_unaligned_le16(iterator.this_arg);
+ if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK)
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ break;
+
+ /*
+ * Please update the file
+ * Documentation/networking/mac80211-injection.txt
+ * when parsing new fields here.
+ */
+
+ default:
+ break;
+ }
+ }
+
+ if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
+ return false;
+
+ /*
+ * remove the radiotap header
+ * iterator->_max_length was sanity-checked against
+ * skb->len by iterator init
+ */
+ skb_pull(skb, iterator._max_length);
+
+ return true;
+}
+
netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
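
The new ieee80211_parse_tx_radiotap() above reads the TX-flags argument with get_unaligned_le16() because radiotap arguments are little-endian and not necessarily aligned. A userspace stand-in for that accessor; IEEE80211_RADIOTAP_F_TX_NOACK is 0x0008 in the radiotap TX-flags field:

#include <stdint.h>

#define RTAP_F_TX_NOACK 0x0008	/* don't expect an ACK for this frame */

/* byte-wise load: safe at odd offsets, independent of host endianness */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static int wants_noack(const uint8_t *this_arg)
{
	return get_le16(this_arg) & RTAP_F_TX_NOACK;
}
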
@@ -1608,7 +1533,10 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
struct ieee80211_radiotap_header *prthdr =
(struct ieee80211_radiotap_header *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_sub_if_data *tmp_sdata, *sdata;
u16 len_rthdr;
+ int hdrlen;
/*
* Frame injection is not allowed if beaconing is not allowed
@@ -1659,12 +1587,65 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, len_rthdr);
skb_set_transport_header(skb, len_rthdr);
+ if (skb->len < len_rthdr + 2)
+ goto fail;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+ if (skb->len < len_rthdr + hdrlen)
+ goto fail;
+
+ /*
+ * Initialize skb->protocol if the injected frame is a data frame
+ * carrying a rfc1042 header
+ */
+ if (ieee80211_is_data(hdr->frame_control) &&
+ skb->len >= len_rthdr + hdrlen + sizeof(rfc1042_header) + 2) {
+ u8 *payload = (u8 *)hdr + hdrlen;
+
+ if (compare_ether_addr(payload, rfc1042_header) == 0)
+ skb->protocol = cpu_to_be16((payload[6] << 8) |
+ payload[7]);
+ }
+
memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_CTL_INJECTED;
+
+ /* process and remove the injection radiotap header */
+ if (!ieee80211_parse_tx_radiotap(skb))
+ goto fail;
+
+ rcu_read_lock();
+
+ /*
+ * We process outgoing injected frames that have a local address
+ * we handle as though they are non-injected frames.
+ * This code here isn't entirely correct, the local MAC address
+ * isn't always enough to find the interface to use; for proper
+ * VLAN/WDS support we will need a different mechanism (which
+ * likely isn't going to be monitor interfaces).
+ */
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ list_for_each_entry_rcu(tmp_sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(tmp_sdata))
+ continue;
+ if (tmp_sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ tmp_sdata->vif.type == NL80211_IFTYPE_WDS)
+ continue;
+ if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) {
+ sdata = tmp_sdata;
+ break;
+ }
+ }
+
+ ieee80211_xmit(sdata, skb);
+ rcu_read_unlock();
- /* pass the radiotap header up to xmit */
- ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
return NETDEV_TX_OK;
fail:
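
The protocol initialization added above recognizes an RFC 1042 encapsulated payload: after the 802.11 header, the LLC/SNAP prefix AA AA 03 00 00 00 is followed by the big-endian EtherType. A self-contained sketch of the same check:

#include <stdint.h>
#include <string.h>

static const uint8_t rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* returns the EtherType in host order, or 0 if not RFC 1042 encapsulated */
static uint16_t snap_ethertype(const uint8_t *payload, size_t len)
{
	if (len < sizeof(rfc1042_header) + 2)
		return 0;
	if (memcmp(payload, rfc1042_header, sizeof(rfc1042_header)))
		return 0;
	return ((uint16_t)payload[6] << 8) | payload[7];
}
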
@@ -1703,8 +1684,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
int encaps_len, skip_header_bytes;
int nh_pos, h_pos;
struct sta_info *sta = NULL;
- u32 sta_flags = 0;
+ bool wme_sta = false, authorized = false, tdls_auth = false;
struct sk_buff *tmp_skb;
+ bool tdls_direct = false;
if (unlikely(skb->len < ETH_HLEN)) {
ret = NETDEV_TX_OK;
@@ -1728,7 +1710,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
- sta_flags = get_sta_flags(sta);
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
if (sta)
@@ -1816,11 +1799,50 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
break;
#endif
case NL80211_IFTYPE_STATION:
- memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
- if (sdata->u.mgd.use_4addr &&
- cpu_to_be16(ethertype) != sdata->control_port_protocol) {
- fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
+ if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ bool tdls_peer = false;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, skb->data);
+ if (sta) {
+ authorized = test_sta_flag(sta,
+ WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ tdls_peer = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER);
+ tdls_auth = test_sta_flag(sta,
+ WLAN_STA_TDLS_PEER_AUTH);
+ }
+ rcu_read_unlock();
+
+ /*
+ * If the TDLS link is enabled, send everything
+ * directly. Otherwise, allow TDLS setup frames
+ * to be transmitted indirectly.
+ */
+ tdls_direct = tdls_peer && (tdls_auth ||
+ !(ethertype == ETH_P_TDLS && skb->len > 14 &&
+ skb->data[14] == WLAN_TDLS_SNAP_RFTYPE));
+ }
+
+ if (tdls_direct) {
+ /* link during setup - throw out frames to peer */
+ if (!tdls_auth) {
+ ret = NETDEV_TX_OK;
+ goto fail;
+ }
+
+ /* DA SA BSSID */
+ memcpy(hdr.addr1, skb->data, ETH_ALEN);
+ memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+ memcpy(hdr.addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ hdrlen = 24;
+ } else if (sdata->u.mgd.use_4addr &&
+ cpu_to_be16(ethertype) != sdata->control_port_protocol) {
+ fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
/* RA TA DA SA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
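
Three addressing layouts are selected in the STA branch above: the TDLS direct link uses a 24-byte header with neither ToDS nor FromDS set, the 4-address case sets both, and the ordinary uplink sets ToDS only. A sketch of the first and last layouts; hdr3 and the helper names are illustrative:

#include <string.h>

#define ETH_ALEN 6

struct hdr3 { unsigned char a1[ETH_ALEN], a2[ETH_ALEN], a3[ETH_ALEN]; };

/* TDLS direct link: DA SA BSSID, no ToDS/FromDS */
static void fill_tdls(struct hdr3 *h, const unsigned char *da,
		      const unsigned char *sa, const unsigned char *bssid)
{
	memcpy(h->a1, da, ETH_ALEN);
	memcpy(h->a2, sa, ETH_ALEN);
	memcpy(h->a3, bssid, ETH_ALEN);
}

/* infrastructure uplink: BSSID SA DA, ToDS set */
static void fill_tods(struct hdr3 *h, const unsigned char *da,
		      const unsigned char *sa, const unsigned char *bssid)
{
	memcpy(h->a1, bssid, ETH_ALEN);
	memcpy(h->a2, sa, ETH_ALEN);
	memcpy(h->a3, da, ETH_ALEN);
}
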
@@ -1828,6 +1850,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
} else {
fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
/* BSSID SA DA */
+ memcpy(hdr.addr1, sdata->u.mgd.bssid, ETH_ALEN);
memcpy(hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
hdrlen = 24;
@@ -1853,13 +1876,19 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!is_multicast_ether_addr(hdr.addr1)) {
rcu_read_lock();
sta = sta_info_get(sdata, hdr.addr1);
- if (sta)
- sta_flags = get_sta_flags(sta);
+ if (sta) {
+ authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
+ wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ }
rcu_read_unlock();
}
+ /* For mesh, the use of the QoS header is mandatory */
+ if (ieee80211_vif_is_mesh(&sdata->vif))
+ wme_sta = true;
+
/* receiver and we are QoS enabled, use a QoS type frame */
- if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
+ if (wme_sta && local->hw.queues >= 4) {
fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
hdrlen += 2;
}
@@ -1868,12 +1897,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
* Drop unicast frames to unauthorised stations unless they are
* EAPOL frames from the local station.
*/
- if (!ieee80211_vif_is_mesh(&sdata->vif) &&
- unlikely(!is_multicast_ether_addr(hdr.addr1) &&
- !(sta_flags & WLAN_STA_AUTHORIZED) &&
- !(cpu_to_be16(ethertype) == sdata->control_port_protocol &&
- compare_ether_addr(sdata->vif.addr,
- skb->data + ETH_ALEN) == 0))) {
+ if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
+ !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+ (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
+ compare_ether_addr(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: dropped frame to %pM"
@@ -2275,13 +2302,23 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
- mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
+ mgmt->u.beacon.capab_info |= cpu_to_le16(
+ sdata->u.mesh.security ? WLAN_CAPABILITY_PRIVACY : 0);
pos = skb_put(skb, 2);
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- mesh_mgmt_ies_add(skb, sdata);
+ if (ieee80211_add_srates_ie(&sdata->vif, skb) ||
+ mesh_add_ds_params_ie(skb, sdata) ||
+ ieee80211_add_ext_srates_ie(&sdata->vif, skb) ||
+ mesh_add_rsn_ie(skb, sdata) ||
+ mesh_add_meshid_ie(skb, sdata) ||
+ mesh_add_meshconf_ie(skb, sdata) ||
+ mesh_add_vendor_ies(skb, sdata)) {
+ pr_err("o11s: couldn't add ies!\n");
+ goto out;
+ }
} else {
WARN_ON(1);
goto out;
@@ -2335,11 +2372,9 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "pspoll template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
@@ -2375,11 +2410,9 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
local = sdata->local;
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
- "template\n", sdata->name);
+ if (!skb)
return NULL;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
@@ -2414,11 +2447,8 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
ie_ssid_len + ie_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
- "request template\n", sdata->name);
+ if (!skb)
return NULL;
- }
skb_reserve(skb, local->hw.extra_tx_headroom);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ddeb1b9..51e256c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -13,13 +13,13 @@
#include <net/mac80211.h>
#include <linux/netdevice.h>
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
-#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
@@ -368,14 +368,14 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
+ struct sk_buff_head *skbs,
+ void (*fn)(void *data), void *data)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
unsigned long flags;
- int queue, ret = 0, i;
+ int queue, i;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < hw->queues; i++)
@@ -390,7 +390,6 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
continue;
}
- ret++;
queue = skb_get_queue_mapping(skb);
__skb_queue_tail(&local->pending[queue], skb);
}
@@ -402,14 +401,12 @@ int ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
- return ret;
}
-int ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
{
- return ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
+ ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
}
void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
@@ -573,172 +570,6 @@ void ieee802_11_parse_elems(u8 *start, size_t len,
ieee802_11_parse_elems_crc(start, len, elems, 0, 0);
}
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
- struct ieee802_11_elems *elems,
- u64 filter, u32 crc)
-{
- size_t left = len;
- u8 *pos = start;
- bool calc_crc = filter != 0;
-
- memset(elems, 0, sizeof(*elems));
- elems->ie_start = start;
- elems->total_len = len;
-
- while (left >= 2) {
- u8 id, elen;
-
- id = *pos++;
- elen = *pos++;
- left -= 2;
-
- if (elen > left)
- break;
-
- if (calc_crc && id < 64 && (filter & (1ULL << id)))
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- switch (id) {
- case WLAN_EID_SSID:
- elems->ssid = pos;
- elems->ssid_len = elen;
- break;
- case WLAN_EID_SUPP_RATES:
- elems->supp_rates = pos;
- elems->supp_rates_len = elen;
- break;
- case WLAN_EID_FH_PARAMS:
- elems->fh_params = pos;
- elems->fh_params_len = elen;
- break;
- case WLAN_EID_DS_PARAMS:
- elems->ds_params = pos;
- elems->ds_params_len = elen;
- break;
- case WLAN_EID_CF_PARAMS:
- elems->cf_params = pos;
- elems->cf_params_len = elen;
- break;
- case WLAN_EID_TIM:
- if (elen >= sizeof(struct ieee80211_tim_ie)) {
- elems->tim = (void *)pos;
- elems->tim_len = elen;
- }
- break;
- case WLAN_EID_IBSS_PARAMS:
- elems->ibss_params = pos;
- elems->ibss_params_len = elen;
- break;
- case WLAN_EID_CHALLENGE:
- elems->challenge = pos;
- elems->challenge_len = elen;
- break;
- case WLAN_EID_VENDOR_SPECIFIC:
- if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
- pos[2] == 0xf2) {
- /* Microsoft OUI (00:50:F2) */
-
- if (calc_crc)
- crc = crc32_be(crc, pos - 2, elen + 2);
-
- if (pos[3] == 1) {
- /* OUI Type 1 - WPA IE */
- elems->wpa = pos;
- elems->wpa_len = elen;
- } else if (elen >= 5 && pos[3] == 2) {
- /* OUI Type 2 - WMM IE */
- if (pos[4] == 0) {
- elems->wmm_info = pos;
- elems->wmm_info_len = elen;
- } else if (pos[4] == 1) {
- elems->wmm_param = pos;
- elems->wmm_param_len = elen;
- }
- }
- }
- break;
- case WLAN_EID_RSN:
- elems->rsn = pos;
- elems->rsn_len = elen;
- break;
- case WLAN_EID_ERP_INFO:
- elems->erp_info = pos;
- elems->erp_info_len = elen;
- break;
- case WLAN_EID_EXT_SUPP_RATES:
- elems->ext_supp_rates = pos;
- elems->ext_supp_rates_len = elen;
- break;
- case WLAN_EID_HT_CAPABILITY:
- if (elen >= sizeof(struct ieee80211_ht_cap))
- elems->ht_cap_elem = (void *)pos;
- break;
- case WLAN_EID_HT_INFORMATION:
- if (elen >= sizeof(struct ieee80211_ht_info))
- elems->ht_info_elem = (void *)pos;
- break;
- case WLAN_EID_MESH_ID:
- elems->mesh_id = pos;
- elems->mesh_id_len = elen;
- break;
- case WLAN_EID_MESH_CONFIG:
- if (elen >= sizeof(struct ieee80211_meshconf_ie))
- elems->mesh_config = (void *)pos;
- break;
- case WLAN_EID_PEER_LINK:
- elems->peer_link = pos;
- elems->peer_link_len = elen;
- break;
- case WLAN_EID_PREQ:
- elems->preq = pos;
- elems->preq_len = elen;
- break;
- case WLAN_EID_PREP:
- elems->prep = pos;
- elems->prep_len = elen;
- break;
- case WLAN_EID_PERR:
- elems->perr = pos;
- elems->perr_len = elen;
- break;
- case WLAN_EID_RANN:
- if (elen >= sizeof(struct ieee80211_rann_ie))
- elems->rann = (void *)pos;
- break;
- case WLAN_EID_CHANNEL_SWITCH:
- elems->ch_switch_elem = pos;
- elems->ch_switch_elem_len = elen;
- break;
- case WLAN_EID_QUIET:
- if (!elems->quiet_elem) {
- elems->quiet_elem = pos;
- elems->quiet_elem_len = elen;
- }
- elems->num_of_quiet_elem++;
- break;
- case WLAN_EID_COUNTRY:
- elems->country_elem = pos;
- elems->country_elem_len = elen;
- break;
- case WLAN_EID_PWR_CONSTRAINT:
- elems->pwr_constr_elem = pos;
- elems->pwr_constr_elem_len = elen;
- break;
- case WLAN_EID_TIMEOUT_INTERVAL:
- elems->timeout_int = pos;
- elems->timeout_int_len = elen;
- break;
- default:
- break;
- }
-
- left -= elen;
- pos += elen;
- }
-
- return crc;
-}
-
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
@@ -799,8 +630,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
qparam.uapsd = false;
- local->tx_conf[queue] = qparam;
- drv_conf_tx(local, queue, &qparam);
+ sdata->tx_conf[queue] = qparam;
+ drv_conf_tx(local, sdata, queue, &qparam);
}
/* after reinitialize QoS TX queues setting to default,
@@ -874,11 +705,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + 6 + extra_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
@@ -1031,11 +860,8 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
/* FIXME: come up with a proper value */
buf = kmalloc(200 + ie_len, GFP_KERNEL);
- if (!buf) {
- printk(KERN_DEBUG "%s: failed to allocate temporary IE "
- "buffer\n", sdata->name);
+ if (!buf)
return NULL;
- }
/*
* Do not send DS Channel parameter for directed probe requests
@@ -1071,14 +897,18 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len,
- u32 ratemask, bool directed)
+ u32 ratemask, bool directed, bool no_cck)
{
struct sk_buff *skb;
skb = ieee80211_build_probe_req(sdata, dst, ratemask, ssid, ssid_len,
ie, ie_len, directed);
- if (skb)
+ if (skb) {
+ if (no_cck)
+ IEEE80211_SKB_CB(skb)->flags |=
+ IEEE80211_TX_CTL_NO_CCK_RATE;
ieee80211_tx_skb(sdata, skb);
+ }
}
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
@@ -1205,14 +1035,22 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data,
u.ap);
+ memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
}
mutex_unlock(&local->sta_mtx);
/* reconfigure tx conf */
- for (i = 0; i < hw->queues; i++)
- drv_conf_tx(local, i, &local->tx_conf[i]);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ !ieee80211_sdata_running(sdata))
+ continue;
+
+ for (i = 0; i < hw->queues; i++)
+ drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]);
+ }
/* reconfigure hardware */
ieee80211_hw_config(local, ~0);
@@ -1248,6 +1086,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
+ changed |= BSS_CHANGED_SSID;
+ /* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED;
@@ -1283,7 +1123,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sta, &local->sta_list, list) {
ieee80211_sta_tear_down_BA_sessions(sta, true);
- clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
}
mutex_unlock(&local->sta_mtx);
@@ -1522,3 +1362,60 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
_ieee80211_enable_rssi_reports(sdata, 0, 0);
}
EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+
+int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, rates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ rates = sband->n_bitrates;
+ if (rates > 8)
+ rates = 8;
+
+ if (skb_tailroom(skb) < rates + 2)
+ return -ENOMEM;
+
+ pos = skb_put(skb, rates + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = rates;
+ for (i = 0; i < rates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+
+ return 0;
+}
+
+int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ int rate;
+ u8 i, exrates, *pos;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ exrates = sband->n_bitrates;
+ if (exrates > 8)
+ exrates -= 8;
+ else
+ exrates = 0;
+
+ if (skb_tailroom(skb) < exrates + 2)
+ return -ENOMEM;
+
+ if (exrates) {
+ pos = skb_put(skb, exrates + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = exrates;
+ for (i = 8; i < sband->n_bitrates; i++) {
+ rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ return 0;
+}
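
Both helpers above encode rates the same way: sband bitrates are stored in units of 100 kbps, while the (Extended) Supported Rates IE wants units of 500 kbps, hence the division by 5; the first eight rates go into the Supported Rates IE and any remainder into the extended one. A tiny worked example:

#include <stdint.h>
#include <stdio.h>

static uint8_t rate_to_ie(int bitrate_100kbps)
{
	return (uint8_t)(bitrate_100kbps / 5);
}

int main(void)
{
	/* 54 Mbps = 540 x 100 kbps -> 108 x 500 kbps */
	printf("%u\n", rate_to_ie(540));	/* prints 108 */
	return 0;
}
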
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 7a49532f..fd52e69 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -72,7 +72,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta) {
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
break;
}
case NL80211_IFTYPE_AP:
@@ -83,11 +83,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- /*
- * XXX: This is clearly broken ... but already was before,
- * because ieee80211_fill_mesh_addresses() would clear A1
- * except for multicast addresses.
- */
+ ra = skb->data;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -103,7 +99,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
- qos = get_sta_flags(sta) & WLAN_STA_WME;
+ qos = test_sta_flag(sta, WLAN_STA_WME);
}
rcu_read_unlock();
@@ -139,7 +135,8 @@ u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
return ieee802_1d_to_ac[skb->priority];
}
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -150,10 +147,11 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- if (unlikely(local->wifi_wme_noack_test))
+ if (unlikely(sdata->local->wifi_wme_noack_test))
ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
- /* qos header is 2 bytes, second reserved */
+ /* qos header is 2 bytes */
*p++ = ack_policy | tid;
- *p = 0;
+ *p = ieee80211_vif_is_mesh(&sdata->vif) ?
+ (IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
}
}
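
The QoS Control field written above is a 16-bit little-endian field filled byte by byte; IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT is 0x0100, so the flag lives in the high byte, hence the ">> 8" into the second octet. A sketch of the two stores:

#include <stdint.h>

#define QOS_CTL_MESH_CONTROL_PRESENT 0x0100

static void set_qos_ctl(uint8_t *p, uint8_t tid, uint8_t ack_policy,
			int is_mesh)
{
	p[0] = ack_policy | tid;	/* low byte: TID and ACK policy */
	p[1] = is_mesh ? (QOS_CTL_MESH_CONTROL_PRESENT >> 8) : 0;
}
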
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index faead6d..34e166f 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -17,7 +17,8 @@ extern const int ieee802_1d_to_ac[8];
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
-void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
+void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
struct sk_buff *skb);
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 380b9a7..6c53b6d 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -229,11 +229,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
wk->ie_len + /* extra IEs */
9, /* WMM */
GFP_KERNEL);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
- "frame\n", sdata->name);
+ if (!skb)
return;
- }
+
skb_reserve(skb, local->hw.extra_tx_headroom);
capab = WLAN_CAPABILITY_ESS;
@@ -460,7 +458,7 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
*/
ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
wk->probe_auth.ssid_len, NULL, 0,
- (u32) -1, true);
+ (u32) -1, true, false);
wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
run_again(local, wk->timeout);
@@ -579,7 +577,7 @@ ieee80211_offchannel_tx(struct ieee80211_work *wk)
/*
* After this, offchan_tx.frame remains but now is no
* longer a valid pointer -- we still need it as the
- * cookie for canceling this work.
+ * cookie for canceling this work/status matching.
*/
ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
@@ -1086,14 +1084,13 @@ static void ieee80211_work_work(struct work_struct *work)
continue;
if (wk->chan != local->tmp_channel)
continue;
- if (ieee80211_work_ct_coexists(wk->chan_type,
- local->tmp_channel_type))
+ if (!ieee80211_work_ct_coexists(wk->chan_type,
+ local->tmp_channel_type))
continue;
remain_off_channel = true;
}
if (!remain_off_channel && local->tmp_channel) {
- bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
local->tmp_channel = NULL;
/* If tmp_channel wasn't operating channel, then
* we need to go back on-channel.
@@ -1103,7 +1100,7 @@ static void ieee80211_work_work(struct work_struct *work)
* we still need to do a hardware config. Currently,
* we cannot be here while scanning, however.
*/
- if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+ if (!ieee80211_cfg_on_oper_channel(local))
ieee80211_hw_config(local, 0);
/* At the least, we need to disable offchannel_ps,
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 7bc8702..f614ce7 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -53,7 +53,8 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
}
if (info->control.hw_key &&
- !(tx->flags & IEEE80211_TX_FRAGMENTED) &&
+ (info->flags & IEEE80211_TX_CTL_DONTFRAG ||
+ tx->local->ops->set_frag_threshold) &&
!(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) {
/* hwaccel - with no need for SW-generated MMIC */
return TX_CONTINUE;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 899b71c..afca6c7 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -37,7 +37,7 @@ int nf_register_afinfo(const struct nf_afinfo *afinfo)
err = mutex_lock_interruptible(&afinfo_mutex);
if (err < 0)
return err;
- rcu_assign_pointer(nf_afinfo[afinfo->family], afinfo);
+ RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
mutex_unlock(&afinfo_mutex);
return 0;
}
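
RCU_INIT_POINTER() differs from rcu_assign_pointer() only in that it omits the release barrier ordering the pointee's initialization before publication; that is safe when storing NULL or when no reader can yet race, as in the register/unregister paths above. A userspace analogue using C11 atomics, with illustrative names:

#include <stdatomic.h>
#include <stddef.h>

struct afinfo { int family; };

static _Atomic(struct afinfo *) slot;

/* like rcu_assign_pointer(): release store publishes initialized data */
static void publish(struct afinfo *a)
{
	atomic_store_explicit(&slot, a, memory_order_release);
}

/* like RCU_INIT_POINTER(..., NULL): no ordering needed to store NULL */
static void unpublish(void)
{
	atomic_store_explicit(&slot, NULL, memory_order_relaxed);
}
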
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(nf_register_afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
- rcu_assign_pointer(nf_afinfo[afinfo->family], NULL);
+ RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
mutex_unlock(&afinfo_mutex);
synchronize_rcu();
}
@@ -180,17 +180,16 @@ next_hook:
if (ret == 0)
ret = -EPERM;
} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
- ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
- verdict >> NF_VERDICT_QBITS);
- if (ret < 0) {
- if (ret == -ECANCELED)
+ int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+ verdict >> NF_VERDICT_QBITS);
+ if (err < 0) {
+ if (err == -ECANCELED)
goto next_hook;
- if (ret == -ESRCH &&
+ if (err == -ESRCH &&
(verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
goto next_hook;
kfree_skb(skb);
}
- ret = 0;
}
rcu_read_unlock();
return ret;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d7e86ef..86137b5 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1699,10 +1699,8 @@ ip_set_init(void)
ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
GFP_KERNEL);
- if (!ip_set_list) {
- pr_err("ip_set: Unable to create ip_set_list\n");
+ if (!ip_set_list)
return -ENOMEM;
- }
ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
if (ret != 0) {
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 757143b..052579f 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -17,6 +17,7 @@
#include <net/ipv6.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/export.h>
/* We must handle non-linear skbs */
static bool
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
index bd13d66..4f29fa9 100644
--- a/net/netfilter/ipset/pfxlen.c
+++ b/net/netfilter/ipset/pfxlen.c
@@ -1,3 +1,4 @@
+#include <linux/export.h>
#include <linux/netfilter/ipset/pfxlen.h>
/*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f77bb1..093cc32 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -188,14 +188,13 @@ ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
}
-static inline int
+static inline void
ip_vs_set_state(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
- if (unlikely(!pd->pp->state_transition))
- return 0;
- return pd->pp->state_transition(cp, direction, skb, pd);
+ if (likely(pd->pp->state_transition))
+ pd->pp->state_transition(cp, direction, skb, pd);
}
static inline int
@@ -530,7 +529,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
a cache_bypass connection entry */
ipvs = net_ipvs(net);
if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
- int ret, cs;
+ int ret;
struct ip_vs_conn *cp;
unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
iph.protocol == IPPROTO_UDP)?
@@ -557,7 +556,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_in_stats(cp, skb);
/* set state */
- cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
+ ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
/* transmit the first SYN packet */
ret = cp->packet_xmit(skb, cp, pd->pp);
@@ -1490,7 +1489,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
struct ip_vs_protocol *pp;
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
- int ret, restart, pkts;
+ int ret, pkts;
struct netns_ipvs *ipvs;
/* Already marked as IPVS request or reply? */
@@ -1591,7 +1590,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
}
ip_vs_in_stats(cp, skb);
- restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
+ ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
if (cp->packet_xmit)
ret = cp->packet_xmit(skb, cp, pp);
/* do not touch skb anymore */
@@ -1878,10 +1877,9 @@ static int __net_init __ip_vs_init(struct net *net)
struct netns_ipvs *ipvs;
ipvs = net_generic(net, ip_vs_net_id);
- if (ipvs == NULL) {
- pr_err("%s(): no memory.\n", __func__);
+ if (ipvs == NULL)
return -ENOMEM;
- }
+
/* Hold the beast until a service is registered */
ipvs->enable = 0;
ipvs->net = net;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index e3be48b..008bf97 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -856,15 +856,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
}
dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
- if (dest == NULL) {
- pr_err("%s(): no memory.\n", __func__);
+ if (dest == NULL)
return -ENOMEM;
- }
+
dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!dest->stats.cpustats) {
- pr_err("%s() alloc_percpu failed\n", __func__);
+ if (!dest->stats.cpustats)
goto err_alloc;
- }
dest->af = svc->af;
dest->protocol = svc->protocol;
@@ -1168,10 +1165,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!svc->stats.cpustats) {
- pr_err("%s() alloc_percpu failed\n", __func__);
+ if (!svc->stats.cpustats)
goto out_err;
- }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
@@ -3326,10 +3321,8 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
int ret = 0, cmd;
int need_full_svc = 0, need_full_dest = 0;
struct net *net;
- struct netns_ipvs *ipvs;
net = skb_sknet(skb);
- ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
mutex_lock(&__ip_vs_mutex);
@@ -3421,10 +3414,8 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
void *reply;
int ret, cmd, reply_cmd;
struct net *net;
- struct netns_ipvs *ipvs;
net = skb_sknet(skb);
- ipvs = net_ipvs(net);
cmd = info->genlhdr->cmd;
if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3720,10 +3711,9 @@ int __net_init ip_vs_control_net_init(struct net *net)
/* procfs stats */
ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!ipvs->tot_stats.cpustats) {
- pr_err("%s(): alloc_percpu.\n", __func__);
+ if (!ipvs->tot_stats.cpustats)
return -ENOMEM;
- }
+
spin_lock_init(&ipvs->tot_stats.lock);
proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 95fd0d1..1c269e5 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -150,10 +150,9 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
/* allocate the DH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
GFP_ATOMIC);
- if (tbl == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (tbl == NULL)
return -ENOMEM;
- }
+
svc->sched_data = tbl;
IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for "
"current service\n",
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 4490a32..538d74e 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -52,8 +52,9 @@
* List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
* First port is set to the default port.
*/
+static unsigned int ports_count = 1;
static unsigned short ports[IP_VS_APP_MAX_PORTS] = {21, 0};
-module_param_array(ports, ushort, NULL, 0);
+module_param_array(ports, ushort, &ports_count, 0444);
MODULE_PARM_DESC(ports, "Ports to monitor for FTP control commands");
@@ -449,7 +450,7 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
if (ret)
goto err_exit;
- for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
+ for (i = 0; i < ports_count; i++) {
if (!ports[i])
continue;
ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
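
Passing &ports_count to module_param_array() makes the parameter code record how many array entries the user actually supplied, so the loop above registers only those instead of scanning all IP_VS_APP_MAX_PORTS slots. A plain C sketch of the consumer side, with illustrative names:

#include <stdio.h>

#define MAX_PORTS 8

static unsigned short ports[MAX_PORTS] = { 21 };	/* default: FTP control */
static unsigned int ports_count = 1;	/* bumped as entries are parsed */

static void register_all(void)
{
	unsigned int i;

	for (i = 0; i < ports_count; i++) {
		if (!ports[i])
			continue;
		printf("register helper on port %hu\n", ports[i]);
	}
}
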
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 87e40ea..0f16283 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -202,10 +202,8 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
en = ip_vs_lblc_get(dest->af, tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
- if (!en) {
- pr_err("%s(): no memory\n", __func__);
+ if (!en)
return NULL;
- }
en->af = dest->af;
ip_vs_addr_copy(dest->af, &en->addr, daddr);
@@ -345,10 +343,9 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
* Allocate the ip_vs_lblc_table for this service
*/
tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
- if (tbl == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (tbl == NULL)
return -ENOMEM;
- }
+
svc->sched_data = tbl;
IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
"current service\n", sizeof(*tbl));
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 90f618a..eec797f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -112,10 +112,8 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
}
e = kmalloc(sizeof(*e), GFP_ATOMIC);
- if (e == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (e == NULL)
return NULL;
- }
atomic_inc(&dest->refcnt);
e->dest = dest;
@@ -373,10 +371,8 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
en = ip_vs_lblcr_get(dest->af, tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
- if (!en) {
- pr_err("%s(): no memory\n", __func__);
+ if (!en)
return NULL;
- }
en->af = dest->af;
ip_vs_addr_copy(dest->af, &en->addr, daddr);
@@ -516,10 +512,9 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
* Allocate the ip_vs_lblcr_table for this service
*/
tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
- if (tbl == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (tbl == NULL)
return -ENOMEM;
- }
+
svc->sched_data = tbl;
IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
"current service\n", sizeof(*tbl));
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index f454c80..022e77e 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -127,7 +127,7 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
nf_conntrack_alter_reply(ct, &new_tuple);
}
-int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+int ip_vs_confirm_conntrack(struct sk_buff *skb)
{
return nf_conntrack_confirm(skb);
}
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 52d073c..8531293 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -74,10 +74,9 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
struct ip_vs_proto_data *pd =
kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
- if (!pd) {
- pr_err("%s(): no memory.\n", __func__);
+ if (!pd)
return -ENOMEM;
- }
+
pd->pp = pp; /* For speed issues */
pd->next = ipvs->proto_data_table[hash];
ipvs->proto_data_table[hash] = pd;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index d12ed53..1fbf7a2 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -906,7 +906,7 @@ static const char *sctp_state_name(int state)
return "?";
}
-static inline int
+static inline void
set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
int direction, const struct sk_buff *skb)
{
@@ -924,7 +924,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
sizeof(_sctpch), &_sctpch);
if (sch == NULL)
- return 0;
+ return;
chunk_type = sch->type;
/*
@@ -993,21 +993,15 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
cp->timeout = pd->timeout_table[cp->state = next_state];
else /* What to do ? */
cp->timeout = sctp_timeouts[cp->state = next_state];
-
- return 1;
}
-static int
+static void
sctp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb, struct ip_vs_proto_data *pd)
{
- int ret = 0;
-
spin_lock(&cp->lock);
- ret = set_sctp_state(pd, cp, direction, skb);
+ set_sctp_state(pd, cp, direction, skb);
spin_unlock(&cp->lock);
-
- return ret;
}
static inline __u16 sctp_app_hashkey(__be16 port)
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index c0cc341..ef8641f 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -546,7 +546,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
/*
* Handle state transitions
*/
-static int
+static void
tcp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
@@ -561,13 +561,11 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
th = skb_header_pointer(skb, ihl, sizeof(_tcph), &_tcph);
if (th == NULL)
- return 0;
+ return;
spin_lock(&cp->lock);
set_tcp_state(pd, cp, direction, th);
spin_unlock(&cp->lock);
-
- return 1;
}
static inline __u16 tcp_app_hashkey(__be16 port)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index f1282cb..f4b7262 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -454,18 +454,17 @@ static const char * udp_state_name(int state)
return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
}
-static int
+static void
udp_state_transition(struct ip_vs_conn *cp, int direction,
const struct sk_buff *skb,
struct ip_vs_proto_data *pd)
{
if (unlikely(!pd)) {
pr_err("UDP no ns data\n");
- return 0;
+ return;
}
cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
- return 1;
}
static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index b5e2556..33815f4 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -147,10 +147,9 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
/* allocate the SH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
GFP_ATOMIC);
- if (tbl == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (tbl == NULL)
return -ENOMEM;
- }
+
svc->sched_data = tbl;
IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for "
"current service\n",
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 1ef41f5..fd0d4e0 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -85,10 +85,9 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
* Allocate the mark variable for WRR scheduling
*/
mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
- if (mark == NULL) {
- pr_err("%s(): no memory\n", __func__);
+ if (mark == NULL)
return -ENOMEM;
- }
+
mark->cl = &svc->destinations;
mark->cw = 0;
mark->mw = ip_vs_wrr_max_weight(svc);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index ee319a4..aa2d720 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -339,7 +339,7 @@ ip_vs_dst_reset(struct ip_vs_dest *dest)
\
(skb)->ipvs_property = 1; \
if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \
- __ret = ip_vs_confirm_conntrack(skb, cp); \
+ __ret = ip_vs_confirm_conntrack(skb); \
if (__ret == NF_ACCEPT) { \
nf_reset(skb); \
skb_forward_csum(skb); \
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 5178c69..369df3f 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f7af8b8..7202b06 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -661,7 +661,6 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
*/
ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
if (ct == NULL) {
- pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
atomic_dec(&net->ct.count);
return ERR_PTR(-ENOMEM);
}
@@ -749,10 +748,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
hash);
- if (IS_ERR(ct)) {
- pr_debug("Can't allocate conntrack.\n");
+ if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
- }
if (!l4proto->new(ct, skb, dataoff)) {
nf_conntrack_free(ct);
@@ -779,7 +776,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (exp->helper) {
help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help)
- rcu_assign_pointer(help->helper, exp->helper);
+ RCU_INIT_POINTER(help->helper, exp->helper);
}
#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -1317,7 +1314,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
void nf_conntrack_cleanup(struct net *net)
{
if (net_eq(net, &init_net))
- rcu_assign_pointer(ip_ct_attach, NULL);
+ RCU_INIT_POINTER(ip_ct_attach, NULL);
/* This makes sure all current packets have passed through
netfilter framework. Roll on, two-stage module
@@ -1327,7 +1324,7 @@ void nf_conntrack_cleanup(struct net *net)
nf_conntrack_cleanup_net(net);
if (net_eq(net, &init_net)) {
- rcu_assign_pointer(nf_ct_destroy, NULL);
+ RCU_INIT_POINTER(nf_ct_destroy, NULL);
nf_conntrack_cleanup_init_net();
}
}
@@ -1576,11 +1573,11 @@ int nf_conntrack_init(struct net *net)
if (net_eq(net, &init_net)) {
/* For use by REJECT target */
- rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
- rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
+ RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
+ RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
/* Howto get NAT offsets */
- rcu_assign_pointer(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
}
return 0;
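Every rcu_assign_pointer() -> RCU_INIT_POINTER() conversion in this series follows one rule: rcu_assign_pointer() carries a write barrier so a fully initialized object is published before readers can dereference it, while RCU_INIT_POINTER() skips that barrier. Skipping is safe only when storing NULL, or when no RCU reader can reach the pointer yet (init paths, or stores made before the object is linked into any reader-visible structure). A minimal sketch with a hypothetical cfg type, not from the patch:

#include <linux/rcupdate.h>

struct cfg { int value; };
static struct cfg __rcu *active_cfg;

/* Publishing a live object to concurrent readers: barrier required. */
static void publish(struct cfg *c)
{
        c->value = 42;                  /* must be visible before the pointer */
        rcu_assign_pointer(active_cfg, c);
}

/* Storing NULL: nothing to order, so the cheaper form is correct. */
static void retract(void)
{
        RCU_INIT_POINTER(active_cfg, NULL);
}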
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 63a1b91..6b368be 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -94,7 +95,7 @@ int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
ret = -EBUSY;
goto out_unlock;
}
- rcu_assign_pointer(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -112,7 +113,7 @@ void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
notify = rcu_dereference_protected(nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- rcu_assign_pointer(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
@@ -129,7 +130,7 @@ int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
ret = -EBUSY;
goto out_unlock;
}
- rcu_assign_pointer(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
@@ -147,7 +148,7 @@ void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
notify = rcu_dereference_protected(nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- rcu_assign_pointer(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
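The notifier register/unregister pairs above also show the canonical update-side idiom: take the mutex, read the current value with rcu_dereference_protected() plus a lockdep_is_held() condition (so lockdep can verify the claim), then store with RCU_INIT_POINTER(). A condensed sketch of the register side, assuming the notifier type from this file:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(cb_mutex);
static struct nf_ct_event_notifier __rcu *event_cb;

static int register_cb(struct nf_ct_event_notifier *new)
{
        struct nf_ct_event_notifier *old;
        int ret = 0;

        mutex_lock(&cb_mutex);
        old = rcu_dereference_protected(event_cb,
                                        lockdep_is_held(&cb_mutex));
        if (old)
                ret = -EBUSY;           /* only one notifier at a time */
        else
                RCU_INIT_POINTER(event_cb, new);
        mutex_unlock(&cb_mutex);
        return ret;
}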
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index cd1e8e0..340c80d 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -20,6 +20,8 @@
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
+#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 05ecdc2..4605c94 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -169,7 +169,7 @@ int nf_ct_extend_register(struct nf_ct_ext_type *type)
before updating alloc_size */
type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
+ type->len;
- rcu_assign_pointer(nf_ct_ext_types[type->id], type);
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], type);
update_alloc_size(type);
out:
mutex_unlock(&nf_ct_ext_type_mutex);
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(nf_ct_extend_register);
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
mutex_lock(&nf_ct_ext_type_mutex);
- rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
+ RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 1bdfea3..93c4bdb 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -131,7 +131,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
helper = __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
if (helper == NULL) {
if (help)
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
goto out;
}
@@ -145,7 +145,7 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
memset(&help->help, 0, sizeof(help->help));
}
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
out:
return ret;
}
@@ -162,7 +162,7 @@ static inline int unhelp(struct nf_conntrack_tuple_hash *i,
lockdep_is_held(&nf_conntrack_lock)
) == me) {
nf_conntrack_event(IPCT_HELPER, ct);
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
}
return 0;
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 7dec88a..e58aa9b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1125,7 +1125,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
if (help && help->helper) {
/* we had a helper before ... */
nf_ct_remove_expectations(ct);
- rcu_assign_pointer(help->helper, NULL);
+ RCU_INIT_POINTER(help->helper, NULL);
}
return 0;
@@ -1163,7 +1163,7 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
return -EOPNOTSUPP;
}
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
return 0;
}
@@ -1386,7 +1386,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
/* not in hash table yet so not strictly necessary */
- rcu_assign_pointer(help->helper, helper);
+ RCU_INIT_POINTER(help->helper, helper);
}
} else {
/* try an implicit helper assignation */
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 20714ed..ce0c406 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -55,7 +55,7 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
llog = rcu_dereference_protected(nf_loggers[pf],
lockdep_is_held(&nf_log_mutex));
if (llog == NULL)
- rcu_assign_pointer(nf_loggers[pf], logger);
+ RCU_INIT_POINTER(nf_loggers[pf], logger);
}
mutex_unlock(&nf_log_mutex);
@@ -74,7 +74,7 @@ void nf_log_unregister(struct nf_logger *logger)
c_logger = rcu_dereference_protected(nf_loggers[i],
lockdep_is_held(&nf_log_mutex));
if (c_logger == logger)
- rcu_assign_pointer(nf_loggers[i], NULL);
+ RCU_INIT_POINTER(nf_loggers[i], NULL);
list_del(&logger->list[i]);
}
mutex_unlock(&nf_log_mutex);
@@ -92,7 +92,7 @@ int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
- rcu_assign_pointer(nf_loggers[pf], logger);
+ RCU_INIT_POINTER(nf_loggers[pf], logger);
mutex_unlock(&nf_log_mutex);
return 0;
}
@@ -103,7 +103,7 @@ void nf_log_unbind_pf(u_int8_t pf)
if (pf >= ARRAY_SIZE(nf_loggers))
return;
mutex_lock(&nf_log_mutex);
- rcu_assign_pointer(nf_loggers[pf], NULL);
+ RCU_INIT_POINTER(nf_loggers[pf], NULL);
mutex_unlock(&nf_log_mutex);
}
EXPORT_SYMBOL(nf_log_unbind_pf);
@@ -250,7 +250,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
mutex_unlock(&nf_log_mutex);
return -ENOENT;
}
- rcu_assign_pointer(nf_loggers[tindex], logger);
+ RCU_INIT_POINTER(nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
mutex_lock(&nf_log_mutex);
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 84d0fd4..99ffd28 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -40,7 +40,7 @@ int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
else if (old)
ret = -EBUSY;
else {
- rcu_assign_pointer(queue_handler[pf], qh);
+ RCU_INIT_POINTER(queue_handler[pf], qh);
ret = 0;
}
mutex_unlock(&queue_handler_mutex);
@@ -65,7 +65,7 @@ int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
return -EINVAL;
}
- rcu_assign_pointer(queue_handler[pf], NULL);
+ RCU_INIT_POINTER(queue_handler[pf], NULL);
mutex_unlock(&queue_handler_mutex);
synchronize_rcu();
@@ -84,7 +84,7 @@ void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
queue_handler[pf],
lockdep_is_held(&queue_handler_mutex)
) == qh)
- rcu_assign_pointer(queue_handler[pf], NULL);
+ RCU_INIT_POINTER(queue_handler[pf], NULL);
}
mutex_unlock(&queue_handler_mutex);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 1905976..c879c1a 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
nfnl_unlock();
return -EBUSY;
}
- rcu_assign_pointer(subsys_table[n->subsys_id], n);
+ RCU_INIT_POINTER(subsys_table[n->subsys_id], n);
nfnl_unlock();
return 0;
@@ -210,7 +210,7 @@ static int __net_init nfnetlink_net_init(struct net *net)
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
- rcu_assign_pointer(net->nfnl, nfnl);
+ RCU_INIT_POINTER(net->nfnl, nfnl);
return 0;
}
@@ -219,7 +219,7 @@ static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
- rcu_assign_pointer(net->nfnl, NULL);
+ RCU_INIT_POINTER(net->nfnl, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->nfnl_stash);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 2d8158a..66b2c54 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -307,17 +307,14 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
n = max(inst_size, pkt_size);
skb = alloc_skb(n, GFP_ATOMIC);
if (!skb) {
- pr_notice("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
- inst_size);
-
if (n > pkt_size) {
/* try to allocate only as much as we need for current
* packet */
skb = alloc_skb(pkt_size, GFP_ATOMIC);
if (!skb)
- pr_err("nfnetlink_log: can't even alloc %u "
- "bytes\n", pkt_size);
+ pr_err("nfnetlink_log: can't even alloc %u bytes\n",
+ pkt_size);
}
}
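The hunk above only removes the first-failure pr_notice; the two-step allocation strategy is unchanged and worth isolating: try a buffer sized for the whole instance, and if the atomic allocation fails, retry with just the current packet's size. A self-contained sketch of that fallback:

#include <linux/kernel.h>       /* max() */
#include <linux/skbuff.h>

static struct sk_buff *alloc_log_skb(unsigned int inst_size,
                                     unsigned int pkt_size)
{
        unsigned int n = max(inst_size, pkt_size);
        struct sk_buff *skb;

        skb = alloc_skb(n, GFP_ATOMIC);
        if (!skb && n > pkt_size)
                skb = alloc_skb(pkt_size, GFP_ATOMIC); /* smaller retry */

        return skb;     /* may still be NULL; callers must cope */
}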
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 71441b9..8d987c3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -14,6 +14,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 3bdd443..f407ebc1 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -122,14 +122,12 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
- pr_debug("couldn't alloc timer\n");
ret = -ENOMEM;
goto out;
}
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
- pr_debug("couldn't alloc attribute name\n");
ret = -ENOMEM;
goto out_free_timer;
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9228ee0d..dfd52ba 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -176,10 +176,7 @@ dsthash_alloc_init(struct xt_hashlimit_htable *ht,
ent = NULL;
} else
ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
- if (!ent) {
- if (net_ratelimit())
- pr_err("cannot allocate dsthash_ent\n");
- } else {
+ if (ent) {
memcpy(&ent->dst, dst, sizeof(ent->dst));
spin_lock_init(&ent->lock);
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 70eb2b4..44c8eb4 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -9,6 +9,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_quota.h>
+#include <linux/module.h>
struct xt_quota_priv {
spinlock_t lock;
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 42ecb71..4fe4fb4 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -16,6 +16,7 @@
#include <linux/netfilter/xt_statistic.h>
#include <linux/netfilter/x_tables.h>
+#include <linux/module.h>
struct xt_statistic_priv {
atomic_t count;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 7d8083c..3f905e5 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -282,7 +282,7 @@ int __init netlbl_domhsh_init(u32 size)
INIT_LIST_HEAD(&hsh_tbl->tbl[iter]);
spin_lock(&netlbl_domhsh_lock);
- rcu_assign_pointer(netlbl_domhsh, hsh_tbl);
+ RCU_INIT_POINTER(netlbl_domhsh, hsh_tbl);
spin_unlock(&netlbl_domhsh_lock);
return 0;
@@ -330,7 +330,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
&rcu_dereference(netlbl_domhsh)->tbl[bkt]);
} else {
INIT_LIST_HEAD(&entry->list);
- rcu_assign_pointer(netlbl_domhsh_def, entry);
+ RCU_INIT_POINTER(netlbl_domhsh_def, entry);
}
if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
@@ -451,7 +451,7 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
if (entry != rcu_dereference(netlbl_domhsh_def))
list_del_rcu(&entry->list);
else
- rcu_assign_pointer(netlbl_domhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_domhsh_def, NULL);
} else
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e6e8236..e251c2c 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -354,7 +354,7 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_add_iface(int ifindex)
INIT_LIST_HEAD(&iface->list);
if (netlbl_unlhsh_rcu_deref(netlbl_unlhsh_def) != NULL)
goto add_iface_failure;
- rcu_assign_pointer(netlbl_unlhsh_def, iface);
+ RCU_INIT_POINTER(netlbl_unlhsh_def, iface);
}
spin_unlock(&netlbl_unlhsh_lock);
@@ -621,7 +621,7 @@ static void netlbl_unlhsh_condremove_iface(struct netlbl_unlhsh_iface *iface)
if (iface->ifindex > 0)
list_del_rcu(&iface->list);
else
- rcu_assign_pointer(netlbl_unlhsh_def, NULL);
+ RCU_INIT_POINTER(netlbl_unlhsh_def, NULL);
spin_unlock(&netlbl_unlhsh_lock);
call_rcu(&iface->rcu, netlbl_unlhsh_free_iface);
@@ -1449,7 +1449,7 @@ int __init netlbl_unlabel_init(u32 size)
rcu_read_lock();
spin_lock(&netlbl_unlhsh_lock);
- rcu_assign_pointer(netlbl_unlhsh, hsh_tbl);
+ RCU_INIT_POINTER(netlbl_unlhsh, hsh_tbl);
spin_unlock(&netlbl_unlhsh_lock);
rcu_read_unlock();
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0a4db02..1201b6d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1324,10 +1324,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
- if (NULL == siocb->scm) {
+ if (NULL == siocb->scm)
siocb->scm = &scm;
- memset(&scm, 0, sizeof(scm));
- }
+
err = scm_send(sock, msg, siocb->scm);
if (err < 0)
return err;
@@ -1578,7 +1577,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
if (!new)
return -ENOMEM;
- old = rcu_dereference_raw(tbl->listeners);
+ old = rcu_dereference_protected(tbl->listeners, 1);
memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
rcu_assign_pointer(tbl->listeners, new);
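Note the direction of this change: rcu_dereference_raw() silences RCU lockdep checking entirely, whereas rcu_dereference_protected(ptr, 1) documents an update-side access, i.e. "no rcu_read_lock() needed because the caller guarantees exclusion". The store keeps rcu_assign_pointer() because it publishes a freshly initialized array to readers. A sketch with a minimal stand-in for the listeners table:

#include <linux/rcupdate.h>

struct listeners {
        struct rcu_head rcu;
        unsigned long masks[];
};
static struct listeners __rcu *listeners_ptr;

static void resize(struct listeners *new)
{
        struct listeners *old;

        /* `1' is the lockdep condition: assert exclusion without
         * naming a specific lock */
        old = rcu_dereference_protected(listeners_ptr, 1);
        /* ... copy old->masks into new->masks here ... */
        rcu_assign_pointer(listeners_ptr, new);
        /* free old only after a grace period, e.g. via call_rcu() */
}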
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index cd5ddb2..915a87b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <net/netrom.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static unsigned int nr_neigh_no = 1;
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 33e095b1..58cddad 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -13,4 +13,6 @@ menuconfig NFC
To compile this support as a module, choose M here: the module will
be called nfc.
+source "net/nfc/nci/Kconfig"
+
source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 16250c3..fbb550f 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -3,5 +3,6 @@
#
obj-$(CONFIG_NFC) += nfc.o
+obj-$(CONFIG_NFC_NCI) += nci/
nfc-objs := core.o netlink.o af_nfc.o rawsock.o
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
index e982cef..da67756 100644
--- a/net/nfc/af_nfc.c
+++ b/net/nfc/af_nfc.c
@@ -22,6 +22,7 @@
*/
#include <linux/nfc.h>
+#include <linux/module.h>
#include "nfc.h"
diff --git a/net/nfc/core.c b/net/nfc/core.c
index b6fd4e1..47e02c1 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -53,6 +53,80 @@ int nfc_printk(const char *level, const char *format, ...)
EXPORT_SYMBOL(nfc_printk);
/**
+ * nfc_dev_up - turn on the NFC device
+ *
+ * @dev: The nfc device to be turned on
+ *
+ * The device remains up until the nfc_dev_down function is called.
+ */
+int nfc_dev_up(struct nfc_dev *dev)
+{
+ int rc = 0;
+
+ nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (dev->dev_up) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ if (dev->ops->dev_up)
+ rc = dev->ops->dev_up(dev);
+
+ if (!rc)
+ dev->dev_up = true;
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+/**
+ * nfc_dev_down - turn off the NFC device
+ *
+ * @dev: The nfc device to be turned off
+ */
+int nfc_dev_down(struct nfc_dev *dev)
+{
+ int rc = 0;
+
+ nfc_dbg("dev_name=%s", dev_name(&dev->dev));
+
+ device_lock(&dev->dev);
+
+ if (!device_is_registered(&dev->dev)) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ if (!dev->dev_up) {
+ rc = -EALREADY;
+ goto error;
+ }
+
+ if (dev->polling || dev->remote_activated) {
+ rc = -EBUSY;
+ goto error;
+ }
+
+ if (dev->ops->dev_down)
+ dev->ops->dev_down(dev);
+
+ dev->dev_up = false;
+
+error:
+ device_unlock(&dev->dev);
+ return rc;
+}
+
+/**
* nfc_start_poll - start polling for nfc targets
*
* @dev: The nfc device that must start polling
@@ -144,6 +218,8 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
}
rc = dev->ops->activate_target(dev, target_idx, protocol);
+ if (!rc)
+ dev->remote_activated = true;
error:
device_unlock(&dev->dev);
@@ -170,6 +246,7 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
}
dev->ops->deactivate_target(dev, target_idx);
+ dev->remote_activated = false;
error:
device_unlock(&dev->dev);
@@ -322,7 +399,9 @@ struct nfc_dev *nfc_get_device(unsigned idx)
* @supported_protocols: NFC protocols supported by the device
*/
struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
- u32 supported_protocols)
+ u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
{
static atomic_t dev_no = ATOMIC_INIT(0);
struct nfc_dev *dev;
@@ -345,6 +424,8 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
dev->ops = ops;
dev->supported_protocols = supported_protocols;
+ dev->tx_headroom = tx_headroom;
+ dev->tx_tailroom = tx_tailroom;
spin_lock_init(&dev->targets_lock);
nfc_genl_data_init(&dev->genl_data);
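nfc_allocate_device() grows tx_headroom/tx_tailroom parameters so a driver can declare, once, how many bytes its transport prepends and appends; the core records them on the device, and stacked layers (NCI below adds NCI_DATA_HDR_SIZE on top) can reserve the space when building skbs. A hedged driver-side sketch; the my_* names and sizes are hypothetical:

#define MY_DRV_HDR_LEN  4       /* bytes the transport prepends on tx */
#define MY_DRV_TRL_LEN  2       /* bytes it appends */

static struct nfc_ops my_nfc_ops;       /* .start_poll etc. set elsewhere;
                                         * .dev_up/.dev_down may stay NULL,
                                         * the core checks before calling */

static struct nfc_dev *my_probe(u32 protocols)
{
        return nfc_allocate_device(&my_nfc_ops, protocols,
                                   MY_DRV_HDR_LEN, MY_DRV_TRL_LEN);
}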
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
new file mode 100644
index 0000000..decdc49
--- /dev/null
+++ b/net/nfc/nci/Kconfig
@@ -0,0 +1,10 @@
+config NFC_NCI
+ depends on NFC && EXPERIMENTAL
+ tristate "NCI protocol support (EXPERIMENTAL)"
+ default n
+ help
+ NCI (NFC Controller Interface) is a communication protocol between
+ an NFC Controller (NFCC) and a Device Host (DH).
+
+ Say Y here to compile NCI support into the kernel or say M to
+ compile it as a module (nci).
diff --git a/net/nfc/nci/Makefile b/net/nfc/nci/Makefile
new file mode 100644
index 0000000..cdb3a2e
--- /dev/null
+++ b/net/nfc/nci/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux NFC NCI layer.
+#
+
+obj-$(CONFIG_NFC_NCI) += nci.o
+
+nci-objs := core.o data.o lib.o ntf.o rsp.o
\ No newline at end of file
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
new file mode 100644
index 0000000..3925c657
--- /dev/null
+++ b/net/nfc/nci/core.c
@@ -0,0 +1,798 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_core.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+static void nci_cmd_work(struct work_struct *work);
+static void nci_rx_work(struct work_struct *work);
+static void nci_tx_work(struct work_struct *work);
+
+/* ---- NCI requests ---- */
+
+void nci_req_complete(struct nci_dev *ndev, int result)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = result;
+ ndev->req_status = NCI_REQ_DONE;
+ complete(&ndev->req_completion);
+ }
+}
+
+static void nci_req_cancel(struct nci_dev *ndev, int err)
+{
+ if (ndev->req_status == NCI_REQ_PEND) {
+ ndev->req_result = err;
+ ndev->req_status = NCI_REQ_CANCELED;
+ complete(&ndev->req_completion);
+ }
+}
+
+/* Execute request and wait for completion. */
+static int __nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt,
+ __u32 timeout)
+{
+ int rc = 0;
+ unsigned long completion_rc;
+
+ ndev->req_status = NCI_REQ_PEND;
+
+ init_completion(&ndev->req_completion);
+ req(ndev, opt);
+ completion_rc = wait_for_completion_interruptible_timeout(
+ &ndev->req_completion,
+ timeout);
+
+ nfc_dbg("wait_for_completion return %ld", completion_rc);
+
+ if (completion_rc > 0) {
+ switch (ndev->req_status) {
+ case NCI_REQ_DONE:
+ rc = nci_to_errno(ndev->req_result);
+ break;
+
+ case NCI_REQ_CANCELED:
+ rc = -ndev->req_result;
+ break;
+
+ default:
+ rc = -ETIMEDOUT;
+ break;
+ }
+ } else {
+ nfc_err("wait_for_completion_interruptible_timeout failed %ld",
+ completion_rc);
+
+ rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
+ }
+
+ ndev->req_status = ndev->req_result = 0;
+
+ return rc;
+}
+
+static inline int nci_request(struct nci_dev *ndev,
+ void (*req)(struct nci_dev *ndev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
+{
+ int rc;
+
+ if (!test_bit(NCI_UP, &ndev->flags))
+ return -ENETDOWN;
+
+ /* Serialize all requests */
+ mutex_lock(&ndev->req_lock);
+ rc = __nci_request(ndev, req, opt, timeout);
+ mutex_unlock(&ndev->req_lock);
+
+ return rc;
+}
+
+static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
+}
+
+static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
+{
+ nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
+}
+
+static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_core_conn_create_cmd conn_cmd;
+ struct nci_rf_disc_map_cmd cmd;
+ struct disc_map_config *cfg = cmd.mapping_configs;
+ __u8 *num = &cmd.num_mapping_configs;
+ int i;
+
+ /* create static rf connection */
+ conn_cmd.target_handle = 0;
+ conn_cmd.num_target_specific_params = 0;
+ nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
+
+ /* set rf mapping configurations */
+ *num = 0;
+
+ /* by default mapping is set to NCI_RF_INTERFACE_FRAME */
+ for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
+ if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_ISO_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
+ (*num)++;
+ } else if (ndev->supported_rf_interfaces[i] ==
+ NCI_RF_INTERFACE_NFC_DEP) {
+ cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
+ cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
+ cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
+ (*num)++;
+ }
+
+ if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
+ break;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
+ (1 + ((*num)*sizeof(struct disc_map_config))),
+ &cmd);
+}
+
+static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_disc_cmd cmd;
+ __u32 protocols = opt;
+
+ cmd.num_disc_configs = 0;
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_JEWEL_MASK
+ || protocols & NFC_PROTO_MIFARE_MASK
+ || protocols & NFC_PROTO_ISO14443_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_ISO14443_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_FELICA_MASK
+ || protocols & NFC_PROTO_NFC_DEP_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].type =
+ NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
+ nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
+ (1 + (cmd.num_disc_configs*sizeof(struct disc_config))),
+ &cmd);
+}
+
+static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
+{
+ struct nci_rf_deactivate_cmd cmd;
+
+ cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;
+
+ nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
+ sizeof(struct nci_rf_deactivate_cmd),
+ &cmd);
+}
+
+static int nci_open_device(struct nci_dev *ndev)
+{
+ int rc = 0;
+
+ mutex_lock(&ndev->req_lock);
+
+ if (test_bit(NCI_UP, &ndev->flags)) {
+ rc = -EALREADY;
+ goto done;
+ }
+
+ if (ndev->ops->open(ndev)) {
+ rc = -EIO;
+ goto done;
+ }
+
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+
+ rc = __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ if (!rc) {
+ rc = __nci_request(ndev, nci_init_complete_req, 0,
+ msecs_to_jiffies(NCI_INIT_TIMEOUT));
+ }
+
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ if (!rc) {
+ set_bit(NCI_UP, &ndev->flags);
+ } else {
+ /* Init failed, cleanup */
+ skb_queue_purge(&ndev->cmd_q);
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ ndev->ops->close(ndev);
+ ndev->flags = 0;
+ }
+
+done:
+ mutex_unlock(&ndev->req_lock);
+ return rc;
+}
+
+static int nci_close_device(struct nci_dev *ndev)
+{
+ nci_req_cancel(ndev, ENODEV);
+ mutex_lock(&ndev->req_lock);
+
+ if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+ del_timer_sync(&ndev->cmd_timer);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+
+ /* Drop RX and TX queues */
+ skb_queue_purge(&ndev->rx_q);
+ skb_queue_purge(&ndev->tx_q);
+
+ /* Flush RX and TX wq */
+ flush_workqueue(ndev->rx_wq);
+ flush_workqueue(ndev->tx_wq);
+
+ /* Reset device */
+ skb_queue_purge(&ndev->cmd_q);
+ atomic_set(&ndev->cmd_cnt, 1);
+
+ set_bit(NCI_INIT, &ndev->flags);
+ __nci_request(ndev, nci_reset_req, 0,
+ msecs_to_jiffies(NCI_RESET_TIMEOUT));
+ clear_bit(NCI_INIT, &ndev->flags);
+
+ /* Flush cmd wq */
+ flush_workqueue(ndev->cmd_wq);
+
+ /* After this point our queues are empty
+ * and no work is scheduled. */
+ ndev->ops->close(ndev);
+
+ /* Clear flags */
+ ndev->flags = 0;
+
+ mutex_unlock(&ndev->req_lock);
+
+ return 0;
+}
+
+/* NCI command timer function */
+static void nci_cmd_timer(unsigned long arg)
+{
+ struct nci_dev *ndev = (void *) arg;
+
+ nfc_dbg("entry");
+
+ atomic_set(&ndev->cmd_cnt, 1);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
+
+static int nci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_open_device(ndev);
+}
+
+static int nci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ return nci_close_device(ndev);
+}
+
+static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry");
+
+ if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to start poll, since poll is already active");
+ return -EBUSY;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is an active target");
+ return -EBUSY;
+ }
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_dbg("target is active, implicitly deactivate...");
+
+ rc = nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ if (rc)
+ return -EBUSY;
+ }
+
+ rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
+
+ if (!rc)
+ ndev->poll_prots = protocols;
+
+ return rc;
+}
+
+static void nci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry");
+
+ if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
+ nfc_err("unable to stop poll, since poll is not active");
+ return;
+ }
+
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+}
+
+static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
+ __u32 protocol)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol);
+
+ if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nfc_err("there is no available target to activate");
+ return -EINVAL;
+ }
+
+ if (ndev->target_active_prot) {
+ nfc_err("there is already an active target");
+ return -EBUSY;
+ }
+
+ if (!(ndev->target_available_prots & (1 << protocol))) {
+ nfc_err("target does not support the requested protocol 0x%x",
+ protocol);
+ return -EINVAL;
+ }
+
+ ndev->target_active_prot = protocol;
+ ndev->target_available_prots = 0;
+
+ return 0;
+}
+
+static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+
+ nfc_dbg("entry, target_idx %d", target_idx);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to deactivate target, no active target");
+ return;
+ }
+
+ ndev->target_active_prot = 0;
+
+ if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
+ nci_request(ndev, nci_rf_deactivate_req, 0,
+ msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
+ }
+}
+
+static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
+ int rc;
+
+ nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ nfc_err("unable to exchange data, no active target");
+ return -EINVAL;
+ }
+
+ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ return -EBUSY;
+
+ /* store cb and context to be used on receiving data */
+ ndev->data_exchange_cb = cb;
+ ndev->data_exchange_cb_context = cb_context;
+
+ rc = nci_send_data(ndev, ndev->conn_id, skb);
+ if (rc)
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+
+ return rc;
+}
+
+static struct nfc_ops nci_nfc_ops = {
+ .dev_up = nci_dev_up,
+ .dev_down = nci_dev_down,
+ .start_poll = nci_start_poll,
+ .stop_poll = nci_stop_poll,
+ .activate_target = nci_activate_target,
+ .deactivate_target = nci_deactivate_target,
+ .data_exchange = nci_data_exchange,
+};
+
+/* ---- Interface to NCI drivers ---- */
+
+/**
+ * nci_allocate_device - allocate a new nci device
+ *
+ * @ops: device operations
+ * @supported_protocols: NFC protocols supported by the device
+ */
+struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ __u32 supported_protocols,
+ int tx_headroom,
+ int tx_tailroom)
+{
+ struct nci_dev *ndev;
+
+ nfc_dbg("entry, supported_protocols 0x%x", supported_protocols);
+
+ if (!ops->open || !ops->close || !ops->send)
+ return NULL;
+
+ if (!supported_protocols)
+ return NULL;
+
+ ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
+ if (!ndev)
+ return NULL;
+
+ ndev->ops = ops;
+ ndev->tx_headroom = tx_headroom;
+ ndev->tx_tailroom = tx_tailroom;
+
+ ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
+ supported_protocols,
+ tx_headroom + NCI_DATA_HDR_SIZE,
+ tx_tailroom);
+ if (!ndev->nfc_dev)
+ goto free_exit;
+
+ nfc_set_drvdata(ndev->nfc_dev, ndev);
+
+ return ndev;
+
+free_exit:
+ kfree(ndev);
+ return NULL;
+}
+EXPORT_SYMBOL(nci_allocate_device);
+
+/**
+ * nci_free_device - deallocate nci device
+ *
+ * @ndev: The nci device to deallocate
+ */
+void nci_free_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nfc_free_device(ndev->nfc_dev);
+ kfree(ndev);
+}
+EXPORT_SYMBOL(nci_free_device);
+
+/**
+ * nci_register_device - register a nci device in the nfc subsystem
+ *
+ * @dev: The nci device to register
+ */
+int nci_register_device(struct nci_dev *ndev)
+{
+ int rc;
+ struct device *dev = &ndev->nfc_dev->dev;
+ char name[32];
+
+ nfc_dbg("entry");
+
+ rc = nfc_register_device(ndev->nfc_dev);
+ if (rc)
+ goto exit;
+
+ ndev->flags = 0;
+
+ INIT_WORK(&ndev->cmd_work, nci_cmd_work);
+ snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
+ ndev->cmd_wq = create_singlethread_workqueue(name);
+ if (!ndev->cmd_wq) {
+ rc = -ENOMEM;
+ goto unreg_exit;
+ }
+
+ INIT_WORK(&ndev->rx_work, nci_rx_work);
+ snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
+ ndev->rx_wq = create_singlethread_workqueue(name);
+ if (!ndev->rx_wq) {
+ rc = -ENOMEM;
+ goto destroy_cmd_wq_exit;
+ }
+
+ INIT_WORK(&ndev->tx_work, nci_tx_work);
+ snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
+ ndev->tx_wq = create_singlethread_workqueue(name);
+ if (!ndev->tx_wq) {
+ rc = -ENOMEM;
+ goto destroy_rx_wq_exit;
+ }
+
+ skb_queue_head_init(&ndev->cmd_q);
+ skb_queue_head_init(&ndev->rx_q);
+ skb_queue_head_init(&ndev->tx_q);
+
+ setup_timer(&ndev->cmd_timer, nci_cmd_timer,
+ (unsigned long) ndev);
+
+ mutex_init(&ndev->req_lock);
+
+ goto exit;
+
+destroy_rx_wq_exit:
+ destroy_workqueue(ndev->rx_wq);
+
+destroy_cmd_wq_exit:
+ destroy_workqueue(ndev->cmd_wq);
+
+unreg_exit:
+ nfc_unregister_device(ndev->nfc_dev);
+
+exit:
+ return rc;
+}
+EXPORT_SYMBOL(nci_register_device);
+
+/**
+ * nci_unregister_device - unregister a nci device in the nfc subsystem
+ *
+ * @dev: The nci device to unregister
+ */
+void nci_unregister_device(struct nci_dev *ndev)
+{
+ nfc_dbg("entry");
+
+ nci_close_device(ndev);
+
+ destroy_workqueue(ndev->cmd_wq);
+ destroy_workqueue(ndev->rx_wq);
+ destroy_workqueue(ndev->tx_wq);
+
+ nfc_unregister_device(ndev->nfc_dev);
+}
+EXPORT_SYMBOL(nci_unregister_device);
+
+/**
+ * nci_recv_frame - receive frame from NCI drivers
+ *
+ * @skb: The sk_buff to receive
+ */
+int nci_recv_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
+ && !test_bit(NCI_INIT, &ndev->flags))) {
+ kfree_skb(skb);
+ return -ENXIO;
+ }
+
+ /* Queue frame for rx worker thread */
+ skb_queue_tail(&ndev->rx_q, skb);
+ queue_work(ndev->rx_wq, &ndev->rx_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(nci_recv_frame);
+
+static int nci_send_frame(struct sk_buff *skb)
+{
+ struct nci_dev *ndev = (struct nci_dev *) skb->dev;
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ if (!ndev) {
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+
+ /* Get rid of skb owner, prior to sending to the driver. */
+ skb_orphan(skb);
+
+ return ndev->ops->send(skb);
+}
+
+/* Send NCI command */
+int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
+{
+ struct nci_ctrl_hdr *hdr;
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen);
+
+ skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
+ if (!skb) {
+ nfc_err("no memory for command");
+ return -ENOMEM;
+ }
+
+ hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
+ hdr->gid = nci_opcode_gid(opcode);
+ hdr->oid = nci_opcode_oid(opcode);
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
+ nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);
+
+ if (plen)
+ memcpy(skb_put(skb, plen), payload, plen);
+
+ skb->dev = (void *) ndev;
+
+ skb_queue_tail(&ndev->cmd_q, skb);
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+
+ return 0;
+}
+
+/* ---- NCI TX Data worker thread ---- */
+
+static void nci_tx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt));
+
+ /* Send queued tx data */
+ while (atomic_read(&ndev->credits_cnt)) {
+ skb = skb_dequeue(&ndev->tx_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->credits_cnt);
+
+ nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+ }
+}
+
+/* ----- NCI RX worker thread (data & control) ----- */
+
+static void nci_rx_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ndev->rx_q))) {
+ /* Process frame */
+ switch (nci_mt(skb->data)) {
+ case NCI_MT_RSP_PKT:
+ nci_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_MT_NTF_PKT:
+ nci_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_MT_DATA_PKT:
+ nci_rx_data_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown MT 0x%x", nci_mt(skb->data));
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+/* ----- NCI TX CMD worker thread ----- */
+
+static void nci_cmd_work(struct work_struct *work)
+{
+ struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
+ struct sk_buff *skb;
+
+ nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt));
+
+ /* Send queued command */
+ if (atomic_read(&ndev->cmd_cnt)) {
+ skb = skb_dequeue(&ndev->cmd_q);
+ if (!skb)
+ return;
+
+ atomic_dec(&ndev->cmd_cnt);
+
+ nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(nci_opcode(skb->data)),
+ nci_opcode_oid(nci_opcode(skb->data)),
+ nci_plen(skb->data));
+
+ nci_send_frame(skb);
+
+ mod_timer(&ndev->cmd_timer,
+ jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
+ }
+}
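Taken together, core.c gives a transport driver a small contract: supply open/close/send, allocate, register; the core then owns the cmd/rx/tx workqueues, the command timer, and request serialization. A hedged sketch of a minimal driver attach (my_* names hypothetical, not from the patch):

static int my_open(struct nci_dev *ndev)  { return 0; /* power the link up */ }
static int my_close(struct nci_dev *ndev) { return 0; /* power it down */ }

static int my_send(struct sk_buff *skb)
{
        /* skb->dev carries the nci_dev; hand the frame to the wire */
        consume_skb(skb);       /* stand-in for the real transport write */
        return 0;
}

static struct nci_ops my_nci_ops = {
        .open   = my_open,
        .close  = my_close,
        .send   = my_send,
};

static int my_attach(void)
{
        struct nci_dev *ndev;

        ndev = nci_allocate_device(&my_nci_ops, NFC_PROTO_MIFARE_MASK,
                                   0, 0);       /* no extra head/tailroom */
        if (!ndev)
                return -ENOMEM;
        return nci_register_device(ndev);       /* spawns the three wqs */
}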
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
new file mode 100644
index 0000000..e5ed90fc
--- /dev/null
+++ b/net/nfc/nci/data.c
@@ -0,0 +1,247 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+/* Complete data exchange transaction and forward skb to nfc core */
+void nci_data_exchange_complete(struct nci_dev *ndev,
+ struct sk_buff *skb,
+ int err)
+{
+ data_exchange_cb_t cb = ndev->data_exchange_cb;
+ void *cb_context = ndev->data_exchange_cb_context;
+
+ nfc_dbg("entry, len %d, err %d", ((skb) ? (skb->len) : (0)), err);
+
+ if (cb) {
+ ndev->data_exchange_cb = NULL;
+ ndev->data_exchange_cb_context = 0;
+
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+ } else if (skb) {
+ nfc_err("no rx callback, dropping rx data...");
+
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+
+ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+}
+
+/* ----------------- NCI TX Data ----------------- */
+
+static inline void nci_push_data_hdr(struct nci_dev *ndev,
+ __u8 conn_id,
+ struct sk_buff *skb,
+ __u8 pbf)
+{
+ struct nci_data_hdr *hdr;
+ int plen = skb->len;
+
+ hdr = (struct nci_data_hdr *) skb_push(skb, NCI_DATA_HDR_SIZE);
+ hdr->conn_id = conn_id;
+ hdr->rfu = 0;
+ hdr->plen = plen;
+
+ nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT);
+ nci_pbf_set((__u8 *)hdr, pbf);
+
+ skb->dev = (void *) ndev;
+}
+
+static int nci_queue_tx_data_frags(struct nci_dev *ndev,
+ __u8 conn_id,
+ struct sk_buff *skb) {
+ int total_len = skb->len;
+ unsigned char *data = skb->data;
+ unsigned long flags;
+ struct sk_buff_head frags_q;
+ struct sk_buff *skb_frag;
+ int frag_len;
+ int rc = 0;
+
+ nfc_dbg("entry, conn_id 0x%x, total_len %d", conn_id, total_len);
+
+ __skb_queue_head_init(&frags_q);
+
+ while (total_len) {
+ frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
+
+ skb_frag = nci_skb_alloc(ndev,
+ (NCI_DATA_HDR_SIZE + frag_len),
+ GFP_KERNEL);
+ if (skb_frag == NULL) {
+ rc = -ENOMEM;
+ goto free_exit;
+ }
+ skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);
+
+ /* first, copy the data */
+ memcpy(skb_put(skb_frag, frag_len), data, frag_len);
+
+ /* second, set the header */
+ nci_push_data_hdr(ndev, conn_id, skb_frag,
+ ((total_len == frag_len) ? (NCI_PBF_LAST) : (NCI_PBF_CONT)));
+
+ __skb_queue_tail(&frags_q, skb_frag);
+
+ data += frag_len;
+ total_len -= frag_len;
+
+ nfc_dbg("frag_len %d, remaining total_len %d",
+ frag_len, total_len);
+ }
+
+ /* queue all fragments atomically */
+ spin_lock_irqsave(&ndev->tx_q.lock, flags);
+
+ while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
+ __skb_queue_tail(&ndev->tx_q, skb_frag);
+
+ spin_unlock_irqrestore(&ndev->tx_q.lock, flags);
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ goto exit;
+
+free_exit:
+ while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
+ kfree_skb(skb_frag);
+
+exit:
+ return rc;
+}
+
+/* Send NCI data */
+int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
+{
+ int rc = 0;
+
+ nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
+
+ /* check if the packet needs to be fragmented */
+ if (skb->len <= ndev->max_pkt_payload_size) {
+ /* no need to fragment packet */
+ nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
+
+ skb_queue_tail(&ndev->tx_q, skb);
+ } else {
+ /* fragment packet and queue the fragments */
+ rc = nci_queue_tx_data_frags(ndev, conn_id, skb);
+ if (rc) {
+ nfc_err("failed to fragment tx data packet");
+ goto free_exit;
+ }
+ }
+
+ queue_work(ndev->tx_wq, &ndev->tx_work);
+
+ goto exit;
+
+free_exit:
+ kfree_skb(skb);
+
+exit:
+ return rc;
+}
+
+/* ----------------- NCI RX Data ----------------- */
+
+static void nci_add_rx_data_frag(struct nci_dev *ndev,
+ struct sk_buff *skb,
+ __u8 pbf)
+{
+ int reassembly_len;
+ int err = 0;
+
+ if (ndev->rx_data_reassembly) {
+ reassembly_len = ndev->rx_data_reassembly->len;
+
+ /* first, make enough room for the already accumulated data */
+ if (skb_cow_head(skb, reassembly_len)) {
+ nfc_err("error adding room for accumulated rx data");
+
+ kfree_skb(skb);
+ skb = 0;
+
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* second, combine the two fragments */
+ memcpy(skb_push(skb, reassembly_len),
+ ndev->rx_data_reassembly->data,
+ reassembly_len);
+
+ /* third, free old reassembly */
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+ }
+
+ if (pbf == NCI_PBF_CONT) {
+ /* need to wait for next fragment, store skb and exit */
+ ndev->rx_data_reassembly = skb;
+ return;
+ }
+
+exit:
+ nci_data_exchange_complete(ndev, skb, err);
+}
+
+/* Rx Data packet */
+void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u8 pbf = nci_pbf(skb->data);
+
+ nfc_dbg("entry, len %d", skb->len);
+
+ nfc_dbg("NCI RX: MT=data, PBF=%d, conn_id=%d, plen=%d",
+ nci_pbf(skb->data),
+ nci_conn_id(skb->data),
+ nci_plen(skb->data));
+
+ /* strip the nci data header */
+ skb_pull(skb, NCI_DATA_HDR_SIZE);
+
+ if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
+ /* frame I/F => remove the status byte */
+ nfc_dbg("NFC_PROTO_MIFARE => remove the status byte");
+ skb_trim(skb, (skb->len - 1));
+ }
+
+ nci_add_rx_data_frag(ndev, skb, pbf);
+}
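nci_send_data() and nci_queue_tx_data_frags() above implement the split: payloads larger than max_pkt_payload_size become a chain of fragments, all but the last flagged NCI_PBF_CONT, queued to tx_q under its lock so one packet's fragments are never interleaved with another sender's. The arithmetic, isolated as a sketch:

#include <linux/kernel.h>       /* DIV_ROUND_UP */

static int fragment_count(int total_len, int max_payload)
{
        return DIV_ROUND_UP(total_len, max_payload);
}

/* PBF for fragment i of n: only the final fragment is LAST */
static __u8 fragment_pbf(int i, int n)
{
        return (i == n - 1) ? NCI_PBF_LAST : NCI_PBF_CONT;
}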
diff --git a/net/nfc/nci/lib.c b/net/nfc/nci/lib.c
new file mode 100644
index 0000000..b19dc2f
--- /dev/null
+++ b/net/nfc/nci/lib.c
@@ -0,0 +1,94 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on lib.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <net/nfc/nci.h>
+
+/* NCI status codes to Unix errno mapping */
+int nci_to_errno(__u8 code)
+{
+ switch (code) {
+ case NCI_STATUS_OK:
+ return 0;
+
+ case NCI_STATUS_REJECTED:
+ return -EBUSY;
+
+ case NCI_STATUS_MESSAGE_CORRUPTED:
+ return -EBADMSG;
+
+ case NCI_STATUS_BUFFER_FULL:
+ return -ENOBUFS;
+
+ case NCI_STATUS_NOT_INITIALIZED:
+ return -EHOSTDOWN;
+
+ case NCI_STATUS_SYNTAX_ERROR:
+ case NCI_STATUS_SEMANTIC_ERROR:
+ case NCI_STATUS_INVALID_PARAM:
+ case NCI_STATUS_RF_PROTOCOL_ERROR:
+ case NCI_STATUS_NFCEE_PROTOCOL_ERROR:
+ return -EPROTO;
+
+ case NCI_STATUS_UNKNOWN_GID:
+ case NCI_STATUS_UNKNOWN_OID:
+ return -EBADRQC;
+
+ case NCI_STATUS_MESSAGE_SIZE_EXCEEDED:
+ return -EMSGSIZE;
+
+ case NCI_STATUS_DISCOVERY_ALREADY_STARTED:
+ return -EALREADY;
+
+ case NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED:
+ case NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED:
+ return -ECONNREFUSED;
+
+ case NCI_STATUS_RF_TRANSMISSION_ERROR:
+ case NCI_STATUS_NFCEE_TRANSMISSION_ERROR:
+ return -ECOMM;
+
+ case NCI_STATUS_RF_TIMEOUT_ERROR:
+ case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
+ return -ETIMEDOUT;
+
+ case NCI_STATUS_RF_LINK_LOSS_ERROR:
+ return -ENOLINK;
+
+ case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
+ return -EDQUOT;
+
+ case NCI_STATUS_FAILED:
+ default:
+ return -ENOSYS;
+ }
+}
+EXPORT_SYMBOL(nci_to_errno);
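__nci_request() in core.c is the main consumer of this table: the NFCC's status byte is translated to a Unix errno exactly once, at the request-completion boundary, so everything above NCI only sees errnos. A short usage sketch:

static int handle_status(__u8 status)
{
        int err = nci_to_errno(status);         /* 0 for NCI_STATUS_OK */

        if (err)
                nfc_err("request failed, status 0x%x -> errno %d",
                        status, err);
        return err;
}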
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
new file mode 100644
index 0000000..96633f5
--- /dev/null
+++ b/net/nfc/nci/ntf.c
@@ -0,0 +1,258 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_event.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+#include <linux/nfc.h>
+
+/* Handle NCI Notification packets */
+
+static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_core_conn_credit_ntf *ntf = (void *) skb->data;
+ int i;
+
+ nfc_dbg("entry, num_entries %d", ntf->num_entries);
+
+ if (ntf->num_entries > NCI_MAX_NUM_CONN)
+ ntf->num_entries = NCI_MAX_NUM_CONN;
+
+ /* update the credits */
+ for (i = 0; i < ntf->num_entries; i++) {
+ nfc_dbg("entry[%d]: conn_id %d, credits %d", i,
+ ntf->conn_entries[i].conn_id,
+ ntf->conn_entries[i].credits);
+
+ if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
+ /* found static rf connection */
+ atomic_add(ntf->conn_entries[i].credits,
+ &ndev->credits_cnt);
+ }
+ }
+
+ /* trigger the next tx */
+ if (!skb_queue_empty(&ndev->tx_q))
+ queue_work(ndev->tx_wq, &ndev->tx_work);
+}
+
+static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
+
+ nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
+}
+
+static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
+ struct nci_rf_activate_ntf *ntf, __u8 *data)
+{
+ struct rf_tech_specific_params_nfca_poll *nfca_poll;
+ struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
+
+ nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
+ nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
+
+ nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
+ data += 2;
+
+ nfca_poll->nfcid1_len = *data++;
+
+ nfc_dbg("sens_res 0x%x, nfcid1_len %d",
+ nfca_poll->sens_res,
+ nfca_poll->nfcid1_len);
+
+ memcpy(nfca_poll->nfcid1, data, nfca_poll->nfcid1_len);
+ data += nfca_poll->nfcid1_len;
+
+ nfca_poll->sel_res_len = *data++;
+
+ if (nfca_poll->sel_res_len != 0)
+ nfca_poll->sel_res = *data++;
+
+ ntf->rf_interface_type = *data++;
+ ntf->activation_params_len = *data++;
+
+ nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
+ nfca_poll->sel_res_len,
+ nfca_poll->sel_res,
+ ntf->rf_interface_type,
+ ntf->activation_params_len);
+
+ switch (ntf->rf_interface_type) {
+ case NCI_RF_INTERFACE_ISO_DEP:
+ nfca_poll_iso_dep->rats_res_len = *data++;
+ if (nfca_poll_iso_dep->rats_res_len > 0) {
+ memcpy(nfca_poll_iso_dep->rats_res,
+ data,
+ nfca_poll_iso_dep->rats_res_len);
+ }
+ break;
+
+ case NCI_RF_INTERFACE_FRAME:
+ /* no activation params */
+ break;
+
+ default:
+ nfc_err("unsupported rf_interface_type 0x%x",
+ ntf->rf_interface_type);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void nci_target_found(struct nci_dev *ndev,
+ struct nci_rf_activate_ntf *ntf)
+{
+ struct nfc_target nfc_tgt;
+
+ if (ntf->rf_protocol == NCI_RF_PROTOCOL_T2T) /* T2T MifareUL */
+ nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
+ else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
+ nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
+
+ nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
+ nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
+
+ if (!(nfc_tgt.supported_protocols & ndev->poll_prots)) {
+ nfc_dbg("the target found does not have the desired protocol");
+ return;
+ }
+
+ nfc_dbg("new target found, supported_protocols 0x%x",
+ nfc_tgt.supported_protocols);
+
+ ndev->target_available_prots = nfc_tgt.supported_protocols;
+
+ nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
+}
+
+static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_rf_activate_ntf ntf;
+ __u8 *data = skb->data;
+ int rc = -1;
+
+ clear_bit(NCI_DISCOVERY, &ndev->flags);
+ set_bit(NCI_POLL_ACTIVE, &ndev->flags);
+
+ ntf.target_handle = *data++;
+ ntf.rf_protocol = *data++;
+ ntf.rf_tech_and_mode = *data++;
+ ntf.rf_tech_specific_params_len = *data++;
+
+ nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
+ ntf.target_handle,
+ ntf.rf_protocol,
+ ntf.rf_tech_and_mode,
+ ntf.rf_tech_specific_params_len);
+
+ switch (ntf.rf_tech_and_mode) {
+ case NCI_NFC_A_PASSIVE_POLL_MODE:
+ rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
+ data);
+ break;
+
+ default:
+ nfc_err("unsupported rf_tech_and_mode 0x%x",
+ ntf.rf_tech_and_mode);
+ return;
+ }
+
+ if (!rc)
+ nci_target_found(ndev, &ntf);
+}
+
+static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 type = skb->data[0];
+
+ nfc_dbg("entry, type 0x%x", type);
+
+ clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
+ ndev->target_active_prot = 0;
+
+ /* drop tx data queue */
+ skb_queue_purge(&ndev->tx_q);
+
+ /* drop partial rx data packet */
+ if (ndev->rx_data_reassembly) {
+ kfree_skb(ndev->rx_data_reassembly);
+ ndev->rx_data_reassembly = 0;
+ }
+
+ /* complete the data exchange transaction, if one exists */
+ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
+ nci_data_exchange_complete(ndev, NULL, -EIO);
+}
+
+void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u16 ntf_opcode = nci_opcode(skb->data);
+
+ nfc_dbg("NCI RX: MT=ntf, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(ntf_opcode),
+ nci_opcode_oid(ntf_opcode),
+ nci_plen(skb->data));
+
+ /* strip the nci control header */
+ skb_pull(skb, NCI_CTRL_HDR_SIZE);
+
+ switch (ntf_opcode) {
+ case NCI_OP_CORE_CONN_CREDITS_NTF:
+ nci_core_conn_credits_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_FIELD_INFO_NTF:
+ nci_rf_field_info_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_ACTIVATE_NTF:
+ nci_rf_activate_ntf_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DEACTIVATE_NTF:
+ nci_rf_deactivate_ntf_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown ntf opcode 0x%x", ntf_opcode);
+ break;
+ }
+
+ kfree_skb(skb);
+}
diff --git a/net/nfc/nci/rsp.c b/net/nfc/nci/rsp.c
new file mode 100644
index 0000000..0403d4c
--- /dev/null
+++ b/net/nfc/nci/rsp.c
@@ -0,0 +1,226 @@
+/*
+ * The NFC Controller Interface is the communication protocol between an
+ * NFC Controller (NFCC) and a Device Host (DH).
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ *
+ * Written by Ilan Elias <ilane@ti.com>
+ *
+ * Acknowledgements:
+ * This file is based on hci_event.c, which was written
+ * by Maxim Krasnyansky.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/skbuff.h>
+
+#include "../nfc.h"
+#include <net/nfc/nci.h>
+#include <net/nfc/nci_core.h>
+
+/* Handle NCI Response packets */
+
+static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nci_core_reset_rsp *rsp = (void *) skb->data;
+
+ nfc_dbg("entry, status 0x%x", rsp->status);
+
+ if (rsp->status == NCI_STATUS_OK)
+ ndev->nci_ver = rsp->nci_ver;
+
+ nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
+
+ nci_req_complete(ndev, rsp->status);
+}
+
+static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ struct nci_core_init_rsp_1 *rsp_1 = (void *) skb->data;
+ struct nci_core_init_rsp_2 *rsp_2;
+
+ nfc_dbg("entry, status 0x%x", rsp_1->status);
+
+ if (rsp_1->status != NCI_STATUS_OK)
+ return;
+
+ ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
+ ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
+
+ if (ndev->num_supported_rf_interfaces >
+ NCI_MAX_SUPPORTED_RF_INTERFACES) {
+ ndev->num_supported_rf_interfaces =
+ NCI_MAX_SUPPORTED_RF_INTERFACES;
+ }
+
+ memcpy(ndev->supported_rf_interfaces,
+ rsp_1->supported_rf_interfaces,
+ ndev->num_supported_rf_interfaces);
+
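+ /* rsp_1 is 6 bytes (status, 4-byte nfcc_features and
+ * num_supported_rf_interfaces), followed by the interface list;
+ * rsp_2 starts right after it
+ */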
+ rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
+
+ ndev->max_logical_connections =
+ rsp_2->max_logical_connections;
+ ndev->max_routing_table_size =
+ __le16_to_cpu(rsp_2->max_routing_table_size);
+ ndev->max_control_packet_payload_length =
+ rsp_2->max_control_packet_payload_length;
+ ndev->rf_sending_buffer_size =
+ __le16_to_cpu(rsp_2->rf_sending_buffer_size);
+ ndev->rf_receiving_buffer_size =
+ __le16_to_cpu(rsp_2->rf_receiving_buffer_size);
+ ndev->manufacturer_id =
+ __le16_to_cpu(rsp_2->manufacturer_id);
+
+ nfc_dbg("nfcc_features 0x%x",
+ ndev->nfcc_features);
+ nfc_dbg("num_supported_rf_interfaces %d",
+ ndev->num_supported_rf_interfaces);
+ nfc_dbg("supported_rf_interfaces[0] 0x%x",
+ ndev->supported_rf_interfaces[0]);
+ nfc_dbg("supported_rf_interfaces[1] 0x%x",
+ ndev->supported_rf_interfaces[1]);
+ nfc_dbg("supported_rf_interfaces[2] 0x%x",
+ ndev->supported_rf_interfaces[2]);
+ nfc_dbg("supported_rf_interfaces[3] 0x%x",
+ ndev->supported_rf_interfaces[3]);
+ nfc_dbg("max_logical_connections %d",
+ ndev->max_logical_connections);
+ nfc_dbg("max_routing_table_size %d",
+ ndev->max_routing_table_size);
+ nfc_dbg("max_control_packet_payload_length %d",
+ ndev->max_control_packet_payload_length);
+ nfc_dbg("rf_sending_buffer_size %d",
+ ndev->rf_sending_buffer_size);
+ nfc_dbg("rf_receiving_buffer_size %d",
+ ndev->rf_receiving_buffer_size);
+ nfc_dbg("manufacturer_id 0x%x",
+ ndev->manufacturer_id);
+
+ nci_req_complete(ndev, rsp_1->status);
+}
+
+static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
+
+ nfc_dbg("entry, status 0x%x", rsp->status);
+
+ if (rsp->status != NCI_STATUS_OK)
+ return;
+
+ ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
+ ndev->initial_num_credits = rsp->initial_num_credits;
+ ndev->conn_id = rsp->conn_id;
+
+ atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
+
+ nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
+ nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
+ nfc_dbg("conn_id %d", ndev->conn_id);
+}
+
+static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ nci_req_complete(ndev, status);
+}
+
+static void nci_rf_disc_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ if (status == NCI_STATUS_OK)
+ set_bit(NCI_DISCOVERY, &ndev->flags);
+
+ nci_req_complete(ndev, status);
+}
+
+static void nci_rf_deactivate_rsp_packet(struct nci_dev *ndev,
+ struct sk_buff *skb)
+{
+ __u8 status = skb->data[0];
+
+ nfc_dbg("entry, status 0x%x", status);
+
+ clear_bit(NCI_DISCOVERY, &ndev->flags);
+
+ nci_req_complete(ndev, status);
+}
+
+void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
+{
+ __u16 rsp_opcode = nci_opcode(skb->data);
+
+ /* we got a rsp, stop the cmd timer */
+ del_timer(&ndev->cmd_timer);
+
+ nfc_dbg("NCI RX: MT=rsp, PBF=%d, GID=0x%x, OID=0x%x, plen=%d",
+ nci_pbf(skb->data),
+ nci_opcode_gid(rsp_opcode),
+ nci_opcode_oid(rsp_opcode),
+ nci_plen(skb->data));
+
+ /* strip the nci control header */
+ skb_pull(skb, NCI_CTRL_HDR_SIZE);
+
+ switch (rsp_opcode) {
+ case NCI_OP_CORE_RESET_RSP:
+ nci_core_reset_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_CORE_INIT_RSP:
+ nci_core_init_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_CORE_CONN_CREATE_RSP:
+ nci_core_conn_create_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DISCOVER_MAP_RSP:
+ nci_rf_disc_map_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DISCOVER_RSP:
+ nci_rf_disc_rsp_packet(ndev, skb);
+ break;
+
+ case NCI_OP_RF_DEACTIVATE_RSP:
+ nci_rf_deactivate_rsp_packet(ndev, skb);
+ break;
+
+ default:
+ nfc_err("unknown rsp opcode 0x%x", rsp_opcode);
+ break;
+ }
+
+ kfree_skb(skb);
+
+ /* trigger the next cmd */
+ atomic_set(&ndev->cmd_cnt, 1);
+ if (!skb_queue_empty(&ndev->cmd_q))
+ queue_work(ndev->cmd_wq, &ndev->cmd_work);
+}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index ccdff79..03f8818 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -367,6 +367,52 @@ out_putdev:
return rc;
}
+static int nfc_genl_dev_up(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc;
+ u32 idx;
+
+ nfc_dbg("entry");
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dev_up(dev);
+
+ nfc_put_device(dev);
+ return rc;
+}
+
+static int nfc_genl_dev_down(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nfc_dev *dev;
+ int rc;
+ u32 idx;
+
+ nfc_dbg("entry");
+
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ return -EINVAL;
+
+ idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+ dev = nfc_get_device(idx);
+ if (!dev)
+ return -ENODEV;
+
+ rc = nfc_dev_down(dev);
+
+ nfc_put_device(dev);
+ return rc;
+}
+
static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
@@ -441,6 +487,16 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
+ .cmd = NFC_CMD_DEV_UP,
+ .doit = nfc_genl_dev_up,
+ .policy = nfc_genl_policy,
+ },
+ {
+ .cmd = NFC_CMD_DEV_DOWN,
+ .doit = nfc_genl_dev_down,
+ .policy = nfc_genl_policy,
+ },
+ {
.cmd = NFC_CMD_START_POLL,
.doit = nfc_genl_start_poll,
.policy = nfc_genl_policy,
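As a side note, user space could drive the new NFC_CMD_DEV_UP operation over generic netlink roughly as follows. This is a minimal sketch, assuming libnl-3 and that the family is registered under the "nfc" name; error handling is omitted.

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nfc.h>

static int nfc_dev_up_request(uint32_t dev_idx)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, rc;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nfc");	/* assumed family name */

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NFC_CMD_DEV_UP, 1);
	/* the handler rejects requests without NFC_ATTR_DEVICE_INDEX */
	nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);

	rc = nl_send_auto(sk, msg);
	if (rc >= 0)
		rc = nl_wait_for_ack(sk);	/* carries the handler's return code */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}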
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index aaf9832..d86583f 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -24,10 +24,10 @@
#ifndef __LOCAL_NFC_H
#define __LOCAL_NFC_H
-#include <net/nfc.h>
+#include <net/nfc/nfc.h>
#include <net/sock.h>
-__attribute__((format (printf, 2, 3)))
+__printf(2, 3)
int nfc_printk(const char *level, const char *fmt, ...);
#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
@@ -101,6 +101,10 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
class_dev_iter_exit(iter);
}
+int nfc_dev_up(struct nfc_dev *dev);
+
+int nfc_dev_down(struct nfc_dev *dev);
+
int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 52de84a..ee7b2b3 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -23,6 +23,7 @@
#include <net/tcp_states.h>
#include <linux/nfc.h>
+#include <linux/export.h>
#include "nfc.h"
@@ -123,11 +124,7 @@ error:
static int rawsock_add_header(struct sk_buff *skb)
{
-
- if (skb_cow_head(skb, 1))
- return -ENOMEM;
-
- *skb_push(skb, 1) = 0;
+ *skb_push(skb, NFC_HEADER_SIZE) = 0;
return 0;
}
@@ -197,6 +194,7 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
+ struct nfc_dev *dev = nfc_rawsock(sk)->dev;
struct sk_buff *skb;
int rc;
@@ -208,11 +206,13 @@ static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
if (sock->state != SS_CONNECTED)
return -ENOTCONN;
- skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
- &rc);
+ skb = sock_alloc_send_skb(sk, len + dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE,
+ msg->msg_flags & MSG_DONTWAIT, &rc);
if (!skb)
return rc;
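+ /* leave room for the driver's headroom and for the NFC header
+ * that rawsock_add_header() will push in front of the payload
+ */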
+ skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
+
rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (rc < 0) {
kfree_skb(skb);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fabb4fa..82a6f34 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -40,6 +40,10 @@
* byte arrays at the end of sockaddr_ll
* and packet_mreq.
* Johann Baudy : Added TX RING.
+ * Chetan Loke : Implemented TPACKET_V3 block abstraction
+ * layer.
+ * Copyright (C) 2011, <lokec@ccs.neu.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -161,9 +165,56 @@ struct packet_mreq_max {
unsigned char mr_address[MAX_ADDR_LEN];
};
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring);
+
+#define V3_ALIGNMENT (8)
+
+#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
+
+#define BLK_PLUS_PRIV(sz_of_priv) \
+ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
+
+/* kbdq - kernel block descriptor queue */
+struct tpacket_kbdq_core {
+ struct pgv *pkbdq;
+ unsigned int feature_req_word;
+ unsigned int hdrlen;
+ unsigned char reset_pending_on_curr_blk;
+ unsigned char delete_blk_timer;
+ unsigned short kactive_blk_num;
+ unsigned short blk_sizeof_priv;
+
+ /* last_kactive_blk_num:
+ * trick to see if user-space has caught up,
+ * so we avoid refreshing the timer on every single packet arrival.
+ */
+ unsigned short last_kactive_blk_num;
+
+ char *pkblk_start;
+ char *pkblk_end;
+ int kblk_size;
+ unsigned int knum_blocks;
+ uint64_t knxt_seq_num;
+ char *prev;
+ char *nxt_offset;
+ struct sk_buff *skb;
+
+ atomic_t blk_fill_in_prog;
+
+ /* Default is set to 8ms */
+#define DEFAULT_PRB_RETIRE_TOV (8)
+
+ unsigned short retire_blk_tov;
+ unsigned short version;
+ unsigned long tov_in_jiffies;
+
+ /* timer to retire an outstanding block */
+ struct timer_list retire_blk_timer;
+};
+
+#define PGV_FROM_VMALLOC 1
struct pgv {
char *buffer;
};
@@ -179,12 +230,44 @@ struct packet_ring_buffer {
unsigned int pg_vec_pages;
unsigned int pg_vec_len;
+ struct tpacket_kbdq_core prb_bdqc;
atomic_t pending;
};
+#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
+#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
+#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
+#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
+#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
+#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
+#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
+
struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
+static void *packet_previous_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status);
+static void packet_increment_head(struct packet_ring_buffer *buff);
+static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
+ struct tpacket_block_desc *);
+static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
+ struct packet_sock *);
+static void prb_retire_current_block(struct tpacket_kbdq_core *,
+ struct packet_sock *, unsigned int status);
+static int prb_queue_frozen(struct tpacket_kbdq_core *);
+static void prb_open_block(struct tpacket_kbdq_core *,
+ struct tpacket_block_desc *);
+static void prb_retire_rx_blk_timer_expired(unsigned long);
+static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
+static void prb_init_blk_timer(struct packet_sock *,
+ struct tpacket_kbdq_core *,
+ void (*func) (unsigned long));
+static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
+static void prb_clear_rxhash(struct tpacket_kbdq_core *,
+ struct tpacket3_hdr *);
+static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
+ struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
struct packet_fanout;
@@ -193,6 +276,7 @@ struct packet_sock {
struct sock sk;
struct packet_fanout *fanout;
struct tpacket_stats stats;
+ union tpacket_stats_u stats_u;
struct packet_ring_buffer rx_ring;
struct packet_ring_buffer tx_ring;
int copy_thresh;
@@ -242,7 +326,16 @@ struct packet_skb_cb {
#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
-static inline struct packet_sock *pkt_sk(struct sock *sk)
+#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
+#define GET_PBLOCK_DESC(x, bid) \
+ ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
+#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
+ ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
+#define GET_NEXT_PRB_BLK_NUM(x) \
+ (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
+ ((x)->kactive_blk_num+1) : 0)
+
+static struct packet_sock *pkt_sk(struct sock *sk)
{
return (struct packet_sock *)sk;
}
@@ -325,8 +418,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
h.h2->tp_status = status;
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
break;
+ case TPACKET_V3:
default:
- pr_err("TPACKET version not supported\n");
+ WARN(1, "TPACKET version not supported.\n");
BUG();
}
@@ -351,8 +445,9 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
case TPACKET_V2:
flush_dcache_page(pgv_to_page(&h.h2->tp_status));
return h.h2->tp_status;
+ case TPACKET_V3:
default:
- pr_err("TPACKET version not supported\n");
+ WARN(1, "TPACKET version not supported.\n");
BUG();
return 0;
}
@@ -382,14 +477,678 @@ static void *packet_lookup_frame(struct packet_sock *po,
return h.raw;
}
-static inline void *packet_current_frame(struct packet_sock *po,
+static void *packet_current_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
return packet_lookup_frame(po, rb, rb->head, status);
}
-static inline void *packet_previous_frame(struct packet_sock *po,
+static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+{
+ del_timer_sync(&pkc->retire_blk_timer);
+}
+
+static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
+ int tx_ring,
+ struct sk_buff_head *rb_queue)
+{
+ struct tpacket_kbdq_core *pkc;
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+
+ spin_lock(&rb_queue->lock);
+ pkc->delete_blk_timer = 1;
+ spin_unlock(&rb_queue->lock);
+
+ prb_del_retire_blk_timer(pkc);
+}
+
+static void prb_init_blk_timer(struct packet_sock *po,
+ struct tpacket_kbdq_core *pkc,
+ void (*func) (unsigned long))
+{
+ init_timer(&pkc->retire_blk_timer);
+ pkc->retire_blk_timer.data = (long)po;
+ pkc->retire_blk_timer.function = func;
+ pkc->retire_blk_timer.expires = jiffies;
+}
+
+static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
+{
+ struct tpacket_kbdq_core *pkc;
+
+ if (tx_ring)
+ BUG();
+
+ pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
+}
+
+static int prb_calc_retire_blk_tmo(struct packet_sock *po,
+ int blk_size_in_bytes)
+{
+ struct net_device *dev;
+ unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
+ struct ethtool_cmd ecmd;
+ int err;
+
+ rtnl_lock();
+ dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
+ if (unlikely(!dev)) {
+ rtnl_unlock();
+ return DEFAULT_PRB_RETIRE_TOV;
+ }
+ err = __ethtool_get_settings(dev, &ecmd);
+ rtnl_unlock();
+ if (!err) {
+ switch (ecmd.speed) {
+ case SPEED_10000:
+ msec = 1;
+ div = 10000/1000;
+ break;
+ case SPEED_1000:
+ msec = 1;
+ div = 1000/1000;
+ break;
+ /*
+ * If the link speed is this slow, you don't really
+ * need to worry about perf anyway
+ */
+ case SPEED_100:
+ case SPEED_10:
+ default:
+ return DEFAULT_PRB_RETIRE_TOV;
+ }
+ }
+
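+ /* Example: a 1 MB block holds 8 Mbit; at 1 Gbit/s that is ~8 ms
+ * of line time, which is the timeout computed below.
+ */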
+ mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
+
+ if (div)
+ mbits /= div;
+
+ tmo = mbits * msec;
+
+ if (div)
+ return tmo+1;
+ return tmo;
+}
+
+static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
+ union tpacket_req_u *req_u)
+{
+ p1->feature_req_word = req_u->req3.tp_feature_req_word;
+}
+
+static void init_prb_bdqc(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ struct pgv *pg_vec,
+ union tpacket_req_u *req_u, int tx_ring)
+{
+ struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
+ struct tpacket_block_desc *pbd;
+
+ memset(p1, 0x0, sizeof(*p1));
+
+ p1->knxt_seq_num = 1;
+ p1->pkbdq = pg_vec;
+ pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
+ p1->pkblk_start = (char *)pg_vec[0].buffer;
+ p1->kblk_size = req_u->req3.tp_block_size;
+ p1->knum_blocks = req_u->req3.tp_block_nr;
+ p1->hdrlen = po->tp_hdrlen;
+ p1->version = po->tp_version;
+ p1->last_kactive_blk_num = 0;
+ po->stats_u.stats3.tp_freeze_q_cnt = 0;
+ if (req_u->req3.tp_retire_blk_tov)
+ p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
+ else
+ p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
+ req_u->req3.tp_block_size);
+ p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
+ p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+
+ prb_init_ft_ops(p1, req_u);
+ prb_setup_retire_blk_timer(po, tx_ring);
+ prb_open_block(p1, pbd);
+}
+
+/* Do NOT update the last_blk_num first.
+ * Assumes sk_buff_head lock is held.
+ */
+static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
+{
+ mod_timer(&pkc->retire_blk_timer,
+ jiffies + pkc->tov_in_jiffies);
+ pkc->last_kactive_blk_num = pkc->kactive_blk_num;
+}
+
+/*
+ * Timer logic:
+ * 1) We refresh the timer only when we open a block.
+ * By doing this we don't waste cycles refreshing the timer
+ * on a packet-by-packet basis.
+ *
+ * With a 1MB block-size, on a 1Gbps line, it will take
+ * i) ~8 ms to fill a block + ii) memcpy etc.
+ * In this cut we are not accounting for the memcpy time.
+ *
+ * So, if the user sets the 'tmo' to 10ms then the timer
+ * will never fire while the block is still getting filled
+ * (which is what we want). However, the user could choose
+ * to close a block early and that's fine.
+ *
+ * But when the timer does fire, we check whether or not to refresh it.
+ * Since the tmo granularity is in msecs, it is not too expensive
+ * to refresh the timer, let's say every '8' msecs.
+ * Either the user can set the 'tmo' or we can derive it based on
+ * a) line-speed and b) block-size.
+ * prb_calc_retire_blk_tmo() calculates the tmo.
+ *
+ */
+static void prb_retire_rx_blk_timer_expired(unsigned long data)
+{
+ struct packet_sock *po = (struct packet_sock *)data;
+ struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
+ unsigned int frozen;
+ struct tpacket_block_desc *pbd;
+
+ spin_lock(&po->sk.sk_receive_queue.lock);
+
+ frozen = prb_queue_frozen(pkc);
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ if (unlikely(pkc->delete_blk_timer))
+ goto out;
+
+ /* We only need to plug the race when the block is partially filled.
+ * tpacket_rcv:
+ * lock(); increment BLOCK_NUM_PKTS; unlock()
+ * copy_bits() is in progress ...
+ * timer fires on other cpu:
+ * we can't retire the current block because copy_bits
+ * is in progress.
+ *
+ */
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+ cpu_relax();
+ }
+ }
+
+ if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
+ if (!frozen) {
+ prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
+ if (!prb_dispatch_next_block(pkc, po))
+ goto refresh_timer;
+ else
+ goto out;
+ } else {
+ /* Case 1. Queue was frozen because user-space was
+ * lagging behind.
+ */
+ if (prb_curr_blk_in_use(pkc, pbd)) {
+ /*
+ * Ok, user-space is still behind.
+ * So just refresh the timer.
+ */
+ goto refresh_timer;
+ } else {
+ /* Case 2. Queue was frozen, user-space caught up,
+ * now the link went idle && the timer fired.
+ * We don't have a block to close, so we open this
+ * block and restart the timer.
+ * Opening a block thaws the queue and restarts the
+ * timer; thawing/timer-refresh is a side effect.
+ */
+ prb_open_block(pkc, pbd);
+ goto out;
+ }
+ }
+ }
+
+refresh_timer:
+ _prb_refresh_rx_retire_blk_timer(pkc);
+
+out:
+ spin_unlock(&po->sk.sk_receive_queue.lock);
+}
+
+static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1, __u32 status)
+{
+ /* Flush everything minus the block header */
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ u8 *start, *end;
+
+ start = (u8 *)pbd1;
+
+ /* Skip the block header (we know the header WILL fit in 4K) */
+ start += PAGE_SIZE;
+
+ end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
+ for (; start < end; start += PAGE_SIZE)
+ flush_dcache_page(pgv_to_page(start));
+
+ smp_wmb();
+#endif
+
+ /* Now update the block status. */
+
+ BLOCK_STATUS(pbd1) = status;
+
+ /* Flush the block header */
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+ start = (u8 *)pbd1;
+ flush_dcache_page(pgv_to_page(start));
+
+ smp_wmb();
+#endif
+}
+
+/*
+ * Side effect:
+ *
+ * 1) flush the block
+ * 2) Increment active_blk_num
+ *
+ * Note: we deliberately do NOT refresh the timer here, because
+ * almost always the next block will be opened right away.
+ */
+static void prb_close_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1,
+ struct packet_sock *po, unsigned int stat)
+{
+ __u32 status = TP_STATUS_USER | stat;
+
+ struct tpacket3_hdr *last_pkt;
+ struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+
+ if (po->stats.tp_drops)
+ status |= TP_STATUS_LOSING;
+
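+ /* a zero tp_next_offset marks the last packet of the block
+ * for the reader
+ */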
+ last_pkt = (struct tpacket3_hdr *)pkc1->prev;
+ last_pkt->tp_next_offset = 0;
+
+ /* Get the ts of the last pkt */
+ if (BLOCK_NUM_PKTS(pbd1)) {
+ h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
+ h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
+ } else {
+ /* Ok, we tmo'd - so get the current time */
+ struct timespec ts;
+ getnstimeofday(&ts);
+ h1->ts_last_pkt.ts_sec = ts.tv_sec;
+ h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
+ }
+
+ smp_wmb();
+
+ /* Flush the block */
+ prb_flush_block(pkc1, pbd1, status);
+
+ pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
+}
+
+static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
+{
+ pkc->reset_pending_on_curr_blk = 0;
+}
+
+/*
+ * Side effect of opening a block:
+ *
+ * 1) prb_queue is thawed.
+ * 2) retire_blk_timer is refreshed.
+ *
+ */
+static void prb_open_block(struct tpacket_kbdq_core *pkc1,
+ struct tpacket_block_desc *pbd1)
+{
+ struct timespec ts;
+ struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+
+ smp_rmb();
+
+ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
+
+ /* We could have just memset this, but we would lose the
+ * flexibility of making the priv area sticky
+ */
+ BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
+ BLOCK_NUM_PKTS(pbd1) = 0;
+ BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+ getnstimeofday(&ts);
+ h1->ts_first_pkt.ts_sec = ts.tv_sec;
+ h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
+ pkc1->pkblk_start = (char *)pbd1;
+ pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
+ BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+ BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
+ BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
+ pbd1->version = pkc1->version;
+ pkc1->prev = pkc1->nxt_offset;
+ pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
+ prb_thaw_queue(pkc1);
+ _prb_refresh_rx_retire_blk_timer(pkc1);
+
+ smp_wmb();
+
+ return;
+ }
+
+ WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
+ pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
+ dump_stack();
+ BUG();
+}
+
+/*
+ * Queue freeze logic:
+ * 1) Assume tp_block_nr = 8 blocks.
+ * 2) At time 't0', user opens Rx ring.
+ * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
+ * 4) user-space is either sleeping or processing block '0'.
+ * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
+ * it will close block-7, loop around and try to fill block '0'.
+ * call-flow:
+ * __packet_lookup_frame_in_block
+ * prb_retire_current_block()
+ * prb_dispatch_next_block()
+ * |->(BLOCK_STATUS == USER) evaluates to true
+ * 5.1) Since block-0 is currently in-use, we just freeze the queue.
+ * 6) Now there are two cases:
+ * 6.1) Link goes idle right after the queue is frozen.
+ * But remember, the last open_block() refreshed the timer.
+ * When this timer expires, it will refresh itself so that we can
+ * re-open block-0 in near future.
+ * 6.2) Link is busy and keeps on receiving packets. This is a simple
+ * case and __packet_lookup_frame_in_block will check if block-0
+ * is free and can now be re-used.
+ */
+static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po)
+{
+ pkc->reset_pending_on_curr_blk = 1;
+ po->stats_u.stats3.tp_freeze_q_cnt++;
+}
+
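+/* every packet stored in a block is padded out to V3_ALIGNMENT (8 bytes),
+ * so tp_next_offset always lands on an aligned tpacket3_hdr
+ */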
+#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
+
+/*
+ * If the next block is free then we will dispatch it
+ * and return a good offset.
+ * Else, we will freeze the queue.
+ * So, caller must check the return value.
+ */
+static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po)
+{
+ struct tpacket_block_desc *pbd;
+
+ smp_rmb();
+
+ /* 1. Get current block num */
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* 2. If this block is currently in_use then freeze the queue */
+ if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
+ prb_freeze_queue(pkc, po);
+ return NULL;
+ }
+
+ /*
+ * 3.
+ * open this block and return the offset where the first packet
+ * needs to get stored.
+ */
+ prb_open_block(pkc, pbd);
+ return (void *)pkc->nxt_offset;
+}
+
+static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
+ struct packet_sock *po, unsigned int status)
+{
+ struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* retire/close the current block */
+ if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
+ /*
+ * Plug the case where copy_bits() is in progress on
+ * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
+ * have space to copy the pkt in the current block and
+ * called prb_retire_current_block()
+ *
+ * We don't need to worry about the TMO case because
+ * the timer-handler already handled this case.
+ */
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+ cpu_relax();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+ return;
+ }
+
+ WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
+ dump_stack();
+ BUG();
+}
+
+static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd)
+{
+ return TP_STATUS_USER & BLOCK_STATUS(pbd);
+}
+
+static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
+{
+ return pkc->reset_pending_on_curr_blk;
+}
+
+static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
+{
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ atomic_dec(&pkc->blk_fill_in_prog);
+}
+
+static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
+}
+
+static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ ppd->hv1.tp_rxhash = 0;
+}
+
+static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ if (vlan_tx_tag_present(pkc->skb)) {
+ ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
+ ppd->tp_status = TP_STATUS_VLAN_VALID;
+ } else {
+ ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
+ }
+}
+
+static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
+ struct tpacket3_hdr *ppd)
+{
+ prb_fill_vlan_info(pkc, ppd);
+
+ if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
+ prb_fill_rxhash(pkc, ppd);
+ else
+ prb_clear_rxhash(pkc, ppd);
+}
+
+static void prb_fill_curr_block(char *curr,
+ struct tpacket_kbdq_core *pkc,
+ struct tpacket_block_desc *pbd,
+ unsigned int len)
+{
+ struct tpacket3_hdr *ppd;
+
+ ppd = (struct tpacket3_hdr *)curr;
+ ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
+ pkc->prev = curr;
+ pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
+ BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
+ BLOCK_NUM_PKTS(pbd) += 1;
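+ /* paired with prb_clear_blk_fill_status(): keeps the retire timer
+ * from closing this block while skb_copy_bits() is still running
+ */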
+ atomic_inc(&pkc->blk_fill_in_prog);
+ prb_run_all_ft_ops(pkc, ppd);
+}
+
+/* Assumes caller has the sk->rx_queue.lock */
+static void *__packet_lookup_frame_in_block(struct packet_sock *po,
+ struct sk_buff *skb,
+ int status,
+ unsigned int len)
+{
+ struct tpacket_kbdq_core *pkc;
+ struct tpacket_block_desc *pbd;
+ char *curr, *end;
+
+ pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+
+ /* Queue is frozen when user space is lagging behind */
+ if (prb_queue_frozen(pkc)) {
+ /*
+ * Check if the last block, which caused the queue to freeze,
+ * is still in use by user-space.
+ */
+ if (prb_curr_blk_in_use(pkc, pbd)) {
+ /* Can't record this packet */
+ return NULL;
+ } else {
+ /*
+ * Ok, the block was released by user-space.
+ * Now let's open that block.
+ * opening a block also thaws the queue.
+ * Thawing is a side effect.
+ */
+ prb_open_block(pkc, pbd);
+ }
+ }
+
+ smp_mb();
+ curr = pkc->nxt_offset;
+ pkc->skb = skb;
+ end = (char *) ((char *)pbd + pkc->kblk_size);
+
+ /* first try the current block */
+ if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
+ prb_fill_curr_block(curr, pkc, pbd, len);
+ return (void *)curr;
+ }
+
+ /* Ok, close the current block */
+ prb_retire_current_block(pkc, po, 0);
+
+ /* Now, try to dispatch the next block */
+ curr = (char *)prb_dispatch_next_block(pkc, po);
+ if (curr) {
+ pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
+ prb_fill_curr_block(curr, pkc, pbd, len);
+ return (void *)curr;
+ }
+
+ /*
+ * No free blocks are available. User-space hasn't caught up yet.
+ * Queue was just frozen and now this packet will get dropped.
+ */
+ return NULL;
+}
+
+static void *packet_current_rx_frame(struct packet_sock *po,
+ struct sk_buff *skb,
+ int status, unsigned int len)
+{
+ char *curr = NULL;
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ curr = packet_lookup_frame(po, &po->rx_ring,
+ po->rx_ring.head, status);
+ return curr;
+ case TPACKET_V3:
+ return __packet_lookup_frame_in_block(po, skb, status, len);
+ default:
+ WARN(1, "TPACKET version not supported\n");
+ BUG();
+ return NULL;
+ }
+}
+
+static void *prb_lookup_block(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ unsigned int previous,
+ int status)
+{
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
+ struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
+
+ if (status != BLOCK_STATUS(pbd))
+ return NULL;
+ return pbd;
+}
+
+static int prb_previous_blk_num(struct packet_ring_buffer *rb)
+{
+ unsigned int prev;
+ if (rb->prb_bdqc.kactive_blk_num)
+ prev = rb->prb_bdqc.kactive_blk_num-1;
+ else
+ prev = rb->prb_bdqc.knum_blocks-1;
+ return prev;
+}
+
+/* Assumes caller has held the rx_queue.lock */
+static void *__prb_previous_block(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ unsigned int previous = prb_previous_blk_num(rb);
+ return prb_lookup_block(po, rb, previous, status);
+}
+
+static void *packet_previous_rx_frame(struct packet_sock *po,
+ struct packet_ring_buffer *rb,
+ int status)
+{
+ if (po->tp_version <= TPACKET_V2)
+ return packet_previous_frame(po, rb, status);
+
+ return __prb_previous_block(po, rb, status);
+}
+
+static void packet_increment_rx_head(struct packet_sock *po,
+ struct packet_ring_buffer *rb)
+{
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ return packet_increment_head(rb);
+ case TPACKET_V3:
+ default:
+ WARN(1, "TPACKET version not supported.\n");
+ BUG();
+ return;
+ }
+}
+
+static void *packet_previous_frame(struct packet_sock *po,
struct packet_ring_buffer *rb,
int status)
{
@@ -397,7 +1156,7 @@ static inline void *packet_previous_frame(struct packet_sock *po,
return packet_lookup_frame(po, rb, previous, status);
}
-static inline void packet_increment_head(struct packet_ring_buffer *buff)
+static void packet_increment_head(struct packet_ring_buffer *buff)
{
buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
@@ -454,43 +1213,6 @@ static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *sk
return f->arr[cpu % num];
}
-static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
- const struct iphdr *iph;
- u32 len;
-
- if (skb->protocol != htons(ETH_P_IP))
- return skb;
-
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return skb;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- return skb;
- if (!pskb_may_pull(skb, iph->ihl*4))
- return skb;
- iph = ip_hdr(skb);
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < (iph->ihl * 4))
- return skb;
-
- if (ip_is_fragment(ip_hdr(skb))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (skb) {
- if (pskb_trim_rcsum(skb, len))
- return skb;
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
- if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
- return NULL;
- skb->rxhash = 0;
- }
- }
-#endif
- return skb;
-}
-
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -509,7 +1231,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
case PACKET_FANOUT_HASH:
default:
if (f->defrag) {
- skb = fanout_check_defrag(skb);
+ skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
if (!skb)
return 0;
}
@@ -836,7 +1558,7 @@ out_free:
return err;
}
-static inline unsigned int run_filter(const struct sk_buff *skb,
+static unsigned int run_filter(const struct sk_buff *skb,
const struct sock *sk,
unsigned int res)
{
@@ -985,12 +1707,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
union {
struct tpacket_hdr *h1;
struct tpacket2_hdr *h2;
+ struct tpacket3_hdr *h3;
void *raw;
} h;
u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
- unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
+ unsigned long status = TP_STATUS_USER;
unsigned short macoff, netoff, hdrlen;
struct sk_buff *copy_skb = NULL;
struct timeval tv;
@@ -1036,37 +1759,46 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
po->tp_reserve;
macoff = netoff - maclen;
}
-
- if (macoff + snaplen > po->rx_ring.frame_size) {
- if (po->copy_thresh &&
- atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
- (unsigned)sk->sk_rcvbuf) {
- if (skb_shared(skb)) {
- copy_skb = skb_clone(skb, GFP_ATOMIC);
- } else {
- copy_skb = skb_get(skb);
- skb_head = skb->data;
+ if (po->tp_version <= TPACKET_V2) {
+ if (macoff + snaplen > po->rx_ring.frame_size) {
+ if (po->copy_thresh &&
+ atomic_read(&sk->sk_rmem_alloc) + skb->truesize
+ < (unsigned)sk->sk_rcvbuf) {
+ if (skb_shared(skb)) {
+ copy_skb = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ copy_skb = skb_get(skb);
+ skb_head = skb->data;
+ }
+ if (copy_skb)
+ skb_set_owner_r(copy_skb, sk);
}
- if (copy_skb)
- skb_set_owner_r(copy_skb, sk);
+ snaplen = po->rx_ring.frame_size - macoff;
+ if ((int)snaplen < 0)
+ snaplen = 0;
}
- snaplen = po->rx_ring.frame_size - macoff;
- if ((int)snaplen < 0)
- snaplen = 0;
}
-
spin_lock(&sk->sk_receive_queue.lock);
- h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
+ h.raw = packet_current_rx_frame(po, skb,
+ TP_STATUS_KERNEL, (macoff+snaplen));
if (!h.raw)
goto ring_is_full;
- packet_increment_head(&po->rx_ring);
+ if (po->tp_version <= TPACKET_V2) {
+ packet_increment_rx_head(po, &po->rx_ring);
+ /*
+ * LOSING will be reported till you read the stats,
+ * because it's COR - Clear On Read.
+ * Anyway, this is done for V1/V2 only, as V3 doesn't need it
+ * at the packet level.
+ */
+ if (po->stats.tp_drops)
+ status |= TP_STATUS_LOSING;
+ }
po->stats.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
}
- if (!po->stats.tp_drops)
- status &= ~TP_STATUS_LOSING;
spin_unlock(&sk->sk_receive_queue.lock);
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
@@ -1117,6 +1849,29 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_padding = 0;
hdrlen = sizeof(*h.h2);
break;
+ case TPACKET_V3:
+ /* tp_next_offset and the vlan fields are already populated
+ * above, so DON'T clear them here.
+ */
+ h.h3->tp_status |= status;
+ h.h3->tp_len = skb->len;
+ h.h3->tp_snaplen = snaplen;
+ h.h3->tp_mac = macoff;
+ h.h3->tp_net = netoff;
+ if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
+ && shhwtstamps->syststamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->syststamp);
+ else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
+ && shhwtstamps->hwtstamp.tv64)
+ ts = ktime_to_timespec(shhwtstamps->hwtstamp);
+ else if (skb->tstamp.tv64)
+ ts = ktime_to_timespec(skb->tstamp);
+ else
+ getnstimeofday(&ts);
+ h.h3->tp_sec = ts.tv_sec;
+ h.h3->tp_nsec = ts.tv_nsec;
+ hdrlen = sizeof(*h.h3);
+ break;
default:
BUG();
}
@@ -1137,13 +1892,19 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
{
u8 *start, *end;
- end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
- for (start = h.raw; start < end; start += PAGE_SIZE)
- flush_dcache_page(pgv_to_page(start));
+ if (po->tp_version <= TPACKET_V2) {
+ end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
+ + macoff + snaplen);
+ for (start = h.raw; start < end; start += PAGE_SIZE)
+ flush_dcache_page(pgv_to_page(start));
+ }
smp_wmb();
}
#endif
- __packet_set_status(po, h.raw, status);
+ if (po->tp_version <= TPACKET_V2)
+ __packet_set_status(po, h.raw, status);
+ else
+ prb_clear_blk_fill_status(&po->rx_ring);
sk->sk_data_ready(sk, 0);
@@ -1170,8 +1931,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
struct packet_sock *po = pkt_sk(skb->sk);
void *ph;
- BUG_ON(skb == NULL);
-
if (likely(po->tx_ring.pg_vec)) {
ph = skb_shinfo(skb)->destructor_arg;
BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
@@ -1408,10 +2167,10 @@ out:
return err;
}
-static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
- size_t reserve, size_t len,
- size_t linear, int noblock,
- int *err)
+static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
+ size_t reserve, size_t len,
+ size_t linear, int noblock,
+ int *err)
{
struct sk_buff *skb;
@@ -1634,7 +2393,7 @@ static int packet_release(struct socket *sock)
struct sock *sk = sock->sk;
struct packet_sock *po;
struct net *net;
- struct tpacket_req req;
+ union tpacket_req_u req_u;
if (!sk)
return 0;
@@ -1657,13 +2416,13 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
- memset(&req, 0, sizeof(req));
+ memset(&req_u, 0, sizeof(req_u));
if (po->rx_ring.pg_vec)
- packet_set_ring(sk, &req, 1, 0);
+ packet_set_ring(sk, &req_u, 1, 0);
if (po->tx_ring.pg_vec)
- packet_set_ring(sk, &req, 1, 1);
+ packet_set_ring(sk, &req_u, 1, 1);
fanout_release(sk);
@@ -2283,15 +3042,27 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
case PACKET_RX_RING:
case PACKET_TX_RING:
{
- struct tpacket_req req;
+ union tpacket_req_u req_u;
+ int len;
- if (optlen < sizeof(req))
+ switch (po->tp_version) {
+ case TPACKET_V1:
+ case TPACKET_V2:
+ len = sizeof(req_u.req);
+ break;
+ case TPACKET_V3:
+ default:
+ len = sizeof(req_u.req3);
+ break;
+ }
+ if (optlen < len)
return -EINVAL;
if (pkt_sk(sk)->has_vnet_hdr)
return -EINVAL;
- if (copy_from_user(&req, optval, sizeof(req)))
+ if (copy_from_user(&req_u.req, optval, len))
return -EFAULT;
- return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
+ return packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
}
case PACKET_COPY_THRESH:
{
@@ -2318,6 +3089,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
switch (val) {
case TPACKET_V1:
case TPACKET_V2:
+ case TPACKET_V3:
po->tp_version = val;
return 0;
default:
@@ -2427,6 +3199,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
struct packet_sock *po = pkt_sk(sk);
void *data;
struct tpacket_stats st;
+ union tpacket_stats_u st_u;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -2439,15 +3212,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
switch (optname) {
case PACKET_STATISTICS:
- if (len > sizeof(struct tpacket_stats))
- len = sizeof(struct tpacket_stats);
+ if (po->tp_version == TPACKET_V3) {
+ len = sizeof(struct tpacket_stats_v3);
+ } else {
+ if (len > sizeof(struct tpacket_stats))
+ len = sizeof(struct tpacket_stats);
+ }
spin_lock_bh(&sk->sk_receive_queue.lock);
- st = po->stats;
+ if (po->tp_version == TPACKET_V3) {
+ memcpy(&st_u.stats3, &po->stats,
+ sizeof(struct tpacket_stats));
+ st_u.stats3.tp_freeze_q_cnt =
+ po->stats_u.stats3.tp_freeze_q_cnt;
+ st_u.stats3.tp_packets += po->stats.tp_drops;
+ data = &st_u.stats3;
+ } else {
+ st = po->stats;
+ st.tp_packets += st.tp_drops;
+ data = &st;
+ }
memset(&po->stats, 0, sizeof(st));
spin_unlock_bh(&sk->sk_receive_queue.lock);
- st.tp_packets += st.tp_drops;
-
- data = &st;
break;
case PACKET_AUXDATA:
if (len > sizeof(int))
@@ -2488,6 +3273,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case TPACKET_V2:
val = sizeof(struct tpacket2_hdr);
break;
+ case TPACKET_V3:
+ val = sizeof(struct tpacket3_hdr);
+ break;
default:
return -EINVAL;
}
@@ -2644,7 +3432,8 @@ static unsigned int packet_poll(struct file *file, struct socket *sock,
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
- if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
+ if (!packet_previous_rx_frame(po, &po->rx_ring,
+ TP_STATUS_KERNEL))
mask |= POLLIN | POLLRDNORM;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2705,7 +3494,7 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
kfree(pg_vec);
}
-static inline char *alloc_one_pg_vec_page(unsigned long order)
+static char *alloc_one_pg_vec_page(unsigned long order)
{
char *buffer = NULL;
gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
@@ -2763,7 +3552,7 @@ out_free_pgvec:
goto out;
}
-static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
+static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
int closing, int tx_ring)
{
struct pgv *pg_vec = NULL;
@@ -2772,7 +3561,15 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
struct packet_ring_buffer *rb;
struct sk_buff_head *rb_queue;
__be16 num;
- int err;
+ int err = -EINVAL;
+ /* Alias added to keep code churn minimal */
+ struct tpacket_req *req = &req_u->req;
+
+ /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ WARN(1, "Tx-ring is not supported.\n");
+ goto out;
+ }
rb = tx_ring ? &po->tx_ring : &po->rx_ring;
rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@ -2798,6 +3595,9 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
case TPACKET_V2:
po->tp_hdrlen = TPACKET2_HDRLEN;
break;
+ case TPACKET_V3:
+ po->tp_hdrlen = TPACKET3_HDRLEN;
+ break;
}
err = -EINVAL;
@@ -2823,6 +3623,17 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
pg_vec = alloc_pg_vec(req, order);
if (unlikely(!pg_vec))
goto out;
+ switch (po->tp_version) {
+ case TPACKET_V3:
+ /* Transmit path is not supported. We checked
+ * it above but just being paranoid
+ */
+ if (!tx_ring)
+ init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
+ break;
+ default:
+ break;
+ }
}
/* Done */
else {
@@ -2875,7 +3686,11 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
-
+ if (closing && (po->tp_version > TPACKET_V2)) {
+ /* Because we don't support block-based V3 on tx-ring */
+ if (!tx_ring)
+ prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ }
release_sock(sk);
if (pg_vec)
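As an aside, here is a minimal user-space sketch of setting up the TPACKET_V3 receive ring added above. The tpacket_req3 fields come from this patch series; error handling is omitted, and fd is assumed to come from socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)).

#include <linux/if_packet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <string.h>

static void *setup_v3_rx_ring(int fd, struct tpacket_req3 *req)
{
	int ver = TPACKET_V3;

	/* select the V3 header format before sizing the ring */
	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));

	memset(req, 0, sizeof(*req));
	req->tp_block_size = 1 << 20;	/* 1 MB blocks: ~8 ms at 1 Gbit/s */
	req->tp_block_nr = 8;
	req->tp_frame_size = 2048;	/* V3 packs variable-sized frames */
	req->tp_frame_nr = req->tp_block_nr *
			   (req->tp_block_size / req->tp_frame_size);
	req->tp_retire_blk_tov = 10;	/* retire a part-filled block after 10 ms */
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));

	/* one contiguous mapping covering all blocks, laid out back to back */
	return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

The reader then poll()s the socket, walks the packets of each ready block via tpacket3_hdr.tp_next_offset, and hands the block back to the kernel by writing TP_STATUS_KERNEL into its block_status field.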
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index c6fffd9..bf10ea8 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -480,7 +480,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
if (proto_tab[protocol])
err = -EBUSY;
else
- rcu_assign_pointer(proto_tab[protocol], pp);
+ RCU_INIT_POINTER(proto_tab[protocol], pp);
mutex_unlock(&proto_tab_lock);
return err;
@@ -491,7 +491,7 @@ void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
{
mutex_lock(&proto_tab_lock);
BUG_ON(proto_tab[protocol] != pp);
- rcu_assign_pointer(proto_tab[protocol], NULL);
+ RCU_INIT_POINTER(proto_tab[protocol], NULL);
mutex_unlock(&proto_tab_lock);
synchronize_rcu();
proto_unregister(pp->prot);
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 2f03238..bf35b4e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -30,6 +30,7 @@
#include <net/sock.h>
#include <linux/phonet.h>
+#include <linux/export.h>
#include <net/phonet/phonet.h>
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index f17fd84..2ba6e9f 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -30,6 +30,7 @@
#include <asm/ioctls.h>
#include <linux/phonet.h>
+#include <linux/module.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/gprs.h>
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index d2df8f3..c582761 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -276,7 +276,7 @@ static void phonet_route_autodel(struct net_device *dev)
mutex_lock(&pnn->routes.lock);
for (i = 0; i < 64; i++)
if (dev == pnn->routes.table[i]) {
- rcu_assign_pointer(pnn->routes.table[i], NULL);
+ RCU_INIT_POINTER(pnn->routes.table[i], NULL);
set_bit(i, deleted);
}
mutex_unlock(&pnn->routes.lock);
@@ -390,7 +390,7 @@ int phonet_route_add(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (routes->table[daddr] == NULL) {
- rcu_assign_pointer(routes->table[daddr], dev);
+ RCU_INIT_POINTER(routes->table[daddr], dev);
dev_hold(dev);
err = 0;
}
@@ -406,7 +406,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
if (dev == routes->table[daddr])
- rcu_assign_pointer(routes->table[daddr], NULL);
+ RCU_INIT_POINTER(routes->table[daddr], NULL);
else
dev = NULL;
mutex_unlock(&routes->lock);
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ab07711..3f8d0b1 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -31,6 +31,7 @@
#include <net/tcp_states.h>
#include <linux/phonet.h>
+#include <linux/export.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/pn_dev.h>
@@ -679,7 +680,7 @@ int pn_sock_bind_res(struct sock *sk, u8 res)
mutex_lock(&resource_mutex);
if (pnres.sk[res] == NULL) {
sock_hold(sk);
- rcu_assign_pointer(pnres.sk[res], sk);
+ RCU_INIT_POINTER(pnres.sk[res], sk);
ret = 0;
}
mutex_unlock(&resource_mutex);
@@ -695,7 +696,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
mutex_lock(&resource_mutex);
if (pnres.sk[res] == sk) {
- rcu_assign_pointer(pnres.sk[res], NULL);
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
ret = 0;
}
mutex_unlock(&resource_mutex);
@@ -714,7 +715,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
mutex_lock(&resource_mutex);
for (res = 0; res < 256; res++) {
if (pnres.sk[res] == sk) {
- rcu_assign_pointer(pnres.sk[res], NULL);
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
match++;
}
}
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index ec753b3..4cf6dc7 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -9,6 +9,7 @@ config RDS
config RDS_RDMA
tristate "RDS over Infiniband and iWARP"
+ select LLIST
depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
---help---
Allow RDS to use Infiniband and iWARP as a transport.
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 6daaa49..e5b65ac 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -34,6 +34,7 @@
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9334d89..9e07c75 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/inet_hashtables.h>
#include "rds.h"
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 3b83086..b4c8b00 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -38,6 +38,7 @@
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "rds.h"
#include "ib.h"
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 819c35a..e8fdb17 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -33,10 +33,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
+#include <linux/llist.h>
#include "rds.h"
#include "ib.h"
-#include "xlist.h"
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0
@@ -49,7 +49,7 @@ struct rds_ib_mr {
struct rds_ib_mr_pool *pool;
struct ib_fmr *fmr;
- struct xlist_head xlist;
+ struct llist_node llnode;
/* unmap_list is for freeing */
struct list_head unmap_list;
@@ -71,9 +71,9 @@ struct rds_ib_mr_pool {
atomic_t item_count; /* total # of MRs */
atomic_t dirty_count; /* # dirty of MRs */
- struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
- struct xlist_head free_list; /* unused MRs */
- struct xlist_head clean_list; /* global unused & unamapped MRs */
+ struct llist_head drop_list; /* MRs that have reached their max_maps limit */
+ struct llist_head free_list; /* unused MRs */
+ struct llist_head clean_list; /* global unused & unmapped MRs */
wait_queue_head_t flush_wait;
atomic_t free_pinned; /* memory pinned by free MRs */
@@ -220,9 +220,9 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
if (!pool)
return ERR_PTR(-ENOMEM);
- INIT_XLIST_HEAD(&pool->free_list);
- INIT_XLIST_HEAD(&pool->drop_list);
- INIT_XLIST_HEAD(&pool->clean_list);
+ init_llist_head(&pool->free_list);
+ init_llist_head(&pool->drop_list);
+ init_llist_head(&pool->clean_list);
mutex_init(&pool->flush_lock);
init_waitqueue_head(&pool->flush_wait);
INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
@@ -260,26 +260,18 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
kfree(pool);
}
-static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
- struct rds_ib_mr **ibmr_ret)
-{
- struct xlist_head *ibmr_xl;
- ibmr_xl = xlist_del_head_fast(xl);
- *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
-}
-
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
- struct xlist_head *ret;
+ struct llist_node *ret;
unsigned long *flag;
preempt_disable();
flag = &__get_cpu_var(clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
- ret = xlist_del_head(&pool->clean_list);
+ ret = llist_del_first(&pool->clean_list);
if (ret)
- ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+ ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
clear_bit(CLEAN_LIST_BUSY_BIT, flag);
preempt_enable();
@@ -529,46 +521,44 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
}
/*
- * given an xlist of mrs, put them all into the list_head for more processing
+ * given an llist of mrs, put them all into the list_head for more processing
*/
-static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
{
struct rds_ib_mr *ibmr;
- struct xlist_head splice;
- struct xlist_head *cur;
- struct xlist_head *next;
-
- splice.next = NULL;
- xlist_splice(xlist, &splice);
- cur = splice.next;
- while (cur) {
- next = cur->next;
- ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+ struct llist_node *node;
+ struct llist_node *next;
+
+ node = llist_del_all(llist);
+ while (node) {
+ next = node->next;
+ ibmr = llist_entry(node, struct rds_ib_mr, llnode);
list_add_tail(&ibmr->unmap_list, list);
- cur = next;
+ node = next;
}
}
/*
- * this takes a list head of mrs and turns it into an xlist of clusters.
- * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
- * reuse.
+ * this takes a list head of mrs and turns it into linked llist nodes
+ * of clusters. Each cluster has linked llist nodes of
+ * MR_CLUSTER_SIZE mrs that are ready for reuse.
*/
-static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
- struct list_head *list, struct xlist_head *xlist,
- struct xlist_head **tail_ret)
+static void list_to_llist_nodes(struct rds_ib_mr_pool *pool,
+ struct list_head *list,
+ struct llist_node **nodes_head,
+ struct llist_node **nodes_tail)
{
struct rds_ib_mr *ibmr;
- struct xlist_head *cur_mr = xlist;
- struct xlist_head *tail_mr = NULL;
+ struct llist_node *cur = NULL;
+ struct llist_node **next = nodes_head;
list_for_each_entry(ibmr, list, unmap_list) {
- tail_mr = &ibmr->xlist;
- tail_mr->next = NULL;
- cur_mr->next = tail_mr;
- cur_mr = tail_mr;
+ cur = &ibmr->llnode;
+ *next = cur;
+ next = &cur->next;
}
- *tail_ret = tail_mr;
+ *next = NULL;
+ *nodes_tail = cur;
}
/*
@@ -581,8 +571,8 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr, *next;
- struct xlist_head clean_xlist;
- struct xlist_head *clean_tail;
+ struct llist_node *clean_nodes;
+ struct llist_node *clean_tail;
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
@@ -603,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
prepare_to_wait(&pool->flush_wait, &wait,
TASK_UNINTERRUPTIBLE);
- if (xlist_empty(&pool->clean_list))
+ if (llist_empty(&pool->clean_list))
schedule();
ibmr = rds_ib_reuse_fmr(pool);
@@ -628,10 +618,10 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
/* Get the list of all MRs to be dropped. Ordering matters -
* we want to put drop_list ahead of free_list.
*/
- xlist_append_to_list(&pool->drop_list, &unmap_list);
- xlist_append_to_list(&pool->free_list, &unmap_list);
+ llist_append_to_list(&pool->drop_list, &unmap_list);
+ llist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
- xlist_append_to_list(&pool->clean_list, &unmap_list);
+ llist_append_to_list(&pool->clean_list, &unmap_list);
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -663,22 +653,22 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
if (!list_empty(&unmap_list)) {
/* we have to make sure that none of the things we're about
* to put on the clean list would race with other cpus trying
- * to pull items off. The xlist would explode if we managed to
+ * to pull items off. The llist would explode if we managed to
* remove something from the clean list and then add it back again
- * while another CPU was spinning on that same item in xlist_del_head.
+ * while another CPU was spinning on that same item in llist_del_first.
*
- * This is pretty unlikely, but just in case wait for an xlist grace period
+ * This is pretty unlikely, but just in case wait for an llist grace period
* here before adding anything back into the clean list.
*/
wait_clean_list_grace();
- list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+ list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
if (ibmr_ret)
- refill_local(pool, &clean_xlist, ibmr_ret);
+ *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
- /* refill_local may have emptied our list */
- if (!xlist_empty(&clean_xlist))
- xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+ /* if more than one entry remains, put the rest back on the clean list */
+ if (clean_nodes->next)
+ llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
}
@@ -711,9 +701,9 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
/* Return it to the pool's free list */
if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
+ llist_add(&ibmr->llnode, &pool->drop_list);
else
- xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
+ llist_add(&ibmr->llnode, &pool->free_list);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
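llist_add() pushes one node onto the lock-free list, replacing the old xlist_add(node, node, head) call in which head and tail were the same single node. Its core is a cmpxchg retry loop; roughly (a sketch of the behavior, the real code lives in include/linux/llist.h):

/* Sketch: lock-free single-node push, as llist_add() behaves. */
static void push(struct llist_node *new, struct llist_head *head)
{
	struct llist_node *first;

	do {
		first = head->first;
		new->next = first;	/* link new in front of current first */
	} while (cmpxchg(&head->first, first, new) != first);
}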
diff --git a/net/rds/info.c b/net/rds/info.c
index 4fdf1b6..f1c016c 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include "rds.h"
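The include-only hunks scattered through this merge come from the module.h disentangling work: a file that merely exports symbols now pulls in <linux/export.h>, one that only defines module parameters pulls in <linux/moduleparam.h>, and only true modules keep the full <linux/module.h>. As a rule of thumb:

#include <linux/export.h>	/* EXPORT_SYMBOL, EXPORT_SYMBOL_GPL, ...        */
#include <linux/moduleparam.h>	/* module_param() and friends                   */
#include <linux/module.h>	/* full module support: MODULE_*, module_init() */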
diff --git a/net/rds/iw.c b/net/rds/iw.c
index f747484..7826d46 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -38,6 +38,7 @@
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "rds.h"
#include "iw.h"
diff --git a/net/rds/message.c b/net/rds/message.c
index 1fd3d29..f0a4658 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -32,6 +32,7 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/page.c b/net/rds/page.c
index b82d63e..2499cd1 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -33,6 +33,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index f8760e1..c2be901 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*
*/
+#include <linux/module.h>
#include <rdma/rdma_cm.h>
#include "rdma_transport.h"
diff --git a/net/rds/rds.h b/net/rds/rds.h
index da8adac..7eaba18 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -36,8 +36,8 @@
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
-static inline void __attribute__ ((format (printf, 1, 2)))
-rdsdebug(char *fmt, ...)
+static inline __printf(1, 2)
+void rdsdebug(char *fmt, ...)
{
}
#endif
@@ -625,8 +625,8 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_lengths *lens,
int (*visitor)(struct rds_connection *, void *),
size_t item_len);
-void __rds_conn_error(struct rds_connection *conn, const char *, ...)
- __attribute__ ((format (printf, 2, 3)));
+__printf(2, 3)
+void __rds_conn_error(struct rds_connection *conn, const char *, ...);
#define rds_conn_error(conn, fmt...) \
__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 596689e..bc3f8cd 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/in.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/send.c b/net/rds/send.c
index aa57e22..e2d63c5 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -31,11 +31,13 @@
*
*/
#include <linux/kernel.h>
+#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 10c759c..7be790d 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -33,6 +33,7 @@
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 8e0a320..edac9ef 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
+#include <linux/module.h>
#include <net/tcp.h>
#include "rds.h"
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 0fd90f8..65eaefc 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -32,6 +32,7 @@
*/
#include <linux/kernel.h>
#include <linux/random.h>
+#include <linux/export.h>
#include "rds.h"
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
deleted file mode 100644
index e6b5190..0000000
--- a/net/rds/xlist.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef _LINUX_XLIST_H
-#define _LINUX_XLIST_H
-
-#include <linux/stddef.h>
-#include <linux/poison.h>
-#include <linux/prefetch.h>
-#include <asm/system.h>
-
-struct xlist_head {
- struct xlist_head *next;
-};
-
-static inline void INIT_XLIST_HEAD(struct xlist_head *list)
-{
- list->next = NULL;
-}
-
-static inline int xlist_empty(struct xlist_head *head)
-{
- return head->next == NULL;
-}
-
-static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
- struct xlist_head *head)
-{
- struct xlist_head *cur;
- struct xlist_head *check;
-
- while (1) {
- cur = head->next;
- tail->next = cur;
- check = cmpxchg(&head->next, cur, new);
- if (check == cur)
- break;
- }
-}
-
-static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
-{
- struct xlist_head *cur;
- struct xlist_head *check;
- struct xlist_head *next;
-
- while (1) {
- cur = head->next;
- if (!cur)
- goto out;
-
- next = cur->next;
- check = cmpxchg(&head->next, cur, next);
- if (check == cur)
- goto out;
- }
-out:
- return cur;
-}
-
-static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
-{
- struct xlist_head *cur;
-
- cur = head->next;
- if (!cur)
- return NULL;
-
- head->next = cur->next;
- return cur;
-}
-
-static inline void xlist_splice(struct xlist_head *list,
- struct xlist_head *head)
-{
- struct xlist_head *cur;
-
- WARN_ON(head->next);
- cur = xchg(&list->next, NULL);
- head->next = cur;
-}
-
-#endif
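The private xlist implementation deleted above is superseded by the generic <linux/llist.h>, which provides the same NULL-terminated, cmpxchg-based lock-free list under standard names. The rough correspondence used by the RDS conversion (a sketch, not exhaustive):

#include <linux/llist.h>

static void llist_vs_xlist(struct llist_head *head, struct llist_node *one,
			   struct llist_node *first, struct llist_node *last)
{
	if (!llist_empty(head))			/* was xlist_empty()               */
		llist_del_first(head);		/* was xlist_del_head()            */
	llist_add(one, head);			/* was xlist_add(one, one, head)   */
	llist_add_batch(first, last, head);	/* was xlist_add(first, last, head) */
	llist_del_all(head);			/* was xlist_splice(), roughly     */
}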
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index be90640..5be1957 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -235,7 +235,7 @@ static bool __rfkill_set_hw_state(struct rfkill *rfkill,
else
rfkill->state &= ~RFKILL_BLOCK_HW;
*change = prev != blocked;
- any = rfkill->state & RFKILL_BLOCK_ANY;
+ any = !!(rfkill->state & RFKILL_BLOCK_ANY);
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
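The rfkill one-liner is the standard double-negation idiom: state & RFKILL_BLOCK_ANY yields the raw mask bits, and !! collapses any non-zero result to exactly 1 before it lands in a narrower variable. A tiny standalone illustration of why that matters:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int state = 0x100;		   /* flag bit above the low byte */
	unsigned char truncated = state & 0x100;   /* 0x100 truncates to 0 */
	bool normalized = !!(state & 0x100);	   /* always exactly 0 or 1 */

	printf("%u %u\n", truncated, normalized);  /* prints: 0 1 */
	return 0;
}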
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 1bca6d4..24c55c5 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -15,6 +15,7 @@
#include <linux/input.h>
#include <linux/slab.h>
+#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 256c5dd..128677d 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -101,6 +101,14 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
if (!rfkill)
return -ENOMEM;
+ if (pdata->gpio_runtime_setup) {
+ ret = pdata->gpio_runtime_setup(pdev);
+ if (ret) {
+ pr_warn("%s: can't set up gpio\n", __func__);
+ return ret;
+ }
+ }
+
rfkill->pdata = pdata;
len = strlen(pdata->name);
@@ -182,7 +190,10 @@ fail_alloc:
static int rfkill_gpio_remove(struct platform_device *pdev)
{
struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
+ struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
+ if (pdata->gpio_runtime_close)
+ pdata->gpio_runtime_close(pdev);
rfkill_unregister(rfkill->rfkill_dev);
rfkill_destroy(rfkill->rfkill_dev);
if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 18dc512..3ca7277 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -90,7 +90,6 @@ static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
pdata->type,
&rfkill_regulator_ops, rfkill_data);
if (rf_kill == NULL) {
- dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
ret = -ENOMEM;
goto err_rfkill_alloc;
}
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index d389de1..cd9b7ee 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <net/rose.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
static unsigned int rose_neigh_no = 1;
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 5f22e26..338d793 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -13,6 +13,7 @@
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/circ_buf.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
index 0c65013..4b48687 100644
--- a/net/rxrpc/ar-recvmsg.c
+++ b/net/rxrpc/ar-recvmsg.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/skbuff.h>
+#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f2fb67e..93fdf13 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6994214..7b58230 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -21,6 +21,7 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
@@ -65,132 +66,134 @@ static inline u32 addr_fold(void *addr)
return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}
-static u32 flow_get_src(struct sk_buff *skb)
+static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
{
+ __be32 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
- return ntohl(ip_hdr(skb)->saddr);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ saddr),
+ 4, &hdata);
break;
case htons(ETH_P_IPV6):
- if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
- return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ saddr.s6_addr32[3]),
+ 4, &hdata);
break;
}
+ if (data)
+ return ntohl(*data);
return addr_fold(skb->sk);
}
-static u32 flow_get_dst(struct sk_buff *skb)
+static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
{
+ __be32 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
- return ntohl(ip_hdr(skb)->daddr);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ daddr),
+ 4, &hdata);
break;
case htons(ETH_P_IPV6):
- if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
- return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ daddr.s6_addr32[3]),
+ 4, &hdata);
break;
}
+ if (data)
+ return ntohl(*data);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
-static u32 flow_get_proto(struct sk_buff *skb)
+static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
{
+ __u8 *data = NULL, hdata;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
- return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
- ip_hdr(skb)->protocol : 0;
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct iphdr,
+ protocol),
+ 1, &hdata);
+ break;
case htons(ETH_P_IPV6):
- return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
- ipv6_hdr(skb)->nexthdr : 0;
- default:
- return 0;
+ data = skb_header_pointer(skb,
+ nhoff + offsetof(struct ipv6hdr,
+ nexthdr),
+ 1, &hdata);
+ break;
}
+ if (data)
+ return *data;
+ return 0;
}
-static u32 flow_get_proto_src(struct sk_buff *skb)
+/* helper function to get either src or dst port */
+static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
+ __be16 *_port, int dst)
{
+ __be16 *port = NULL;
+ int poff;
+
switch (skb->protocol) {
case htons(ETH_P_IP): {
- struct iphdr *iph;
- int poff;
+ struct iphdr *iph, _iph;
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
break;
- iph = ip_hdr(skb);
if (ip_is_fragment(iph))
break;
poff = proto_ports_offset(iph->protocol);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
- iph = ip_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
- poff));
- }
+ if (poff >= 0)
+ port = skb_header_pointer(skb,
+ nhoff + iph->ihl * 4 + poff + dst,
+ sizeof(*_port), _port);
break;
}
case htons(ETH_P_IPV6): {
- struct ipv6hdr *iph;
- int poff;
+ struct ipv6hdr *iph, _iph;
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
+ iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ if (!iph)
break;
- iph = ipv6_hdr(skb);
poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
- iph = ipv6_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
- poff));
- }
+ if (poff >= 0)
+ port = skb_header_pointer(skb,
+ nhoff + sizeof(*iph) + poff + dst,
+ sizeof(*_port), _port);
break;
}
}
- return addr_fold(skb->sk);
+ return port;
}
-static u32 flow_get_proto_dst(struct sk_buff *skb)
+static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
{
- switch (skb->protocol) {
- case htons(ETH_P_IP): {
- struct iphdr *iph;
- int poff;
+ __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- break;
- iph = ip_hdr(skb);
- if (ip_is_fragment(iph))
- break;
- poff = proto_ports_offset(iph->protocol);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
- iph = ip_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
- 2 + poff));
- }
- break;
- }
- case htons(ETH_P_IPV6): {
- struct ipv6hdr *iph;
- int poff;
+ if (port)
+ return ntohs(*port);
- if (!pskb_network_may_pull(skb, sizeof(*iph)))
- break;
- iph = ipv6_hdr(skb);
- poff = proto_ports_offset(iph->nexthdr);
- if (poff >= 0 &&
- pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
- iph = ipv6_hdr(skb);
- return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
- poff + 2));
- }
- break;
- }
- }
+ return addr_fold(skb->sk);
+}
+
+static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
+{
+ __be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);
+
+ if (port)
+ return ntohs(*port);
return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}
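The cls_flow rewrite swaps pskb_network_may_pull(), which may need to modify the skb, for skb_header_pointer(), which works on a const skb: it returns a pointer into the linear data when the requested bytes are already there, otherwise it copies them into the caller's stack buffer and returns that, or NULL when the packet is too short. Every call in the hunk above follows the same shape; a condensed kernel-context sketch:

static u32 read_be16(const struct sk_buff *skb, int offset)
{
	__be16 _buf, *p;

	/* p aliases skb data when linear, else points at _buf; NULL
	 * means fewer than sizeof(_buf) bytes exist at offset. */
	p = skb_header_pointer(skb, offset, sizeof(_buf), &_buf);
	if (!p)
		return 0;
	return ntohs(*p);
}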
@@ -223,7 +226,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
#define CTTUPLE(skb, member) \
({ \
enum ip_conntrack_info ctinfo; \
- struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
+ const struct nf_conn *ct = nf_ct_get(skb, &ctinfo); \
if (ct == NULL) \
goto fallback; \
ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member; \
@@ -236,7 +239,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
})
#endif
-static u32 flow_get_nfct_src(struct sk_buff *skb)
+static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -245,10 +248,10 @@ static u32 flow_get_nfct_src(struct sk_buff *skb)
return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
}
fallback:
- return flow_get_src(skb);
+ return flow_get_src(skb, nhoff);
}
-static u32 flow_get_nfct_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
@@ -257,21 +260,21 @@ static u32 flow_get_nfct_dst(struct sk_buff *skb)
return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
}
fallback:
- return flow_get_dst(skb);
+ return flow_get_dst(skb, nhoff);
}
-static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
{
return ntohs(CTTUPLE(skb, src.u.all));
fallback:
- return flow_get_proto_src(skb);
+ return flow_get_proto_src(skb, nhoff);
}
-static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
+static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
{
return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
- return flow_get_proto_dst(skb);
+ return flow_get_proto_dst(skb, nhoff);
}
static u32 flow_get_rtclassid(const struct sk_buff *skb)
@@ -313,17 +316,19 @@ static u32 flow_get_rxhash(struct sk_buff *skb)
static u32 flow_key_get(struct sk_buff *skb, int key)
{
+ int nhoff = skb_network_offset(skb);
+
switch (key) {
case FLOW_KEY_SRC:
- return flow_get_src(skb);
+ return flow_get_src(skb, nhoff);
case FLOW_KEY_DST:
- return flow_get_dst(skb);
+ return flow_get_dst(skb, nhoff);
case FLOW_KEY_PROTO:
- return flow_get_proto(skb);
+ return flow_get_proto(skb, nhoff);
case FLOW_KEY_PROTO_SRC:
- return flow_get_proto_src(skb);
+ return flow_get_proto_src(skb, nhoff);
case FLOW_KEY_PROTO_DST:
- return flow_get_proto_dst(skb);
+ return flow_get_proto_dst(skb, nhoff);
case FLOW_KEY_IIF:
return flow_get_iif(skb);
case FLOW_KEY_PRIORITY:
@@ -333,13 +338,13 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
case FLOW_KEY_NFCT:
return flow_get_nfct(skb);
case FLOW_KEY_NFCT_SRC:
- return flow_get_nfct_src(skb);
+ return flow_get_nfct_src(skb, nhoff);
case FLOW_KEY_NFCT_DST:
- return flow_get_nfct_dst(skb);
+ return flow_get_nfct_dst(skb, nhoff);
case FLOW_KEY_NFCT_PROTO_SRC:
- return flow_get_nfct_proto_src(skb);
+ return flow_get_nfct_proto_src(skb, nhoff);
case FLOW_KEY_NFCT_PROTO_DST:
- return flow_get_nfct_proto_dst(skb);
+ return flow_get_nfct_proto_dst(skb, nhoff);
case FLOW_KEY_RTCLASSID:
return flow_get_rtclassid(skb);
case FLOW_KEY_SKUID:
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ec5cbc8..0a4b2f9 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ea17cbe..f88256c 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0..e83c272 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
u32 r, slot, salt, sfbhash;
int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+ if (unlikely(sch->q.qlen >= q->limit)) {
+ sch->qstats.overlimits++;
+ q->stats.queuedrop++;
+ goto drop;
+ }
+
if (q->rehash_interval > 0) {
unsigned long limit = q->rehash_time + q->rehash_interval;
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
slot ^= 1;
sfb_skb_cb(skb)->hashes[slot] = 0;
- if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+ if (unlikely(minqlen >= q->max)) {
sch->qstats.overlimits++;
- if (minqlen >= q->max)
- q->stats.bucketdrop++;
- else
- q->stats.queuedrop++;
+ q->stats.bucketdrop++;
goto drop;
}
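Hoisting the qlen test to the top of sfb_enqueue() drops packets against a full qdisc before any per-flow hashing work is done, and lets each drop site bump the counter that actually describes it: queuedrop for the global limit, bucketdrop for bucket occupancy. Schematically (a sketch of the new ordering, not the literal code):

static int enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch,
			  struct sfb_sched_data *q, u32 minqlen)
{
	if (sch->q.qlen >= q->limit) {	/* cheap global check first */
		q->stats.queuedrop++;
		goto drop;
	}
	/* ... per-flow hashing and rehash bookkeeping ... */
	if (minqlen >= q->max) {	/* bucket check after hashing */
		q->stats.bucketdrop++;
		goto drop;
	}
	return 0;
drop:
	return -1;
}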
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dc16b90..152b5b3 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -282,6 +282,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.asconf_capable = 1;
asoc->asconf_addr_del_pending = NULL;
asoc->src_out_of_asoc_ok = 0;
+ asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index aabaee4..8104278 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -243,7 +243,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
if (!(transport->param_flags & SPP_PMTUD_ENABLE))
skb->local_df = 1;
- return ip6_xmit(sk, skb, &fl6, np->opt);
+ return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
}
/* Returns the dst cache entry for the given source and destination ip
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a6d27bf..14c2b06 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -917,6 +917,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
* current cwnd).
*/
if (!list_empty(&q->retransmit)) {
+ if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+ goto sctp_flush_out;
if (transport == asoc->peer.retran_path)
goto retran;
@@ -989,6 +991,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
((new_transport->state == SCTP_INACTIVE) ||
(new_transport->state == SCTP_UNCONFIRMED)))
new_transport = asoc->peer.active_path;
+ if (new_transport->state == SCTP_UNCONFIRMED)
+ continue;
/* Change packets if necessary. */
if (new_transport != transport) {
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 05a6ce2..1e2eee8 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -37,6 +37,7 @@
#include <linux/types.h>
#include <linux/seq_file.h>
#include <linux/init.h>
+#include <linux/export.h>
#include <net/sctp/sctp.h>
#include <net/ip.h> /* for snmp_fold_field */
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 91784f4..61b9fca 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1299,7 +1299,7 @@ SCTP_STATIC __init int sctp_init(void)
max_share = min(4UL*1024*1024, limit);
sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */
- sysctl_sctp_rmem[1] = (1500 *(sizeof(struct sk_buff) + 1));
+ sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1);
sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share);
sysctl_sctp_wmem[0] = SK_MEM_QUANTUM;
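The old tuning approximated per-packet memory cost as sizeof(struct sk_buff) + 1 per byte, which ignores the aligned data area and the trailing struct skb_shared_info. SKB_TRUESIZE() accounts for both; its definition at the time is approximately:

/* Approximate definition (see include/linux/skbuff.h): bytes charged
 * against a socket for an skb carrying X bytes of payload. */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))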
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 81db4e3..0121e0a 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3015,6 +3015,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
/* Start the heartbeat timer. */
if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer)))
sctp_transport_hold(peer);
+ asoc->new_transport = peer;
break;
case SCTP_PARAM_DEL_IP:
/* ADDIP 4.3 D7) If a request is received to delete the
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a0f31e6..891f5db 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3618,6 +3618,11 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
+ if (asoc->new_transport) {
+ sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport,
+ commands);
+ ((struct sctp_association *)asoc)->new_transport = NULL;
+ }
return SCTP_DISPOSITION_CONSUME;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 836aa63..13bf5fc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -78,6 +78,7 @@
#include <net/inet_common.h>
#include <linux/socket.h> /* for sa_family_t */
+#include <linux/export.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
diff --git a/net/socket.c b/net/socket.c
index ffe92ca..2877647 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2472,7 +2472,7 @@ int sock_register(const struct net_proto_family *ops)
lockdep_is_held(&net_family_lock)))
err = -EEXIST;
else {
- rcu_assign_pointer(net_families[ops->family], ops);
+ RCU_INIT_POINTER(net_families[ops->family], ops);
err = 0;
}
spin_unlock(&net_family_lock);
@@ -2500,7 +2500,7 @@ void sock_unregister(int family)
BUG_ON(family < 0 || family >= NPROTO);
spin_lock(&net_family_lock);
- rcu_assign_pointer(net_families[family], NULL);
+ RCU_INIT_POINTER(net_families[family], NULL);
spin_unlock(&net_family_lock);
synchronize_rcu();
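rcu_assign_pointer() orders the initialization of the pointed-to object before publishing the pointer, so concurrent readers never see a half-built object. That barrier is unneeded when the stored value is NULL, or when the object requires no publication ordering; RCU_INIT_POINTER() is the plain store used in those cases, as in these hunks. A condensed sketch of the distinction:

struct foo { int data; };
static struct foo __rcu *gp;

static void publish(struct foo *p)
{
	p->data = 42;
	rcu_assign_pointer(gp, p);	/* barrier: readers see data == 42 */
}

static void unpublish(void)
{
	RCU_INIT_POINTER(gp, NULL);	/* NULL publishes nothing: plain store */
}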
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 4195233..67a655e 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -19,6 +19,7 @@
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/slab.h>
+#include <linux/export.h>
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -255,12 +256,13 @@ EXPORT_SYMBOL_GPL(rpc_pton);
/**
* rpc_sockaddr2uaddr - Construct a universal address string from @sap.
* @sap: socket address
+ * @gfp_flags: allocation mode
*
 * Returns a %NUL-terminated string in dynamically allocated memory,
 * or NULL if an error occurred. Caller must
 * free the returned string.
*/
-char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
+char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
{
char portbuf[RPCBIND_MAXUADDRPLEN];
char addrbuf[RPCBIND_MAXUADDRLEN];
@@ -288,9 +290,8 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap)
if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
return NULL;
- return kstrdup(addrbuf, GFP_KERNEL);
+ return kstrdup(addrbuf, gfp_flags);
}
-EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
/**
* rpc_uaddr2sockaddr - convert a universal address to a socket address.
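Threading gfp_flags through rpc_sockaddr2uaddr() lets the sleeping callers keep GFP_KERNEL while rpcb_getport_async() further down, which runs in a context that must not sleep, passes GFP_ATOMIC. The pattern in miniature, with a hypothetical helper:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical sketch: let the caller choose the allocation mode
 * rather than hard-coding GFP_KERNEL inside the helper. */
static char *dup_label(const char *label, gfp_t gfp)
{
	return kstrdup(label, gfp);  /* GFP_KERNEL may sleep; GFP_ATOMIC won't */
}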
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 364eb45..afb5655 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -122,7 +122,7 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
return;
gss_get_ctx(ctx);
- rcu_assign_pointer(gss_cred->gc_ctx, ctx);
+ RCU_INIT_POINTER(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
smp_mb__before_clear_bit();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
@@ -603,26 +603,6 @@ out:
return err;
}
-static ssize_t
-gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
- char __user *dst, size_t buflen)
-{
- char *data = (char *)msg->data + msg->copied;
- size_t mlen = min(msg->len, buflen);
- unsigned long left;
-
- left = copy_to_user(dst, data, mlen);
- if (left == mlen) {
- msg->errno = -EFAULT;
- return -EFAULT;
- }
-
- mlen -= left;
- msg->copied += mlen;
- msg->errno = 0;
- return mlen;
-}
-
#define MSG_BUF_MAXSIZE 1024
static ssize_t
@@ -970,7 +950,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
struct gss_cl_ctx *ctx = gss_cred->gc_ctx;
- rcu_assign_pointer(gss_cred->gc_ctx, NULL);
+ RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
gss_put_ctx(ctx);
@@ -1590,7 +1570,7 @@ static const struct rpc_credops gss_nullops = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
- .upcall = gss_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v0,
@@ -1598,7 +1578,7 @@ static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
};
static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
- .upcall = gss_pipe_upcall,
+ .upcall = rpc_pipe_generic_upcall,
.downcall = gss_pipe_downcall,
.destroy_msg = gss_pipe_destroy_msg,
.open_pipe = gss_pipe_open_v1,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 4cb70dc..e50502d 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -129,6 +129,9 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
for (i = 0; i < groups ; i++)
if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
return 0;
+ if (groups < NFS_NGROUPS &&
+ cred->uc_gids[groups] != NOGROUP)
+ return 0;
return 1;
}
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 91eaa26..3ad435a 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -24,6 +24,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/export.h>
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_TRANS
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c5347d2..f0268ea 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -850,7 +850,9 @@ rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
return 0;
- task->tk_action = rpc_prepare_task;
+ task->tk_action = call_start;
+ if (task->tk_ops->rpc_call_prepare != NULL)
+ task->tk_action = rpc_prepare_task;
return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index b181e34..bfddd68 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -77,6 +77,26 @@ rpc_timeout_upcall_queue(struct work_struct *work)
rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
+ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
+ char __user *dst, size_t buflen)
+{
+ char *data = (char *)msg->data + msg->copied;
+ size_t mlen = min(msg->len - msg->copied, buflen);
+ unsigned long left;
+
+ left = copy_to_user(dst, data, mlen);
+ if (left == mlen) {
+ msg->errno = -EFAULT;
+ return -EFAULT;
+ }
+
+ mlen -= left;
+ msg->copied += mlen;
+ msg->errno = 0;
+ return mlen;
+}
+EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
+
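Note that the consolidated helper is not a verbatim copy of the gss_pipe_upcall() it replaces: the old code computed min(msg->len, buflen) without subtracting what had already been consumed, while the generic version uses msg->len - msg->copied, so reads that drain a message in several chunks resume at the right offset. Side by side (illustrative only):

mlen = min(msg->len, buflen);			/* old: ignores msg->copied */
mlen = min(msg->len - msg->copied, buflen);	/* new: resumes mid-message */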
/**
* rpc_queue_upcall - queue an upcall message to userspace
* @inode: inode of upcall pipe on which to queue given message
@@ -1084,3 +1104,6 @@ void unregister_rpc_pipefs(void)
kmem_cache_destroy(rpc_inode_cachep);
unregister_filesystem(&rpc_pipe_fs_type);
}
+
+/* Make 'mount -t rpc_pipefs ...' autoload this module. */
+MODULE_ALIAS("rpc_pipefs");
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index e45d2fb..8761bf8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -114,6 +114,9 @@ static struct rpc_program rpcb_program;
static struct rpc_clnt * rpcb_local_clnt;
static struct rpc_clnt * rpcb_local_clnt4;
+DEFINE_SPINLOCK(rpcb_clnt_lock);
+unsigned int rpcb_users;
+
struct rpcbind_args {
struct rpc_xprt * r_xprt;
@@ -161,6 +164,56 @@ static void rpcb_map_release(void *data)
kfree(map);
}
+static int rpcb_get_local(void)
+{
+ int cnt;
+
+ spin_lock(&rpcb_clnt_lock);
+ if (rpcb_users)
+ rpcb_users++;
+ cnt = rpcb_users;
+ spin_unlock(&rpcb_clnt_lock);
+
+ return cnt;
+}
+
+void rpcb_put_local(void)
+{
+ struct rpc_clnt *clnt = rpcb_local_clnt;
+ struct rpc_clnt *clnt4 = rpcb_local_clnt4;
+ int shutdown;
+
+ spin_lock(&rpcb_clnt_lock);
+ if (--rpcb_users == 0) {
+ rpcb_local_clnt = NULL;
+ rpcb_local_clnt4 = NULL;
+ }
+ shutdown = !rpcb_users;
+ spin_unlock(&rpcb_clnt_lock);
+
+ if (shutdown) {
+ /*
+ * Last user is gone: shut the rpcbind clients down
+ * (this replaces the old cleanup_rpcb_clnt()).
+ */
+ if (clnt4)
+ rpc_shutdown_client(clnt4);
+ if (clnt)
+ rpc_shutdown_client(clnt);
+ }
+}
+
+static void rpcb_set_local(struct rpc_clnt *clnt, struct rpc_clnt *clnt4)
+{
+ /* Protected by rpcb_create_local_mutex */
+ rpcb_local_clnt = clnt;
+ rpcb_local_clnt4 = clnt4;
+ smp_wmb();
+ rpcb_users = 1;
+ dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
+ "%p, rpcb_local_clnt4: %p)\n", rpcb_local_clnt,
+ rpcb_local_clnt4);
+}
+
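rpcb_get_local()/rpcb_put_local() make the pair of global rpcbind clients a refcounted singleton: get() succeeds only if the clients already exist (creation itself stays serialized by rpcb_create_local_mutex), and the final put() clears the globals under the spinlock but shuts the clients down outside it, since teardown can sleep. The shape of the pattern, reduced to essentials with hypothetical names:

#include <linux/spinlock.h>

struct shared_obj;				/* hypothetical resource   */
void shared_obj_destroy(struct shared_obj *o);	/* hypothetical, may sleep */

static DEFINE_SPINLOCK(obj_lock);
static unsigned int obj_users;
static struct shared_obj *obj;

static int obj_get_existing(void)
{
	int got;

	spin_lock(&obj_lock);
	got = obj_users > 0;		/* only ref an already-created object */
	if (got)
		obj_users++;
	spin_unlock(&obj_lock);
	return got;
}

static void obj_put(void)
{
	struct shared_obj *victim = NULL;

	spin_lock(&obj_lock);
	if (--obj_users == 0) {
		victim = obj;
		obj = NULL;
	}
	spin_unlock(&obj_lock);

	if (victim)
		shared_obj_destroy(victim);	/* may sleep: outside the lock */
}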
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
@@ -205,9 +258,7 @@ static int rpcb_create_local_unix(void)
clnt4 = NULL;
}
- /* Protected by rpcb_create_local_mutex */
- rpcb_local_clnt = clnt;
- rpcb_local_clnt4 = clnt4;
+ rpcb_set_local(clnt, clnt4);
out:
return result;
@@ -259,9 +310,7 @@ static int rpcb_create_local_net(void)
clnt4 = NULL;
}
- /* Protected by rpcb_create_local_mutex */
- rpcb_local_clnt = clnt;
- rpcb_local_clnt4 = clnt4;
+ rpcb_set_local(clnt, clnt4);
out:
return result;
@@ -271,16 +320,16 @@ out:
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
-static int rpcb_create_local(void)
+int rpcb_create_local(void)
{
static DEFINE_MUTEX(rpcb_create_local_mutex);
int result = 0;
- if (rpcb_local_clnt)
+ if (rpcb_get_local())
return result;
mutex_lock(&rpcb_create_local_mutex);
- if (rpcb_local_clnt)
+ if (rpcb_get_local())
goto out;
if (rpcb_create_local_unix() != 0)
@@ -382,11 +431,6 @@ int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
struct rpc_message msg = {
.rpc_argp = &map,
};
- int error;
-
- error = rpcb_create_local();
- if (error)
- return error;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
@@ -410,7 +454,7 @@ static int rpcb_register_inet4(const struct sockaddr *sap,
unsigned short port = ntohs(sin->sin_port);
int result;
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -437,7 +481,7 @@ static int rpcb_register_inet6(const struct sockaddr *sap,
unsigned short port = ntohs(sin6->sin6_port);
int result;
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
@@ -522,11 +566,7 @@ int rpcb_v4_register(const u32 program, const u32 version,
struct rpc_message msg = {
.rpc_argp = &map,
};
- int error;
- error = rpcb_create_local();
- if (error)
- return error;
if (rpcb_local_clnt4 == NULL)
return -EPROTONOSUPPORT;
@@ -686,7 +726,7 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_4:
case RPCBVERS_3:
map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
- map->r_addr = rpc_sockaddr2uaddr(sap);
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
map->r_owner = "";
break;
case RPCBVERS_2:
@@ -1060,15 +1100,3 @@ static struct rpc_program rpcb_program = {
.version = rpcb_version,
.stats = &rpcb_stats,
};
-
-/**
- * cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister
- *
- */
-void cleanup_rpcb_clnt(void)
-{
- if (rpcb_local_clnt4)
- rpc_shutdown_client(rpcb_local_clnt4);
- if (rpcb_local_clnt)
- rpc_shutdown_client(rpcb_local_clnt);
-}
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 10b4319..145e6784 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/udp.h>
#include <linux/sunrpc/xdr.h>
+#include <linux/export.h>
/**
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 9d08091..8ec9778 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -61,8 +61,6 @@ static struct pernet_operations sunrpc_net_ops = {
extern struct cache_detail unix_gid_cache;
-extern void cleanup_rpcb_clnt(void);
-
static int __init
init_sunrpc(void)
{
@@ -102,7 +100,6 @@ out:
static void __exit
cleanup_sunrpc(void)
{
- cleanup_rpcb_clnt();
rpcauth_remove_module();
cleanup_socket_xprt();
svc_cleanup_xprt_sock();
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6a69a11..6e03888 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -295,6 +295,18 @@ svc_pool_map_put(void)
}
+static int svc_pool_map_get_node(unsigned int pidx)
+{
+ const struct svc_pool_map *m = &svc_pool_map;
+
+ if (m->count) {
+ if (m->mode == SVC_POOL_PERCPU)
+ return cpu_to_node(m->pool_to[pidx]);
+ if (m->mode == SVC_POOL_PERNODE)
+ return m->pool_to[pidx];
+ }
+ return NUMA_NO_NODE;
+}
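svc_pool_map_get_node() translates a pool index back into the NUMA node its CPUs live on (or NUMA_NO_NODE when the service is unpooled), so the thread-creation path can keep an nfsd thread and all of its per-request memory on one node. The node value feeds the *_node allocator variants in the hunks below:

/* Gathered from the hunks below: everything lands on 'node'. */
rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
p = alloc_pages_node(node, GFP_KERNEL, 0);
task = kthread_create_on_node(serv->sv_function, rqstp, node, serv->sv_name);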
/*
* Set the given thread's cpus_allowed mask so that it
* will only run on cpus in the given pool.
@@ -354,6 +366,42 @@ svc_pool_for_cpu(struct svc_serv *serv, int cpu)
return &serv->sv_pools[pidx % serv->sv_nrpools];
}
+static int svc_rpcb_setup(struct svc_serv *serv)
+{
+ int err;
+
+ err = rpcb_create_local();
+ if (err)
+ return err;
+
+ /* Remove any stale portmap registrations */
+ svc_unregister(serv);
+ return 0;
+}
+
+void svc_rpcb_cleanup(struct svc_serv *serv)
+{
+ svc_unregister(serv);
+ rpcb_put_local();
+}
+EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);
+
+static int svc_uses_rpcbind(struct svc_serv *serv)
+{
+ struct svc_program *progp;
+ unsigned int i;
+
+ for (progp = serv->sv_program; progp; progp = progp->pg_next) {
+ for (i = 0; i < progp->pg_nvers; i++) {
+ if (progp->pg_vers[i] == NULL)
+ continue;
+ if (progp->pg_vers[i]->vs_hidden == 0)
+ return 1;
+ }
+ }
+
+ return 0;
+}
/*
* Create an RPC service
@@ -419,8 +467,15 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
spin_lock_init(&pool->sp_lock);
}
- /* Remove any stale portmap registrations */
- svc_unregister(serv);
+ if (svc_uses_rpcbind(serv)) {
+ if (svc_rpcb_setup(serv) < 0) {
+ kfree(serv->sv_pools);
+ kfree(serv);
+ return NULL;
+ }
+ if (!serv->sv_shutdown)
+ serv->sv_shutdown = svc_rpcb_cleanup;
+ }
return serv;
}
@@ -488,7 +543,6 @@ svc_destroy(struct svc_serv *serv)
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
- svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
}
@@ -499,7 +553,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
* We allocate pages and place them in rq_argpages.
*/
static int
-svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
+svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
unsigned int pages, arghi;
@@ -513,7 +567,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
arghi = 0;
BUG_ON(pages > RPCSVC_MAXPAGES);
while (pages) {
- struct page *p = alloc_page(GFP_KERNEL);
+ struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
if (!p)
break;
rqstp->rq_pages[arghi++] = p;
@@ -536,11 +590,11 @@ svc_release_buffer(struct svc_rqst *rqstp)
}
struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
struct svc_rqst *rqstp;
- rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
+ rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
if (!rqstp)
goto out_enomem;
@@ -554,15 +608,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
- rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+ rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_argp)
goto out_thread;
- rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
+ rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
if (!rqstp->rq_resp)
goto out_thread;
- if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
+ if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
goto out_thread;
return rqstp;
@@ -647,6 +701,7 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct svc_pool *chosen_pool;
int error = 0;
unsigned int state = serv->sv_nrthreads-1;
+ int node;
if (pool == NULL) {
/* The -1 assumes caller has done a svc_get() */
@@ -662,14 +717,16 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
nrservs--;
chosen_pool = choose_pool(serv, pool, &state);
- rqstp = svc_prepare_thread(serv, chosen_pool);
+ node = svc_pool_map_get_node(chosen_pool->sp_id);
+ rqstp = svc_prepare_thread(serv, chosen_pool, node);
if (IS_ERR(rqstp)) {
error = PTR_ERR(rqstp);
break;
}
__module_get(serv->sv_module);
- task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+ task = kthread_create_on_node(serv->sv_function, rqstp,
+ node, serv->sv_name);
if (IS_ERR(task)) {
error = PTR_ERR(task);
module_put(serv->sv_module);
@@ -956,9 +1013,8 @@ static void svc_unregister(const struct svc_serv *serv)
/*
* Printk the given error with the address of the client that caused it.
*/
-static int
-__attribute__ ((format (printf, 2, 3)))
-svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
+static __printf(2, 3)
+int svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
va_list args;
int r;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bd31208..447cd0e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -14,6 +14,7 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprt.h>
+#include <linux/module.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -254,8 +255,6 @@ EXPORT_SYMBOL_GPL(svc_create_xprt);
*/
void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
{
- struct sockaddr *sin;
-
memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen);
rqstp->rq_addrlen = xprt->xpt_remotelen;
@@ -263,15 +262,8 @@ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt)
* Destination address in request is needed for binding the
* source address in RPC replies/callbacks later.
*/
- sin = (struct sockaddr *)&xprt->xpt_local;
- switch (sin->sa_family) {
- case AF_INET:
- rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
- break;
- case AF_INET6:
- rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
- break;
- }
+ memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen);
+ rqstp->rq_daddrlen = xprt->xpt_locallen;
}
EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 767d494..71bed1c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
@@ -143,19 +144,20 @@ static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
cmh->cmsg_level = SOL_IP;
cmh->cmsg_type = IP_PKTINFO;
pki->ipi_ifindex = 0;
- pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
+ pki->ipi_spec_dst.s_addr =
+ svc_daddr_in(rqstp)->sin_addr.s_addr;
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
case AF_INET6: {
struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
cmh->cmsg_level = SOL_IPV6;
cmh->cmsg_type = IPV6_PKTINFO;
- pki->ipi6_ifindex = 0;
- ipv6_addr_copy(&pki->ipi6_addr,
- &rqstp->rq_daddr.addr6);
+ pki->ipi6_ifindex = daddr->sin6_scope_id;
+ ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr);
cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
}
break;
@@ -498,9 +500,13 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in *daddr = svc_daddr_in(rqstp);
+
if (cmh->cmsg_type != IP_PKTINFO)
return 0;
- rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
+
+ daddr->sin_family = AF_INET;
+ daddr->sin_addr.s_addr = pki->ipi_spec_dst.s_addr;
return 1;
}
@@ -511,9 +517,14 @@ static int svc_udp_get_dest_address6(struct svc_rqst *rqstp,
struct cmsghdr *cmh)
{
struct in6_pktinfo *pki = CMSG_DATA(cmh);
+ struct sockaddr_in6 *daddr = svc_daddr_in6(rqstp);
+
if (cmh->cmsg_type != IPV6_PKTINFO)
return 0;
- ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
+
+ daddr->sin6_family = AF_INET6;
+ ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr);
+ daddr->sin6_scope_id = pki->ipi6_ifindex;
return 1;
}
@@ -614,6 +625,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
+ rqstp->rq_daddrlen = svc_addr_len(svc_daddr(rqstp));
if (skb_is_nonlinear(skb)) {
/* we have to copy */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index a385430..ba1296d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -50,6 +50,7 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
+#include <linux/export.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index ca84212..e758139 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -12,6 +12,7 @@
*/
#include <linux/mm.h>
+#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/nsproxy.h>
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 759b318..28908f5 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -39,6 +39,7 @@
#include "link.h"
#include "port.h"
#include "bcast.h"
+#include "name_distr.h"
#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
@@ -298,14 +299,9 @@ static void bclink_send_nack(struct tipc_node *n_ptr)
msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
msg_set_bcast_tag(msg, tipc_own_tag);
- if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
- bcl->stats.sent_nacks++;
- buf_discard(buf);
- } else {
- tipc_bearer_schedule(bcl->b_ptr, bcl);
- bcl->proto_msg_queue = buf;
- bcl->stats.bearer_congs++;
- }
+ tipc_bearer_send(&bcbearer->bearer, buf, NULL);
+ bcl->stats.sent_nacks++;
+ buf_discard(buf);
/*
 * Ensure we don't send another NACK msg to the node
@@ -426,20 +422,28 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
+ struct tipc_node *node;
u32 next_in;
u32 seqno;
struct sk_buff *deferred;
- if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
- (msg_mc_netid(msg) != tipc_net_id))) {
- buf_discard(buf);
- return;
- }
+ /* Screen out unwanted broadcast messages */
+
+ if (msg_mc_netid(msg) != tipc_net_id)
+ goto exit;
+
+ node = tipc_node_find(msg_prevnode(msg));
+ if (unlikely(!node))
+ goto exit;
+
+ tipc_node_lock(node);
+ if (unlikely(!node->bclink.supported))
+ goto unlock;
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+ if (msg_type(msg) != STATE_MSG)
+ goto unlock;
if (msg_destnode(msg) == tipc_own_addr) {
- tipc_node_lock(node);
tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
tipc_node_unlock(node);
spin_lock_bh(&bc_lock);
@@ -449,18 +453,18 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
msg_bcgap_to(msg));
spin_unlock_bh(&bc_lock);
} else {
+ tipc_node_unlock(node);
tipc_bclink_peek_nack(msg_destnode(msg),
msg_bcast_tag(msg),
msg_bcgap_after(msg),
msg_bcgap_to(msg));
}
- buf_discard(buf);
- return;
+ goto exit;
}
- tipc_node_lock(node);
+ /* Handle in-sequence broadcast message */
+
receive:
- deferred = node->bclink.deferred_head;
next_in = mod(node->bclink.last_in + 1);
seqno = msg_seqno(msg);
@@ -474,7 +478,10 @@ receive:
}
if (likely(msg_isdata(msg))) {
tipc_node_unlock(node);
- tipc_port_recv_mcast(buf, NULL);
+ if (likely(msg_mcast(msg)))
+ tipc_port_recv_mcast(buf, NULL);
+ else
+ buf_discard(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
bcl->stats.recv_bundles++;
bcl->stats.recv_bundled += msg_msgcnt(msg);
@@ -487,18 +494,22 @@ receive:
bcl->stats.recv_fragmented++;
tipc_node_unlock(node);
tipc_net_route_msg(buf);
+ } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+ tipc_node_unlock(node);
+ tipc_named_recv(buf);
} else {
tipc_node_unlock(node);
- tipc_net_route_msg(buf);
+ buf_discard(buf);
}
+ buf = NULL;
+ tipc_node_lock(node);
+ deferred = node->bclink.deferred_head;
if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- tipc_node_lock(node);
buf = deferred;
msg = buf_msg(buf);
node->bclink.deferred_head = deferred->next;
goto receive;
}
- return;
} else if (less(next_in, seqno)) {
u32 gap_after = node->bclink.gap_after;
u32 gap_to = node->bclink.gap_to;
@@ -513,6 +524,7 @@ receive:
else if (less(gap_after, seqno) && less(seqno, gap_to))
node->bclink.gap_to = seqno;
}
+ buf = NULL;
if (bclink_ack_allowed(node->bclink.nack_sync)) {
if (gap_to != gap_after)
bclink_send_nack(node);
@@ -520,9 +532,11 @@ receive:
}
} else {
bcl->stats.duplicates++;
- buf_discard(buf);
}
+unlock:
tipc_node_unlock(node);
+exit:
+ buf_discard(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
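The reworked tipc_bclink_recv_pkt() funnels every early return through the unlock/exit labels so the node lock and the buffer are each released in exactly one place; paths that hand the buffer off set buf = NULL first, making the final buf_discard() a no-op for them (kfree_skb() tolerates NULL). A minimal sketch of the idiom, with hypothetical helpers:

/* Sketch of the single-exit funnel used above; helpers are hypothetical. */
static void recv_sketch(struct sk_buff *buf)
{
	struct tipc_node *node;

	if (!wanted(buf))
		goto exit;		/* lock never taken */

	node = find_node(buf);
	if (!node)
		goto exit;

	tipc_node_lock(node);
	if (!node->bclink.supported)
		goto unlock;

	consume(buf);			/* ownership handed off... */
	buf = NULL;			/* ...so the common exit won't free it */
unlock:
	tipc_node_unlock(node);
exit:
	buf_discard(buf);		/* no-op when buf == NULL */
}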
@@ -535,10 +549,11 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
/**
* tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
*
- * Send through as many bearers as necessary to reach all nodes
- * that support TIPC multicasting.
+ * Send packet over as many bearers as necessary to reach all nodes
+ * that have joined the broadcast link.
*
- * Returns 0 if packet sent successfully, non-zero if not
+ * Returns 0 (packet sent successfully) under all circumstances,
+ * since the broadcast link's pseudo-bearer never blocks
*/
static int tipc_bcbearer_send(struct sk_buff *buf,
@@ -547,7 +562,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
{
int bp_index;
- /* Prepare buffer for broadcasting (if first time trying to send it) */
+ /*
+ * Prepare broadcast link message for reliable transmission,
+ * if first time trying to send it;
+ * preparation is skipped for broadcast link protocol messages
+ * since they are sent in an unreliable manner and don't need it
+ */
if (likely(!msg_non_seq(buf_msg(buf)))) {
struct tipc_msg *msg;
@@ -596,18 +616,12 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
}
if (bcbearer->remains_new.count == 0)
- return 0;
+ break; /* all targets reached */
bcbearer->remains = bcbearer->remains_new;
}
- /*
- * Unable to reach all targets (indicate success, since currently
- * there isn't code in place to properly block & unblock the
- * pseudo-bearer used by the broadcast link)
- */
-
- return TIPC_OK;
+ return 0;
}
/**
@@ -667,27 +681,6 @@ void tipc_bcbearer_sort(void)
spin_unlock_bh(&bc_lock);
}
-/**
- * tipc_bcbearer_push - resolve bearer congestion
- *
- * Forces bclink to push out any unsent packets, until all packets are gone
- * or congestion reoccurs.
- * No locks set when function called
- */
-
-void tipc_bcbearer_push(void)
-{
- struct tipc_bearer *b_ptr;
-
- spin_lock_bh(&bc_lock);
- b_ptr = &bcbearer->bearer;
- if (b_ptr->blocked) {
- b_ptr->blocked = 0;
- tipc_bearer_lock_push(b_ptr);
- }
- spin_unlock_bh(&bc_lock);
-}
-
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
@@ -764,7 +757,7 @@ int tipc_bclink_init(void)
bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
if (!bcbearer || !bclink) {
- warn("Multicast link creation failed, no memory\n");
+ warn("Broadcast link creation failed, no memory\n");
kfree(bcbearer);
bcbearer = NULL;
kfree(bclink);
@@ -775,7 +768,7 @@ int tipc_bclink_init(void)
INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
bcbearer->bearer.media = &bcbearer->media;
bcbearer->media.send_msg = tipc_bcbearer_send;
- sprintf(bcbearer->media.name, "tipc-multicast");
+ sprintf(bcbearer->media.name, "tipc-broadcast");
bcl = &bclink->link;
INIT_LIST_HEAD(&bcl->waiting_ports);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 500c97f..06740da 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -101,6 +101,5 @@ int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
int tipc_bclink_reset_stats(void);
int tipc_bclink_set_queue_limits(u32 limit);
void tipc_bcbearer_sort(void);
-void tipc_bcbearer_push(void);
#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 85eba9c..e2202de 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -385,13 +385,9 @@ static int bearer_push(struct tipc_bearer *b_ptr)
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
{
- int res;
-
spin_lock_bh(&b_ptr->lock);
- res = bearer_push(b_ptr);
+ bearer_push(b_ptr);
spin_unlock_bh(&b_ptr->lock);
- if (res)
- tipc_bcbearer_push();
}
@@ -608,6 +604,7 @@ int tipc_block_bearer(const char *name)
info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
+ list_splice_init(&b_ptr->cong_links, &b_ptr->links);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
struct tipc_node *n_ptr = l_ptr->owner;
@@ -635,6 +632,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
b_ptr->media->disable_bearer(b_ptr);
+ list_splice_init(&b_ptr->cong_links, &b_ptr->links);
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 5ad70ef..d696f9e 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,8 +39,8 @@
#include "bcast.h"
-#define MAX_BEARERS 8
-#define MAX_MEDIA 4
+#define MAX_BEARERS 2
+#define MAX_MEDIA 2
/*
* Identifiers of supported TIPC media types
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 443159a..80da6eb 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -65,7 +65,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
const void *req_tlv_area, int req_tlv_space,
int headroom);
-void tipc_cfg_link_event(u32 addr, char *name, int up);
int tipc_cfg_init(void);
void tipc_cfg_stop(void);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 943b6af..c21331d 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,6 +34,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/module.h>
+
#include "core.h"
#include "ref.h"
#include "name_table.h"
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 0987933..f2fb96e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -159,12 +159,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
}
tipc_node_lock(n_ptr);
- /* Don't talk to neighbor during cleanup after last session */
- if (n_ptr->cleanup_required) {
- tipc_node_unlock(n_ptr);
- return;
- }
-
link = n_ptr->links[b_ptr->identity];
/* Create a link endpoint for this bearer, if necessary */
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index b69092e..e728d4c 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -2,7 +2,7 @@
* net/tipc/eth_media.c: Ethernet bearer support for TIPC
*
* Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
+ * Copyright (c) 2005-2008, 2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,7 +37,7 @@
#include "core.h"
#include "bearer.h"
-#define MAX_ETH_BEARERS 2
+#define MAX_ETH_BEARERS MAX_BEARERS
#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
@@ -144,31 +144,27 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
/* Find device with specified name */
+ read_lock(&dev_base_lock);
for_each_netdev(&init_net, pdev) {
if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
dev = pdev;
+ dev_hold(dev);
break;
}
}
+ read_unlock(&dev_base_lock);
if (!dev)
return -ENODEV;
- /* Find Ethernet bearer for device (or create one) */
-
- while ((eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev))
- eb_ptr++;
- if (eb_ptr == stop)
- return -EDQUOT;
- if (!eb_ptr->dev) {
- eb_ptr->dev = dev;
- eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
- eb_ptr->tipc_packet_type.dev = dev;
- eb_ptr->tipc_packet_type.func = recv_msg;
- eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
- INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
- dev_hold(dev);
- dev_add_pack(&eb_ptr->tipc_packet_type);
- }
+ /* Create Ethernet bearer for device */
+
+ eb_ptr->dev = dev;
+ eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
+ eb_ptr->tipc_packet_type.dev = dev;
+ eb_ptr->tipc_packet_type.func = recv_msg;
+ eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+ INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+ dev_add_pack(&eb_ptr->tipc_packet_type);
/* Associate TIPC bearer with Ethernet bearer */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index f89570c..ae98a72 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -332,15 +332,16 @@ struct link *tipc_link_create(struct tipc_node *n_ptr,
l_ptr->addr = peer;
if_name = strchr(b_ptr->name, ':') + 1;
- sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+ sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
tipc_node(tipc_own_addr),
if_name,
tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
- /* note: peer i/f is appended to link name by reset/activate */
+ /* note: peer i/f name is updated by reset/activate message */
memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
l_ptr->owner = n_ptr;
l_ptr->checkpoint = 1;
+ l_ptr->peer_session = INVALID_SESSION;
l_ptr->b_ptr = b_ptr;
link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
l_ptr->state = RESET_UNKNOWN;
@@ -536,9 +537,6 @@ void tipc_link_stop(struct link *l_ptr)
l_ptr->proto_msg_queue = NULL;
}
-/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
-#define link_send_event(fcn, l_ptr, up) do { } while (0)
-
void tipc_link_reset(struct link *l_ptr)
{
struct sk_buff *buf;
@@ -596,10 +594,6 @@ void tipc_link_reset(struct link *l_ptr)
l_ptr->fsm_msg_cnt = 0;
l_ptr->stale_count = 0;
link_reset_statistics(l_ptr);
-
- link_send_event(tipc_cfg_link_event, l_ptr, 0);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 0);
}
@@ -608,9 +602,6 @@ static void link_activate(struct link *l_ptr)
l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
tipc_node_link_up(l_ptr->owner, l_ptr);
tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
- link_send_event(tipc_cfg_link_event, l_ptr, 1);
- if (!in_own_cluster(l_ptr->addr))
- link_send_event(tipc_disc_link_event, l_ptr, 1);
}
/**
@@ -985,6 +976,51 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
}
/*
+ * tipc_link_send_names - send name table entries to new neighbor
+ *
+ * Send routine for bulk delivery of name table messages when contact
+ * with a new neighbor occurs. No link congestion checking is performed
+ * because name table messages *must* be delivered. The messages must be
+ * small enough not to require fragmentation.
+ * Called without any locks held.
+ */
+
+void tipc_link_send_names(struct list_head *message_list, u32 dest)
+{
+ struct tipc_node *n_ptr;
+ struct link *l_ptr;
+ struct sk_buff *buf;
+ struct sk_buff *temp_buf;
+
+ if (list_empty(message_list))
+ return;
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(dest);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[0];
+ if (l_ptr) {
+ /* convert circular list to linear list */
+ ((struct sk_buff *)message_list->prev)->next = NULL;
+ link_add_chain_to_outqueue(l_ptr,
+ (struct sk_buff *)message_list->next, 0);
+ tipc_link_push_queue(l_ptr);
+ INIT_LIST_HEAD(message_list);
+ }
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+
+ /* discard the messages if they couldn't be sent */
+
+ list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
+ list_del((struct list_head *)buf);
+ buf_discard(buf);
+ }
+}
+
+/*
* link_send_buf_fast: Entry for data messages where the
* destination link is known and the header is complete,
* inclusive total message length. Very time critical.
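[The casts in tipc_link_send_names() above work because struct sk_buff and struct list_head both begin with next/prev pointers, so a list_head can stand in for the head of an sk_buff chain once the circle is cut. A hedged sketch of the conversion:

	/*
	 *   struct list_head { struct list_head *next, *prev; };
	 *   struct sk_buff   { struct sk_buff   *next, *prev; ... };
	 */
	((struct sk_buff *)message_list->prev)->next = NULL;	/* cut the circle */
	/* message_list->next is now a NULL-terminated sk_buff chain that
	 * link_add_chain_to_outqueue() can walk with plain ->next links */
]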
@@ -1031,9 +1067,6 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
u32 selector = msg_origport(buf_msg(buf)) & 1;
u32 dummy;
- if (destnode == tipc_own_addr)
- return tipc_port_recv_msg(buf);
-
read_lock_bh(&tipc_net_lock);
n_ptr = tipc_node_find(destnode);
if (likely(n_ptr)) {
@@ -1658,19 +1691,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
continue;
}
+ /* Discard unicast link messages destined for another node */
+
if (unlikely(!msg_short(msg) &&
(msg_destnode(msg) != tipc_own_addr)))
goto cont;
- /* Discard non-routeable messages destined for another node */
-
- if (unlikely(!msg_isdata(msg) &&
- (msg_destnode(msg) != tipc_own_addr))) {
- if ((msg_user(msg) != CONN_MANAGER) &&
- (msg_user(msg) != MSG_FRAGMENTER))
- goto cont;
- }
-
/* Locate neighboring node that sent message */
n_ptr = tipc_node_find(msg_prevnode(msg));
@@ -1678,17 +1704,24 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
goto cont;
tipc_node_lock(n_ptr);
- /* Don't talk to neighbor during cleanup after last session */
+ /* Locate unicast link endpoint that should handle message */
- if (n_ptr->cleanup_required) {
+ l_ptr = n_ptr->links[b_ptr->identity];
+ if (unlikely(!l_ptr)) {
tipc_node_unlock(n_ptr);
goto cont;
}
- /* Locate unicast link endpoint that should handle message */
+ /* Verify that communication with node is currently allowed */
- l_ptr = n_ptr->links[b_ptr->identity];
- if (unlikely(!l_ptr)) {
+ if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
+ msg_user(msg) == LINK_PROTOCOL &&
+ (msg_type(msg) == RESET_MSG ||
+ msg_type(msg) == ACTIVATE_MSG) &&
+ !msg_redundant_link(msg))
+ n_ptr->block_setup &= ~WAIT_PEER_DOWN;
+
+ if (n_ptr->block_setup) {
tipc_node_unlock(n_ptr);
goto cont;
}
@@ -1923,6 +1956,12 @@ void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
if (link_blocked(l_ptr))
return;
+
+ /* Abort non-RESET send if communication with node is prohibited */
+
+ if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
+ return;
+
msg_set_type(msg, msg_typ);
msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
@@ -2051,9 +2090,19 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
case RESET_MSG:
if (!link_working_unknown(l_ptr) &&
(l_ptr->peer_session != INVALID_SESSION)) {
- if (msg_session(msg) == l_ptr->peer_session)
- break; /* duplicate: ignore */
+ if (less_eq(msg_session(msg), l_ptr->peer_session))
+ break; /* duplicate or old reset: ignore */
+ }
+
+ if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
+ link_working_unknown(l_ptr))) {
+ /*
+ * peer has lost contact -- don't allow peer's links
+ * to reactivate before we recognize loss & clean up
+ */
+ l_ptr->owner->block_setup = WAIT_NODE_DOWN;
}
+
/* fall thru' */
case ACTIVATE_MSG:
/* Update link settings according other endpoint's values */
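[The duplicate-RESET test now uses a wrap-safe ordering rather than strict equality, so a stale reset carrying an older session number is ignored too. A sketch in the style of TIPC's mod()/less_eq() helpers (assumed semantics; the helpers themselves are not shown in this hunk):

	static inline u32 mod(u32 x)
	{
		return x & 0xffffu;		/* sessions live in 16 bits */
	}

	static inline int less_eq(u32 left, u32 right)
	{
		/* true when right is at or after left, modulo wrap-around */
		return mod(right - left) < 32768u;
	}
]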
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 74fbeca..e56cb53 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -223,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
void tipc_link_reset(struct link *l_ptr);
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_send_names(struct list_head *message_list, u32 dest);
int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
int tipc_link_send_sections_fast(struct tipc_port *sender,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index cd356e5..b7ca1bd 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -173,18 +173,40 @@ void tipc_named_withdraw(struct publication *publ)
* tipc_named_node_up - tell specified node about all publications by this node
*/
-void tipc_named_node_up(unsigned long node)
+void tipc_named_node_up(unsigned long nodearg)
{
+ struct tipc_node *n_ptr;
+ struct link *l_ptr;
struct publication *publ;
struct distr_item *item = NULL;
struct sk_buff *buf = NULL;
+ struct list_head message_list;
+ u32 node = (u32)nodearg;
u32 left = 0;
u32 rest;
- u32 max_item_buf;
+ u32 max_item_buf = 0;
+
+ /* compute maximum amount of publication data to send per message */
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(node);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ l_ptr = n_ptr->active_links[0];
+ if (l_ptr)
+ max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+ if (!max_item_buf)
+ return;
+
+ /* create list of publication messages, then send them as a unit */
+
+ INIT_LIST_HEAD(&message_list);
read_lock_bh(&tipc_nametbl_lock);
- max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
- max_item_buf *= ITEM_SIZE;
rest = publ_cnt * ITEM_SIZE;
list_for_each_entry(publ, &publ_root, local_list) {
@@ -202,13 +224,14 @@ void tipc_named_node_up(unsigned long node)
item++;
left -= ITEM_SIZE;
if (!left) {
- msg_set_link_selector(buf_msg(buf), node);
- tipc_link_send(buf, node, node);
+ list_add_tail((struct list_head *)buf, &message_list);
buf = NULL;
}
}
exit:
read_unlock_bh(&tipc_nametbl_lock);
+
+ tipc_link_send_names(&message_list, (u32)node);
}
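[max_item_buf is now derived from the first active link's MTU rather than TIPC_MAX_USER_MSG_SIZE, rounded down to a whole number of distribution items. A worked example with assumed sizes:

	/*
	 * Assuming max_pkt = 1500, INT_H_SIZE = 40, ITEM_SIZE = 20:
	 *
	 *   usable  = 1500 - 40          = 1460
	 *   rounded = (1460 / 20) * 20   = 1460  ->  73 items per message
	 *
	 * With max_pkt = 1485: (1445 / 20) * 20 = 1440 -> 72 items,
	 * leaving 5 bytes of payload unused rather than splitting an item.
	 */
]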
/**
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 68b3dd6..fafef6c 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -141,17 +141,6 @@ void tipc_net_route_msg(struct sk_buff *buf)
return;
msg = buf_msg(buf);
- msg_incr_reroute_cnt(msg);
- if (msg_reroute_cnt(msg) > 6) {
- if (msg_errcode(msg)) {
- buf_discard(buf);
- } else {
- tipc_reject_msg(buf, msg_destport(msg) ?
- TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
- }
- return;
- }
-
/* Handle message for this node */
dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
if (tipc_in_scope(dnode, tipc_own_addr)) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2d106ef..27b4bb0 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -112,6 +112,7 @@ struct tipc_node *tipc_node_create(u32 addr)
break;
}
list_add_tail(&n_ptr->list, &temp_node->list);
+ n_ptr->block_setup = WAIT_PEER_DOWN;
tipc_num_nodes++;
@@ -312,7 +313,7 @@ static void node_established_contact(struct tipc_node *n_ptr)
}
}
-static void node_cleanup_finished(unsigned long node_addr)
+static void node_name_purge_complete(unsigned long node_addr)
{
struct tipc_node *n_ptr;
@@ -320,7 +321,7 @@ static void node_cleanup_finished(unsigned long node_addr)
n_ptr = tipc_node_find(node_addr);
if (n_ptr) {
tipc_node_lock(n_ptr);
- n_ptr->cleanup_required = 0;
+ n_ptr->block_setup &= ~WAIT_NAMES_GONE;
tipc_node_unlock(n_ptr);
}
read_unlock_bh(&tipc_net_lock);
@@ -331,28 +332,32 @@ static void node_lost_contact(struct tipc_node *n_ptr)
char addr_string[16];
u32 i;
- /* Clean up broadcast reception remains */
- n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
- while (n_ptr->bclink.deferred_head) {
- struct sk_buff *buf = n_ptr->bclink.deferred_head;
- n_ptr->bclink.deferred_head = buf->next;
- buf_discard(buf);
- }
- if (n_ptr->bclink.defragm) {
- buf_discard(n_ptr->bclink.defragm);
- n_ptr->bclink.defragm = NULL;
- }
+ info("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
+
+ /* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.supported) {
+ n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
+ while (n_ptr->bclink.deferred_head) {
+ struct sk_buff *buf = n_ptr->bclink.deferred_head;
+ n_ptr->bclink.deferred_head = buf->next;
+ buf_discard(buf);
+ }
+
+ if (n_ptr->bclink.defragm) {
+ buf_discard(n_ptr->bclink.defragm);
+ n_ptr->bclink.defragm = NULL;
+ }
+
+ tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
tipc_bclink_acknowledge(n_ptr,
mod(n_ptr->bclink.acked + 10000));
- tipc_nmap_remove(&tipc_bcast_nmap, n_ptr->addr);
if (n_ptr->addr < tipc_own_addr)
tipc_own_tag--;
- }
- info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ n_ptr->bclink.supported = 0;
+ }
/* Abort link changeover */
for (i = 0; i < MAX_BEARERS; i++) {
@@ -367,10 +372,10 @@ static void node_lost_contact(struct tipc_node *n_ptr)
/* Notify subscribers */
tipc_nodesub_notify(n_ptr);
- /* Prevent re-contact with node until all cleanup is done */
+ /* Prevent re-contact with node until cleanup is done */
- n_ptr->cleanup_required = 1;
- tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
+ n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
+ tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
}
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5c61afc..4f15cb4 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -42,6 +42,12 @@
#include "net.h"
#include "bearer.h"
+/* Flags used to block (re)establishment of contact with a neighboring node */
+
+#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
+#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
+#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
+
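[Taken together, the hunks in node.c and link.c give these flags the following lifecycle (condensed from this patch, not a quotation):

	/*
	 *   tipc_node_create():          block_setup  = WAIT_PEER_DOWN
	 *   RESET while links working:   block_setup  = WAIT_NODE_DOWN
	 *   node_lost_contact():         block_setup  = WAIT_PEER_DOWN |
	 *                                               WAIT_NAMES_GONE
	 *   node_name_purge_complete():  block_setup &= ~WAIT_NAMES_GONE
	 *   peer's fresh RESET/ACTIVATE: block_setup &= ~WAIT_PEER_DOWN
	 *
	 * tipc_recv_msg() drops all traffic from the node while any bit
	 * remains set, and tipc_link_send_proto_msg() sends nothing but
	 * RESET messages.
	 */
]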
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
@@ -52,7 +58,7 @@
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @working_links: number of working links to node (both active and standby)
- * @cleanup_required: non-zero if cleaning up after a prior loss of contact
+ * @block_setup: bit mask of conditions preventing link establishment to node
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @bclink: broadcast-related info
@@ -77,7 +83,7 @@ struct tipc_node {
struct link *links[MAX_BEARERS];
int link_cnt;
int working_links;
- int cleanup_required;
+ int block_setup;
int permit_changeover;
struct {
int supported;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index adb2eff..42b8324 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,6 +34,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/export.h>
#include <net/sock.h>
#include "core.h"
@@ -49,7 +50,7 @@ struct tipc_sock {
struct sock sk;
struct tipc_port *p;
struct tipc_portid peer_name;
- long conn_timeout;
+ unsigned int conn_timeout;
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -231,7 +232,7 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
sock_init_data(sock, sk);
sk->sk_backlog_rcv = backlog_rcv;
tipc_sk(sk)->p = tp_ptr;
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
+ tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;
spin_unlock_bh(tp_ptr->lock);
@@ -525,6 +526,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
int needs_conn;
+ long timeout_val;
int res = -EINVAL;
if (unlikely(!dest))
@@ -564,6 +566,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
reject_rx_queue(sk);
}
+ timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+
do {
if (dest->addrtype == TIPC_ADDR_NAME) {
res = dest_name_check(dest, m);
@@ -600,16 +604,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
sock->state = SS_CONNECTING;
break;
}
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
+ if (timeout_val <= 0L) {
+ res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- !tport->congested);
+ timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
+ !tport->congested, timeout_val);
lock_sock(sk);
- if (res)
- break;
} while (1);
exit:
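[The `timeout_val <= 0L` test folds three outcomes into one check; it depends on the standard return conventions of the two helpers (summarized here, not code from this patch):

	/*
	 *   sock_sndtimeo(sk, noblock)
	 *       -> 0 when MSG_DONTWAIT is set, else sk->sk_sndtimeo (jiffies)
	 *
	 *   wait_event_interruptible_timeout(wq, cond, t)
	 *       ->  remaining jiffies (> 0)  if cond became true
	 *       ->  0                        if the timeout elapsed
	 *       -> -ERESTARTSYS              if a signal arrived
	 *
	 * so 0 maps to -EWOULDBLOCK and a negative value is passed through.
	 */
]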
@@ -636,6 +638,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+ long timeout_val;
int res;
/* Handle implied connection establishment */
@@ -650,6 +653,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
if (iocb)
lock_sock(sk);
+ timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+
do {
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_DISCONNECTING)
@@ -663,16 +668,14 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
total_len);
if (likely(res != -ELINKCONG))
break;
- if (m->msg_flags & MSG_DONTWAIT) {
- res = -EWOULDBLOCK;
+ if (timeout_val <= 0L) {
+ res = timeout_val ? timeout_val : -EWOULDBLOCK;
break;
}
release_sock(sk);
- res = wait_event_interruptible(*sk_sleep(sk),
- (!tport->congested || !tport->connected));
+ timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
+ (!tport->congested || !tport->connected), timeout_val);
lock_sock(sk);
- if (res)
- break;
} while (1);
if (iocb)
@@ -1369,7 +1372,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
struct msghdr m = {NULL,};
struct sk_buff *buf;
struct tipc_msg *msg;
- long timeout;
+ unsigned int timeout;
int res;
lock_sock(sk);
@@ -1434,7 +1437,8 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
- timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
+ timeout ? (long)msecs_to_jiffies(timeout)
+ : MAX_SCHEDULE_TIMEOUT);
lock_sock(sk);
if (res > 0) {
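[conn_timeout is now stored in the milliseconds the user supplied and converted to jiffies only at this single point of use, so getsockopt() echoes the value back exactly. A hypothetical user-space snippet:

	/* hypothetical usage: 8-second connect timeout, in milliseconds */
	unsigned int ms = 8000;

	if (setsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, sizeof(ms)) < 0)
		perror("setsockopt");

	/* 0 keeps the old default: connect() waits indefinitely
	 * (MAX_SCHEDULE_TIMEOUT inside the kernel) */
]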
@@ -1480,9 +1484,7 @@ static int listen(struct socket *sock, int len)
lock_sock(sk);
- if (sock->state == SS_READY)
- res = -EOPNOTSUPP;
- else if (sock->state != SS_UNCONNECTED)
+ if (sock->state != SS_UNCONNECTED)
res = -EINVAL;
else {
sock->state = SS_LISTENING;
@@ -1510,10 +1512,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
lock_sock(sk);
- if (sock->state == SS_READY) {
- res = -EOPNOTSUPP;
- goto exit;
- }
if (sock->state != SS_LISTENING) {
res = -EINVAL;
goto exit;
@@ -1696,7 +1694,7 @@ static int setsockopt(struct socket *sock,
res = tipc_set_portunreturnable(tport->ref, value);
break;
case TIPC_CONN_TIMEOUT:
- tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
+ tipc_sk(sk)->conn_timeout = value;
/* no need to set "res", since already 0 at this point */
break;
default:
@@ -1752,7 +1750,7 @@ static int getsockopt(struct socket *sock,
res = tipc_portunreturnable(tport->ref, &value);
break;
case TIPC_CONN_TIMEOUT:
- value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
+ value = tipc_sk(sk)->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
@@ -1790,11 +1788,11 @@ static const struct proto_ops msg_ops = {
.bind = bind,
.connect = connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
+ .accept = sock_no_accept,
.getname = get_name,
.poll = poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
+ .listen = sock_no_listen,
.shutdown = shutdown,
.setsockopt = setsockopt,
.getsockopt = getsockopt,
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 6cf7268..1983717 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -151,7 +151,7 @@ void tipc_subscr_report_overlap(struct subscription *sub,
if (!must && !(sub->filter & TIPC_SUB_PORTS))
return;
- sub->event_cb(sub, found_lower, found_upper, event, port_ref, node);
+ subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
}
/**
@@ -365,7 +365,6 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
subscr_terminate(subscriber);
return NULL;
}
- sub->event_cb = subscr_send_event;
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
sub->server_ref = subscriber->port_ref;
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 45d89bf..4b06ef6 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,16 +39,11 @@
struct subscription;
-typedef void (*tipc_subscr_event) (struct subscription *sub,
- u32 found_lower, u32 found_upper,
- u32 event, u32 port_ref, u32 node);
-
/**
* struct subscription - TIPC network topology subscription object
* @seq: name sequence associated with subscription
* @timeout: duration of subscription (in ms)
* @filter: event filtering to be done for subscription
- * @event_cb: routine invoked when a subscription event is detected
* @timer: timer governing subscription duration (optional)
* @nameseq_list: adjacent subscriptions in name sequence's subscription list
* @subscription_list: adjacent subscriptions in subscriber's subscription list
@@ -61,7 +56,6 @@ struct subscription {
struct tipc_name_seq seq;
u32 timeout;
u32 filter;
- tipc_subscr_event event_cb;
struct timer_list timer;
struct list_head nameseq_list;
struct list_head subscription_list;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index ec68e1c..466fbcc 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1381,8 +1381,10 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
+
UNIXCB(skb).pid = get_pid(scm->pid);
- UNIXCB(skb).cred = get_cred(scm->cred);
+ if (scm->cred)
+ UNIXCB(skb).cred = get_cred(scm->cred);
UNIXCB(skb).fp = NULL;
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
@@ -1392,6 +1394,24 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen
}
/*
+ * Some apps rely on write() giving SCM_CREDENTIALS
+ * We include credentials if source or destination socket
+ * asserted SOCK_PASSCRED.
+ */
+static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
+ const struct sock *other)
+{
+ if (UNIXCB(skb).cred)
+ return;
+ if (test_bit(SOCK_PASSCRED, &sock->flags) ||
+ !other->sk_socket ||
+ test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+ UNIXCB(skb).pid = get_pid(task_tgid(current));
+ UNIXCB(skb).cred = get_current_cred();
+ }
+}
+
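[The conditions under which maybe_add_creds() attaches credentials, tabulated from the function above:

	/*
	 *   skb already carries creds            -> keep them, do nothing
	 *   sender socket set SOCK_PASSCRED      -> attach pid + current creds
	 *   receiver has no socket (closing)     -> attach (flags unreadable)
	 *   receiver socket set SOCK_PASSCRED    -> attach
	 *   otherwise                            -> skb stays credential-free
	 */
]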
+/*
* Send AF_UNIX data.
*/
@@ -1538,6 +1558,7 @@ restart:
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
@@ -1652,6 +1673,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
+ maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index f346395..c43612e 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -81,7 +81,6 @@ static struct proc_dir_entry *proc_router;
* Iterator
*/
static void *r_start(struct seq_file *m, loff_t *pos)
- __acquires(kernel_lock)
{
struct wan_device *wandev;
loff_t l = *pos;
@@ -103,7 +102,6 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
}
static void r_stop(struct seq_file *m, void *v)
- __releases(kernel_lock)
{
mutex_unlock(&config_mutex);
}
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index d5b7c37..0694d62 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -77,6 +77,7 @@
#include <linux/netdevice.h>
#include <linux/wimax.h>
#include <linux/security.h>
+#include <linux/export.h>
#include "wimax-internal.h"
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 68bedf3..7ceffe3 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -32,6 +32,7 @@
#include <net/genetlink.h>
#include <linux/wimax.h>
#include <linux/security.h>
+#include <linux/export.h>
#include "wimax-internal.h"
#define D_SUBMODULE op_reset
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 2609e44..7ab60ba 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -65,6 +65,7 @@
#include <linux/wimax.h>
#include <linux/security.h>
#include <linux/rfkill.h>
+#include <linux/export.h>
#include "wimax-internal.h"
#define D_SUBMODULE op_rfkill
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index ee99e7d..3c65eae 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -55,6 +55,7 @@
#include <net/genetlink.h>
#include <linux/netdevice.h>
#include <linux/wimax.h>
+#include <linux/module.h>
#include "wimax-internal.h"
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c148651..220f3bd 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -582,7 +582,7 @@ int wiphy_register(struct wiphy *wiphy)
}
/* set up regulatory info */
- wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8672e02..b9ec306 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -279,8 +279,6 @@ extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
char *newname);
void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
-void wiphy_update_regulatory(struct wiphy *wiphy,
- enum nl80211_reg_initiator setby);
void cfg80211_bss_expire(struct cfg80211_registered_device *dev);
void cfg80211_bss_age(struct cfg80211_registered_device *dev,
@@ -377,7 +375,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie);
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie);
/* SME */
int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -408,6 +407,7 @@ void cfg80211_sme_failed_assoc(struct wireless_dev *wdev);
bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
/* internal helpers */
+bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher);
int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index f33fbb7..30f20fe 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -7,6 +7,7 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include "wext-compat.h"
#include "nl80211.h"
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index 3268fac..a55c27b 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -41,6 +41,11 @@ struct lib80211_crypto_alg {
static LIST_HEAD(lib80211_crypto_algs);
static DEFINE_SPINLOCK(lib80211_crypto_lock);
+static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
+ int force);
+static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
+static void lib80211_crypt_deinit_handler(unsigned long data);
+
const char *print_ssid(char *buf, const char *ssid, u8 ssid_len)
{
const char *s = ssid;
@@ -111,7 +116,8 @@ void lib80211_crypt_info_free(struct lib80211_crypt_info *info)
}
EXPORT_SYMBOL(lib80211_crypt_info_free);
-void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
+static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
+ int force)
{
struct lib80211_crypt_data *entry, *next;
unsigned long flags;
@@ -131,10 +137,9 @@ void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info, int force)
}
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_deinit_entries);
/* After this, crypt_deinit_list won't accept new members */
-void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
+static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
{
unsigned long flags;
@@ -142,9 +147,8 @@ void lib80211_crypt_quiescing(struct lib80211_crypt_info *info)
info->crypt_quiesced = 1;
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_quiescing);
-void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(unsigned long data)
{
struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
@@ -160,7 +164,6 @@ void lib80211_crypt_deinit_handler(unsigned long data)
}
spin_unlock_irqrestore(info->lock, flags);
}
-EXPORT_SYMBOL(lib80211_crypt_deinit_handler);
void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info,
struct lib80211_crypt_data **crypt)
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index dacb3b4..755738d 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -77,8 +77,6 @@ static void *lib80211_ccmp_init(int key_idx)
priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_ccmp: could not allocate "
- "crypto API aes\n");
priv->tfm = NULL;
goto fail;
}
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 7ea4f2b..3873484 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -101,7 +101,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_arc4)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->tx_tfm_arc4 = NULL;
goto fail;
}
@@ -109,7 +108,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm_michael)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->tx_tfm_michael = NULL;
goto fail;
}
@@ -117,7 +115,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_arc4)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API arc4\n"));
priv->rx_tfm_arc4 = NULL;
goto fail;
}
@@ -125,7 +122,6 @@ static void *lib80211_tkip_init(int key_idx)
priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm_michael)) {
- printk(KERN_DEBUG pr_fmt("could not allocate crypto API michael_mic\n"));
priv->rx_tfm_michael = NULL;
goto fail;
}
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 2f265e0..c130401 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -50,16 +50,12 @@ static void *lib80211_wep_init(int keyidx)
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->tx_tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(priv->rx_tfm)) {
- printk(KERN_DEBUG "lib80211_crypt_wep: could not allocate "
- "crypto API arc4\n");
priv->rx_tfm = NULL;
goto fail;
}
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 5c11608..b7b7868 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,4 +1,5 @@
#include <linux/ieee80211.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include "nl80211.h"
#include "core.h"
@@ -12,6 +13,7 @@
#define MESH_HOLD_T 100
#define MESH_PATH_TIMEOUT 5000
+#define MESH_RANN_INTERVAL 5000
/*
* Minimum interval between two consecutive PREQs originated by the same
@@ -49,6 +51,8 @@ const struct mesh_config default_mesh_config = {
.dot11MeshHWMPmaxPREQretries = MESH_MAX_PREQ_RETRIES,
.path_refresh_time = MESH_PATH_REFRESH_TIME,
.min_discovery_timeout = MESH_MIN_DISCOVERY_TIMEOUT,
+ .dot11MeshHWMPRannInterval = MESH_RANN_INTERVAL,
+ .dot11MeshGateAnnouncementProtocol = false,
};
const struct mesh_setup default_mesh_setup = {
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 832f657..21fc970 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -900,7 +900,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ u64 *cookie)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const struct ieee80211_mgmt *mgmt;
@@ -991,7 +992,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
/* Transmit the Action frame as requested by user space */
return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
channel_type, channel_type_valid,
- wait, buf, len, cookie);
+ wait, buf, len, no_cck, cookie);
}
bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf,
@@ -1095,3 +1096,14 @@ void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
}
EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
+
+void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
+}
+EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
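[A hypothetical driver-side caller of the new export; fw_bss_event and its fields are illustrative, only the cfg80211 call matches this patch:

	static void drv_report_pmksa_candidate(struct net_device *dev,
					       const struct fw_bss_event *ev)
	{
		cfg80211_pmksa_candidate_notify(dev, ev->index, ev->bssid,
						ev->preauth, GFP_KERNEL);
	}
]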
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ea40d54..48260c2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -23,6 +23,12 @@
#include "nl80211.h"
#include "reg.h"
+static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type);
+static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_crypto_settings *settings,
+ int cipher_limit);
+
static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info);
static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
@@ -178,6 +184,19 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
[NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_HIDDEN_SSID] = { .type = NLA_U32 },
+ [NL80211_ATTR_IE_PROBE_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_IE_ASSOC_RESP] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN },
+ [NL80211_ATTR_ROAM_SUPPORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_SCHED_SCAN_MATCH] = { .type = NLA_NESTED },
+ [NL80211_ATTR_TX_NO_CCK_RATE] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TDLS_ACTION] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_DIALOG_TOKEN] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_OPERATION] = { .type = NLA_U8 },
+ [NL80211_ATTR_TDLS_SUPPORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TDLS_EXTERNAL_SETUP] = { .type = NLA_FLAG },
};
/* policy for the key attributes */
@@ -220,6 +239,12 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
};
+static const struct nla_policy
+nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
+ [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_SSID_LEN },
+};
+
/* ifidx get helper */
static int nl80211_get_ifidx(struct netlink_callback *cb)
{
@@ -703,11 +728,21 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.max_scan_ie_len);
NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
dev->wiphy.max_sched_scan_ie_len);
+ NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS,
+ dev->wiphy.max_match_sets);
if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
+ if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT);
+ if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP);
NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -850,6 +885,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
}
CMD(set_channel, SET_CHANNEL);
CMD(set_wds_peer, SET_WDS_PEER);
+ if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ CMD(tdls_mgmt, TDLS_MGMT);
+ CMD(tdls_oper, TDLS_OPER);
+ }
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
CMD(sched_scan_start, START_SCHED_SCAN);
@@ -871,8 +910,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
dev->wiphy.max_remain_on_channel_duration);
- /* for now at least assume all drivers have it */
- if (dev->ops->mgmt_tx)
+ if (dev->ops->mgmt_tx_cancel_wait)
NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK);
if (mgmt_stypes) {
@@ -1210,6 +1248,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
}
+ if (!netdev) {
+ result = -EINVAL;
+ goto bad_res;
+ }
+
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
@@ -1222,6 +1265,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
goto bad_res;
result = rdev->ops->set_txq_params(&rdev->wiphy,
+ netdev,
&txq_params);
if (result)
goto bad_res;
@@ -1985,7 +2029,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
struct beacon_parameters params;
int haveinfo = 0, err;
- if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
+ if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_PROBE_RESP]) ||
+ !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]))
return -EINVAL;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
@@ -2011,6 +2058,49 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
+ /*
+ * In theory, some of these attributes could be required for
+ * NEW_BEACON, but since they were not used when the command was
+ * originally added, keep them optional for old user space
+ * programs to work with drivers that do not need the additional
+ * information.
+ */
+ if (info->attrs[NL80211_ATTR_SSID]) {
+ params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ params.ssid_len =
+ nla_len(info->attrs[NL80211_ATTR_SSID]);
+ if (params.ssid_len == 0 ||
+ params.ssid_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_HIDDEN_SSID]) {
+ params.hidden_ssid = nla_get_u32(
+ info->attrs[NL80211_ATTR_HIDDEN_SSID]);
+ if (params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_NOT_IN_USE &&
+ params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_ZERO_LEN &&
+ params.hidden_ssid !=
+ NL80211_HIDDEN_SSID_ZERO_CONTENTS)
+ return -EINVAL;
+ }
+
+ params.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
+
+ if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+ params.auth_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(params.auth_type))
+ return -EINVAL;
+ } else
+ params.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+ err = nl80211_crypto_settings(rdev, info, &params.crypto,
+ NL80211_MAX_NR_CIPHER_SUITES);
+ if (err)
+ return err;
+
call = rdev->ops->add_beacon;
break;
case NL80211_CMD_SET_BEACON:
@@ -2041,6 +2131,25 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
if (!haveinfo)
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_IE]) {
+ params.beacon_ies = nla_data(info->attrs[NL80211_ATTR_IE]);
+ params.beacon_ies_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ if (info->attrs[NL80211_ATTR_IE_PROBE_RESP]) {
+ params.proberesp_ies =
+ nla_data(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+ params.proberesp_ies_len =
+ nla_len(info->attrs[NL80211_ATTR_IE_PROBE_RESP]);
+ }
+
+ if (info->attrs[NL80211_ATTR_IE_ASSOC_RESP]) {
+ params.assocresp_ies =
+ nla_data(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+ params.assocresp_ies_len =
+ nla_len(info->attrs[NL80211_ATTR_IE_ASSOC_RESP]);
+ }
+
err = call(&rdev->wiphy, dev, &params);
if (!err && params.interval)
wdev->beacon_interval = params.interval;
@@ -2235,8 +2344,16 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
nla_nest_end(msg, bss_param);
}
+ if (sinfo->filled & STATION_INFO_STA_FLAGS)
+ NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS,
+ sizeof(struct nl80211_sta_flag_update),
+ &sinfo->sta_flags);
nla_nest_end(msg, sinfoattr);
+ if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
+ NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
+ sinfo->assoc_req_ies);
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -2264,6 +2381,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
}
while (1) {
+ memset(&sinfo, 0, sizeof(sinfo));
err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
mac_addr, &sinfo);
if (err == -ENOENT)
@@ -2416,18 +2534,25 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
break;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_STATION:
- /* disallow everything but AUTHORIZED flag */
+ /* disallow things sta doesn't support */
if (params.plink_action)
err = -EINVAL;
if (params.vlan)
err = -EINVAL;
- if (params.supported_rates)
+ if (params.supported_rates &&
+ !(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
err = -EINVAL;
if (params.ht_capa)
err = -EINVAL;
if (params.listen_interval >= 0)
err = -EINVAL;
- if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ if (params.sta_flags_mask &
+ ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
+ BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ err = -EINVAL;
+ /* can't change the TDLS bit */
+ if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ (params.sta_flags_mask & BIT(NL80211_STA_FLAG_TDLS_PEER)))
err = -EINVAL;
break;
case NL80211_IFTYPE_MESH_POINT:
@@ -2465,6 +2590,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+ [NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
+ [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
+};
+
static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2510,10 +2641,50 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (parse_station_flags(info, &params))
return -EINVAL;
+ /* parse WME attributes if sta is WME capable */
+ if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
+ (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) &&
+ info->attrs[NL80211_ATTR_STA_WME]) {
+ struct nlattr *tb[NL80211_STA_WME_MAX + 1];
+ struct nlattr *nla;
+
+ nla = info->attrs[NL80211_ATTR_STA_WME];
+ err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
+ nl80211_sta_wme_policy);
+ if (err)
+ return err;
+
+ if (tb[NL80211_STA_WME_UAPSD_QUEUES])
+ params.uapsd_queues =
+ nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
+ if (params.uapsd_queues & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ return -EINVAL;
+
+ if (tb[NL80211_STA_WME_MAX_SP])
+ params.max_sp =
+ nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
+
+ if (params.max_sp & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ return -EINVAL;
+
+ params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
+ }
+
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+ /*
+ * Only managed stations can add TDLS peers, and only when the
+ * wiphy supports external TDLS setup.
+ */
+ if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_STATION &&
+ !((params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+ (rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+ (rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)))
return -EINVAL;
err = get_vlan(info, rdev, &params.vlan);
@@ -2955,6 +3126,10 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
cur_params.dot11MeshHWMPnetDiameterTraversalTime);
NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
cur_params.dot11MeshHWMPRootMode);
+ NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ cur_params.dot11MeshHWMPRannInterval);
+ NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ cur_params.dot11MeshGateAnnouncementProtocol);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
return genlmsg_reply(msg, info);
@@ -2982,6 +3157,9 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_ROOTMODE] = { .type = NLA_U8 },
+ [NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
};
static const struct nla_policy
@@ -3060,6 +3238,14 @@ do {\
dot11MeshHWMPRootMode, mask,
NL80211_MESHCONF_HWMP_ROOTMODE,
nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshGateAnnouncementProtocol, mask,
+ NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ nla_get_u8);
if (mask_out)
*mask_out = mask;
@@ -3477,6 +3663,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
}
}
+ request->no_cck =
+ nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
request->dev = dev;
request->wiphy = &rdev->wiphy;
@@ -3503,10 +3692,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
struct net_device *dev = info->user_ptr[1];
struct nlattr *attr;
struct wiphy *wiphy;
- int err, tmp, n_ssids = 0, n_channels, i;
+ int err, tmp, n_ssids = 0, n_match_sets = 0, n_channels, i;
u32 interval;
enum ieee80211_band band;
size_t ie_len;
+ struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
@@ -3545,6 +3735,15 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (n_ssids > wiphy->max_sched_scan_ssids)
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+ nla_for_each_nested(attr,
+ info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ tmp)
+ n_match_sets++;
+
+ if (n_match_sets > wiphy->max_match_sets)
+ return -EINVAL;
+
if (info->attrs[NL80211_ATTR_IE])
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
else
@@ -3562,6 +3761,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ + sizeof(*request->match_sets) * n_match_sets
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request) {
@@ -3579,6 +3779,18 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->ie = (void *)(request->channels + n_channels);
}
+ if (n_match_sets) {
+ if (request->ie)
+ request->match_sets = (void *)(request->ie + ie_len);
+ else if (request->ssids)
+ request->match_sets =
+ (void *)(request->ssids + n_ssids);
+ else
+ request->match_sets =
+ (void *)(request->channels + n_channels);
+ }
+ request->n_match_sets = n_match_sets;
+
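[The request and its variable-length arrays share a single kzalloc(); the pointer cascade above exists because match_sets must follow whichever arrays are actually present. The resulting layout, as inferred from the pointer setup:

	/*
	 *   [ struct cfg80211_sched_scan_request ]
	 *   [ channels[n_channels]               ]  <- request->channels
	 *   [ ssids[n_ssids]                     ]  <- request->ssids
	 *   [ ie bytes (ie_len)                  ]  <- request->ie
	 *   [ match_sets[n_match_sets]           ]  <- request->match_sets
	 */
]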
i = 0;
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
/* user specified, bail out if channel not found */
@@ -3643,6 +3855,31 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
}
}
+ i = 0;
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
+ nla_for_each_nested(attr,
+ info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
+ tmp) {
+ struct nlattr *ssid;
+
+ nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+ if (ssid) {
+ if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ memcpy(request->match_sets[i].ssid.ssid,
+ nla_data(ssid), nla_len(ssid));
+ request->match_sets[i].ssid.ssid_len =
+ nla_len(ssid);
+ }
+ i++;
+ }
+ }
+
if (info->attrs[NL80211_ATTR_IE]) {
request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
memcpy((void *)request->ie,
@@ -3935,22 +4172,6 @@ static bool nl80211_valid_wpa_versions(u32 wpa_versions)
NL80211_WPA_VERSION_2));
}
-static bool nl80211_valid_akm_suite(u32 akm)
-{
- return akm == WLAN_AKM_SUITE_8021X ||
- akm == WLAN_AKM_SUITE_PSK;
-}
-
-static bool nl80211_valid_cipher_suite(u32 cipher)
-{
- return cipher == WLAN_CIPHER_SUITE_WEP40 ||
- cipher == WLAN_CIPHER_SUITE_WEP104 ||
- cipher == WLAN_CIPHER_SUITE_TKIP ||
- cipher == WLAN_CIPHER_SUITE_CCMP ||
- cipher == WLAN_CIPHER_SUITE_AES_CMAC;
-}
-
-
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -4083,7 +4304,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
memcpy(settings->ciphers_pairwise, data, len);
for (i = 0; i < settings->n_ciphers_pairwise; i++)
- if (!nl80211_valid_cipher_suite(
+ if (!cfg80211_supported_cipher_suite(
+ &rdev->wiphy,
settings->ciphers_pairwise[i]))
return -EINVAL;
}
@@ -4091,7 +4313,8 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
settings->cipher_group =
nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
- if (!nl80211_valid_cipher_suite(settings->cipher_group))
+ if (!cfg80211_supported_cipher_suite(&rdev->wiphy,
+ settings->cipher_group))
return -EINVAL;
}
@@ -4104,7 +4327,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
void *data;
- int len, i;
+ int len;
data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
@@ -4117,10 +4340,6 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
return -EINVAL;
memcpy(settings->akm_suites, data, len);
-
- for (i = 0; i < settings->n_akm_suites; i++)
- if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
- return -EINVAL;
}
return 0;
@@ -4339,8 +4558,12 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
wiphy = &rdev->wiphy;
- if (info->attrs[NL80211_ATTR_MAC])
+ if (info->attrs[NL80211_ATTR_MAC]) {
ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (!is_valid_ether_addr(ibss.bssid))
+ return -EINVAL;
+ }
ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -4777,6 +5000,57 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
}
+static int nl80211_tdls_mgmt(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ u8 action_code, dialog_token;
+ u16 status_code;
+ u8 *peer;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !rdev->ops->tdls_mgmt)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_TDLS_ACTION] ||
+ !info->attrs[NL80211_ATTR_STATUS_CODE] ||
+ !info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN] ||
+ !info->attrs[NL80211_ATTR_IE] ||
+ !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ action_code = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_ACTION]);
+ status_code = nla_get_u16(info->attrs[NL80211_ATTR_STATUS_CODE]);
+ dialog_token = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_DIALOG_TOKEN]);
+
+ return rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
+ dialog_token, status_code,
+ nla_data(info->attrs[NL80211_ATTR_IE]),
+ nla_len(info->attrs[NL80211_ATTR_IE]));
+}
+
+static int nl80211_tdls_oper(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ enum nl80211_tdls_operation operation;
+ u8 *peer;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !rdev->ops->tdls_oper)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_TDLS_OPERATION] ||
+ !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ operation = nla_get_u8(info->attrs[NL80211_ATTR_TDLS_OPERATION]);
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ return rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, operation);
+}
+
static int nl80211_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
@@ -4997,6 +5271,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
struct sk_buff *msg;
unsigned int wait = 0;
bool offchan;
+ bool no_cck;
if (!info->attrs[NL80211_ATTR_FRAME] ||
!info->attrs[NL80211_ATTR_WIPHY_FREQ])
@@ -5033,6 +5308,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK];
+ no_cck = nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
chan = rdev_freq_to_chan(rdev, freq, channel_type);
if (chan == NULL)
@@ -5053,7 +5330,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
- &cookie);
+ no_cck, &cookie);
if (err)
goto free_msg;
@@ -6089,6 +6366,22 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_TDLS_MGMT,
+ .doit = nl80211_tdls_mgmt,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_TDLS_OPER,
+ .doit = nl80211_tdls_oper,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -7078,6 +7371,52 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
nlmsg_free(msg);
}
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *attr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PMKSA_CANDIDATE);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+ attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
+ if (!attr)
+ goto nla_put_failure;
+
+ NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index);
+ NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid);
+ if (preauth)
+ NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH);
+
+ nla_nest_end(msg, attr);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 5d69c56..f24a1fb 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -113,4 +113,8 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
+void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, int index,
+ const u8 *bssid, bool preauth, gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index dbe35e1..c4ad795 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -15,6 +15,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 68a471b..6acba9d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -36,12 +36,14 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/ctype.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
+#include <linux/moduleparam.h>
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
@@ -49,10 +51,8 @@
#include "nl80211.h"
#ifdef CONFIG_CFG80211_REG_DEBUG
-#define REG_DBG_PRINT(format, args...) \
- do { \
- printk(KERN_DEBUG pr_fmt(format), ##args); \
- } while (0)
+#define REG_DBG_PRINT(format, args...) \
+ printk(KERN_DEBUG pr_fmt(format), ##args)
#else
#define REG_DBG_PRINT(args...)
#endif
@@ -753,9 +753,10 @@ static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
chan->center_freq,
KHZ_TO_MHZ(desired_bw_khz));
- REG_DBG_PRINT("%d KHz - %d KHz @ KHz), (%s mBi, %d mBm)\n",
+ REG_DBG_PRINT("%d KHz - %d KHz @ %d KHz), (%s mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
+ freq_range->max_bandwidth_khz,
max_antenna_gain,
power_rule->max_eirp);
}
@@ -891,7 +892,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver uses its own custom "
- "regulatory domain ",
+ "regulatory domain\n",
reg_initiator_name(initiator));
return true;
}
@@ -905,7 +906,7 @@ static bool ignore_reg_update(struct wiphy *wiphy,
!is_world_regdom(last_request->alpha2)) {
REG_DBG_PRINT("Ignoring regulatory request %s "
"since the driver requires its own regulatory "
- "domain to be set first",
+ "domain to be set first\n",
reg_initiator_name(initiator));
return true;
}
@@ -913,14 +914,6 @@ static bool ignore_reg_update(struct wiphy *wiphy,
return false;
}
-static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
-{
- struct cfg80211_registered_device *rdev;
-
- list_for_each_entry(rdev, &cfg80211_rdev_list, list)
- wiphy_update_regulatory(&rdev->wiphy, initiator);
-}
-
static void handle_reg_beacon(struct wiphy *wiphy,
unsigned int chan_idx,
struct reg_beacon *reg_beacon)
@@ -1120,11 +1113,13 @@ static void reg_process_ht_flags(struct wiphy *wiphy)
}
-void wiphy_update_regulatory(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static void wiphy_update_regulatory(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
{
enum ieee80211_band band;
+ assert_reg_lock();
+
if (ignore_reg_update(wiphy, initiator))
return;
@@ -1139,6 +1134,22 @@ void wiphy_update_regulatory(struct wiphy *wiphy,
wiphy->reg_notifier(wiphy, last_request);
}
+void regulatory_update(struct wiphy *wiphy,
+ enum nl80211_reg_initiator setby)
+{
+ mutex_lock(&reg_mutex);
+ wiphy_update_regulatory(wiphy, setby);
+ mutex_unlock(&reg_mutex);
+}
+
+static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
+{
+ struct cfg80211_registered_device *rdev;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+ wiphy_update_regulatory(&rdev->wiphy, initiator);
+}
+
static void handle_channel_custom(struct wiphy *wiphy,
enum ieee80211_band band,
unsigned int chan_idx,
@@ -1475,7 +1486,7 @@ static void reg_process_pending_hints(void)
/* When last_request->processed becomes true this will be rescheduled */
if (last_request && !last_request->processed) {
REG_DBG_PRINT("Pending regulatory request, waiting "
- "for it to be processed...");
+ "for it to be processed...\n");
goto out;
}
@@ -2188,7 +2199,7 @@ out:
static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
- "restoring regulatory settings");
+ "restoring regulatory settings\n");
restore_regulatory_settings(true);
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b67d1c3..4a56799 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -16,6 +16,8 @@ void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
+void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+
/**
* regulatory_hint_found_beacon - hints a beacon was found on a channel
* @wiphy: the wireless device where the beacon was found on
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 2936cb8..0fb1424 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -12,6 +12,7 @@
#include <linux/etherdevice.h>
#include <net/arp.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include <net/iw_handler.h>
#include "core.h"
#include "nl80211.h"
@@ -227,6 +228,33 @@ const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
}
EXPORT_SYMBOL(cfg80211_find_ie);
+const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
+ const u8 *ies, int len)
+{
+ struct ieee80211_vendor_ie *ie;
+ const u8 *pos = ies, *end = ies + len;
+ int ie_oui;
+
+ while (pos < end) {
+ pos = cfg80211_find_ie(WLAN_EID_VENDOR_SPECIFIC, pos,
+ end - pos);
+ if (!pos)
+ return NULL;
+
+ if (end - pos < sizeof(*ie))
+ return NULL;
+
+ ie = (struct ieee80211_vendor_ie *)pos;
+ ie_oui = ie->oui[0] << 16 | ie->oui[1] << 8 | ie->oui[2];
+ if (ie_oui == oui && ie->oui_type == oui_type)
+ return pos;
+
+ pos += 2 + ie->len;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(cfg80211_find_vendor_ie);
+
static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
{
const u8 *ie1 = cfg80211_find_ie(num, ies1, len1);
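
The new cfg80211_find_vendor_ie() walks the vendor-specific elements in an IE buffer and matches on the 24-bit OUI plus the type byte that follows it. A hedged usage sketch, assuming the caller already has the raw element buffer of a received frame (ies/ies_len are illustrative parameters, not cfg80211 API):

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

#define OUI_MICROSOFT	0x0050f2	/* 00:50:F2 */
#define OUI_TYPE_WMM	2

static bool ies_have_wmm(const u8 *ies, int ies_len)
{
	const u8 *ie;

	/* Returns a pointer to the element header, or NULL. */
	ie = cfg80211_find_vendor_ie(OUI_MICROSOFT, OUI_TYPE_WMM,
				     ies, ies_len);
	return ie != NULL;
}
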
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index dec0fa2..0acfdc9 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
+#include <linux/export.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
@@ -110,17 +111,22 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
else {
int i = 0, j;
enum ieee80211_band band;
+ struct ieee80211_supported_band *bands;
+ struct ieee80211_channel *channel;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
- if (!wdev->wiphy->bands[band])
+ bands = wdev->wiphy->bands[band];
+ if (!bands)
continue;
- for (j = 0; j < wdev->wiphy->bands[band]->n_channels;
- i++, j++)
- request->channels[i] =
- &wdev->wiphy->bands[band]->channels[j];
- request->rates[band] =
- (1 << wdev->wiphy->bands[band]->n_bitrates) - 1;
+ for (j = 0; j < bands->n_channels; j++) {
+ channel = &bands->channels[j];
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+ request->channels[i++] = channel;
+ }
+ request->rates[band] = (1 << bands->n_bitrates) - 1;
}
+ n_channels = i;
}
request->n_channels = n_channels;
request->ssids = (void *)&request->channels[n_channels];
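
The rewritten loop skips channels flagged IEEE80211_CHAN_DISABLED and recomputes n_channels from the number actually copied, so the scan request can no longer reference channels the current regulatory state forbids. The underlying pattern, as a hedged sketch with hypothetical names:

/* Filtered copy: derive the count from what was copied,
 * not from the source size. */
static int copy_enabled_channels(struct ieee80211_channel **dst,
				 struct ieee80211_supported_band *sband)
{
	int i = 0, j;

	for (j = 0; j < sband->n_channels; j++) {
		if (sband->channels[j].flags & IEEE80211_CHAN_DISABLED)
			continue;
		dst[i++] = &sband->channels[j];
	}
	return i;	/* the count that is actually valid */
}
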
diff --git a/net/wireless/util.c b/net/wireless/util.c
index be75a3a..4dde429 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -3,9 +3,11 @@
*
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
*/
+#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
+#include <linux/crc32.h>
#include <net/cfg80211.h>
#include <net/ip.h>
#include "core.h"
@@ -150,12 +152,19 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
set_mandatory_flags_band(wiphy->bands[band], band);
}
+bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
+{
+ int i;
+ for (i = 0; i < wiphy->n_cipher_suites; i++)
+ if (cipher == wiphy->cipher_suites[i])
+ return true;
+ return false;
+}
+
int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr)
{
- int i;
-
if (key_idx > 5)
return -EINVAL;
@@ -225,10 +234,7 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
}
}
- for (i = 0; i < rdev->wiphy.n_cipher_suites; i++)
- if (params->cipher == rdev->wiphy.cipher_suites[i])
- break;
- if (i == rdev->wiphy.n_cipher_suites)
+ if (!cfg80211_supported_cipher_suite(&rdev->wiphy, params->cipher))
return -EINVAL;
return 0;
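
The open-coded scan over wiphy->cipher_suites is factored into cfg80211_supported_cipher_suite() so other call sites can reuse it. A hedged sketch of a hypothetical caller (wiphy_supports_ccmp is illustrative, not a kernel function):

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static bool wiphy_supports_ccmp(struct wiphy *wiphy)
{
	return cfg80211_supported_cipher_suite(wiphy,
					       WLAN_CIPHER_SUITE_CCMP);
}
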
@@ -391,8 +397,9 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
}
break;
case cpu_to_le16(0):
- if (iftype != NL80211_IFTYPE_ADHOC)
- return -1;
+ if (iftype != NL80211_IFTYPE_ADHOC &&
+ iftype != NL80211_IFTYPE_STATION)
+ return -1;
break;
}
@@ -512,10 +519,9 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
if (head_need)
skb_orphan(skb);
- if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC)) {
- pr_err("failed to reallocate Tx buffer\n");
+ if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
return -ENOMEM;
- }
+
skb->truesize += head_need;
}
@@ -1044,3 +1050,170 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
return 0;
}
+
+u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
+ struct ieee802_11_elems *elems,
+ u64 filter, u32 crc)
+{
+ size_t left = len;
+ u8 *pos = start;
+ bool calc_crc = filter != 0;
+
+ memset(elems, 0, sizeof(*elems));
+ elems->ie_start = start;
+ elems->total_len = len;
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left)
+ break;
+
+ if (calc_crc && id < 64 && (filter & (1ULL << id)))
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ if (elen >= sizeof(struct ieee80211_tim_ie)) {
+ elems->tim = (void *)pos;
+ elems->tim_len = elen;
+ }
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 &&
+ pos[2] == 0xf2) {
+ /* Microsoft OUI (00:50:F2) */
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ if (pos[3] == 1) {
+ /* OUI Type 1 - WPA IE */
+ elems->wpa = pos;
+ elems->wpa_len = elen;
+ } else if (elen >= 5 && pos[3] == 2) {
+ /* OUI Type 2 - WMM IE */
+ if (pos[4] == 0) {
+ elems->wmm_info = pos;
+ elems->wmm_info_len = elen;
+ } else if (pos[4] == 1) {
+ elems->wmm_param = pos;
+ elems->wmm_param_len = elen;
+ }
+ }
+ }
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn = pos;
+ elems->rsn_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ if (elen >= sizeof(struct ieee80211_ht_cap))
+ elems->ht_cap_elem = (void *)pos;
+ break;
+ case WLAN_EID_HT_INFORMATION:
+ if (elen >= sizeof(struct ieee80211_ht_info))
+ elems->ht_info_elem = (void *)pos;
+ break;
+ case WLAN_EID_MESH_ID:
+ elems->mesh_id = pos;
+ elems->mesh_id_len = elen;
+ break;
+ case WLAN_EID_MESH_CONFIG:
+ if (elen >= sizeof(struct ieee80211_meshconf_ie))
+ elems->mesh_config = (void *)pos;
+ break;
+ case WLAN_EID_PEER_MGMT:
+ elems->peering = pos;
+ elems->peering_len = elen;
+ break;
+ case WLAN_EID_PREQ:
+ elems->preq = pos;
+ elems->preq_len = elen;
+ break;
+ case WLAN_EID_PREP:
+ elems->prep = pos;
+ elems->prep_len = elen;
+ break;
+ case WLAN_EID_PERR:
+ elems->perr = pos;
+ elems->perr_len = elen;
+ break;
+ case WLAN_EID_RANN:
+ if (elen >= sizeof(struct ieee80211_rann_ie))
+ elems->rann = (void *)pos;
+ break;
+ case WLAN_EID_CHANNEL_SWITCH:
+ elems->ch_switch_elem = pos;
+ elems->ch_switch_elem_len = elen;
+ break;
+ case WLAN_EID_QUIET:
+ if (!elems->quiet_elem) {
+ elems->quiet_elem = pos;
+ elems->quiet_elem_len = elen;
+ }
+ elems->num_of_quiet_elem++;
+ break;
+ case WLAN_EID_COUNTRY:
+ elems->country_elem = pos;
+ elems->country_elem_len = elen;
+ break;
+ case WLAN_EID_PWR_CONSTRAINT:
+ elems->pwr_constr_elem = pos;
+ elems->pwr_constr_elem_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ default:
+ break;
+ }
+
+ left -= elen;
+ pos += elen;
+ }
+
+ return crc;
+}
+EXPORT_SYMBOL(ieee802_11_parse_elems_crc);
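
ieee802_11_parse_elems_crc() fills a struct ieee802_11_elems with pointers into the element buffer and, when filter is non-zero, folds each filtered element (element IDs below 64 only, plus the WPA/WMM vendor elements) into a running CRC32 so callers can cheaply detect when beacon content changed. A hedged usage sketch; mgmt_ies/len are assumed to point at the element area of a received frame, and the initial CRC seed is arbitrary:

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static u32 parse_beacon_elems(u8 *mgmt_ies, size_t len,
			      struct ieee802_11_elems *elems)
{
	u64 filter = (1ULL << WLAN_EID_SSID) |
		     (1ULL << WLAN_EID_DS_PARAMS) |
		     (1ULL << WLAN_EID_HT_CAPABILITY);

	/* The returned CRC changes whenever a filtered element does. */
	return ieee802_11_parse_elems_crc(mgmt_ies, len, elems,
					  filter, ~0);
}
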
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 0bf169b..6897436 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -8,6 +8,7 @@
* Copyright 2008-2009 Johannes Berg <johannes@sipsolutions.net>
*/
+#include <linux/export.h>
#include <linux/wireless.h>
#include <linux/nl80211.h>
#include <linux/if_arp.h>
@@ -15,6 +16,7 @@
#include <linux/slab.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "core.h"
@@ -363,9 +365,9 @@ int cfg80211_wext_giwfrag(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwfrag);
-int cfg80211_wext_siwretry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *retry, char *extra)
+static int cfg80211_wext_siwretry(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *retry, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -402,7 +404,6 @@ int cfg80211_wext_siwretry(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwretry);
int cfg80211_wext_giwretry(struct net_device *dev,
struct iw_request_info *info,
@@ -593,9 +594,9 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return err;
}
-int cfg80211_wext_siwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_siwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -652,11 +653,10 @@ int cfg80211_wext_siwencode(struct net_device *dev,
wdev->wext.default_key == -1,
idx, &params);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencode);
-int cfg80211_wext_siwencodeext(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *extra)
+static int cfg80211_wext_siwencodeext(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -744,11 +744,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
idx, &params);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
-int cfg80211_wext_giwencode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *erq, char *keybuf)
+static int cfg80211_wext_giwencode(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int idx;
@@ -782,11 +781,10 @@ int cfg80211_wext_giwencode(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
-int cfg80211_wext_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *wextfreq, char *extra)
+static int cfg80211_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *wextfreq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -815,11 +813,10 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
-int cfg80211_wext_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
+static int cfg80211_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -836,11 +833,10 @@ int cfg80211_wext_giwfreq(struct net_device *dev,
return 0;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq);
-int cfg80211_wext_siwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
+static int cfg80211_wext_siwtxpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -889,11 +885,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
-int cfg80211_wext_giwtxpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
+static int cfg80211_wext_giwtxpower(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -919,7 +914,6 @@ int cfg80211_wext_giwtxpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);
static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
s32 auth_alg)
@@ -1070,9 +1064,9 @@ static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
return 0;
}
-int cfg80211_wext_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+static int cfg80211_wext_siwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1102,21 +1096,19 @@ int cfg80211_wext_siwauth(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth);
-int cfg80211_wext_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
+static int cfg80211_wext_giwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
{
/* XXX: what do we need? */
return -EOPNOTSUPP;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth);
-int cfg80211_wext_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra)
+static int cfg80211_wext_siwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1160,11 +1152,10 @@ int cfg80211_wext_siwpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower);
-int cfg80211_wext_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq, char *extra)
+static int cfg80211_wext_giwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1172,7 +1163,6 @@ int cfg80211_wext_giwpower(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower);
static int cfg80211_wds_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
@@ -1218,9 +1208,9 @@ static int cfg80211_wds_wext_giwap(struct net_device *dev,
return 0;
}
-int cfg80211_wext_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
+static int cfg80211_wext_siwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1268,11 +1258,10 @@ int cfg80211_wext_siwrate(struct net_device *dev,
return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
-int cfg80211_wext_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
+static int cfg80211_wext_giwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1308,10 +1297,9 @@ int cfg80211_wext_giwrate(struct net_device *dev,
return 0;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate);
/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
-struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
+static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1376,11 +1364,10 @@ struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
return &wstats;
}
-EXPORT_SYMBOL_GPL(cfg80211_wireless_stats);
-int cfg80211_wext_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+static int cfg80211_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1395,11 +1382,10 @@ int cfg80211_wext_siwap(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwap);
-int cfg80211_wext_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
+static int cfg80211_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1414,11 +1400,10 @@ int cfg80211_wext_giwap(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwap);
-int cfg80211_wext_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
+static int cfg80211_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1431,11 +1416,10 @@ int cfg80211_wext_siwessid(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid);
-int cfg80211_wext_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
+static int cfg80211_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -1451,11 +1435,10 @@ int cfg80211_wext_giwessid(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
-int cfg80211_wext_siwpmksa(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
+static int cfg80211_wext_siwpmksa(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
@@ -1493,7 +1476,6 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
return -EOPNOTSUPP;
}
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa);
static const iw_handler cfg80211_handlers[] = {
[IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
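
All of these handlers can drop their EXPORT_SYMBOL_GPL and become static because their only remaining user is the cfg80211_handlers dispatch table in this file; wext ioctls reach them through that table rather than through direct calls from drivers. A minimal sketch of the table pattern (example_handlers is illustrative):

static const iw_handler example_handlers[] = {
	[IW_IOCTL_IDX(SIOCSIWFREQ)]  = (iw_handler) cfg80211_wext_siwfreq,
	[IW_IOCTL_IDX(SIOCGIWFREQ)]  = (iw_handler) cfg80211_wext_giwfreq,
	[IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid,
	[IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid,
};
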
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
index 20b3dae..5d766b0 100644
--- a/net/wireless/wext-compat.h
+++ b/net/wireless/wext-compat.h
@@ -42,6 +42,14 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid);
+int cfg80211_wext_siwmlme(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+int cfg80211_wext_siwgenie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra);
+
+
int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index fdbc23c..0af7f54 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/wireless.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <net/cfg80211.h>
#include <net/iw_handler.h>
#include <net/netlink.h>
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 6fffe62..326750b 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -5,10 +5,12 @@
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <net/cfg80211.h>
+#include <net/cfg80211-wext.h>
#include "wext-compat.h"
#include "nl80211.h"
@@ -365,7 +367,6 @@ int cfg80211_wext_siwgenie(struct net_device *dev,
wdev_unlock(wdev);
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie);
int cfg80211_wext_siwmlme(struct net_device *dev,
struct iw_request_info *info,
@@ -402,4 +403,3 @@ int cfg80211_wext_siwmlme(struct net_device *dev,
return err;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
diff --git a/net/wireless/wext-spy.c b/net/wireless/wext-spy.c
index 6dcfe65..5d643a5 100644
--- a/net/wireless/wext-spy.c
+++ b/net/wireless/wext-spy.c
@@ -10,6 +10,7 @@
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/export.h>
#include <net/iw_handler.h>
#include <net/arp.h>
#include <net/wext.h>
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5f03e4e..3e16c6a 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1261,14 +1261,19 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
struct x25_sock *x25 = x25_sk(sk);
struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name;
size_t copied;
- int qbit, header_len = x25->neighbour->extended ?
- X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
-
+ int qbit, header_len;
struct sk_buff *skb;
unsigned char *asmptr;
int rc = -ENOTCONN;
lock_sock(sk);
+
+ if (x25->neighbour == NULL)
+ goto out;
+
+ header_len = x25->neighbour->extended ?
+ X25_EXT_MIN_LEN : X25_STD_MIN_LEN;
+
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
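
x25->neighbour can go away underneath a socket (for example after the underlying device disappears), and the old code dereferenced it before taking the socket lock. The fix computes header_len only after lock_sock() and bails out with -ENOTCONN when the neighbour is gone. The general shape, as a hedged sketch with hypothetical names (sk_state, peer), assuming the lock serializes against the clearing path:

lock_sock(sk);

peer = sk_state->peer;		/* may be cleared by disconnect */
if (peer == NULL) {
	rc = -ENOTCONN;		/* report it, don't dereference */
	goto out_unlock;
}
/* peer is safe to use while the lock is held */
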
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 7ff3737..2ffde46 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/x25.h>
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index fc91ad7..e5246fb 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -70,26 +70,29 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
while ((scratch += len, dlen -= len) > 0) {
skb_frag_t *frag;
+ struct page *page;
err = -EMSGSIZE;
if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
goto out;
frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
- frag->page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(GFP_ATOMIC);
err = -ENOMEM;
- if (!frag->page)
+ if (!page)
goto out;
+ __skb_frag_set_page(frag, page);
+
len = PAGE_SIZE;
if (dlen < len)
len = dlen;
- memcpy(page_address(frag->page), scratch, len);
-
frag->page_offset = 0;
- frag->size = len;
+ skb_frag_size_set(frag, len);
+ memcpy(skb_frag_address(frag), scratch, len);
+
skb->truesize += len;
skb->data_len += len;
skb->len += len;
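
The conversion stops poking frag->page and frag->size directly and goes through the skb_frag accessors instead, so this code keeps working if the fragment layout changes. A hedged sketch of appending one page of payload with those helpers (data/len are illustrative, error paths trimmed):

skb_frag_t *frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags];
struct page *page = alloc_page(GFP_ATOMIC);

if (!page)
	return -ENOMEM;

__skb_frag_set_page(frag, page);	/* instead of frag->page = ... */
frag->page_offset = 0;
skb_frag_size_set(frag, len);		/* instead of frag->size = ... */
memcpy(skb_frag_address(frag), data, len);

skb_shinfo(skb)->nr_frags++;
skb->truesize += len;
skb->data_len += len;
skb->len      += len;
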
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index 58d9ae0..d0a1af8 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -12,6 +12,7 @@
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/export.h>
#include <net/snmp.h>
#include <net/xfrm.h>
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index b11ea69..39e02c5 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -18,6 +18,7 @@
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <linux/export.h>
#include <net/xfrm.h>
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq)
@@ -203,8 +204,6 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
if (!replay_esn->replay_window)
return 0;
- pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
if (unlikely(seq == 0))
goto err;
@@ -216,19 +215,18 @@ static int xfrm_replay_check_bmp(struct xfrm_state *x,
goto err;
}
- if (pos >= diff) {
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- }
+
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+
return 0;
err_replay:
@@ -259,39 +257,27 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
}
+ bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
} else {
diff = replay_esn->seq - seq;
- if (pos >= diff) {
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- }
}
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
@@ -390,8 +376,6 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
if (!wsize)
return 0;
- pos = (replay_esn->seq - 1) % replay_esn->replay_window;
-
if (unlikely(seq == 0 && replay_esn->seq_hi == 0 &&
(replay_esn->seq < replay_esn->replay_window - 1)))
goto err;
@@ -415,19 +399,18 @@ static int xfrm_replay_check_esn(struct xfrm_state *x,
goto err;
}
- if (pos >= diff) {
+ pos = (replay_esn->seq - 1) % replay_esn->replay_window;
+
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- if (replay_esn->bmp[nr] & (1U << bitnr))
- goto err_replay;
- }
+
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ if (replay_esn->bmp[nr] & (1U << bitnr))
+ goto err_replay;
+
return 0;
err_replay:
@@ -465,22 +448,13 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
bitnr = bitnr & 0x1F;
replay_esn->bmp[nr] &= ~(1U << bitnr);
}
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
} else {
nr = (replay_esn->replay_window - 1) >> 5;
for (i = 0; i <= nr; i++)
replay_esn->bmp[i] = 0;
-
- bitnr = (pos + diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
}
+ bitnr = (pos + diff) % replay_esn->replay_window;
replay_esn->seq = seq;
if (unlikely(wrap > 0))
@@ -488,19 +462,16 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
} else {
diff = replay_esn->seq - seq;
- if (pos >= diff) {
+ if (pos >= diff)
bitnr = (pos - diff) % replay_esn->replay_window;
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- } else {
+ else
bitnr = replay_esn->replay_window - (diff - pos);
- nr = bitnr >> 5;
- bitnr = bitnr & 0x1F;
- replay_esn->bmp[nr] |= (1U << bitnr);
- }
}
+ nr = bitnr >> 5;
+ bitnr = bitnr & 0x1F;
+ replay_esn->bmp[nr] |= (1U << bitnr);
+
if (xfrm_aevent_is_on(xs_net(x)))
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
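
Both the check and advance paths now compute bitnr once per branch and share the word/bit split: nr = bitnr >> 5 selects the 32-bit word of the replay bitmap and bitnr & 0x1F the bit within it. The index math, as a standalone sketch (replay_bmp_set is illustrative):

static inline void replay_bmp_set(u32 *bmp, u32 bitnr)
{
	u32 nr  = bitnr >> 5;	/* which u32 word         */
	u32 bit = bitnr & 0x1F;	/* which bit in that word */

	bmp[nr] |= (1U << bit);
}
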
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 0256b8a..d0a42df 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2927,7 +2927,7 @@ static int __net_init xfrm_user_net_init(struct net *net)
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
- rcu_assign_pointer(net->xfrm.nlsk, nlsk);
+ RCU_INIT_POINTER(net->xfrm.nlsk, nlsk);
return 0;
}
@@ -2935,7 +2935,7 @@ static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
struct net *net;
list_for_each_entry(net, net_exit_list, exit_list)
- rcu_assign_pointer(net->xfrm.nlsk, NULL);
+ RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
synchronize_net();
list_for_each_entry(net, net_exit_list, exit_list)
netlink_kernel_release(net->xfrm.nlsk_stash);
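
rcu_assign_pointer() includes a write barrier so readers that see the new pointer also see the pointee fully initialized; RCU_INIT_POINTER() omits that barrier and is appropriate when no such ordering is needed, as here: the init path publishes before any reader can look, and the exit path stores NULL, which carries no pointee to order against. A minimal sketch under those assumptions (gp and struct foo are hypothetical):

struct foo __rcu *gp;

static void publish_early(struct foo *f)
{
	/* Nothing can dereference gp yet, so no barrier is needed. */
	RCU_INIT_POINTER(gp, f);
}

static void retract(void)
{
	RCU_INIT_POINTER(gp, NULL);	/* NULL needs no ordering */
	synchronize_rcu();		/* let readers drain */
}
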