Diffstat (limited to 'drivers/staging/batman-adv')
-rw-r--r--  drivers/staging/batman-adv/CHANGELOG             |  63
-rw-r--r--  drivers/staging/batman-adv/Makefile              |   2
-rw-r--r--  drivers/staging/batman-adv/README                |  50
-rw-r--r--  drivers/staging/batman-adv/TODO                  |   9
-rw-r--r--  drivers/staging/batman-adv/aggregation.c         |  70
-rw-r--r--  drivers/staging/batman-adv/bat_debugfs.c         |   2
-rw-r--r--  drivers/staging/batman-adv/bat_sysfs.c           | 140
-rw-r--r--  drivers/staging/batman-adv/bitarray.c            |  22
-rw-r--r--  drivers/staging/batman-adv/bitarray.h            |   7
-rw-r--r--  drivers/staging/batman-adv/hard-interface.c      | 291
-rw-r--r--  drivers/staging/batman-adv/hard-interface.h      |  19
-rw-r--r--  drivers/staging/batman-adv/hash.c                |   6
-rw-r--r--  drivers/staging/batman-adv/hash.h                |   4
-rw-r--r--  drivers/staging/batman-adv/icmp_socket.c         |  73
-rw-r--r--  drivers/staging/batman-adv/main.c                | 146
-rw-r--r--  drivers/staging/batman-adv/main.h                |  30
-rw-r--r--  drivers/staging/batman-adv/originator.c          | 186
-rw-r--r--  drivers/staging/batman-adv/originator.h          |   8
-rw-r--r--  drivers/staging/batman-adv/packet.h              |  28
-rw-r--r--  drivers/staging/batman-adv/routing.c             | 448
-rw-r--r--  drivers/staging/batman-adv/routing.h             |  16
-rw-r--r--  drivers/staging/batman-adv/send.c                | 175
-rw-r--r--  drivers/staging/batman-adv/send.h                |   7
-rw-r--r--  drivers/staging/batman-adv/soft-interface.c      | 241
-rw-r--r--  drivers/staging/batman-adv/soft-interface.h      |  13
-rw-r--r--  drivers/staging/batman-adv/sysfs-class-net-mesh  |   8
-rw-r--r--  drivers/staging/batman-adv/translation-table.c   | 271
-rw-r--r--  drivers/staging/batman-adv/translation-table.h   |  30
-rw-r--r--  drivers/staging/batman-adv/types.h               | 103
-rw-r--r--  drivers/staging/batman-adv/unicast.c             | 269
-rw-r--r--  drivers/staging/batman-adv/unicast.h             |  39
-rw-r--r--  drivers/staging/batman-adv/vis.c                 | 534
-rw-r--r--  drivers/staging/batman-adv/vis.h                 |  27
33 files changed, 1989 insertions, 1348 deletions
diff --git a/drivers/staging/batman-adv/CHANGELOG b/drivers/staging/batman-adv/CHANGELOG
deleted file mode 100644
index 86450b4..0000000
--- a/drivers/staging/batman-adv/CHANGELOG
+++ /dev/null
@@ -1,63 +0,0 @@
-batman-adv 2010.0.0:
-
-* support latest kernels (2.6.21 - 2.6.35)
-* further code refactoring and cleaning for coding style
-* move from procfs based configuration to sysfs
-* reorganized sequence number handling
-* limit queue lengths for batman and broadcast packets
-* many bugs (endless loop and rogue packets on shutdown, wrong tcpdump output,
- missing frees in error situations, sleeps in atomic contexts) squashed
-
- -- Fri, 18 Jun 2010 21:34:26 +0200
-
-batman-adv 0.2.1:
-
-* support latest kernels (2.6.20 - 2.6.33)
-* receive packets directly using skbs, remove old sockets and threads
-* fix various regressions in the vis server
-* don't disable interrupts while sending
-* replace internal logging mechanism with standard kernel logging
-* move vis formats into userland, one general format remains in the kernel
-* allow MAC address to be set, correctly initialize them
-* code refactoring and cleaning for coding style
-* many bugs (null pointers, locking, hash iterators) squashed
-
- -- Sun, 21 Mar 2010 20:46:47 +0100
-
-batman-adv 0.2:
-
-* support latest kernels (2.6.20 - 2.6.31)
-* temporary routing loops / TTL code bug / ghost entries in originator table fixed
-* internal packet queue for packet aggregation & transmission retry (ARQ)
- for payload broadcasts added
-* interface detection converted to event based handling to avoid timers
-* major linux coding style adjustments applied
-* all kernel version compatibility functions has been moved to compat.h
-* use random ethernet address generator from the kernel
-* /sys/module/batman_adv/version to export kernel module version
-* vis: secondary interface export for dot draw format + JSON output format added
-* many bugs (alignment issues, race conditions, deadlocks, etc) squashed
-
- -- Sat, 07 Nov 2009 15:44:31 +0100
-
-batman-adv 0.1:
-
-* support latest kernels (2.6.20 - 2.6.28)
-* LOTS of cleanup: locking, stack usage, memory leaks
-* Change Ethertype from 0x0842 to 0x4305
- unregistered at IEEE, if you want to sponsor an official Ethertype ($2500)
- please contact us
-
- -- Sun, 28 Dec 2008 00:44:31 +0100
-
-batman-adv 0.1-beta:
-
-* layer 2 meshing based on BATMAN TQ algorithm in kernelland
-* operates on any ethernet like interface
-* supports IPv4, IPv6, DHCP, etc
-* is controlled via /proc/net/batman-adv/
-* bridging via brctl is supported
-* interface watchdog (interfaces can be (de)activated dynamically)
-* offers integrated vis server which meshes/syncs with other vis servers in range
-
- -- Mon, 05 May 2008 14:10:04 +0200
diff --git a/drivers/staging/batman-adv/Makefile b/drivers/staging/batman-adv/Makefile
index e9817b5..78924283 100644
--- a/drivers/staging/batman-adv/Makefile
+++ b/drivers/staging/batman-adv/Makefile
@@ -19,4 +19,4 @@
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-objs := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o
+batman-adv-y := main.o bat_debugfs.o bat_sysfs.o send.o routing.o soft-interface.o icmp_socket.o translation-table.o bitarray.o hash.o ring_buffer.o vis.o hard-interface.o aggregation.o originator.o unicast.o
diff --git a/drivers/staging/batman-adv/README b/drivers/staging/batman-adv/README
index 7192b7f..7c878bb0 100644
--- a/drivers/staging/batman-adv/README
+++ b/drivers/staging/batman-adv/README
@@ -1,4 +1,4 @@
-[state: 12-06-2010]
+[state: 04-09-2010]
BATMAN-ADV
----------
@@ -67,15 +67,21 @@ All mesh wide settings can be found in batman's own interface
folder:
# ls /sys/class/net/bat0/mesh/
-# aggregate_ogm originators transtable_global vis_mode
-# orig_interval transtable_local vis_data
+# aggregated_ogms bonding orig_interval vis_mode
+
+
+There is a special folder for debugging informations:
+
+# ls /sys/kernel/debug/batman_adv/bat0/
+# originators socket transtable_global transtable_local
+# vis_data
Some of the files contain all sort of status information regard-
ing the mesh network. For example, you can view the table of
originators (mesh participants) with:
-# cat /sys/class/net/bat0/mesh/originators
+# cat /sys/kernel/debug/batman_adv/bat0/originators
Other files allow to change batman's behaviour to better fit your
requirements. For instance, you can check the current originator
@@ -83,7 +89,7 @@ interval (value in milliseconds which determines how often batman
sends its broadcast packets):
# cat /sys/class/net/bat0/mesh/orig_interval
-# status: 1000
+# 1000
and also change its value:
@@ -137,7 +143,7 @@ at least very similar) data.
When configured as server, you can get a topology snapshot of
your mesh:
-# cat /sys/class/net/bat0/mesh/vis_data
+# cat /sys/kernel/debug/batman_adv/bat0/vis_data
This raw output is intended to be easily parsable and convertable
with other tools. Have a look at the batctl README if you want a
@@ -181,32 +187,25 @@ enabled when compiling the batman-adv module. When building bat-
man-adv as part of kernel, use "make menuconfig" and enable the
option "B.A.T.M.A.N. debugging".
+Those additional debug messages can be accessed using a special
+file in debugfs
+
+# cat /sys/kernel/debug/batman_adv/bat0/log
+
The additional debug output is by default disabled. It can be en-
-abled either at kernel modules load time or during run time. To
-enable debug output at module load time, add the module parameter
-debug=<value>. <value> can take one of four values.
+abled during run time. Following log_levels are defined:
0 - All debug output disabled
1 - Enable messages related to routing / flooding / broadcasting
2 - Enable route or hna added / changed / deleted
3 - Enable all messages
-e.g.
-
-# modprobe batman-adv debug=2
-
-will load the module and enable debug messages for when routes or
-HNAs change.
-
-The debug output can also be changed at runtime using the file
-/sys/module/batman-adv/parameters/debug. e.g.
-
-# echo 2 > /sys/module/batman-adv/parameters/debug
+The debug output can be changed at runtime using the file
+/sys/class/net/bat0/mesh/log_level. e.g.
-enables debug messages for when routes or HNAs
+# echo 2 > /sys/class/net/bat0/mesh/log_level
-The debug output is sent to the kernel logs. So try dmesg, lo-
-gread, etc to see the debug messages.
+will enable debug messages for when routes or HNAs change.
BATCTL
@@ -230,8 +229,9 @@ CONTACT
Please send us comments, experiences, questions, anything :)
IRC: #batman on irc.freenode.org
-Mailing-list: b.a.t.m.a.n@open-mesh.net (optional subscription
- at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list: b.a.t.m.a.n@lists.open-mesh.org
+ (optional subscription at
+ https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
You can also contact the Authors:
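
The README now points the runtime debug switch at the per-mesh file
/sys/class/net/bat0/mesh/log_level instead of a module parameter. As a
companion to the cat/echo examples above, here is a minimal userspace sketch
(assuming the mesh interface is named bat0 and batman-adv is loaded) that
reads the current level and then selects level 2:

/* Minimal userspace sketch: read and set the batman-adv log level via sysfs.
 * Assumes the mesh interface is called "bat0"; adjust the path otherwise. */
#include <stdio.h>
#include <stdlib.h>

#define LOG_LEVEL_PATH "/sys/class/net/bat0/mesh/log_level"

int main(void)
{
	FILE *f = fopen(LOG_LEVEL_PATH, "r");
	char buf[16];

	if (!f) {
		perror("open " LOG_LEVEL_PATH);
		return EXIT_FAILURE;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current log_level: %s", buf);
	fclose(f);

	/* enable route/HNA change messages (level 2) */
	f = fopen(LOG_LEVEL_PATH, "w");
	if (!f) {
		perror("open " LOG_LEVEL_PATH " for writing");
		return EXIT_FAILURE;
	}
	fputs("2\n", f);
	fclose(f);
	return EXIT_SUCCESS;
}
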
diff --git a/drivers/staging/batman-adv/TODO b/drivers/staging/batman-adv/TODO
index 9c5aea2..11c384f 100644
--- a/drivers/staging/batman-adv/TODO
+++ b/drivers/staging/batman-adv/TODO
@@ -1,6 +1,7 @@
- * Use hweight* for hamming weight calculation
- * Save/cache packets direktly as skb instead of using a normal memory region
- and copying it in a skb using send_raw_packet and similar functions
+ * remove own list functionality from hash
+ * use hlist_head, hlist_node in hash
+ * don't use callbacks for compare+choose in hash
+ * think about more efficient ways instead of abstraction of hash
* Request a new review
* Process the comments from the review
* Move into mainline proper
@@ -9,5 +10,5 @@ Please send all patches to:
Marek Lindner <lindner_marek@yahoo.de>
Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
Andrew Lunn <andrew@lunn.ch>
- b.a.t.m.a.n@lists.open-mesh.net
+ b.a.t.m.a.n@lists.open-mesh.org
Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/batman-adv/aggregation.c b/drivers/staging/batman-adv/aggregation.c
index 9862d16..08624d4 100644
--- a/drivers/staging/batman-adv/aggregation.c
+++ b/drivers/staging/batman-adv/aggregation.c
@@ -39,7 +39,7 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
struct forw_packet *forw_packet)
{
struct batman_packet *batman_packet =
- (struct batman_packet *)forw_packet->packet_buff;
+ (struct batman_packet *)forw_packet->skb->data;
int aggregated_bytes = forw_packet->packet_len + packet_len;
/**
@@ -97,21 +97,19 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
/* create a new aggregated packet and add this packet to it */
-static void new_aggregated_packet(unsigned char *packet_buff,
- int packet_len,
- unsigned long send_time,
- bool direct_link,
- struct batman_if *if_incoming,
- int own_packet)
+static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
+ unsigned long send_time, bool direct_link,
+ struct batman_if *if_incoming,
+ int own_packet)
{
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct forw_packet *forw_packet_aggr;
unsigned long flags;
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ unsigned char *skb_buff;
/* own packet should always be scheduled */
if (!own_packet) {
- if (!atomic_dec_not_zero(&batman_queue_left)) {
+ if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
bat_dbg(DBG_BATMAN, bat_priv,
"batman packet queue full\n");
return;
@@ -121,27 +119,32 @@ static void new_aggregated_packet(unsigned char *packet_buff,
forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet_aggr) {
if (!own_packet)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
return;
}
- forw_packet_aggr->packet_buff = kmalloc(MAX_AGGREGATION_BYTES,
- GFP_ATOMIC);
- if (!forw_packet_aggr->packet_buff) {
+ if ((atomic_read(&bat_priv->aggregation_enabled)) &&
+ (packet_len < MAX_AGGREGATION_BYTES))
+ forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
+ sizeof(struct ethhdr));
+ else
+ forw_packet_aggr->skb = dev_alloc_skb(packet_len +
+ sizeof(struct ethhdr));
+
+ if (!forw_packet_aggr->skb) {
if (!own_packet)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
kfree(forw_packet_aggr);
return;
}
+ skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
INIT_HLIST_NODE(&forw_packet_aggr->list);
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
forw_packet_aggr->packet_len = packet_len;
- memcpy(forw_packet_aggr->packet_buff,
- packet_buff,
- forw_packet_aggr->packet_len);
+ memcpy(skb_buff, packet_buff, packet_len);
- forw_packet_aggr->skb = NULL;
forw_packet_aggr->own = own_packet;
forw_packet_aggr->if_incoming = if_incoming;
forw_packet_aggr->num_packets = 0;
@@ -153,9 +156,9 @@ static void new_aggregated_packet(unsigned char *packet_buff,
forw_packet_aggr->direct_link_flags |= 1;
/* add new packet to packet list */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
- hlist_add_head(&forw_packet_aggr->list, &forw_bat_list);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
+ hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
@@ -171,8 +174,10 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
int packet_len,
bool direct_link)
{
- memcpy((forw_packet_aggr->packet_buff + forw_packet_aggr->packet_len),
- packet_buff, packet_len);
+ unsigned char *skb_buff;
+
+ skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
+ memcpy(skb_buff, packet_buff, packet_len);
forw_packet_aggr->packet_len += packet_len;
forw_packet_aggr->num_packets++;
@@ -199,11 +204,11 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
unsigned long flags;
/* find position for the packet in the forward queue */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
/* own packets are not to be aggregated */
if ((atomic_read(&bat_priv->aggregation_enabled)) && (!own_packet)) {
- hlist_for_each_entry(forw_packet_pos, tmp_node, &forw_bat_list,
- list) {
+ hlist_for_each_entry(forw_packet_pos, tmp_node,
+ &bat_priv->forw_bat_list, list) {
if (can_aggregate_with(batman_packet,
packet_len,
send_time,
@@ -220,7 +225,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
* suitable aggregation packet found */
if (forw_packet_aggr == NULL) {
/* the following section can run without the lock */
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* if we could not aggregate this packet with one of the others
@@ -238,7 +243,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
aggregate(forw_packet_aggr,
packet_buff, packet_len,
direct_link);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
}
@@ -252,9 +257,7 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
batman_packet = (struct batman_packet *)packet_buff;
- while (aggregated_packet(buff_pos, packet_len,
- batman_packet->num_hna)) {
-
+ do {
/* network to host order for our 32bit seqno, and the
orig_interval. */
batman_packet->seqno = ntohl(batman_packet->seqno);
@@ -267,5 +270,6 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
batman_packet = (struct batman_packet *)
(packet_buff + buff_pos);
- }
+ } while (aggregated_packet(buff_pos, packet_len,
+ batman_packet->num_hna));
}
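
The last hunk above turns the aggregation parser into a do/while loop, so the
first batman packet in a received buffer is always handled before the bounds
check looks at the (possibly absent) next one. A standalone sketch of that
parse-loop shape, using an illustrative record layout rather than the real
batman_packet:

/* Standalone sketch of the do/while parse loop used for aggregated packets.
 * "struct record" and its fields are illustrative, not the real structures. */
#include <stdio.h>
#include <stdint.h>

struct record {
	uint8_t len;	/* payload bytes following this header */
	uint8_t data[];
};

/* true while another complete record starts at buff_pos */
static int more_records(int buff_pos, int buf_len, const struct record *next)
{
	return buff_pos + (int)sizeof(*next) <= buf_len &&
	       buff_pos + (int)sizeof(*next) + next->len <= buf_len;
}

static void parse(const uint8_t *buf, int buf_len)
{
	const struct record *rec = (const struct record *)buf;
	int buff_pos = 0;

	do {	/* the first record is always handled */
		printf("record with %u payload bytes\n", (unsigned)rec->len);
		buff_pos += sizeof(*rec) + rec->len;
		rec = (const struct record *)(buf + buff_pos);
	} while (more_records(buff_pos, buf_len, rec));
}

int main(void)
{
	uint8_t buf[16] = { 3, 'a', 'b', 'c', 2, 'x', 'y' };

	parse(buf, 7);	/* two concatenated records */
	return 0;
}
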
diff --git a/drivers/staging/batman-adv/bat_debugfs.c b/drivers/staging/batman-adv/bat_debugfs.c
index 507da68..57f84a9 100644
--- a/drivers/staging/batman-adv/bat_debugfs.c
+++ b/drivers/staging/batman-adv/bat_debugfs.c
@@ -90,6 +90,7 @@ int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
static int log_open(struct inode *inode, struct file *file)
{
+ nonseekable_open(inode, file);
file->private_data = inode->i_private;
inc_module_count();
return 0;
@@ -174,6 +175,7 @@ static const struct file_operations log_fops = {
.release = log_release,
.read = log_read,
.poll = log_poll,
+ .llseek = no_llseek,
};
static int debug_log_setup(struct bat_priv *bat_priv)
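
The two hunks above make the debugfs log explicitly non-seekable:
nonseekable_open() in the open handler and .llseek = no_llseek in the
file_operations. A small fragment sketching that pattern in isolation
(illustrative names, not the batman-adv code; it only compiles inside a
kernel build):

/* Sketch of a non-seekable, read-only file_operations table, matching the
 * pattern applied to log_fops above (illustrative, not the batman-adv code). */
#include <linux/fs.h>
#include <linux/module.h>

static int example_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return nonseekable_open(inode, file);
}

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return 0;	/* nothing to report in this sketch */
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.read	= example_read,
	.llseek	= no_llseek,	/* reject lseek() on this file */
};
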
diff --git a/drivers/staging/batman-adv/bat_sysfs.c b/drivers/staging/batman-adv/bat_sysfs.c
index 05ca15a..bc17fb8 100644
--- a/drivers/staging/batman-adv/bat_sysfs.c
+++ b/drivers/staging/batman-adv/bat_sysfs.c
@@ -134,6 +134,56 @@ static ssize_t store_bond(struct kobject *kobj, struct attribute *attr,
return count;
}
+static ssize_t show_frag(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct bat_priv *bat_priv = netdev_priv(to_net_dev(dev));
+ int frag_status = atomic_read(&bat_priv->frag_enabled);
+
+ return sprintf(buff, "%s\n",
+ frag_status == 0 ? "disabled" : "enabled");
+}
+
+static ssize_t store_frag(struct kobject *kobj, struct attribute *attr,
+ char *buff, size_t count)
+{
+ struct device *dev = to_dev(kobj->parent);
+ struct net_device *net_dev = to_net_dev(dev);
+ struct bat_priv *bat_priv = netdev_priv(net_dev);
+ int frag_enabled_tmp = -1;
+
+ if (((count == 2) && (buff[0] == '1')) ||
+ (strncmp(buff, "enable", 6) == 0))
+ frag_enabled_tmp = 1;
+
+ if (((count == 2) && (buff[0] == '0')) ||
+ (strncmp(buff, "disable", 7) == 0))
+ frag_enabled_tmp = 0;
+
+ if (frag_enabled_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ bat_err(net_dev,
+ "Invalid parameter for 'fragmentation' setting on mesh"
+ "received: %s\n", buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->frag_enabled) == frag_enabled_tmp)
+ return count;
+
+ bat_info(net_dev, "Changing fragmentation from: %s to: %s\n",
+ atomic_read(&bat_priv->frag_enabled) == 1 ?
+ "enabled" : "disabled",
+ frag_enabled_tmp == 1 ? "enabled" : "disabled");
+
+ atomic_set(&bat_priv->frag_enabled, (unsigned)frag_enabled_tmp);
+ update_min_mtu(net_dev);
+ return count;
+}
+
static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
char *buff)
{
@@ -279,6 +329,7 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
static BAT_ATTR(aggregated_ogms, S_IRUGO | S_IWUSR,
show_aggr_ogms, store_aggr_ogms);
static BAT_ATTR(bonding, S_IRUGO | S_IWUSR, show_bond, store_bond);
+static BAT_ATTR(fragmentation, S_IRUGO | S_IWUSR, show_frag, store_frag);
static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
static BAT_ATTR(orig_interval, S_IRUGO | S_IWUSR,
show_orig_interval, store_orig_interval);
@@ -289,6 +340,7 @@ static BAT_ATTR(log_level, S_IRUGO | S_IWUSR, show_log_level, store_log_level);
static struct bat_attribute *mesh_attrs[] = {
&bat_attr_aggregated_ogms,
&bat_attr_bonding,
+ &bat_attr_fragmentation,
&bat_attr_vis_mode,
&bat_attr_orig_interval,
#ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -304,17 +356,6 @@ int sysfs_add_meshif(struct net_device *dev)
struct bat_attribute **bat_attr;
int err;
- /* FIXME: should be done in the general mesh setup
- routine as soon as we have it */
- atomic_set(&bat_priv->aggregation_enabled, 1);
- atomic_set(&bat_priv->bonding_enabled, 0);
- atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
- atomic_set(&bat_priv->orig_interval, 1000);
- atomic_set(&bat_priv->log_level, 0);
-
- bat_priv->primary_if = NULL;
- bat_priv->num_ifaces = 0;
-
bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
batif_kobject);
if (!bat_priv->mesh_obj) {
@@ -364,13 +405,17 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+ ssize_t length;
if (!batman_if)
return 0;
- return sprintf(buff, "%s\n",
- batman_if->if_status == IF_NOT_IN_USE ?
- "none" : "bat0");
+ length = sprintf(buff, "%s\n", batman_if->if_status == IF_NOT_IN_USE ?
+ "none" : batman_if->soft_iface->name);
+
+ hardif_put(batman_if);
+
+ return length;
}
static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
@@ -380,36 +425,51 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
struct net_device *net_dev = to_net_dev(dev);
struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
int status_tmp = -1;
+ int ret;
if (!batman_if)
return count;
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strlen(buff) >= IFNAMSIZ) {
+ pr_err("Invalid parameter for 'mesh_iface' setting received: "
+ "interface name too long '%s'\n", buff);
+ hardif_put(batman_if);
+ return -EINVAL;
+ }
+
if (strncmp(buff, "none", 4) == 0)
status_tmp = IF_NOT_IN_USE;
-
- if (strncmp(buff, "bat0", 4) == 0)
+ else
status_tmp = IF_I_WANT_YOU;
- if (status_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- pr_err("Invalid parameter for 'mesh_iface' setting received: "
- "%s\n", buff);
- return -EINVAL;
+ if ((batman_if->if_status == status_tmp) || ((batman_if->soft_iface) &&
+ (strncmp(batman_if->soft_iface->name, buff, IFNAMSIZ) == 0))) {
+ hardif_put(batman_if);
+ return count;
}
- if ((batman_if->if_status == status_tmp) ||
- ((status_tmp == IF_I_WANT_YOU) &&
- (batman_if->if_status != IF_NOT_IN_USE)))
+ if (status_tmp == IF_NOT_IN_USE) {
+ rtnl_lock();
+ hardif_disable_interface(batman_if);
+ rtnl_unlock();
+ hardif_put(batman_if);
return count;
+ }
- if (status_tmp == IF_I_WANT_YOU)
- status_tmp = hardif_enable_interface(batman_if);
- else
+ /* if the interface already is in use */
+ if (batman_if->if_status != IF_NOT_IN_USE) {
+ rtnl_lock();
hardif_disable_interface(batman_if);
+ rtnl_unlock();
+ }
- return (status_tmp < 0 ? status_tmp : count);
+ ret = hardif_enable_interface(batman_if, buff);
+ hardif_put(batman_if);
+
+ return ret;
}
static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
@@ -418,23 +478,33 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
+ ssize_t length;
if (!batman_if)
return 0;
switch (batman_if->if_status) {
case IF_TO_BE_REMOVED:
- return sprintf(buff, "disabling\n");
+ length = sprintf(buff, "disabling\n");
+ break;
case IF_INACTIVE:
- return sprintf(buff, "inactive\n");
+ length = sprintf(buff, "inactive\n");
+ break;
case IF_ACTIVE:
- return sprintf(buff, "active\n");
+ length = sprintf(buff, "active\n");
+ break;
case IF_TO_BE_ACTIVATED:
- return sprintf(buff, "enabling\n");
+ length = sprintf(buff, "enabling\n");
+ break;
case IF_NOT_IN_USE:
default:
- return sprintf(buff, "not in use\n");
+ length = sprintf(buff, "not in use\n");
+ break;
}
+
+ hardif_put(batman_if);
+
+ return length;
}
static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
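
store_frag() accepts either the characters '1'/'0' or the words
"enable"/"disable" from sysfs. The same parsing rule, pulled out into a
standalone sketch that can be compiled and tested on its own (parse_enable()
is an illustrative helper, not a kernel function):

/* Standalone sketch of the enable/disable parsing used by store_frag().
 * parse_enable() is an illustrative helper, not a kernel function. */
#include <assert.h>
#include <stddef.h>
#include <string.h>

/* returns 1 for enabled, 0 for disabled, -1 for invalid input */
static int parse_enable(const char *buff, size_t count)
{
	if ((count == 2 && buff[0] == '1') || strncmp(buff, "enable", 6) == 0)
		return 1;
	if ((count == 2 && buff[0] == '0') || strncmp(buff, "disable", 7) == 0)
		return 0;
	return -1;
}

int main(void)
{
	assert(parse_enable("1\n", 2) == 1);	  /* echo 1 > fragmentation */
	assert(parse_enable("0\n", 2) == 0);	  /* echo 0 > fragmentation */
	assert(parse_enable("enable\n", 7) == 1);
	assert(parse_enable("disable\n", 8) == 0);
	assert(parse_enable("maybe\n", 6) == -1);
	return 0;
}
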
diff --git a/drivers/staging/batman-adv/bitarray.c b/drivers/staging/batman-adv/bitarray.c
index dd4193c..814274f 100644
--- a/drivers/staging/batman-adv/bitarray.c
+++ b/drivers/staging/batman-adv/bitarray.c
@@ -22,6 +22,8 @@
#include "main.h"
#include "bitarray.h"
+#include <linux/bitops.h>
+
/* returns true if the corresponding bit in the given seq_bits indicates true
* and curr_seqno is within range of last_seqno */
uint8_t get_bit_status(TYPE_OF_WORD *seq_bits, uint32_t last_seqno,
@@ -125,11 +127,10 @@ static void bit_reset_window(TYPE_OF_WORD *seq_bits)
* 1 if the window was moved (either new or very old)
* 0 if the window was not moved/shifted.
*/
-char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
- int8_t set_mark)
+char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
+ int32_t seq_num_diff, int8_t set_mark)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = (struct bat_priv *)priv;
/* sequence number is slightly older. We already got a sequence number
* higher than this one, so we just mark it. */
@@ -187,21 +188,14 @@ char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
}
/* count the hamming weight, how many good packets did we receive? just count
- * the 1's. The inner loop uses the Kernighan algorithm, see
- * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetKernighan
+ * the 1's.
*/
int bit_packet_count(TYPE_OF_WORD *seq_bits)
{
int i, hamming = 0;
- TYPE_OF_WORD word;
- for (i = 0; i < NUM_WORDS; i++) {
- word = seq_bits[i];
+ for (i = 0; i < NUM_WORDS; i++)
+ hamming += hweight_long(seq_bits[i]);
- while (word) {
- word &= word-1;
- hamming++;
- }
- }
return hamming;
}
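
The bitarray change drops the hand-rolled Kernighan loop in favour of
hweight_long(). A userspace sketch showing the two approaches agree, with
the compiler builtin __builtin_popcountl() standing in for the kernel helper:

/* Userspace sketch: Kernighan popcount vs. a builtin popcount standing in
 * for the kernel's hweight_long(). */
#include <assert.h>
#include <stdio.h>

static int kernighan(unsigned long word)
{
	int hamming = 0;

	while (word) {
		word &= word - 1;	/* clear the lowest set bit */
		hamming++;
	}
	return hamming;
}

int main(void)
{
	unsigned long seq_bits[] = { 0x0UL, 0x1UL, 0xf0f0UL, ~0UL };
	int i;

	for (i = 0; i < 4; i++)
		assert(kernighan(seq_bits[i]) ==
		       __builtin_popcountl(seq_bits[i]));

	printf("both methods agree\n");
	return 0;
}
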
diff --git a/drivers/staging/batman-adv/bitarray.h b/drivers/staging/batman-adv/bitarray.h
index 01897d6..77b1e61 100644
--- a/drivers/staging/batman-adv/bitarray.h
+++ b/drivers/staging/batman-adv/bitarray.h
@@ -22,7 +22,8 @@
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
-/* you should choose something big, if you don't want to waste cpu */
+/* you should choose something big, if you don't want to waste cpu
+ * and keep the type in sync with bit_packet_count */
#define TYPE_OF_WORD unsigned long
#define WORD_BIT_SIZE (sizeof(TYPE_OF_WORD) * 8)
@@ -37,8 +38,8 @@ void bit_mark(TYPE_OF_WORD *seq_bits, int32_t n);
/* receive and process one packet, returns 1 if received seq_num is considered
* new, 0 if old */
-char bit_get_packet(TYPE_OF_WORD *seq_bits, int32_t seq_num_diff,
- int8_t set_mark);
+char bit_get_packet(void *priv, TYPE_OF_WORD *seq_bits,
+ int32_t seq_num_diff, int8_t set_mark);
/* count the hamming weight, how many good packets did we receive? */
int bit_packet_count(TYPE_OF_WORD *seq_bits);
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 6e973a7..80cfa86 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -33,6 +33,9 @@
#define MIN(x, y) ((x) < (y) ? (x) : (y))
+/* protect update critical side of if_list - but not the content */
+static DEFINE_SPINLOCK(if_list_lock);
+
struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
struct batman_if *batman_if;
@@ -46,6 +49,9 @@ struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
batman_if = NULL;
out:
+ if (batman_if)
+ hardif_hold(batman_if);
+
rcu_read_unlock();
return batman_if;
}
@@ -77,13 +83,15 @@ static int is_valid_iface(struct net_device *net_dev)
return 1;
}
-static struct batman_if *get_active_batman_if(void)
+static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
struct batman_if *batman_if;
- /* TODO: should check interfaces belonging to bat_priv */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
if (batman_if->if_status == IF_ACTIVE)
goto out;
}
@@ -91,31 +99,54 @@ static struct batman_if *get_active_batman_if(void)
batman_if = NULL;
out:
+ if (batman_if)
+ hardif_hold(batman_if);
+
rcu_read_unlock();
return batman_if;
}
+static void update_primary_addr(struct bat_priv *bat_priv)
+{
+ struct vis_packet *vis_packet;
+
+ vis_packet = (struct vis_packet *)
+ bat_priv->my_vis_info->skb_packet->data;
+ memcpy(vis_packet->vis_orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(vis_packet->sender_orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+}
+
static void set_primary_if(struct bat_priv *bat_priv,
struct batman_if *batman_if)
{
struct batman_packet *batman_packet;
+ struct batman_if *old_if;
+
+ if (batman_if)
+ hardif_hold(batman_if);
+ old_if = bat_priv->primary_if;
bat_priv->primary_if = batman_if;
+ if (old_if)
+ hardif_put(old_if);
+
if (!bat_priv->primary_if)
return;
- set_main_if_addr(batman_if->net_dev->dev_addr);
-
batman_packet = (struct batman_packet *)(batman_if->packet_buff);
batman_packet->flags = PRIMARIES_FIRST_HOP;
batman_packet->ttl = TTL;
+ update_primary_addr(bat_priv);
+
/***
* hacky trick to make sure that we send the HNA information via
* our new primary interface
*/
- atomic_set(&hna_local_changed, 1);
+ atomic_set(&bat_priv->hna_local_changed, 1);
}
static bool hardif_is_iface_up(struct batman_if *batman_if)
@@ -128,11 +159,6 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
static void update_mac_addresses(struct batman_if *batman_if)
{
- if (!batman_if || !batman_if->packet_buff)
- return;
-
- addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
-
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
batman_if->net_dev->dev_addr, ETH_ALEN);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
@@ -153,49 +179,60 @@ static void check_known_mac_addr(uint8_t *addr)
continue;
pr_warning("The newly added mac address (%pM) already exists "
- "on: %s\n", addr, batman_if->dev);
+ "on: %s\n", addr, batman_if->net_dev->name);
pr_warning("It is strongly recommended to keep mac addresses "
"unique to avoid problems!\n");
}
rcu_read_unlock();
}
-int hardif_min_mtu(void)
+int hardif_min_mtu(struct net_device *soft_iface)
{
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct batman_if *batman_if;
/* allow big frames if all devices are capable to do so
* (have MTU > 1500 + BAT_HEADER_LEN) */
int min_mtu = ETH_DATA_LEN;
+ if (atomic_read(&bat_priv->frag_enabled))
+ goto out;
+
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
- if ((batman_if->if_status == IF_ACTIVE) ||
- (batman_if->if_status == IF_TO_BE_ACTIVATED))
- min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
- min_mtu);
+ if ((batman_if->if_status != IF_ACTIVE) &&
+ (batman_if->if_status != IF_TO_BE_ACTIVATED))
+ continue;
+
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
+ min_mtu = MIN(batman_if->net_dev->mtu - BAT_HEADER_LEN,
+ min_mtu);
}
rcu_read_unlock();
-
+out:
return min_mtu;
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
-void update_min_mtu(void)
+void update_min_mtu(struct net_device *soft_iface)
{
int min_mtu;
- min_mtu = hardif_min_mtu();
- if (soft_device->mtu != min_mtu)
- soft_device->mtu = min_mtu;
+ min_mtu = hardif_min_mtu(soft_iface);
+ if (soft_iface->mtu != min_mtu)
+ soft_iface->mtu = min_mtu;
}
-static void hardif_activate_interface(struct net_device *net_dev,
- struct bat_priv *bat_priv,
- struct batman_if *batman_if)
+static void hardif_activate_interface(struct batman_if *batman_if)
{
+ struct bat_priv *bat_priv;
+
if (batman_if->if_status != IF_INACTIVE)
return;
+ bat_priv = netdev_priv(batman_if->soft_iface);
+
update_mac_addresses(batman_if);
batman_if->if_status = IF_TO_BE_ACTIVATED;
@@ -206,17 +243,14 @@ static void hardif_activate_interface(struct net_device *net_dev,
if (!bat_priv->primary_if)
set_primary_if(bat_priv, batman_if);
- bat_info(net_dev, "Interface activated: %s\n", batman_if->dev);
-
- if (atomic_read(&module_state) == MODULE_INACTIVE)
- activate_module();
+ bat_info(batman_if->soft_iface, "Interface activated: %s\n",
+ batman_if->net_dev->name);
- update_min_mtu();
+ update_min_mtu(batman_if->soft_iface);
return;
}
-static void hardif_deactivate_interface(struct net_device *net_dev,
- struct batman_if *batman_if)
+static void hardif_deactivate_interface(struct batman_if *batman_if)
{
if ((batman_if->if_status != IF_ACTIVE) &&
(batman_if->if_status != IF_TO_BE_ACTIVATED))
@@ -224,26 +258,39 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
batman_if->if_status = IF_INACTIVE;
- bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
+ bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
+ batman_if->net_dev->name);
- update_min_mtu();
+ update_min_mtu(batman_if->soft_iface);
}
-int hardif_enable_interface(struct batman_if *batman_if)
+int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
if (batman_if->if_status != IF_NOT_IN_USE)
goto out;
+ batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);
+
+ if (!batman_if->soft_iface) {
+ batman_if->soft_iface = softif_create(iface_name);
+
+ if (!batman_if->soft_iface)
+ goto err;
+
+ /* dev_get_by_name() increases the reference counter for us */
+ dev_hold(batman_if->soft_iface);
+ }
+
+ bat_priv = netdev_priv(batman_if->soft_iface);
batman_if->packet_len = BAT_PACKET_LEN;
batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);
if (!batman_if->packet_buff) {
- bat_err(soft_device, "Can't add interface packet (%s): "
- "out of memory\n", batman_if->dev);
+ bat_err(batman_if->soft_iface, "Can't add interface packet "
+ "(%s): out of memory\n", batman_if->net_dev->name);
goto err;
}
@@ -260,15 +307,44 @@ int hardif_enable_interface(struct batman_if *batman_if)
batman_if->if_status = IF_INACTIVE;
orig_hash_add_if(batman_if, bat_priv->num_ifaces);
+ batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
+ batman_if->batman_adv_ptype.func = batman_skb_recv;
+ batman_if->batman_adv_ptype.dev = batman_if->net_dev;
+ hardif_hold(batman_if);
+ dev_add_pack(&batman_if->batman_adv_ptype);
+
atomic_set(&batman_if->seqno, 1);
- bat_info(soft_device, "Adding interface: %s\n", batman_if->dev);
+ atomic_set(&batman_if->frag_seqno, 1);
+ bat_info(batman_if->soft_iface, "Adding interface: %s\n",
+ batman_if->net_dev->name);
+
+ if (atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+ ETH_DATA_LEN + BAT_HEADER_LEN)
+ bat_info(batman_if->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle "
+ "the transport of batman-adv packets. Packets going "
+ "over this interface will be fragmented on layer2 "
+ "which could impact the performance. Setting the MTU "
+ "to %zi would solve the problem.\n",
+ batman_if->net_dev->name, batman_if->net_dev->mtu,
+ ETH_DATA_LEN + BAT_HEADER_LEN);
+
+ if (!atomic_read(&bat_priv->frag_enabled) && batman_if->net_dev->mtu <
+ ETH_DATA_LEN + BAT_HEADER_LEN)
+ bat_info(batman_if->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle "
+ "the transport of batman-adv packets. If you experience"
+ " problems getting traffic through try increasing the "
+ "MTU to %zi.\n",
+ batman_if->net_dev->name, batman_if->net_dev->mtu,
+ ETH_DATA_LEN + BAT_HEADER_LEN);
if (hardif_is_iface_up(batman_if))
- hardif_activate_interface(soft_device, bat_priv, batman_if);
+ hardif_activate_interface(batman_if);
else
- bat_err(soft_device, "Not using interface %s "
+ bat_err(batman_if->soft_iface, "Not using interface %s "
"(retrying later): interface not active\n",
- batman_if->dev);
+ batman_if->net_dev->name);
/* begin scheduling originator messages on that interface */
schedule_own_packet(batman_if);
@@ -282,29 +358,46 @@ err:
void hardif_disable_interface(struct batman_if *batman_if)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
if (batman_if->if_status == IF_ACTIVE)
- hardif_deactivate_interface(soft_device, batman_if);
+ hardif_deactivate_interface(batman_if);
if (batman_if->if_status != IF_INACTIVE)
return;
- bat_info(soft_device, "Removing interface: %s\n", batman_if->dev);
+ bat_info(batman_if->soft_iface, "Removing interface: %s\n",
+ batman_if->net_dev->name);
+ dev_remove_pack(&batman_if->batman_adv_ptype);
+ hardif_put(batman_if);
+
bat_priv->num_ifaces--;
orig_hash_del_if(batman_if, bat_priv->num_ifaces);
- if (batman_if == bat_priv->primary_if)
- set_primary_if(bat_priv, get_active_batman_if());
+ if (batman_if == bat_priv->primary_if) {
+ struct batman_if *new_if;
+
+ new_if = get_active_batman_if(batman_if->soft_iface);
+ set_primary_if(bat_priv, new_if);
+
+ if (new_if)
+ hardif_put(new_if);
+ }
kfree(batman_if->packet_buff);
batman_if->packet_buff = NULL;
batman_if->if_status = IF_NOT_IN_USE;
- if ((atomic_read(&module_state) == MODULE_ACTIVE) &&
- (bat_priv->num_ifaces == 0))
- deactivate_module();
+ /* delete all references to this batman_if */
+ purge_orig_ref(bat_priv);
+ purge_outstanding_packets(bat_priv, batman_if);
+ dev_put(batman_if->soft_iface);
+
+ /* nobody uses this interface anymore */
+ if (!bat_priv->num_ifaces)
+ softif_destroy(batman_if->soft_iface);
+
+ batman_if->soft_iface = NULL;
}
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
@@ -325,26 +418,28 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
goto release_dev;
}
- batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
- if (!batman_if->dev)
- goto free_if;
-
ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
if (ret)
- goto free_dev;
+ goto free_if;
batman_if->if_num = -1;
batman_if->net_dev = net_dev;
+ batman_if->soft_iface = NULL;
batman_if->if_status = IF_NOT_IN_USE;
- batman_if->packet_buff = NULL;
INIT_LIST_HEAD(&batman_if->list);
+ atomic_set(&batman_if->refcnt, 0);
+ hardif_hold(batman_if);
check_known_mac_addr(batman_if->net_dev->dev_addr);
+
+ spin_lock(&if_list_lock);
list_add_tail_rcu(&batman_if->list, &if_list);
+ spin_unlock(&if_list_lock);
+
+ /* extra reference for return */
+ hardif_hold(batman_if);
return batman_if;
-free_dev:
- kfree(batman_if->dev);
free_if:
kfree(batman_if);
release_dev:
@@ -353,18 +448,6 @@ out:
return NULL;
}
-static void hardif_free_interface(struct rcu_head *rcu)
-{
- struct batman_if *batman_if = container_of(rcu, struct batman_if, rcu);
-
- /* delete all references to this batman_if */
- purge_orig(NULL);
- purge_outstanding_packets(batman_if);
-
- kfree(batman_if->dev);
- kfree(batman_if);
-}
-
static void hardif_remove_interface(struct batman_if *batman_if)
{
/* first deactivate interface */
@@ -375,18 +458,25 @@ static void hardif_remove_interface(struct batman_if *batman_if)
return;
batman_if->if_status = IF_TO_BE_REMOVED;
+
+ /* caller must take if_list_lock */
list_del_rcu(&batman_if->list);
+ synchronize_rcu();
sysfs_del_hardif(&batman_if->hardif_obj);
- dev_put(batman_if->net_dev);
- call_rcu(&batman_if->rcu, hardif_free_interface);
+ hardif_put(batman_if);
}
void hardif_remove_interfaces(void)
{
struct batman_if *batman_if, *batman_if_tmp;
- list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list)
+ rtnl_lock();
+ spin_lock(&if_list_lock);
+ list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
hardif_remove_interface(batman_if);
+ }
+ spin_unlock(&if_list_lock);
+ rtnl_unlock();
}
static int hard_if_event(struct notifier_block *this,
@@ -394,37 +484,48 @@ static int hard_if_event(struct notifier_block *this,
{
struct net_device *net_dev = (struct net_device *)ptr;
struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv;
if (!batman_if && event == NETDEV_REGISTER)
- batman_if = hardif_add_interface(net_dev);
+ batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
case NETDEV_UP:
- hardif_activate_interface(soft_device, bat_priv, batman_if);
+ hardif_activate_interface(batman_if);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
- hardif_deactivate_interface(soft_device, batman_if);
+ hardif_deactivate_interface(batman_if);
break;
case NETDEV_UNREGISTER:
+ spin_lock(&if_list_lock);
hardif_remove_interface(batman_if);
+ spin_unlock(&if_list_lock);
break;
- case NETDEV_CHANGENAME:
+ case NETDEV_CHANGEMTU:
+ if (batman_if->soft_iface)
+ update_min_mtu(batman_if->soft_iface);
break;
case NETDEV_CHANGEADDR:
+ if (batman_if->if_status == IF_NOT_IN_USE) {
+ hardif_put(batman_if);
+ goto out;
+ }
+
check_known_mac_addr(batman_if->net_dev->dev_addr);
update_mac_addresses(batman_if);
+
+ bat_priv = netdev_priv(batman_if->soft_iface);
if (batman_if == bat_priv->primary_if)
- set_primary_if(bat_priv, batman_if);
+ update_primary_addr(bat_priv);
break;
default:
break;
};
+ hardif_put(batman_if);
out:
return NOTIFY_DONE;
@@ -435,23 +536,20 @@ out:
int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv;
struct batman_packet *batman_packet;
struct batman_if *batman_if;
int ret;
+ batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
skb = skb_share_check(skb, GFP_ATOMIC);
/* skb was released by skb_share_check() */
if (!skb)
goto err_out;
- if (atomic_read(&module_state) != MODULE_ACTIVE)
- goto err_free;
-
/* packet should hold at least type and version */
- if (unlikely(skb_headlen(skb) < 2))
+ if (unlikely(!pskb_may_pull(skb, 2)))
goto err_free;
/* expect a valid ethernet header here. */
@@ -459,8 +557,12 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
|| !skb_mac_header(skb)))
goto err_free;
- batman_if = get_batman_if_by_netdev(skb->dev);
- if (!batman_if)
+ if (!batman_if->soft_iface)
+ goto err_free;
+
+ bat_priv = netdev_priv(batman_if->soft_iface);
+
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto err_free;
/* discard frames on not active interfaces */
@@ -487,7 +589,7 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
/* batman icmp packet */
case BAT_ICMP:
- ret = recv_icmp_packet(skb);
+ ret = recv_icmp_packet(skb, batman_if);
break;
/* unicast packet */
@@ -495,14 +597,19 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
ret = recv_unicast_packet(skb, batman_if);
break;
+ /* fragmented unicast packet */
+ case BAT_UNICAST_FRAG:
+ ret = recv_ucast_frag_packet(skb, batman_if);
+ break;
+
/* broadcast packet */
case BAT_BCAST:
- ret = recv_bcast_packet(skb);
+ ret = recv_bcast_packet(skb, batman_if);
break;
/* vis packet */
case BAT_VIS:
- ret = recv_vis_packet(skb);
+ ret = recv_vis_packet(skb, batman_if);
break;
default:
ret = NET_RX_DROP;
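
batman_skb_recv() no longer looks up its interface by net_device; it recovers
the batman_if from the embedded packet_type via container_of(). A standalone
sketch of that idiom, with container_of() open-coded on top of offsetof() and
illustrative structure names:

/* Standalone sketch of the container_of() idiom used by batman_skb_recv():
 * given a pointer to a member, recover the enclosing structure.
 * "struct iface" and its fields are illustrative only. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct packet_type {
	int type;
};

struct iface {
	const char *name;
	struct packet_type ptype;	/* registered with the stack */
};

static void rx_handler(struct packet_type *pt)
{
	/* the handler only receives the packet_type, but can find its iface */
	struct iface *iface = container_of(pt, struct iface, ptype);

	printf("packet received on %s\n", iface->name);
}

int main(void)
{
	struct iface eth = { .name = "eth0", .ptype = { .type = 0x4305 } };

	rx_handler(&eth.ptype);
	assert(container_of(&eth.ptype, struct iface, ptype) == &eth);
	return 0;
}
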
diff --git a/drivers/staging/batman-adv/hard-interface.h b/drivers/staging/batman-adv/hard-interface.h
index d5640b0..d550889 100644
--- a/drivers/staging/batman-adv/hard-interface.h
+++ b/drivers/staging/batman-adv/hard-interface.h
@@ -32,14 +32,27 @@
extern struct notifier_block hard_if_notifier;
struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct batman_if *batman_if);
+int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
void hardif_disable_interface(struct batman_if *batman_if);
void hardif_remove_interfaces(void);
int batman_skb_recv(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev);
-int hardif_min_mtu(void);
-void update_min_mtu(void);
+int hardif_min_mtu(struct net_device *soft_iface);
+void update_min_mtu(struct net_device *soft_iface);
+
+static inline void hardif_hold(struct batman_if *batman_if)
+{
+ atomic_inc(&batman_if->refcnt);
+}
+
+static inline void hardif_put(struct batman_if *batman_if)
+{
+ if (atomic_dec_and_test(&batman_if->refcnt)) {
+ dev_put(batman_if->net_dev);
+ kfree(batman_if);
+ }
+}
#endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
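
hardif_hold()/hardif_put() add a plain atomic reference count to struct
batman_if: lookups such as get_batman_if_by_netdev() now return a held
reference and every caller drops it with hardif_put(), which frees the object
when the count reaches zero. A minimal userspace sketch of that lifetime
rule, using C11 atomics in place of the kernel's atomic_t (names are
illustrative):

/* Userspace sketch of the hardif_hold()/hardif_put() reference counting
 * pattern, using C11 atomics; structure and function names are illustrative. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct hard_iface {
	atomic_int refcnt;
	char name[16];
};

static void iface_hold(struct hard_iface *iface)
{
	atomic_fetch_add(&iface->refcnt, 1);
}

static void iface_put(struct hard_iface *iface)
{
	/* free once the last reference is dropped */
	if (atomic_fetch_sub(&iface->refcnt, 1) == 1) {
		printf("freeing %s\n", iface->name);
		free(iface);
	}
}

/* a lookup hands out its own reference; the caller must put it */
static struct hard_iface *iface_get(struct hard_iface *iface)
{
	iface_hold(iface);
	return iface;
}

int main(void)
{
	struct hard_iface *iface = calloc(1, sizeof(*iface));
	struct hard_iface *ref;

	snprintf(iface->name, sizeof(iface->name), "eth0");
	atomic_init(&iface->refcnt, 1);	/* reference held by the if_list */

	ref = iface_get(iface);		/* lookup takes its own reference */
	iface_put(ref);			/* lookup caller is done */
	iface_put(iface);		/* interface removed from the list */
	return 0;
}
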
diff --git a/drivers/staging/batman-adv/hash.c b/drivers/staging/batman-adv/hash.c
index 1286f8f..8ef26eb 100644
--- a/drivers/staging/batman-adv/hash.c
+++ b/drivers/staging/batman-adv/hash.c
@@ -36,7 +36,7 @@ static void hash_init(struct hashtable_t *hash)
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
* called to remove the elements inside of the hash. if you don't remove the
* elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb)
+void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg)
{
struct element_t *bucket, *last_bucket;
int i;
@@ -46,7 +46,7 @@ void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb)
while (bucket != NULL) {
if (free_cb != NULL)
- free_cb(bucket->data);
+ free_cb(bucket->data, arg);
last_bucket = bucket;
bucket = bucket->next;
@@ -300,7 +300,7 @@ struct hashtable_t *hash_resize(struct hashtable_t *hash, int size)
/* remove hash and eventual overflow buckets but not the content
* itself. */
- hash_delete(hash, NULL);
+ hash_delete(hash, NULL, NULL);
return new_hash;
}
diff --git a/drivers/staging/batman-adv/hash.h b/drivers/staging/batman-adv/hash.h
index c483e11..2c8e176 100644
--- a/drivers/staging/batman-adv/hash.h
+++ b/drivers/staging/batman-adv/hash.h
@@ -30,7 +30,7 @@
typedef int (*hashdata_compare_cb)(void *, void *);
typedef int (*hashdata_choose_cb)(void *, int);
-typedef void (*hashdata_free_cb)(void *);
+typedef void (*hashdata_free_cb)(void *, void *);
struct element_t {
void *data; /* pointer to the data */
@@ -70,7 +70,7 @@ void *hash_remove_bucket(struct hashtable_t *hash, struct hash_it_t *hash_it_t);
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
* called to remove the elements inside of the hash. if you don't remove the
* elements, memory might be leaked. */
-void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb);
+void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg);
/* free only the hashtable and the hash itself. */
void hash_destroy(struct hashtable_t *hash);
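
hashdata_free_cb grows a second void * argument so destructors such as
free_orig_node() can be handed their bat_priv instead of reaching for the old
soft_device global. A standalone sketch of a destructor callback carrying
caller context (names are illustrative, not the batman-adv ones):

/* Standalone sketch of a destructor callback with a caller-supplied
 * context argument, mirroring the new hashdata_free_cb signature. */
#include <stdio.h>
#include <stdlib.h>

typedef void (*free_cb)(void *data, void *arg);

struct counter {
	int freed;
};

static void free_entry(void *data, void *arg)
{
	struct counter *ctr = arg;	/* per-mesh context, not a global */

	free(data);
	ctr->freed++;
}

static void list_delete(void **entries, int n, free_cb cb, void *arg)
{
	int i;

	for (i = 0; i < n; i++)
		if (cb)
			cb(entries[i], arg);
}

int main(void)
{
	void *entries[3];
	struct counter ctr = { 0 };
	int i;

	for (i = 0; i < 3; i++)
		entries[i] = malloc(32);

	list_delete(entries, 3, free_entry, &ctr);
	printf("freed %d entries\n", ctr.freed);
	return 0;
}
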
diff --git a/drivers/staging/batman-adv/icmp_socket.c b/drivers/staging/batman-adv/icmp_socket.c
index 3ae7dd2..48856ca 100644
--- a/drivers/staging/batman-adv/icmp_socket.c
+++ b/drivers/staging/batman-adv/icmp_socket.c
@@ -45,6 +45,8 @@ static int bat_socket_open(struct inode *inode, struct file *file)
unsigned int i;
struct socket_client *socket_client;
+ nonseekable_open(inode, file);
+
socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
if (!socket_client)
@@ -154,7 +156,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
{
struct socket_client *socket_client = file->private_data;
struct bat_priv *bat_priv = socket_client->bat_priv;
- struct icmp_packet_rr icmp_packet;
+ struct sk_buff *skb;
+ struct icmp_packet_rr *icmp_packet;
+
struct orig_node *orig_node;
struct batman_if *batman_if;
size_t packet_len = sizeof(struct icmp_packet);
@@ -174,40 +178,54 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (len >= sizeof(struct icmp_packet_rr))
packet_len = sizeof(struct icmp_packet_rr);
- if (!access_ok(VERIFY_READ, buff, packet_len))
- return -EFAULT;
+ skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
+ if (!skb)
+ return -ENOMEM;
- if (__copy_from_user(&icmp_packet, buff, packet_len))
- return -EFAULT;
+ skb_reserve(skb, sizeof(struct ethhdr));
+ icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
+
+ if (!access_ok(VERIFY_READ, buff, packet_len)) {
+ len = -EFAULT;
+ goto free_skb;
+ }
+
+ if (__copy_from_user(icmp_packet, buff, packet_len)) {
+ len = -EFAULT;
+ goto free_skb;
+ }
- if (icmp_packet.packet_type != BAT_ICMP) {
+ if (icmp_packet->packet_type != BAT_ICMP) {
bat_dbg(DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: "
"got bogus packet type (expected: BAT_ICMP)\n");
- return -EINVAL;
+ len = -EINVAL;
+ goto free_skb;
}
- if (icmp_packet.msg_type != ECHO_REQUEST) {
+ if (icmp_packet->msg_type != ECHO_REQUEST) {
bat_dbg(DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: "
"got bogus message type (expected: ECHO_REQUEST)\n");
- return -EINVAL;
+ len = -EINVAL;
+ goto free_skb;
}
- icmp_packet.uid = socket_client->index;
+ icmp_packet->uid = socket_client->index;
- if (icmp_packet.version != COMPAT_VERSION) {
- icmp_packet.msg_type = PARAMETER_PROBLEM;
- icmp_packet.ttl = COMPAT_VERSION;
- bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
- goto out;
+ if (icmp_packet->version != COMPAT_VERSION) {
+ icmp_packet->msg_type = PARAMETER_PROBLEM;
+ icmp_packet->ttl = COMPAT_VERSION;
+ bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+ goto free_skb;
}
- if (atomic_read(&module_state) != MODULE_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dst_unreach;
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(orig_hash, icmp_packet.dst));
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ icmp_packet->dst));
if (!orig_node)
goto unlock;
@@ -218,7 +236,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if (!batman_if)
goto dst_unreach;
@@ -226,22 +244,24 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (batman_if->if_status != IF_ACTIVE)
goto dst_unreach;
- memcpy(icmp_packet.orig,
+ memcpy(icmp_packet->orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
if (packet_len == sizeof(struct icmp_packet_rr))
- memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(icmp_packet->rr, batman_if->net_dev->dev_addr, ETH_ALEN);
+
- send_raw_packet((unsigned char *)&icmp_packet,
- packet_len, batman_if, dstaddr);
+ send_skb_packet(skb, batman_if, dstaddr);
goto out;
unlock:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
dst_unreach:
- icmp_packet.msg_type = DESTINATION_UNREACHABLE;
- bat_socket_add_packet(socket_client, &icmp_packet, packet_len);
+ icmp_packet->msg_type = DESTINATION_UNREACHABLE;
+ bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+free_skb:
+ kfree_skb(skb);
out:
return len;
}
@@ -265,6 +285,7 @@ static const struct file_operations fops = {
.read = bat_socket_read,
.write = bat_socket_write,
.poll = bat_socket_poll,
+ .llseek = no_llseek,
};
int bat_socket_setup(struct bat_priv *bat_priv)
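
bat_socket_write() now builds the outgoing ICMP packet inside an skb:
dev_alloc_skb() sizes the buffer for Ethernet header plus payload,
skb_reserve() keeps headroom for the header, and skb_put() returns the spot
that is then filled with __copy_from_user(). A userspace sketch of that
headroom/append discipline on a flat buffer (buf_reserve()/buf_put() are
illustrative stand-ins, not the kernel skb API):

/* Userspace sketch of the skb_reserve()/skb_put() discipline used when
 * bat_socket_write() builds its packet. buf_reserve()/buf_put() are
 * illustrative stand-ins, not the kernel skb API. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define ETH_HLEN 14

struct buf {
	unsigned char *head;	/* start of the allocation */
	unsigned char *data;	/* start of the payload */
	unsigned char *tail;	/* end of the payload */
};

static struct buf *buf_alloc(size_t size)
{
	struct buf *b = malloc(sizeof(*b));

	b->head = b->data = b->tail = malloc(size);
	return b;
}

static void buf_reserve(struct buf *b, size_t headroom)
{
	b->data += headroom;	/* leave room for headers pushed later */
	b->tail += headroom;
}

static unsigned char *buf_put(struct buf *b, size_t len)
{
	unsigned char *old_tail = b->tail;

	b->tail += len;		/* append len bytes, return where to write */
	return old_tail;
}

int main(void)
{
	size_t packet_len = 20;
	struct buf *b = buf_alloc(packet_len + ETH_HLEN);
	unsigned char *payload;

	buf_reserve(b, ETH_HLEN);		/* headroom for the MAC header */
	payload = buf_put(b, packet_len);	/* space for the ICMP packet */
	memset(payload, 0, packet_len);		/* copy_from_user() stand-in */

	assert(b->data - b->head == ETH_HLEN);
	assert((size_t)(b->tail - b->data) == packet_len);

	free(b->head);
	free(b);
	return 0;
}
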
diff --git a/drivers/staging/batman-adv/main.c b/drivers/staging/batman-adv/main.c
index ef7c20a..0587940 100644
--- a/drivers/staging/batman-adv/main.c
+++ b/drivers/staging/batman-adv/main.c
@@ -34,43 +34,14 @@
#include "hash.h"
struct list_head if_list;
-struct hlist_head forw_bat_list;
-struct hlist_head forw_bcast_list;
-struct hashtable_t *orig_hash;
-
-DEFINE_SPINLOCK(orig_hash_lock);
-DEFINE_SPINLOCK(forw_bat_list_lock);
-DEFINE_SPINLOCK(forw_bcast_list_lock);
-
-atomic_t bcast_queue_left;
-atomic_t batman_queue_left;
-
-int16_t num_hna;
-
-struct net_device *soft_device;
unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-atomic_t module_state;
-
-static struct packet_type batman_adv_packet_type __read_mostly = {
- .type = __constant_htons(ETH_P_BATMAN),
- .func = batman_skb_recv,
-};
struct workqueue_struct *bat_event_workqueue;
static int __init batman_init(void)
{
- int retval;
-
INIT_LIST_HEAD(&if_list);
- INIT_HLIST_HEAD(&forw_bat_list);
- INIT_HLIST_HEAD(&forw_bcast_list);
-
- atomic_set(&module_state, MODULE_INACTIVE);
-
- atomic_set(&bcast_queue_left, BCAST_QUEUE_LEN);
- atomic_set(&batman_queue_left, BATMAN_QUEUE_LEN);
/* the name should not be longer than 10 chars - see
* http://lwn.net/Articles/23634/ */
@@ -82,126 +53,86 @@ static int __init batman_init(void)
bat_socket_init();
debugfs_init();
- /* initialize layer 2 interface */
- soft_device = alloc_netdev(sizeof(struct bat_priv) , "bat%d",
- interface_setup);
-
- if (!soft_device) {
- pr_err("Unable to allocate the batman interface\n");
- goto end;
- }
-
- retval = register_netdev(soft_device);
-
- if (retval < 0) {
- pr_err("Unable to register the batman interface: %i\n", retval);
- goto free_soft_device;
- }
-
- retval = sysfs_add_meshif(soft_device);
-
- if (retval < 0)
- goto unreg_soft_device;
-
- retval = debugfs_add_meshif(soft_device);
-
- if (retval < 0)
- goto unreg_sysfs;
-
register_netdevice_notifier(&hard_if_notifier);
- dev_add_pack(&batman_adv_packet_type);
pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
"loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
COMPAT_VERSION);
return 0;
-
-unreg_sysfs:
- sysfs_del_meshif(soft_device);
-unreg_soft_device:
- unregister_netdev(soft_device);
- soft_device = NULL;
- return -ENOMEM;
-
-free_soft_device:
- free_netdev(soft_device);
- soft_device = NULL;
-end:
- return -ENOMEM;
}
static void __exit batman_exit(void)
{
- deactivate_module();
-
debugfs_destroy();
unregister_netdevice_notifier(&hard_if_notifier);
hardif_remove_interfaces();
- if (soft_device) {
- debugfs_del_meshif(soft_device);
- sysfs_del_meshif(soft_device);
- unregister_netdev(soft_device);
- soft_device = NULL;
- }
-
- dev_remove_pack(&batman_adv_packet_type);
-
+ flush_workqueue(bat_event_workqueue);
destroy_workqueue(bat_event_workqueue);
bat_event_workqueue = NULL;
+
+ rcu_barrier();
}
-/* activates the module, starts timer ... */
-void activate_module(void)
+int mesh_init(struct net_device *soft_iface)
{
- if (originator_init() < 1)
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
+
+ spin_lock_init(&bat_priv->orig_hash_lock);
+ spin_lock_init(&bat_priv->forw_bat_list_lock);
+ spin_lock_init(&bat_priv->forw_bcast_list_lock);
+ spin_lock_init(&bat_priv->hna_lhash_lock);
+ spin_lock_init(&bat_priv->hna_ghash_lock);
+ spin_lock_init(&bat_priv->vis_hash_lock);
+ spin_lock_init(&bat_priv->vis_list_lock);
+
+ INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
+ INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
+
+ if (originator_init(bat_priv) < 1)
goto err;
- if (hna_local_init() < 1)
+ if (hna_local_init(bat_priv) < 1)
goto err;
- if (hna_global_init() < 1)
+ if (hna_global_init(bat_priv) < 1)
goto err;
- hna_local_add(soft_device->dev_addr);
+ hna_local_add(soft_iface, soft_iface->dev_addr);
- if (vis_init() < 1)
+ if (vis_init(bat_priv) < 1)
goto err;
- update_min_mtu();
- atomic_set(&module_state, MODULE_ACTIVE);
+ atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
goto end;
err:
pr_err("Unable to allocate memory for mesh information structures: "
"out of mem ?\n");
- deactivate_module();
+ mesh_free(soft_iface);
+ return -1;
+
end:
- return;
+ return 0;
}
-/* shuts down the whole module.*/
-void deactivate_module(void)
+void mesh_free(struct net_device *soft_iface)
{
- atomic_set(&module_state, MODULE_DEACTIVATING);
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
- purge_outstanding_packets(NULL);
- flush_workqueue(bat_event_workqueue);
+ atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
- vis_quit();
+ purge_outstanding_packets(bat_priv, NULL);
- /* TODO: unregister BATMAN pack */
+ vis_quit(bat_priv);
- originator_free();
+ originator_free(bat_priv);
- hna_local_free();
- hna_global_free();
+ hna_local_free(bat_priv);
+ hna_global_free(bat_priv);
- synchronize_net();
-
- synchronize_rcu();
- atomic_set(&module_state, MODULE_INACTIVE);
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
}
void inc_module_count(void)
@@ -214,11 +145,6 @@ void dec_module_count(void)
module_put(THIS_MODULE);
}
-int addr_to_string(char *buff, uint8_t *addr)
-{
- return sprintf(buff, "%pM", addr);
-}
-
/* returns 1 if they are the same originator */
int compare_orig(void *data1, void *data2)
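
mesh_init()/mesh_free() replace the old activate_module()/deactivate_module():
the queues, locks and hashes that used to be file-scope globals now live in
the per-interface bat_priv, so several meshes can coexist. A minimal
userspace sketch of that move from module-global state to per-instance
private data (names echo the patch, but the code is only an illustration):

/* Userspace sketch of the move from module-global state to per-mesh
 * private data, as done by mesh_init()/mesh_free(). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum mesh_state { MESH_INACTIVE, MESH_ACTIVE, MESH_DEACTIVATING };

struct mesh_priv {			/* formerly a pile of globals */
	enum mesh_state state;
	pthread_mutex_t forw_list_lock;
	int queue_left;
};

static struct mesh_priv *mesh_init(int queue_len)
{
	struct mesh_priv *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return NULL;
	pthread_mutex_init(&priv->forw_list_lock, NULL);
	priv->queue_left = queue_len;
	priv->state = MESH_ACTIVE;
	return priv;
}

static void mesh_free(struct mesh_priv *priv)
{
	priv->state = MESH_DEACTIVATING;
	pthread_mutex_destroy(&priv->forw_list_lock);
	free(priv);
}

int main(void)
{
	/* two independent meshes, each with its own state */
	struct mesh_priv *bat0 = mesh_init(256);
	struct mesh_priv *bat1 = mesh_init(256);

	if (!bat0 || !bat1)
		return EXIT_FAILURE;

	printf("bat0 active: %d, bat1 active: %d\n",
	       bat0->state == MESH_ACTIVE, bat1->state == MESH_ACTIVE);

	mesh_free(bat1);
	mesh_free(bat0);
	return 0;
}
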
diff --git a/drivers/staging/batman-adv/main.h b/drivers/staging/batman-adv/main.h
index 8513261..5e3f516 100644
--- a/drivers/staging/batman-adv/main.h
+++ b/drivers/staging/batman-adv/main.h
@@ -30,7 +30,7 @@
#define DRIVER_DESC "B.A.T.M.A.N. advanced"
#define DRIVER_DEVICE "batman-adv"
-#define SOURCE_VERSION "maint"
+#define SOURCE_VERSION "next"
/* B.A.T.M.A.N. parameters */
@@ -58,7 +58,6 @@
#define PACKBUFF_SIZE 2000
#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
-#define ETH_STR_LEN 20
#define VIS_INTERVAL 5000 /* 5 seconds */
@@ -76,9 +75,9 @@
#define EXPECTED_SEQNO_RANGE 65536
/* don't reset again within 30 seconds */
-#define MODULE_INACTIVE 0
-#define MODULE_ACTIVE 1
-#define MODULE_DEACTIVATING 2
+#define MESH_INACTIVE 0
+#define MESH_ACTIVE 1
+#define MESH_DEACTIVATING 2
#define BCAST_QUEUE_LEN 256
#define BATMAN_QUEUE_LEN 256
@@ -128,29 +127,14 @@
#endif
extern struct list_head if_list;
-extern struct hlist_head forw_bat_list;
-extern struct hlist_head forw_bcast_list;
-extern struct hashtable_t *orig_hash;
-
-extern spinlock_t orig_hash_lock;
-extern spinlock_t forw_bat_list_lock;
-extern spinlock_t forw_bcast_list_lock;
-
-extern atomic_t bcast_queue_left;
-extern atomic_t batman_queue_left;
-extern int16_t num_hna;
-
-extern struct net_device *soft_device;
extern unsigned char broadcast_addr[];
-extern atomic_t module_state;
extern struct workqueue_struct *bat_event_workqueue;
-void activate_module(void);
-void deactivate_module(void);
+int mesh_init(struct net_device *soft_iface);
+void mesh_free(struct net_device *soft_iface);
void inc_module_count(void);
void dec_module_count(void);
-int addr_to_string(char *buff, uint8_t *addr);
int compare_orig(void *data1, void *data2);
int choose_orig(void *data, int32_t size);
int is_my_mac(uint8_t *addr);
@@ -158,7 +142,7 @@ int is_bcast(uint8_t *addr);
int is_mcast(uint8_t *addr);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-extern int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
#define bat_dbg(type, bat_priv, fmt, arg...) \
do { \
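
The main.h hunk above removes the last of the module-wide globals (forwarding lists, queue counters, module_state): they become members of struct bat_priv and are reached through netdev_priv() on the soft interface. A minimal sketch of that access pattern follows; the helper name is hypothetical and not part of this patch, and it assumes the driver's local "types.h" for struct bat_priv.

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>
	#include "types.h"

	/* illustrative only: per-mesh state is reached via netdev_priv() */
	static bool example_bcast_queue_empty(struct net_device *soft_iface)
	{
		struct bat_priv *bat_priv = netdev_priv(soft_iface);
		unsigned long flags;
		bool empty;

		spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
		empty = hlist_empty(&bat_priv->forw_bcast_list);
		spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);

		return empty;
	}
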
diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c
index de5a8c1..5527008 100644
--- a/drivers/staging/batman-adv/originator.c
+++ b/drivers/staging/batman-adv/originator.c
@@ -27,32 +27,34 @@
#include "translation-table.h"
#include "routing.h"
#include "hard-interface.h"
+#include "unicast.h"
-static DECLARE_DELAYED_WORK(purge_orig_wq, purge_orig);
+static void purge_orig(struct work_struct *work);
-static void start_purge_timer(void)
+static void start_purge_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &purge_orig_wq, 1 * HZ);
+ INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
-int originator_init(void)
+int originator_init(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (orig_hash)
+ if (bat_priv->orig_hash)
return 1;
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_hash = hash_new(128, compare_orig, choose_orig);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ bat_priv->orig_hash = hash_new(128, compare_orig, choose_orig);
- if (!orig_hash)
+ if (!bat_priv->orig_hash)
goto err;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- start_purge_timer();
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ start_purge_timer(bat_priv);
return 1;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
}
@@ -60,8 +62,7 @@ struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *neigh_node;
bat_dbg(DBG_BATMAN, bat_priv,
@@ -81,11 +82,12 @@ create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
return neigh_node;
}
-static void free_orig_node(void *data)
+static void free_orig_node(void *data, void *arg)
{
struct list_head *list_pos, *list_pos_tmp;
struct neigh_node *neigh_node;
struct orig_node *orig_node = (struct orig_node *)data;
+ struct bat_priv *bat_priv = (struct bat_priv *)arg;
/* for all neighbors towards this originator ... */
list_for_each_safe(list_pos, list_pos_tmp, &orig_node->neigh_list) {
@@ -95,41 +97,40 @@ static void free_orig_node(void *data)
kfree(neigh_node);
}
- hna_global_del_orig(orig_node, "originator timed out");
+ frag_list_free(&orig_node->frag_list);
+ hna_global_del_orig(bat_priv, orig_node, "originator timed out");
kfree(orig_node->bcast_own);
kfree(orig_node->bcast_own_sum);
kfree(orig_node);
}
-void originator_free(void)
+void originator_free(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (!orig_hash)
+ if (!bat_priv->orig_hash)
return;
- cancel_delayed_work_sync(&purge_orig_wq);
+ cancel_delayed_work_sync(&bat_priv->orig_work);
- spin_lock_irqsave(&orig_hash_lock, flags);
- hash_delete(orig_hash, free_orig_node);
- orig_hash = NULL;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ hash_delete(bat_priv->orig_hash, free_orig_node, bat_priv);
+ bat_priv->orig_hash = NULL;
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
/* this function finds or creates an originator entry for the given
 * address if it does not exist */
-struct orig_node *get_orig_node(uint8_t *addr)
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct hashtable_t *swaphash;
int size;
- orig_node = ((struct orig_node *)hash_find(orig_hash, addr));
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash, addr));
- if (orig_node != NULL)
+ if (orig_node)
return orig_node;
bat_dbg(DBG_BATMAN, bat_priv,
@@ -157,20 +158,25 @@ struct orig_node *get_orig_node(uint8_t *addr)
size = bat_priv->num_ifaces * sizeof(uint8_t);
orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+
+ INIT_LIST_HEAD(&orig_node->frag_list);
+ orig_node->last_frag_packet = 0;
+
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
- if (hash_add(orig_hash, orig_node) < 0)
+ if (hash_add(bat_priv->orig_hash, orig_node) < 0)
goto free_bcast_own_sum;
- if (orig_hash->elements * 4 > orig_hash->size) {
- swaphash = hash_resize(orig_hash, orig_hash->size * 2);
+ if (bat_priv->orig_hash->elements * 4 > bat_priv->orig_hash->size) {
+ swaphash = hash_resize(bat_priv->orig_hash,
+ bat_priv->orig_hash->size * 2);
- if (swaphash == NULL)
- bat_err(soft_device,
+ if (!swaphash)
+ bat_dbg(DBG_BATMAN, bat_priv,
"Couldn't resize orig hash table\n");
else
- orig_hash = swaphash;
+ bat_priv->orig_hash = swaphash;
}
return orig_node;
@@ -183,11 +189,10 @@ free_orig_node:
return NULL;
}
-static bool purge_orig_neighbors(struct orig_node *orig_node,
+static bool purge_orig_neighbors(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
struct neigh_node **best_neigh_node)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct list_head *list_pos, *list_pos_tmp;
struct neigh_node *neigh_node;
bool neigh_purged = false;
@@ -200,8 +205,8 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
if ((time_after(jiffies,
neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
- (neigh_node->if_incoming->if_status ==
- IF_TO_BE_REMOVED)) {
+ (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
+ (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
if (neigh_node->if_incoming->if_status ==
IF_TO_BE_REMOVED)
@@ -209,7 +214,7 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
"neighbor purge: originator %pM, "
"neighbor: %pM, iface: %s\n",
orig_node->orig, neigh_node->addr,
- neigh_node->if_incoming->dev);
+ neigh_node->if_incoming->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
"neighbor timeout: originator %pM, "
@@ -229,10 +234,9 @@ static bool purge_orig_neighbors(struct orig_node *orig_node,
return neigh_purged;
}
-static bool purge_orig_node(struct orig_node *orig_node)
+static bool purge_orig_node(struct bat_priv *bat_priv,
+ struct orig_node *orig_node)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *best_neigh_node;
if (time_after(jiffies,
@@ -243,8 +247,10 @@ static bool purge_orig_node(struct orig_node *orig_node)
orig_node->orig, (orig_node->last_valid / HZ));
return true;
} else {
- if (purge_orig_neighbors(orig_node, &best_neigh_node)) {
- update_routes(orig_node, best_neigh_node,
+ if (purge_orig_neighbors(bat_priv, orig_node,
+ &best_neigh_node)) {
+ update_routes(bat_priv, orig_node,
+ best_neigh_node,
orig_node->hna_buff,
orig_node->hna_buff_len);
/* update bonding candidates, we could have lost
@@ -256,29 +262,46 @@ static bool purge_orig_node(struct orig_node *orig_node)
return false;
}
-void purge_orig(struct work_struct *work)
+static void _purge_orig(struct bat_priv *bat_priv)
{
HASHIT(hashit);
struct orig_node *orig_node;
unsigned long flags;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
/* for all origins... */
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
- if (purge_orig_node(orig_node)) {
- hash_remove_bucket(orig_hash, &hashit);
- free_orig_node(orig_node);
+
+ if (purge_orig_node(bat_priv, orig_node)) {
+ hash_remove_bucket(bat_priv->orig_hash, &hashit);
+ free_orig_node(orig_node, bat_priv);
+ /* orig_node was freed above; don't touch its frag_list below */
+ continue;
}
+
+ if (time_after(jiffies, (orig_node->last_frag_packet +
+ msecs_to_jiffies(FRAG_TIMEOUT))))
+ frag_list_free(&orig_node->frag_list);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+}
+
+static void purge_orig(struct work_struct *work)
+{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, orig_work);
- /* if work == NULL we were not called by the timer
- * and thus do not need to re-arm the timer */
- if (work)
- start_purge_timer();
+ _purge_orig(bat_priv);
+ start_purge_timer(bat_priv);
+}
+
+void purge_orig_ref(struct bat_priv *bat_priv)
+{
+ _purge_orig(bat_priv);
}
int orig_seq_print_text(struct seq_file *seq, void *offset)
@@ -292,7 +315,6 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
int last_seen_secs;
int last_seen_msecs;
unsigned long flags;
- char orig_str[ETH_STR_LEN], router_str[ETH_STR_LEN];
if ((!bat_priv->primary_if) ||
(bat_priv->primary_if->if_status != IF_ACTIVE)) {
@@ -306,19 +328,17 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
net_dev->name);
}
- rcu_read_lock();
- seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s (%s)]\n",
+ seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
SOURCE_VERSION, REVISION_VERSION_STR,
- bat_priv->primary_if->dev, bat_priv->primary_if->addr_str,
- net_dev->name);
+ bat_priv->primary_if->net_dev->name,
+ bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
"Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
"outgoingIF", "Potential nexthops");
- rcu_read_unlock();
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
@@ -328,21 +348,18 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
if (orig_node->router->tq_avg == 0)
continue;
- addr_to_string(orig_str, orig_node->orig);
- addr_to_string(router_str, orig_node->router->addr);
last_seen_secs = jiffies_to_msecs(jiffies -
orig_node->last_valid) / 1000;
last_seen_msecs = jiffies_to_msecs(jiffies -
orig_node->last_valid) % 1000;
- seq_printf(seq, "%-17s %4i.%03is (%3i) %17s [%10s]:",
- orig_str, last_seen_secs, last_seen_msecs,
- orig_node->router->tq_avg, router_str,
- orig_node->router->if_incoming->dev);
+ seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
+ orig_node->orig, last_seen_secs, last_seen_msecs,
+ orig_node->router->tq_avg, orig_node->router->addr,
+ orig_node->router->if_incoming->net_dev->name);
list_for_each_entry(neigh_node, &orig_node->neigh_list, list) {
- addr_to_string(orig_str, neigh_node->addr);
- seq_printf(seq, " %17s (%3i)", orig_str,
+ seq_printf(seq, " %pM (%3i)", neigh_node->addr,
neigh_node->tq_avg);
}
@@ -350,7 +367,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
batman_count++;
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
if ((batman_count == 0))
seq_printf(seq, "No batman nodes in range ...\n");
@@ -390,26 +407,27 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct orig_node *orig_node;
unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
if (orig_node_add_if(orig_node, max_if_num) == -1)
goto err;
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
@@ -434,7 +452,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
/* copy second part */
- memcpy(data_ptr,
+ memcpy(data_ptr + del_if_num * chunk_size,
orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
(max_if_num - del_if_num) * chunk_size);
@@ -454,7 +472,7 @@ free_bcast_own:
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
- memcpy(data_ptr,
+ memcpy(data_ptr + del_if_num * sizeof(uint8_t),
orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
(max_if_num - del_if_num) * sizeof(uint8_t));
@@ -467,6 +485,7 @@ free_own_sum:
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
unsigned long flags;
@@ -475,9 +494,9 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
ret = orig_node_del_if(orig_node, max_if_num,
@@ -496,16 +515,19 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
if (batman_if == batman_if_tmp)
continue;
+ if (batman_if->soft_iface != batman_if_tmp->soft_iface)
+ continue;
+
if (batman_if_tmp->if_num > batman_if->if_num)
batman_if_tmp->if_num--;
}
rcu_read_unlock();
batman_if->if_num = -1;
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return 0;
err:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return -ENOMEM;
}
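
purge_orig() above now runs as per-mesh delayed work: it recovers its bat_priv through container_of() and re-arms itself, while purge_orig_ref() lets other code trigger a synchronous purge. A condensed sketch of that self-rearming pattern follows; the example_* names are illustrative and not part of the patch.

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct example_priv {
		struct delayed_work work;
	};

	static void example_work_fn(struct work_struct *work)
	{
		struct delayed_work *dwork =
			container_of(work, struct delayed_work, work);
		struct example_priv *priv =
			container_of(dwork, struct example_priv, work);

		/* ... periodic job operating on priv ... */

		/* re-arm; teardown must use cancel_delayed_work_sync(),
		 * as originator_free() does above */
		schedule_delayed_work(&priv->work, 1 * HZ);
	}

	static void example_start(struct example_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->work, example_work_fn);
		schedule_delayed_work(&priv->work, 1 * HZ);
	}
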
diff --git a/drivers/staging/batman-adv/originator.h b/drivers/staging/batman-adv/originator.h
index e88411d..a97c4004 100644
--- a/drivers/staging/batman-adv/originator.h
+++ b/drivers/staging/batman-adv/originator.h
@@ -22,10 +22,10 @@
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
#define _NET_BATMAN_ADV_ORIGINATOR_H_
-int originator_init(void);
-void originator_free(void);
-void purge_orig(struct work_struct *work);
-struct orig_node *get_orig_node(uint8_t *addr);
+int originator_init(struct bat_priv *bat_priv);
+void originator_free(struct bat_priv *bat_priv);
+void purge_orig_ref(struct bat_priv *bat_priv);
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
struct neigh_node *
create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node,
uint8_t *neigh, struct batman_if *if_incoming);
diff --git a/drivers/staging/batman-adv/packet.h b/drivers/staging/batman-adv/packet.h
index abb5e46..2693383 100644
--- a/drivers/staging/batman-adv/packet.h
+++ b/drivers/staging/batman-adv/packet.h
@@ -24,14 +24,15 @@
#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
-#define BAT_PACKET 0x01
-#define BAT_ICMP 0x02
-#define BAT_UNICAST 0x03
-#define BAT_BCAST 0x04
-#define BAT_VIS 0x05
+#define BAT_PACKET 0x01
+#define BAT_ICMP 0x02
+#define BAT_UNICAST 0x03
+#define BAT_BCAST 0x04
+#define BAT_VIS 0x05
+#define BAT_UNICAST_FRAG 0x06
/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 11
+#define COMPAT_VERSION 13
#define DIRECTLINK 0x40
#define VIS_SERVER 0x20
#define PRIMARIES_FIRST_HOP 0x10
@@ -47,6 +48,9 @@
#define VIS_TYPE_SERVER_SYNC 0
#define VIS_TYPE_CLIENT_UPDATE 1
+/* fragmentation defines */
+#define UNI_FRAG_HEAD 0x01
+
struct batman_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
@@ -75,7 +79,7 @@ struct icmp_packet {
#define BAT_RR_LEN 16
/* icmp_packet_rr must start with all fields from icmp_packet
- as this is assumed by code that handles ICMP packets */
+ * as this is assumed by code that handles ICMP packets */
struct icmp_packet_rr {
uint8_t packet_type;
uint8_t version; /* batman version field */
@@ -96,6 +100,16 @@ struct unicast_packet {
uint8_t ttl;
} __attribute__((packed));
+struct unicast_frag_packet {
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t dest[6];
+ uint8_t ttl;
+ uint8_t flags;
+ uint8_t orig[6];
+ uint16_t seqno;
+} __attribute__((packed));
+
struct bcast_packet {
uint8_t packet_type;
uint8_t version; /* batman version field */
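
struct unicast_frag_packet above defines the wire format for BAT_UNICAST_FRAG: a unicast frame too large for the outgoing MTU is sent as two fragments that share a seqno, with UNI_FRAG_HEAD set on the first one so the receiver (recv_ucast_frag_packet() further down) can pair and merge them. The sending side lives in the new unicast.c and is not part of this excerpt; the following is only an illustrative sketch of how such headers could be stamped, with hypothetical helper and parameter names and byte-order handling left out.

	#include <linux/string.h>
	#include <linux/if_ether.h>
	#include "packet.h"

	static void example_stamp_frag_headers(struct unicast_frag_packet *head,
					       struct unicast_frag_packet *tail,
					       uint8_t *own_addr, uint8_t *dest_addr,
					       uint8_t ttl, uint16_t seqno)
	{
		head->packet_type = BAT_UNICAST_FRAG;
		head->version = COMPAT_VERSION;
		head->ttl = ttl;
		head->flags = UNI_FRAG_HEAD;		/* marks the first fragment */
		memcpy(head->dest, dest_addr, ETH_ALEN);
		memcpy(head->orig, own_addr, ETH_ALEN);
		head->seqno = seqno;			/* byte order omitted here */

		/* second fragment: identical header, same seqno, head flag cleared */
		memcpy(tail, head, sizeof(*tail));
		tail->flags &= ~UNI_FRAG_HEAD;
	}
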
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 032195e..9010263 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -32,31 +32,31 @@
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
+#include "unicast.h"
void slide_own_bcast_window(struct batman_if *batman_if)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
HASHIT(hashit);
struct orig_node *orig_node;
TYPE_OF_WORD *word;
unsigned long flags;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
word = &(orig_node->bcast_own[batman_if->if_num * NUM_WORDS]);
- bit_get_packet(word, 1, 0);
+ bit_get_packet(bat_priv, word, 1, 0);
orig_node->bcast_own_sum[batman_if->if_num] =
bit_packet_count(word);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
-static void update_HNA(struct orig_node *orig_node,
+static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
unsigned char *hna_buff, int hna_buff_len)
{
if ((hna_buff_len != orig_node->hna_buff_len) ||
@@ -65,27 +65,27 @@ static void update_HNA(struct orig_node *orig_node,
(memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
if (orig_node->hna_buff_len > 0)
- hna_global_del_orig(orig_node,
+ hna_global_del_orig(bat_priv, orig_node,
"originator changed hna");
if ((hna_buff_len > 0) && (hna_buff != NULL))
- hna_global_add_orig(orig_node, hna_buff, hna_buff_len);
+ hna_global_add_orig(bat_priv, orig_node,
+ hna_buff, hna_buff_len);
}
}
-static void update_route(struct orig_node *orig_node,
+static void update_route(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
struct neigh_node *neigh_node,
unsigned char *hna_buff, int hna_buff_len)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
-
/* route deleted */
if ((orig_node->router != NULL) && (neigh_node == NULL)) {
bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
orig_node->orig);
- hna_global_del_orig(orig_node, "originator timed out");
+ hna_global_del_orig(bat_priv, orig_node,
+ "originator timed out");
/* route added */
} else if ((orig_node->router == NULL) && (neigh_node != NULL)) {
@@ -93,7 +93,8 @@ static void update_route(struct orig_node *orig_node,
bat_dbg(DBG_ROUTES, bat_priv,
"Adding route towards: %pM (via %pM)\n",
orig_node->orig, neigh_node->addr);
- hna_global_add_orig(orig_node, hna_buff, hna_buff_len);
+ hna_global_add_orig(bat_priv, orig_node,
+ hna_buff, hna_buff_len);
/* route changed */
} else {
@@ -108,19 +109,20 @@ static void update_route(struct orig_node *orig_node,
}
-void update_routes(struct orig_node *orig_node,
- struct neigh_node *neigh_node,
- unsigned char *hna_buff, int hna_buff_len)
+void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node, unsigned char *hna_buff,
+ int hna_buff_len)
{
if (orig_node == NULL)
return;
if (orig_node->router != neigh_node)
- update_route(orig_node, neigh_node, hna_buff, hna_buff_len);
+ update_route(bat_priv, orig_node, neigh_node,
+ hna_buff, hna_buff_len);
/* may be just HNA changed */
else
- update_HNA(orig_node, hna_buff, hna_buff_len);
+ update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}
static int is_bidirectional_neigh(struct orig_node *orig_node,
@@ -128,8 +130,7 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
struct batman_packet *batman_packet,
struct batman_if *if_incoming)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
unsigned char total_count;
@@ -233,14 +234,14 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
return 0;
}
-static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
+static void update_orig(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct batman_if *if_incoming,
unsigned char *hna_buff, int hna_buff_len,
char is_duplicate)
{
- /* FIXME: get bat_priv */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
int tmp_hna_buff_len;
@@ -266,12 +267,11 @@ static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
if (!neigh_node) {
struct orig_node *orig_tmp;
- orig_tmp = get_orig_node(ethhdr->h_source);
+ orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_tmp)
return;
- neigh_node = create_neighbor(orig_node,
- orig_tmp,
+ neigh_node = create_neighbor(orig_node, orig_tmp,
ethhdr->h_source, if_incoming);
if (!neigh_node)
return;
@@ -313,11 +313,13 @@ static void update_orig(struct orig_node *orig_node, struct ethhdr *ethhdr,
>= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
goto update_hna;
- update_routes(orig_node, neigh_node, hna_buff, tmp_hna_buff_len);
+ update_routes(bat_priv, orig_node, neigh_node,
+ hna_buff, tmp_hna_buff_len);
return;
update_hna:
- update_routes(orig_node, orig_node->router, hna_buff, tmp_hna_buff_len);
+ update_routes(bat_priv, orig_node, orig_node->router,
+ hna_buff, tmp_hna_buff_len);
}
/* checks whether the host restarted and is in the protection time.
@@ -325,12 +327,10 @@ update_hna:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-static int window_protected(int32_t seq_num_diff,
- unsigned long *last_reset)
+static int window_protected(struct bat_priv *bat_priv,
+ int32_t seq_num_diff,
+ unsigned long *last_reset)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
-
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
if (time_after(jiffies, *last_reset +
@@ -359,8 +359,7 @@ static char count_real_packets(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct batman_if *if_incoming)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct orig_node *orig_node;
struct neigh_node *tmp_neigh_node;
char is_duplicate = 0;
@@ -368,14 +367,15 @@ static char count_real_packets(struct ethhdr *ethhdr,
int need_update = 0;
int set_mark;
- orig_node = get_orig_node(batman_packet->orig);
+ orig_node = get_orig_node(bat_priv, batman_packet->orig);
if (orig_node == NULL)
return 0;
seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
/* signalize caller that the packet is to be dropped. */
- if (window_protected(seq_diff, &orig_node->batman_seqno_reset))
+ if (window_protected(bat_priv, seq_diff,
+ &orig_node->batman_seqno_reset))
return -1;
list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
@@ -391,8 +391,9 @@ static char count_real_packets(struct ethhdr *ethhdr,
set_mark = 0;
/* if the window moved, set the update flag. */
- need_update |= bit_get_packet(tmp_neigh_node->real_bits,
- seq_diff, set_mark);
+ need_update |= bit_get_packet(bat_priv,
+ tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
bit_packet_count(tmp_neigh_node->real_bits);
@@ -520,8 +521,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
unsigned char *hna_buff, int hna_buff_len,
struct batman_if *if_incoming)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct batman_if *batman_if;
struct orig_node *orig_neigh_node, *orig_node;
char has_directlink_flag;
@@ -554,18 +554,23 @@ void receive_bat_packet(struct ethhdr *ethhdr,
batman_packet->orig) ? 1 : 0);
bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%s] "
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
"TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->dev, if_incoming->addr_str,
- batman_packet->orig, batman_packet->prev_sender,
- batman_packet->seqno, batman_packet->tq, batman_packet->ttl,
- batman_packet->version, has_directlink_flag);
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, batman_packet->orig,
+ batman_packet->prev_sender, batman_packet->seqno,
+ batman_packet->tq, batman_packet->ttl, batman_packet->version,
+ has_directlink_flag);
+ rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if (batman_if->if_status != IF_ACTIVE)
continue;
+ if (batman_if->soft_iface != if_incoming->soft_iface)
+ continue;
+
if (compare_orig(ethhdr->h_source,
batman_if->net_dev->dev_addr))
is_my_addr = 1;
@@ -581,6 +586,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
if (compare_orig(ethhdr->h_source, broadcast_addr))
is_broadcast = 1;
}
+ rcu_read_unlock();
if (batman_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
@@ -608,7 +614,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
TYPE_OF_WORD *word;
int offset;
- orig_neigh_node = get_orig_node(ethhdr->h_source);
+ orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_neigh_node)
return;
@@ -640,7 +646,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
return;
}
- orig_node = get_orig_node(batman_packet->orig);
+ orig_node = get_orig_node(bat_priv, batman_packet->orig);
if (orig_node == NULL)
return;
@@ -676,7 +682,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
/* if sender is a direct neighbor the sender mac equals
* originator mac */
orig_neigh_node = (is_single_hop_neigh ?
- orig_node : get_orig_node(ethhdr->h_source));
+ orig_node :
+ get_orig_node(bat_priv, ethhdr->h_source));
if (orig_neigh_node == NULL)
return;
@@ -698,7 +705,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
(!is_duplicate ||
((orig_node->last_real_seqno == batman_packet->seqno) &&
(orig_node->last_ttl - 3 <= batman_packet->ttl))))
- update_orig(orig_node, ethhdr, batman_packet,
+ update_orig(bat_priv, orig_node, ethhdr, batman_packet,
if_incoming, hna_buff, hna_buff_len, is_duplicate);
mark_bonding_address(bat_priv, orig_node,
@@ -736,15 +743,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
0, hna_buff_len, if_incoming);
}
-int recv_bat_packet(struct sk_buff *skb,
- struct batman_if *batman_if)
+int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
struct ethhdr *ethhdr;
unsigned long flags;
- struct sk_buff *skb_old;
/* drop packet if it has not necessary minimum size */
- if (skb_headlen(skb) < sizeof(struct batman_packet))
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -757,38 +763,33 @@ int recv_bat_packet(struct sk_buff *skb,
if (is_bcast(ethhdr->h_source))
return NET_RX_DROP;
- /* TODO: we use headlen instead of "length", because
- * only this data is paged in. */
-
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, skb_headlen(skb))) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
+ if (skb_cow(skb, 0) < 0)
+ return NET_RX_DROP;
+
+ /* keep skb linear */
+ if (skb_linearize(skb) < 0)
+ return NET_RX_DROP;
+
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
receive_aggr_bat_packet(ethhdr,
skb->data,
skb_headlen(skb),
batman_if);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
-static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
+static int recv_my_icmp_packet(struct bat_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
- struct sk_buff *skb_old;
struct batman_if *batman_if;
int ret;
unsigned long flags;
@@ -808,8 +809,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
/* answer echo request (ping) */
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = ((struct orig_node *)hash_find(orig_hash,
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
icmp_packet->orig));
ret = NET_RX_DROP;
@@ -820,19 +821,14 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- skb_old = NULL;
- if (!skb_clone_writable(skb, icmp_len)) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
+ if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ return NET_RX_DROP;
+
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -844,19 +840,17 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
-static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
+static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
- struct sk_buff *skb_old;
struct batman_if *batman_if;
int ret;
unsigned long flags;
@@ -867,9 +861,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
- pr_warning("Warning - can't forward icmp packet from %pM to "
- "%pM: ttl exceeded\n", icmp_packet->orig,
- icmp_packet->dst);
+ pr_debug("Warning - can't forward icmp packet from %pM to "
+ "%pM: ttl exceeded\n", icmp_packet->orig,
+ icmp_packet->dst);
return NET_RX_DROP;
}
@@ -877,9 +871,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
return NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, icmp_packet->orig));
+ hash_find(bat_priv->orig_hash, icmp_packet->orig));
ret = NET_RX_DROP;
if ((orig_node != NULL) &&
@@ -889,18 +883,14 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, icmp_len)) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- icmp_packet = (struct icmp_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
+ if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ return NET_RX_DROP;
+
+ icmp_packet = (struct icmp_packet *) skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig,
@@ -912,18 +902,18 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
-int recv_icmp_packet(struct sk_buff *skb)
+int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
struct orig_node *orig_node;
- struct sk_buff *skb_old;
struct batman_if *batman_if;
int hdr_size = sizeof(struct icmp_packet);
int ret;
@@ -933,11 +923,11 @@ int recv_icmp_packet(struct sk_buff *skb)
/**
* we truncate all incoming icmp packets if they don't match our size
*/
- if (skb_headlen(skb) >= sizeof(struct icmp_packet_rr))
+ if (skb->len >= sizeof(struct icmp_packet_rr))
hdr_size = sizeof(struct icmp_packet_rr);
/* drop packet if it has not necessary minimum size */
- if (skb_headlen(skb) < hdr_size)
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -966,18 +956,18 @@ int recv_icmp_packet(struct sk_buff *skb)
/* packet for me */
if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(skb, hdr_size);
+ return recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->ttl < 2)
- return recv_icmp_ttl_exceeded(skb, hdr_size);
+ return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
ret = NET_RX_DROP;
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, icmp_packet->dst));
+ hash_find(bat_priv->orig_hash, icmp_packet->dst));
if ((orig_node != NULL) &&
(orig_node->router != NULL)) {
@@ -986,18 +976,14 @@ int recv_icmp_packet(struct sk_buff *skb)
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, hdr_size)) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
+ if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ return NET_RX_DROP;
+
+ icmp_packet = (struct icmp_packet_rr *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
icmp_packet->ttl--;
@@ -1007,7 +993,7 @@ int recv_icmp_packet(struct sk_buff *skb)
ret = NET_RX_SUCCESS;
} else
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return ret;
}
@@ -1015,10 +1001,9 @@ int recv_icmp_packet(struct sk_buff *skb)
/* find a suitable router for this originator, and use
* bonding if possible. */
struct neigh_node *find_router(struct orig_node *orig_node,
- struct batman_if *recv_if)
+ struct batman_if *recv_if)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv;
struct orig_node *primary_orig_node;
struct orig_node *router_orig;
struct neigh_node *router, *first_candidate, *best_router;
@@ -1034,9 +1019,14 @@ struct neigh_node *find_router(struct orig_node *orig_node,
/* without bonding, the first node should
* always choose the default router. */
+ if (!recv_if)
+ return orig_node->router;
+
+ bat_priv = netdev_priv(recv_if->soft_iface);
bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
- if (!bonding_enabled && (recv_if == NULL))
- return orig_node->router;
+
+ if (!bonding_enabled)
+ return orig_node->router;
router_orig = orig_node->router->orig_node;
@@ -1052,8 +1042,9 @@ struct neigh_node *find_router(struct orig_node *orig_node,
router_orig->orig, ETH_ALEN) == 0) {
primary_orig_node = router_orig;
} else {
- primary_orig_node = hash_find(orig_hash,
+ primary_orig_node = hash_find(bat_priv->orig_hash,
router_orig->primary_addr);
+
if (!primary_orig_node)
return orig_node->router;
}
@@ -1105,61 +1096,68 @@ struct neigh_node *find_router(struct orig_node *orig_node,
return router;
}
-int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
- struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
- struct neigh_node *router;
struct ethhdr *ethhdr;
- struct batman_if *batman_if;
- struct sk_buff *skb_old;
- uint8_t dstaddr[ETH_ALEN];
- int hdr_size = sizeof(struct unicast_packet);
- unsigned long flags;
/* drop packet if it has not necessary minimum size */
- if (skb_headlen(skb) < hdr_size)
- return NET_RX_DROP;
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ return -1;
- ethhdr = (struct ethhdr *) skb_mac_header(skb);
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_bcast(ethhdr->h_dest))
- return NET_RX_DROP;
+ return -1;
/* packet with broadcast sender address */
if (is_bcast(ethhdr->h_source))
- return NET_RX_DROP;
+ return -1;
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
- return NET_RX_DROP;
+ return -1;
+
+ return 0;
+}
+
+static int route_unicast_packet(struct sk_buff *skb,
+ struct batman_if *recv_if, int hdr_size)
+{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct orig_node *orig_node;
+ struct neigh_node *router;
+ struct batman_if *batman_if;
+ uint8_t dstaddr[ETH_ALEN];
+ unsigned long flags;
+ struct unicast_packet *unicast_packet;
+ struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
- unicast_packet = (struct unicast_packet *) skb->data;
+ unicast_packet = (struct unicast_packet *)skb->data;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
- interface_rx(skb, hdr_size);
+ interface_rx(recv_if->soft_iface, skb, hdr_size);
return NET_RX_SUCCESS;
}
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
- pr_warning("Warning - can't forward unicast packet from %pM to "
- "%pM: ttl exceeded\n", ethhdr->h_source,
- unicast_packet->dest);
+ pr_debug("Warning - can't forward unicast packet from %pM to "
+ "%pM: ttl exceeded\n", ethhdr->h_source,
+ unicast_packet->dest);
return NET_RX_DROP;
}
/* get routing information */
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, unicast_packet->dest));
+ hash_find(bat_priv->orig_hash, unicast_packet->dest));
router = find_router(orig_node, recv_if);
if (!router) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
@@ -1169,18 +1167,14 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
batman_if = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* create a copy of the skb, if needed, to modify it. */
- if (!skb_clone_writable(skb, sizeof(struct unicast_packet))) {
- skb_old = skb;
- skb = skb_copy(skb, GFP_ATOMIC);
- if (!skb)
- return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *) skb->data;
- ethhdr = (struct ethhdr *)skb_mac_header(skb);
- kfree_skb(skb_old);
- }
+ if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
+ return NET_RX_DROP;
+
+ unicast_packet = (struct unicast_packet *)skb->data;
+ ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* decrement ttl */
unicast_packet->ttl--;
@@ -1191,8 +1185,90 @@ int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
return NET_RX_SUCCESS;
}
-int recv_bcast_packet(struct sk_buff *skb)
+int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+{
+ struct unicast_packet *unicast_packet;
+ int hdr_size = sizeof(struct unicast_packet);
+
+ if (check_unicast_packet(skb, hdr_size) < 0)
+ return NET_RX_DROP;
+
+ unicast_packet = (struct unicast_packet *)skb->data;
+
+ /* packet for me */
+ if (is_my_mac(unicast_packet->dest)) {
+ interface_rx(recv_if->soft_iface, skb, hdr_size);
+ return NET_RX_SUCCESS;
+ }
+
+ return route_unicast_packet(skb, recv_if, hdr_size);
+}
+
+int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct unicast_frag_packet *unicast_packet;
+ struct orig_node *orig_node;
+ struct frag_packet_list_entry *tmp_frag_entry;
+ int hdr_size = sizeof(struct unicast_frag_packet);
+ unsigned long flags;
+
+ if (check_unicast_packet(skb, hdr_size) < 0)
+ return NET_RX_DROP;
+
+ unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+ /* packet for me */
+ if (is_my_mac(unicast_packet->dest)) {
+
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ orig_node = ((struct orig_node *)
+ hash_find(bat_priv->orig_hash, unicast_packet->orig));
+
+ if (!orig_node) {
+ pr_debug("couldn't find orig node for fragmentation\n");
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
+ return NET_RX_DROP;
+ }
+
+ orig_node->last_frag_packet = jiffies;
+
+ if (list_empty(&orig_node->frag_list) &&
+ create_frag_buffer(&orig_node->frag_list)) {
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
+ return NET_RX_DROP;
+ }
+
+ tmp_frag_entry =
+ search_frag_packet(&orig_node->frag_list,
+ unicast_packet);
+
+ if (!tmp_frag_entry) {
+ create_frag_entry(&orig_node->frag_list, skb);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
+ return NET_RX_SUCCESS;
+ }
+
+ skb = merge_frag_packet(&orig_node->frag_list,
+ tmp_frag_entry, skb);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+ if (!skb)
+ return NET_RX_DROP;
+
+ interface_rx(recv_if->soft_iface, skb, hdr_size);
+ return NET_RX_SUCCESS;
+ }
+
+ return route_unicast_packet(skb, recv_if, hdr_size);
+}
+
+int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
+{
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node;
struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
@@ -1201,7 +1277,7 @@ int recv_bcast_packet(struct sk_buff *skb)
unsigned long flags;
/* drop packet if it has not necessary minimum size */
- if (skb_headlen(skb) < hdr_size)
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1227,12 +1303,12 @@ int recv_bcast_packet(struct sk_buff *skb)
if (bcast_packet->ttl < 2)
return NET_RX_DROP;
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
orig_node = ((struct orig_node *)
- hash_find(orig_hash, bcast_packet->orig));
+ hash_find(bat_priv->orig_hash, bcast_packet->orig));
if (orig_node == NULL) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
@@ -1240,44 +1316,49 @@ int recv_bcast_packet(struct sk_buff *skb)
if (get_bit_status(orig_node->bcast_bits,
orig_node->last_bcast_seqno,
ntohl(bcast_packet->seqno))) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
- if (window_protected(seq_diff, &orig_node->bcast_seqno_reset)) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ if (window_protected(bat_priv, seq_diff,
+ &orig_node->bcast_seqno_reset)) {
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
return NET_RX_DROP;
}
/* mark broadcast in flood history, update window position
* if required. */
- if (bit_get_packet(orig_node->bcast_bits, seq_diff, 1))
+ if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
/* rebroadcast packet */
- add_bcast_packet_to_list(skb);
+ add_bcast_packet_to_list(bat_priv, skb);
/* broadcast for me */
- interface_rx(skb, hdr_size);
+ interface_rx(recv_if->soft_iface, skb, hdr_size);
return NET_RX_SUCCESS;
}
-int recv_vis_packet(struct sk_buff *skb)
+int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
struct vis_packet *vis_packet;
struct ethhdr *ethhdr;
- struct bat_priv *bat_priv;
+ struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int hdr_size = sizeof(struct vis_packet);
- if (skb_headlen(skb) < hdr_size)
+ /* keep skb linear */
+ if (skb_linearize(skb) < 0)
return NET_RX_DROP;
- vis_packet = (struct vis_packet *) skb->data;
+ if (unlikely(!pskb_may_pull(skb, hdr_size)))
+ return NET_RX_DROP;
+
+ vis_packet = (struct vis_packet *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
@@ -1291,18 +1372,13 @@ int recv_vis_packet(struct sk_buff *skb)
if (is_my_mac(vis_packet->sender_orig))
return NET_RX_DROP;
- /* FIXME: each batman_if will be attached to a softif */
- bat_priv = netdev_priv(soft_device);
-
switch (vis_packet->vis_type) {
case VIS_TYPE_SERVER_SYNC:
- /* TODO: handle fragmented skbs properly */
receive_server_sync_packet(bat_priv, vis_packet,
skb_headlen(skb));
break;
case VIS_TYPE_CLIENT_UPDATE:
- /* TODO: handle fragmented skbs properly */
receive_client_update_packet(bat_priv, vis_packet,
skb_headlen(skb));
break;
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 3eac64e..06ea99d 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -29,15 +29,15 @@ void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
unsigned char *hna_buff, int hna_buff_len,
struct batman_if *if_incoming);
-void update_routes(struct orig_node *orig_node,
- struct neigh_node *neigh_node,
- unsigned char *hna_buff, int hna_buff_len);
-int recv_icmp_packet(struct sk_buff *skb);
+void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
+ struct neigh_node *neigh_node, unsigned char *hna_buff,
+ int hna_buff_len);
+int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if);
int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if);
-int recv_bcast_packet(struct sk_buff *skb);
-int recv_vis_packet(struct sk_buff *skb);
-int recv_bat_packet(struct sk_buff *skb,
- struct batman_if *batman_if);
+int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
+int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
struct neigh_node *find_router(struct orig_node *orig_node,
struct batman_if *recv_if);
void update_bonding_candidates(struct bat_priv *bat_priv,
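
Throughout routing.c the old skb_clone_writable()/skb_copy() dance is replaced by pskb_may_pull() for length checks plus skb_cow() (and, where the payload must stay flat, skb_linearize()) before headers are rewritten. A condensed sketch of that receive-path idiom follows; example_handle() is a hypothetical handler, not a function in this patch.

	#include <linux/skbuff.h>
	#include <linux/netdevice.h>
	#include <linux/if_ether.h>
	#include <linux/string.h>

	static int example_handle(struct sk_buff *skb, int hdr_size, uint8_t *new_src)
	{
		struct ethhdr *ethhdr;

		/* drop the packet unless hdr_size bytes are present and linear */
		if (unlikely(!pskb_may_pull(skb, hdr_size)))
			return NET_RX_DROP;

		/* get a private, writable buffer (plus ethernet header room)
		 * only if the skb is shared, cloned or short on headroom */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* re-read header pointers: skb_cow() may have reallocated the data */
		ethhdr = (struct ethhdr *)skb_mac_header(skb);
		memcpy(ethhdr->h_source, new_src, ETH_ALEN);	/* now safe to modify */

		return NET_RX_SUCCESS;
	}
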
diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c
index da3c82e4..7adf76d 100644
--- a/drivers/staging/batman-adv/send.c
+++ b/drivers/staging/batman-adv/send.c
@@ -68,12 +68,12 @@ int send_skb_packet(struct sk_buff *skb,
if (!(batman_if->net_dev->flags & IFF_UP)) {
pr_warning("Interface %s is not up - can't send packet via "
- "that interface!\n", batman_if->dev);
+ "that interface!\n", batman_if->net_dev->name);
goto send_skb_err;
}
/* push to the ethernet header. */
- if (my_skb_push(skb, sizeof(struct ethhdr)) < 0)
+ if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
goto send_skb_err;
skb_reset_mac_header(skb);
@@ -99,41 +99,23 @@ send_skb_err:
return NET_XMIT_DROP;
}
-/* sends a raw packet. */
-void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- struct batman_if *batman_if, uint8_t *dst_addr)
-{
- struct sk_buff *skb;
- char *data;
-
- skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
- if (!skb)
- return;
- data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
- memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);
- /* pull back to the batman "network header" */
- skb_pull(skb, sizeof(struct ethhdr));
- send_skb_packet(skb, batman_if, dst_addr);
-}
-
/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
struct batman_if *batman_if)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
struct batman_packet *batman_packet;
+ struct sk_buff *skb;
if (batman_if->if_status != IF_ACTIVE)
return;
packet_num = 0;
buff_pos = 0;
- batman_packet = (struct batman_packet *)
- (forw_packet->packet_buff);
+ batman_packet = (struct batman_packet *)forw_packet->skb->data;
/* adjust all flags and log packets */
while (aggregated_packet(buff_pos,
@@ -153,34 +135,35 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
"Forwarding"));
bat_dbg(DBG_BATMAN, bat_priv,
"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
- " IDF %s) on interface %s [%s]\n",
+ " IDF %s) on interface %s [%pM]\n",
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batman_packet->orig, ntohl(batman_packet->seqno),
batman_packet->tq, batman_packet->ttl,
(batman_packet->flags & DIRECTLINK ?
"on" : "off"),
- batman_if->dev, batman_if->addr_str);
+ batman_if->net_dev->name, batman_if->net_dev->dev_addr);
buff_pos += sizeof(struct batman_packet) +
(batman_packet->num_hna * ETH_ALEN);
packet_num++;
batman_packet = (struct batman_packet *)
- (forw_packet->packet_buff + buff_pos);
+ (forw_packet->skb->data + buff_pos);
}
- send_raw_packet(forw_packet->packet_buff,
- forw_packet->packet_len,
- batman_if, broadcast_addr);
+ /* create clone because function is called more than once */
+ skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
+ if (skb)
+ send_skb_packet(skb, batman_if, broadcast_addr);
}
/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_if *batman_if;
+ struct net_device *soft_iface;
+ struct bat_priv *bat_priv;
struct batman_packet *batman_packet =
- (struct batman_packet *)(forw_packet->packet_buff);
+ (struct batman_packet *)(forw_packet->skb->data);
unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
@@ -189,6 +172,9 @@ static void send_packet(struct forw_packet *forw_packet)
return;
}
+ soft_iface = forw_packet->if_incoming->soft_iface;
+ bat_priv = netdev_priv(soft_iface);
+
if (forw_packet->if_incoming->if_status != IF_ACTIVE)
return;
@@ -200,33 +186,41 @@ static void send_packet(struct forw_packet *forw_packet)
/* FIXME: what about aggregated packets ? */
bat_dbg(DBG_BATMAN, bat_priv,
"%s packet (originator %pM, seqno %d, TTL %d) "
- "on interface %s [%s]\n",
+ "on interface %s [%pM]\n",
(forw_packet->own ? "Sending own" : "Forwarding"),
batman_packet->orig, ntohl(batman_packet->seqno),
- batman_packet->ttl, forw_packet->if_incoming->dev,
- forw_packet->if_incoming->addr_str);
+ batman_packet->ttl,
+ forw_packet->if_incoming->net_dev->name,
+ forw_packet->if_incoming->net_dev->dev_addr);
- send_raw_packet(forw_packet->packet_buff,
- forw_packet->packet_len,
- forw_packet->if_incoming,
+ /* skb is only used once and then forw_packet is freed */
+ send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
broadcast_addr);
+ forw_packet->skb = NULL;
+
return;
}
/* broadcast on every interface */
rcu_read_lock();
- list_for_each_entry_rcu(batman_if, &if_list, list)
+ list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
send_packet_to_if(forw_packet, batman_if);
+ }
rcu_read_unlock();
}
-static void rebuild_batman_packet(struct batman_if *batman_if)
+static void rebuild_batman_packet(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
{
int new_len;
unsigned char *new_buff;
struct batman_packet *batman_packet;
- new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
+ new_len = sizeof(struct batman_packet) +
+ (bat_priv->num_local_hna * ETH_ALEN);
new_buff = kmalloc(new_len, GFP_ATOMIC);
/* keep old buffer if kmalloc should fail */
@@ -235,9 +229,9 @@ static void rebuild_batman_packet(struct batman_if *batman_if)
sizeof(struct batman_packet));
batman_packet = (struct batman_packet *)new_buff;
- batman_packet->num_hna = hna_local_fill_buffer(
- new_buff + sizeof(struct batman_packet),
- new_len - sizeof(struct batman_packet));
+ batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+ new_buff + sizeof(struct batman_packet),
+ new_len - sizeof(struct batman_packet));
kfree(batman_if->packet_buff);
batman_if->packet_buff = new_buff;
@@ -247,8 +241,7 @@ static void rebuild_batman_packet(struct batman_if *batman_if)
void schedule_own_packet(struct batman_if *batman_if)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
unsigned long send_time;
struct batman_packet *batman_packet;
int vis_server;
@@ -270,9 +263,9 @@ void schedule_own_packet(struct batman_if *batman_if)
batman_if->if_status = IF_ACTIVE;
/* if local hna has changed and interface is a primary interface */
- if ((atomic_read(&hna_local_changed)) &&
+ if ((atomic_read(&bat_priv->hna_local_changed)) &&
(batman_if == bat_priv->primary_if))
- rebuild_batman_packet(batman_if);
+ rebuild_batman_packet(bat_priv, batman_if);
/**
* NOTE: packet_buff might just have been re-allocated in
@@ -305,8 +298,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_incoming)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
unsigned char in_tq, in_ttl, tq_avg = 0;
unsigned long send_time;
@@ -366,20 +358,20 @@ static void forw_packet_free(struct forw_packet *forw_packet)
{
if (forw_packet->skb)
kfree_skb(forw_packet->skb);
- kfree(forw_packet->packet_buff);
kfree(forw_packet);
}
-static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
+static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
+ struct forw_packet *forw_packet,
unsigned long send_time)
{
unsigned long flags;
INIT_HLIST_NODE(&forw_packet->list);
/* add new packet to packet list */
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
- hlist_add_head(&forw_packet->list, &forw_bcast_list);
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
+ hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
@@ -397,18 +389,19 @@ static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
*
* The skb is not consumed, so the caller should make sure that the
* skb is freed. */
-int add_bcast_packet_to_list(struct sk_buff *skb)
+int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
{
struct forw_packet *forw_packet;
struct bcast_packet *bcast_packet;
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
- if (!atomic_dec_not_zero(&bcast_queue_left)) {
+ if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
goto out;
}
+ if (!bat_priv->primary_if)
+ goto out;
+
forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
if (!forw_packet)
@@ -425,18 +418,18 @@ int add_bcast_packet_to_list(struct sk_buff *skb)
skb_reset_mac_header(skb);
forw_packet->skb = skb;
- forw_packet->packet_buff = NULL;
+ forw_packet->if_incoming = bat_priv->primary_if;
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
- _add_bcast_packet_to_list(forw_packet, 1);
+ _add_bcast_packet_to_list(bat_priv, forw_packet, 1);
return NETDEV_TX_OK;
packet_free:
kfree(forw_packet);
out_and_inc:
- atomic_inc(&bcast_queue_left);
+ atomic_inc(&bat_priv->bcast_queue_left);
out:
return NETDEV_TX_BUSY;
}
@@ -450,22 +443,26 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
struct sk_buff *skb1;
+ struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
- if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
/* rebroadcast packet */
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
+ if (batman_if->soft_iface != soft_iface)
+ continue;
+
/* send a copy of the saved skb */
- skb1 = skb_copy(forw_packet->skb, GFP_ATOMIC);
+ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- send_skb_packet(skb1,
- batman_if, broadcast_addr);
+ send_skb_packet(skb1, batman_if, broadcast_addr);
}
rcu_read_unlock();
@@ -473,13 +470,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
- _add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
+ _add_bcast_packet_to_list(bat_priv, forw_packet,
+ ((5 * HZ) / 1000));
return;
}
out:
forw_packet_free(forw_packet);
- atomic_inc(&bcast_queue_left);
+ atomic_inc(&bat_priv->bcast_queue_left);
}
void send_outstanding_bat_packet(struct work_struct *work)
@@ -489,12 +487,14 @@ void send_outstanding_bat_packet(struct work_struct *work)
struct forw_packet *forw_packet =
container_of(delayed_work, struct forw_packet, delayed_work);
unsigned long flags;
+ struct bat_priv *bat_priv;
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_del(&forw_packet->list);
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
- if (atomic_read(&module_state) == MODULE_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
goto out;
send_packet(forw_packet);
@@ -510,15 +510,14 @@ void send_outstanding_bat_packet(struct work_struct *work)
out:
/* don't count own packet */
if (!forw_packet->own)
- atomic_inc(&batman_queue_left);
+ atomic_inc(&bat_priv->batman_queue_left);
forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(struct batman_if *batman_if)
+void purge_outstanding_packets(struct bat_priv *bat_priv,
+ struct batman_if *batman_if)
{
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
unsigned long flags;
@@ -526,15 +525,15 @@ void purge_outstanding_packets(struct batman_if *batman_if)
if (batman_if)
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets(): %s\n",
- batman_if->dev);
+ batman_if->net_dev->name);
else
bat_dbg(DBG_BATMAN, bat_priv,
"purge_outstanding_packets()\n");
/* free bcast list */
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &forw_bcast_list, list) {
+ &bat_priv->forw_bcast_list, list) {
/**
* if purge_outstanding_packets() was called with an argmument
@@ -544,21 +543,21 @@ void purge_outstanding_packets(struct batman_if *batman_if)
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/**
* send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&forw_bcast_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bcast_list_lock, flags);
}
- spin_unlock_irqrestore(&forw_bcast_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bcast_list_lock, flags);
/* free batman packet list */
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
- &forw_bat_list, list) {
+ &bat_priv->forw_bat_list, list) {
/**
* if purge_outstanding_packets() was called with an argmument
@@ -568,14 +567,14 @@ void purge_outstanding_packets(struct batman_if *batman_if)
(forw_packet->if_incoming != batman_if))
continue;
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
/**
* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
cancel_delayed_work_sync(&forw_packet->delayed_work);
- spin_lock_irqsave(&forw_bat_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->forw_bat_list_lock, flags);
}
- spin_unlock_irqrestore(&forw_bat_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->forw_bat_list_lock, flags);
}
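
The broadcast queue accounting introduced above is a simple counting scheme: add_bcast_packet_to_list() reserves a slot with atomic_dec_not_zero() and the slot is returned with atomic_inc() once the last rebroadcast went out or queueing failed. A minimal sketch of that reserve/release pattern; the helpers below do not exist in the driver and only restate what the hunks above do:

/* sketch only: restates the pattern used by add_bcast_packet_to_list()
 * and send_outstanding_bcast_packet() above */
static int bcast_queue_reserve(struct bat_priv *bat_priv)
{
	/* all BCAST_QUEUE_LEN slots in use: refuse to queue */
	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left))
		return -ENOSPC;
	return 0;
}

static void bcast_queue_release(struct bat_priv *bat_priv)
{
	/* called after the third rebroadcast or when queueing failed */
	atomic_inc(&bat_priv->bcast_queue_left);
}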
diff --git a/drivers/staging/batman-adv/send.h b/drivers/staging/batman-adv/send.h
index b64c627..c4cefa8 100644
--- a/drivers/staging/batman-adv/send.h
+++ b/drivers/staging/batman-adv/send.h
@@ -27,16 +27,15 @@
int send_skb_packet(struct sk_buff *skb,
struct batman_if *batman_if,
uint8_t *dst_addr);
-void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
- struct batman_if *batman_if, uint8_t *dst_addr);
void schedule_own_packet(struct batman_if *batman_if);
void schedule_forward_packet(struct orig_node *orig_node,
struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
uint8_t directlink, int hna_buff_len,
struct batman_if *if_outgoing);
-int add_bcast_packet_to_list(struct sk_buff *skb);
+int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
void send_outstanding_bat_packet(struct work_struct *work);
-void purge_outstanding_packets(struct batman_if *batman_if);
+void purge_outstanding_packets(struct bat_priv *bat_priv,
+ struct batman_if *batman_if);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
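
With the soft_device global gone, every helper exported from send.h now takes the per-mesh bat_priv explicitly. A hypothetical caller, shown only to illustrate the new calling convention:

/* hypothetical: a hard-interface teardown path resolving its own bat_priv
 * before flushing the packets scheduled on that interface */
static void example_hardif_cleanup(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	purge_outstanding_packets(bat_priv, batman_if);
}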
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 2ea97de..3904db9 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -24,19 +24,18 @@
#include "hard-interface.h"
#include "routing.h"
#include "send.h"
+#include "bat_debugfs.h"
#include "translation-table.h"
#include "types.h"
#include "hash.h"
+#include "send.h"
+#include "bat_sysfs.h"
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
+#include "unicast.h"
-static uint32_t bcast_seqno = 1; /* give own bcast messages seq numbers to avoid
- * broadcast storms */
-static int32_t skb_packets;
-static int32_t skb_bad_packets;
-unsigned char main_if_addr[ETH_ALEN];
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
static void bat_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info);
@@ -56,23 +55,21 @@ static const struct ethtool_ops bat_ethtool_ops = {
.set_rx_csum = bat_set_rx_csum
};
-void set_main_if_addr(uint8_t *addr)
-{
- memcpy(main_if_addr, addr, ETH_ALEN);
-}
-
-int my_skb_push(struct sk_buff *skb, unsigned int len)
+int my_skb_head_push(struct sk_buff *skb, unsigned int len)
{
- int result = 0;
-
- skb_packets++;
- if (skb_headroom(skb) < len) {
- skb_bad_packets++;
- result = pskb_expand_head(skb, len, 0, GFP_ATOMIC);
-
- if (result < 0)
- return result;
- }
+ int result;
+
+ /**
+ * TODO: We must check if we can release all references to non-payload
+ * data using skb_header_release in our skbs to allow skb_cow_header to
+ * work optimally. This means that those skbs are not allowed to read
+ * or write any data which is before the current position of skb->data
+ * after that call and thus allow other skbs with the same data buffer
+ * to write freely in that area.
+ */
+ result = skb_cow_head(skb, len);
+ if (result < 0)
+ return result;
skb_push(skb, len);
return 0;
@@ -92,21 +89,23 @@ static int interface_release(struct net_device *dev)
static struct net_device_stats *interface_stats(struct net_device *dev)
{
- struct bat_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ struct bat_priv *bat_priv = netdev_priv(dev);
+ return &bat_priv->stats;
}
static int interface_set_mac_addr(struct net_device *dev, void *p)
{
+ struct bat_priv *bat_priv = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* only modify hna-table if it has been initialised before */
- if (atomic_read(&module_state) == MODULE_ACTIVE) {
- hna_local_remove(dev->dev_addr, "mac address changed");
- hna_local_add(addr->sa_data);
+ if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
+ hna_local_remove(bat_priv, dev->dev_addr,
+ "mac address changed");
+ hna_local_add(dev, addr->sa_data);
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -117,7 +116,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
static int interface_change_mtu(struct net_device *dev, int new_mtu)
{
/* check ranges */
- if ((new_mtu < 68) || (new_mtu > hardif_min_mtu()))
+ if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
return -EINVAL;
dev->mtu = new_mtu;
@@ -125,34 +124,27 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-int interface_tx(struct sk_buff *skb, struct net_device *dev)
+int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
{
- struct unicast_packet *unicast_packet;
- struct bcast_packet *bcast_packet;
- struct orig_node *orig_node;
- struct neigh_node *router;
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct bat_priv *priv = netdev_priv(dev);
- struct batman_if *batman_if;
- struct bat_priv *bat_priv;
- uint8_t dstaddr[6];
- int data_len = skb->len;
- unsigned long flags;
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct bcast_packet *bcast_packet;
+ int data_len = skb->len, ret;
- if (atomic_read(&module_state) != MODULE_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
goto dropped;
- /* FIXME: each batman_if will be attached to a softif */
- bat_priv = netdev_priv(soft_device);
+ soft_iface->trans_start = jiffies;
- dev->trans_start = jiffies;
/* TODO: check this for locks */
- hna_local_add(ethhdr->h_source);
+ hna_local_add(soft_iface, ethhdr->h_source);
/* ethernet packet should be broadcasted */
if (is_bcast(ethhdr->h_dest) || is_mcast(ethhdr->h_dest)) {
+ if (!bat_priv->primary_if)
+ goto dropped;
- if (my_skb_push(skb, sizeof(struct bcast_packet)) < 0)
+ if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
goto dropped;
bcast_packet = (struct bcast_packet *)skb->data;
@@ -164,14 +156,14 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
/* hw address of first interface is the orig mac because only
* this mac is known throughout the mesh */
- memcpy(bcast_packet->orig, main_if_addr, ETH_ALEN);
+ memcpy(bcast_packet->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
/* set broadcast sequence number */
- bcast_packet->seqno = htonl(bcast_seqno);
+ bcast_packet->seqno =
+ htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- /* broadcast packet. on success, increase seqno. */
- if (add_bcast_packet_to_list(skb) == NETDEV_TX_OK)
- bcast_seqno++;
+ add_bcast_packet_to_list(bat_priv, skb);
/* a copy is stored in the bcast list, therefore removing
* the original skb. */
@@ -179,64 +171,27 @@ int interface_tx(struct sk_buff *skb, struct net_device *dev)
/* unicast packet */
} else {
- spin_lock_irqsave(&orig_hash_lock, flags);
- /* get routing information */
- orig_node = ((struct orig_node *)hash_find(orig_hash,
- ethhdr->h_dest));
-
- /* check for hna host */
- if (!orig_node)
- orig_node = transtable_search(ethhdr->h_dest);
-
- router = find_router(orig_node, NULL);
-
- if (!router)
- goto unlock;
-
- /* don't lock while sending the packets ... we therefore
- * copy the required data before sending */
-
- batman_if = router->if_incoming;
- memcpy(dstaddr, router->addr, ETH_ALEN);
-
- spin_unlock_irqrestore(&orig_hash_lock, flags);
-
- if (batman_if->if_status != IF_ACTIVE)
- goto dropped;
-
- if (my_skb_push(skb, sizeof(struct unicast_packet)) < 0)
- goto dropped;
-
- unicast_packet = (struct unicast_packet *)skb->data;
-
- unicast_packet->version = COMPAT_VERSION;
- /* batman packet type: unicast */
- unicast_packet->packet_type = BAT_UNICAST;
- /* set unicast ttl */
- unicast_packet->ttl = TTL;
- /* copy the destination for faster routing */
- memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-
- send_skb_packet(skb, batman_if, dstaddr);
+ ret = unicast_send_skb(skb, bat_priv);
+ if (ret != 0)
+ goto dropped_freed;
}
- priv->stats.tx_packets++;
- priv->stats.tx_bytes += data_len;
+ bat_priv->stats.tx_packets++;
+ bat_priv->stats.tx_bytes += data_len;
goto end;
-unlock:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
dropped:
- priv->stats.tx_dropped++;
kfree_skb(skb);
+dropped_freed:
+ bat_priv->stats.tx_dropped++;
end:
return NETDEV_TX_OK;
}
-void interface_rx(struct sk_buff *skb, int hdr_size)
+void interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, int hdr_size)
{
- struct net_device *dev = soft_device;
- struct bat_priv *priv = netdev_priv(dev);
+ struct bat_priv *priv = netdev_priv(soft_iface);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size)) {
@@ -246,8 +201,8 @@ void interface_rx(struct sk_buff *skb, int hdr_size)
skb_pull_rcsum(skb, hdr_size);
/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
- skb->dev = dev;
- skb->protocol = eth_type_trans(skb, dev);
+ /* skb->dev & skb->pkt_type are set here */
+ skb->protocol = eth_type_trans(skb, soft_iface);
/* should not be neccesary anymore as we use skb_pull_rcsum()
* TODO: please verify this and remove this TODO
@@ -255,13 +210,10 @@ void interface_rx(struct sk_buff *skb, int hdr_size)
/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
- /* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST,
- * PACKET_OTHERHOST or PACKET_HOST */
-
priv->stats.rx_packets++;
- priv->stats.rx_bytes += skb->len;
+ priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr);
- dev->last_rx = jiffies;
+ soft_iface->last_rx = jiffies;
netif_rx(skb);
}
@@ -278,7 +230,7 @@ static const struct net_device_ops bat_netdev_ops = {
};
#endif
-void interface_setup(struct net_device *dev)
+static void interface_setup(struct net_device *dev)
{
struct bat_priv *priv = netdev_priv(dev);
char dev_addr[ETH_ALEN];
@@ -297,7 +249,11 @@ void interface_setup(struct net_device *dev)
#endif
dev->destructor = free_netdev;
- dev->mtu = hardif_min_mtu();
+ /**
+ * can't call min_mtu, because the needed variables
+ * have not been initialized yet
+ */
+ dev->mtu = ETH_DATA_LEN;
dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the
* skbuff for our header */
@@ -310,6 +266,81 @@ void interface_setup(struct net_device *dev)
memset(priv, 0, sizeof(struct bat_priv));
}
+struct net_device *softif_create(char *name)
+{
+ struct net_device *soft_iface;
+ struct bat_priv *bat_priv;
+ int ret;
+
+ soft_iface = alloc_netdev(sizeof(struct bat_priv) , name,
+ interface_setup);
+
+ if (!soft_iface) {
+ pr_err("Unable to allocate the batman interface: %s\n", name);
+ goto out;
+ }
+
+ ret = register_netdev(soft_iface);
+ if (ret < 0) {
+ pr_err("Unable to register the batman interface '%s': %i\n",
+ name, ret);
+ goto free_soft_iface;
+ }
+
+ bat_priv = netdev_priv(soft_iface);
+
+ atomic_set(&bat_priv->aggregation_enabled, 1);
+ atomic_set(&bat_priv->bonding_enabled, 0);
+ atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
+ atomic_set(&bat_priv->orig_interval, 1000);
+ atomic_set(&bat_priv->log_level, 0);
+ atomic_set(&bat_priv->frag_enabled, 1);
+ atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
+ atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+
+ atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ atomic_set(&bat_priv->bcast_seqno, 1);
+ atomic_set(&bat_priv->hna_local_changed, 0);
+
+ bat_priv->primary_if = NULL;
+ bat_priv->num_ifaces = 0;
+
+ ret = sysfs_add_meshif(soft_iface);
+ if (ret < 0)
+ goto unreg_soft_iface;
+
+ ret = debugfs_add_meshif(soft_iface);
+ if (ret < 0)
+ goto unreg_sysfs;
+
+ ret = mesh_init(soft_iface);
+ if (ret < 0)
+ goto unreg_debugfs;
+
+ return soft_iface;
+
+unreg_debugfs:
+ debugfs_del_meshif(soft_iface);
+unreg_sysfs:
+ sysfs_del_meshif(soft_iface);
+unreg_soft_iface:
+ unregister_netdev(soft_iface);
+ return NULL;
+
+free_soft_iface:
+ free_netdev(soft_iface);
+out:
+ return NULL;
+}
+
+void softif_destroy(struct net_device *soft_iface)
+{
+ debugfs_del_meshif(soft_iface);
+ sysfs_del_meshif(soft_iface);
+ mesh_free(soft_iface);
+ unregister_netdevice(soft_iface);
+}
+
/* ethtool */
static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
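
my_skb_head_push() now relies on skb_cow_head(), which grows the headroom if needed and copies the header if it is shared with clones, so the subsequent skb_push() writes into private memory. A hedged sketch of prepending a broadcast header with it; the helper is illustrative and only touches fields visible in this patch:

static int example_prepend_bcast_header(struct sk_buff *skb,
					struct bat_priv *bat_priv)
{
	struct bcast_packet *bcast_packet;

	/* make the headroom large enough and private before writing */
	if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
		return -ENOMEM;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* originator address and per-mesh sequence number, as set up by
	 * interface_tx() above */
	memcpy(bcast_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	bcast_packet->seqno =
		htonl(atomic_inc_return(&bat_priv->bcast_seqno));
	return 0;
}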
diff --git a/drivers/staging/batman-adv/soft-interface.h b/drivers/staging/batman-adv/soft-interface.h
index 6364854..843a7ec 100644
--- a/drivers/staging/batman-adv/soft-interface.h
+++ b/drivers/staging/batman-adv/soft-interface.h
@@ -22,12 +22,11 @@
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-void set_main_if_addr(uint8_t *addr);
-void interface_setup(struct net_device *dev);
-int interface_tx(struct sk_buff *skb, struct net_device *dev);
-void interface_rx(struct sk_buff *skb, int hdr_size);
-int my_skb_push(struct sk_buff *skb, unsigned int len);
-
-extern unsigned char main_if_addr[];
+int my_skb_head_push(struct sk_buff *skb, unsigned int len);
+int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
+void interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, int hdr_size);
+struct net_device *softif_create(char *name);
+void softif_destroy(struct net_device *soft_iface);
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
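
softif_create() and softif_destroy() replace the single static mesh device, so several mesh interfaces can exist side by side. A reduced, hypothetical bring-up sequence; the interface name is only an example, and the real caller has to hold rtnl_lock because softif_destroy() ends in unregister_netdevice():

static int example_mesh_bringup(void)
{
	struct net_device *soft_iface;

	/* allocates, registers and runs mesh_init() for the new device */
	soft_iface = softif_create("bat0");
	if (!soft_iface)
		return -ENOMEM;

	/* ... attach hard interfaces, run the mesh ... */

	softif_destroy(soft_iface);
	return 0;
}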
diff --git a/drivers/staging/batman-adv/sysfs-class-net-mesh b/drivers/staging/batman-adv/sysfs-class-net-mesh
index 5aa1912..b4cdb60 100644
--- a/drivers/staging/batman-adv/sysfs-class-net-mesh
+++ b/drivers/staging/batman-adv/sysfs-class-net-mesh
@@ -14,6 +14,14 @@ Description:
mesh will be sent using multiple interfaces at the
same time (if available).
+What: /sys/class/net/<mesh_iface>/mesh/fragmentation
+Date: October 2010
+Contact: Andreas Langer <an.langer@gmx.de>
+Description:
+ Indicates whether the data traffic going through the
+ mesh will be fragmented or silently discarded if the
+ packet size exceeds the outgoing interface MTU.
+
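+A minimal userspace sketch of flipping this switch; the interface name
+bat0 and the 0/1 value format are assumptions based on the other boolean
+mesh attributes:
+
+#include <stdio.h>
+
+int main(void)
+{
+	FILE *f = fopen("/sys/class/net/bat0/mesh/fragmentation", "w");
+
+	if (!f) {
+		perror("fopen");
+		return 1;
+	}
+	fputs("1\n", f);	/* "1" enables, "0" disables fragmentation */
+	return fclose(f) ? 1 : 0;
+}
+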
What: /sys/class/net/<mesh_iface>/mesh/orig_interval
Date: May 2010
Contact: Marek Lindner <lindner_marek@yahoo.de>
diff --git a/drivers/staging/batman-adv/translation-table.c b/drivers/staging/batman-adv/translation-table.c
index b233377..681ccbd 100644
--- a/drivers/staging/batman-adv/translation-table.c
+++ b/drivers/staging/batman-adv/translation-table.c
@@ -25,54 +25,49 @@
#include "types.h"
#include "hash.h"
-struct hashtable_t *hna_local_hash;
-static struct hashtable_t *hna_global_hash;
-atomic_t hna_local_changed;
-
-DEFINE_SPINLOCK(hna_local_hash_lock);
-static DEFINE_SPINLOCK(hna_global_hash_lock);
-
static void hna_local_purge(struct work_struct *work);
-static DECLARE_DELAYED_WORK(hna_local_purge_wq, hna_local_purge);
-static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
+static void _hna_global_del_orig(struct bat_priv *bat_priv,
+ struct hna_global_entry *hna_global_entry,
char *message);
-static void hna_local_start_timer(void)
+static void hna_local_start_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &hna_local_purge_wq, 10 * HZ);
+ INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
-int hna_local_init(void)
+int hna_local_init(struct bat_priv *bat_priv)
{
- if (hna_local_hash)
+ if (bat_priv->hna_local_hash)
return 1;
- hna_local_hash = hash_new(128, compare_orig, choose_orig);
+ bat_priv->hna_local_hash = hash_new(128, compare_orig, choose_orig);
- if (!hna_local_hash)
+ if (!bat_priv->hna_local_hash)
return 0;
- atomic_set(&hna_local_changed, 0);
- hna_local_start_timer();
+ atomic_set(&bat_priv->hna_local_changed, 0);
+ hna_local_start_timer(bat_priv);
return 1;
}
-void hna_local_add(uint8_t *addr)
+void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
+ struct bat_priv *bat_priv = netdev_priv(soft_iface);
struct hna_local_entry *hna_local_entry;
struct hna_global_entry *hna_global_entry;
struct hashtable_t *swaphash;
unsigned long flags;
+ int required_bytes;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_local_entry =
- ((struct hna_local_entry *)hash_find(hna_local_hash, addr));
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ ((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
+ addr));
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
- if (hna_local_entry != NULL) {
+ if (hna_local_entry) {
hna_local_entry->last_seen = jiffies;
return;
}
@@ -80,8 +75,13 @@ void hna_local_add(uint8_t *addr)
/* only announce as many hosts as possible in the batman-packet and
space in batman_packet->num_hna That also should give a limit to
MAC-flooding. */
- if ((num_hna + 1 > (ETH_DATA_LEN - BAT_PACKET_LEN) / ETH_ALEN) ||
- (num_hna + 1 > 255)) {
+ required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
+ required_bytes += BAT_PACKET_LEN;
+
+ if ((required_bytes > ETH_DATA_LEN) ||
+ (atomic_read(&bat_priv->aggregation_enabled) &&
+ required_bytes > MAX_AGGREGATION_BYTES) ||
+ (bat_priv->num_local_hna + 1 > 255)) {
bat_dbg(DBG_ROUTES, bat_priv,
"Can't add new local hna entry (%pM): "
"number of local hna entries exceeds packet size\n",
@@ -100,51 +100,54 @@ void hna_local_add(uint8_t *addr)
hna_local_entry->last_seen = jiffies;
/* the batman interface mac address should never be purged */
- if (compare_orig(addr, soft_device->dev_addr))
+ if (compare_orig(addr, soft_iface->dev_addr))
hna_local_entry->never_purge = 1;
else
hna_local_entry->never_purge = 0;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- hash_add(hna_local_hash, hna_local_entry);
- num_hna++;
- atomic_set(&hna_local_changed, 1);
+ hash_add(bat_priv->hna_local_hash, hna_local_entry);
+ bat_priv->num_local_hna++;
+ atomic_set(&bat_priv->hna_local_changed, 1);
- if (hna_local_hash->elements * 4 > hna_local_hash->size) {
- swaphash = hash_resize(hna_local_hash,
- hna_local_hash->size * 2);
+ if (bat_priv->hna_local_hash->elements * 4 >
+ bat_priv->hna_local_hash->size) {
+ swaphash = hash_resize(bat_priv->hna_local_hash,
+ bat_priv->hna_local_hash->size * 2);
- if (swaphash == NULL)
+ if (!swaphash)
pr_err("Couldn't resize local hna hash table\n");
else
- hna_local_hash = swaphash;
+ bat_priv->hna_local_hash = swaphash;
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
/* remove address from global hash if present */
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- hna_global_entry =
- ((struct hna_global_entry *)hash_find(hna_global_hash, addr));
+ hna_global_entry = ((struct hna_global_entry *)
+ hash_find(bat_priv->hna_global_hash, addr));
- if (hna_global_entry != NULL)
- _hna_global_del_orig(hna_global_entry, "local hna received");
+ if (hna_global_entry)
+ _hna_global_del_orig(bat_priv, hna_global_entry,
+ "local hna received");
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}
-int hna_local_fill_buffer(unsigned char *buff, int buff_len)
+int hna_local_fill_buffer(struct bat_priv *bat_priv,
+ unsigned char *buff, int buff_len)
{
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
int i = 0;
unsigned long flags;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
if (buff_len < (i + 1) * ETH_ALEN)
break;
@@ -156,11 +159,10 @@ int hna_local_fill_buffer(unsigned char *buff, int buff_len)
}
/* if we did not get all new local hnas see you next time ;-) */
- if (i == num_hna)
- atomic_set(&hna_local_changed, 0);
-
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ if (i == bat_priv->num_local_hna)
+ atomic_set(&bat_priv->hna_local_changed, 0);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return i;
}
@@ -185,119 +187,126 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
"announced via HNA:\n",
net_dev->name);
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
- while (hash_iterate(hna_local_hash, &hashit_count))
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit_count))
buf_size += 21;
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
pos = 0;
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
pos += snprintf(buff + pos, 22, " * %pM\n",
hna_local_entry->addr);
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
return 0;
}
-static void _hna_local_del(void *data)
+static void _hna_local_del(void *data, void *arg)
{
+ struct bat_priv *bat_priv = (struct bat_priv *)arg;
+
kfree(data);
- num_hna--;
- atomic_set(&hna_local_changed, 1);
+ bat_priv->num_local_hna--;
+ atomic_set(&bat_priv->hna_local_changed, 1);
}
-static void hna_local_del(struct hna_local_entry *hna_local_entry,
+static void hna_local_del(struct bat_priv *bat_priv,
+ struct hna_local_entry *hna_local_entry,
char *message)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
hna_local_entry->addr, message);
- hash_remove(hna_local_hash, hna_local_entry->addr);
- _hna_local_del(hna_local_entry);
+ hash_remove(bat_priv->hna_local_hash, hna_local_entry->addr);
+ _hna_local_del(hna_local_entry, bat_priv);
}
-void hna_local_remove(uint8_t *addr, char *message)
+void hna_local_remove(struct bat_priv *bat_priv,
+ uint8_t *addr, char *message)
{
struct hna_local_entry *hna_local_entry;
unsigned long flags;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_local_entry = (struct hna_local_entry *)
- hash_find(hna_local_hash, addr);
+ hash_find(bat_priv->hna_local_hash, addr);
if (hna_local_entry)
- hna_local_del(hna_local_entry, message);
+ hna_local_del(bat_priv, hna_local_entry, message);
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
}
static void hna_local_purge(struct work_struct *work)
{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, hna_work);
struct hna_local_entry *hna_local_entry;
HASHIT(hashit);
unsigned long flags;
unsigned long timeout;
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit)) {
hna_local_entry = hashit.bucket->data;
timeout = hna_local_entry->last_seen + LOCAL_HNA_TIMEOUT * HZ;
+
if ((!hna_local_entry->never_purge) &&
time_after(jiffies, timeout))
- hna_local_del(hna_local_entry, "address timed out");
+ hna_local_del(bat_priv, hna_local_entry,
+ "address timed out");
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
- hna_local_start_timer();
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
+ hna_local_start_timer(bat_priv);
}
-void hna_local_free(void)
+void hna_local_free(struct bat_priv *bat_priv)
{
- if (!hna_local_hash)
+ if (!bat_priv->hna_local_hash)
return;
- cancel_delayed_work_sync(&hna_local_purge_wq);
- hash_delete(hna_local_hash, _hna_local_del);
- hna_local_hash = NULL;
+ cancel_delayed_work_sync(&bat_priv->hna_work);
+ hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
+ bat_priv->hna_local_hash = NULL;
}
-int hna_global_init(void)
+int hna_global_init(struct bat_priv *bat_priv)
{
- if (hna_global_hash)
+ if (bat_priv->hna_global_hash)
return 1;
- hna_global_hash = hash_new(128, compare_orig, choose_orig);
+ bat_priv->hna_global_hash = hash_new(128, compare_orig, choose_orig);
- if (!hna_global_hash)
+ if (!bat_priv->hna_global_hash)
return 0;
return 1;
}
-void hna_global_add_orig(struct orig_node *orig_node,
+void hna_global_add_orig(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
unsigned char *hna_buff, int hna_buff_len)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
struct hna_global_entry *hna_global_entry;
struct hna_local_entry *hna_local_entry;
struct hashtable_t *swaphash;
@@ -306,14 +315,15 @@ void hna_global_add_orig(struct orig_node *orig_node,
unsigned char *hna_ptr;
while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, hna_ptr);
+ hash_find(bat_priv->hna_global_hash, hna_ptr);
- if (hna_global_entry == NULL) {
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ if (!hna_global_entry) {
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock,
+ flags);
hna_global_entry =
kmalloc(sizeof(struct hna_global_entry),
@@ -329,25 +339,26 @@ void hna_global_add_orig(struct orig_node *orig_node,
"%pM (via %pM)\n",
hna_global_entry->addr, orig_node->orig);
- spin_lock_irqsave(&hna_global_hash_lock, flags);
- hash_add(hna_global_hash, hna_global_entry);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
+ hash_add(bat_priv->hna_global_hash, hna_global_entry);
}
hna_global_entry->orig_node = orig_node;
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
/* remove address from local hash if present */
- spin_lock_irqsave(&hna_local_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
hna_local_entry = (struct hna_local_entry *)
- hash_find(hna_local_hash, hna_ptr);
+ hash_find(bat_priv->hna_local_hash, hna_ptr);
- if (hna_local_entry != NULL)
- hna_local_del(hna_local_entry, "global hna received");
+ if (hna_local_entry)
+ hna_local_del(bat_priv, hna_local_entry,
+ "global hna received");
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
hna_buff_count++;
}
@@ -364,19 +375,20 @@ void hna_global_add_orig(struct orig_node *orig_node,
}
}
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
- if (hna_global_hash->elements * 4 > hna_global_hash->size) {
- swaphash = hash_resize(hna_global_hash,
- hna_global_hash->size * 2);
+ if (bat_priv->hna_global_hash->elements * 4 >
+ bat_priv->hna_global_hash->size) {
+ swaphash = hash_resize(bat_priv->hna_global_hash,
+ bat_priv->hna_global_hash->size * 2);
- if (swaphash == NULL)
+ if (!swaphash)
pr_err("Couldn't resize global hna hash table\n");
else
- hna_global_hash = swaphash;
+ bat_priv->hna_global_hash = swaphash;
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
}
int hna_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -399,22 +411,22 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
net_dev->name);
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
buf_size = 1;
/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
- while (hash_iterate(hna_global_hash, &hashit_count))
+ while (hash_iterate(bat_priv->hna_global_hash, &hashit_count))
buf_size += 43;
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
pos = 0;
- while (hash_iterate(hna_global_hash, &hashit)) {
+ while (hash_iterate(bat_priv->hna_global_hash, &hashit)) {
hna_global_entry = hashit.bucket->data;
pos += snprintf(buff + pos, 44,
@@ -422,28 +434,28 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
hna_global_entry->orig_node->orig);
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
return 0;
}
-static void _hna_global_del_orig(struct hna_global_entry *hna_global_entry,
+static void _hna_global_del_orig(struct bat_priv *bat_priv,
+ struct hna_global_entry *hna_global_entry,
char *message)
{
- /* FIXME: each orig_node->batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
bat_dbg(DBG_ROUTES, bat_priv,
"Deleting global hna entry %pM (via %pM): %s\n",
hna_global_entry->addr, hna_global_entry->orig_node->orig,
message);
- hash_remove(hna_global_hash, hna_global_entry->addr);
+ hash_remove(bat_priv->hna_global_hash, hna_global_entry->addr);
kfree(hna_global_entry);
}
-void hna_global_del_orig(struct orig_node *orig_node, char *message)
+void hna_global_del_orig(struct bat_priv *bat_priv,
+ struct orig_node *orig_node, char *message)
{
struct hna_global_entry *hna_global_entry;
int hna_buff_count = 0;
@@ -453,52 +465,53 @@ void hna_global_del_orig(struct orig_node *orig_node, char *message)
if (orig_node->hna_buff_len == 0)
return;
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, hna_ptr);
+ hash_find(bat_priv->hna_global_hash, hna_ptr);
- if ((hna_global_entry != NULL) &&
+ if ((hna_global_entry) &&
(hna_global_entry->orig_node == orig_node))
- _hna_global_del_orig(hna_global_entry, message);
+ _hna_global_del_orig(bat_priv, hna_global_entry,
+ message);
hna_buff_count++;
}
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
orig_node->hna_buff_len = 0;
kfree(orig_node->hna_buff);
orig_node->hna_buff = NULL;
}
-static void hna_global_del(void *data)
+static void hna_global_del(void *data, void *arg)
{
kfree(data);
}
-void hna_global_free(void)
+void hna_global_free(struct bat_priv *bat_priv)
{
- if (!hna_global_hash)
+ if (!bat_priv->hna_global_hash)
return;
- hash_delete(hna_global_hash, hna_global_del);
- hna_global_hash = NULL;
+ hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
+ bat_priv->hna_global_hash = NULL;
}
-struct orig_node *transtable_search(uint8_t *addr)
+struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
struct hna_global_entry *hna_global_entry;
unsigned long flags;
- spin_lock_irqsave(&hna_global_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->hna_ghash_lock, flags);
hna_global_entry = (struct hna_global_entry *)
- hash_find(hna_global_hash, addr);
- spin_unlock_irqrestore(&hna_global_hash_lock, flags);
+ hash_find(bat_priv->hna_global_hash, addr);
+ spin_unlock_irqrestore(&bat_priv->hna_ghash_lock, flags);
- if (hna_global_entry == NULL)
+ if (!hna_global_entry)
return NULL;
return hna_global_entry->orig_node;
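
hna_local_add() now derives its admission limit from the actual packet layout: a new local HNA entry is accepted only while all announced addresses still fit the batman packet. Restated as a stand-alone predicate (sketch only, the helper does not exist in the driver; BAT_PACKET_LEN and MAX_AGGREGATION_BYTES keep their header-defined values):

static bool hna_announcement_fits(struct bat_priv *bat_priv)
{
	int required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN +
			     BAT_PACKET_LEN;

	if (required_bytes > ETH_DATA_LEN)
		return false;	/* would no longer fit one Ethernet frame */

	if (atomic_read(&bat_priv->aggregation_enabled) &&
	    required_bytes > MAX_AGGREGATION_BYTES)
		return false;	/* would exceed an aggregated packet */

	if (bat_priv->num_local_hna + 1 > 255)
		return false;	/* num_hna is a single byte on the wire */

	return true;
}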
diff --git a/drivers/staging/batman-adv/translation-table.h b/drivers/staging/batman-adv/translation-table.h
index fa93e37..10c4c5c 100644
--- a/drivers/staging/batman-adv/translation-table.h
+++ b/drivers/staging/batman-adv/translation-table.h
@@ -24,22 +24,22 @@
#include "types.h"
-int hna_local_init(void);
-void hna_local_add(uint8_t *addr);
-void hna_local_remove(uint8_t *addr, char *message);
-int hna_local_fill_buffer(unsigned char *buff, int buff_len);
+int hna_local_init(struct bat_priv *bat_priv);
+void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
+void hna_local_remove(struct bat_priv *bat_priv,
+ uint8_t *addr, char *message);
+int hna_local_fill_buffer(struct bat_priv *bat_priv,
+ unsigned char *buff, int buff_len);
int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(void);
-int hna_global_init(void);
-void hna_global_add_orig(struct orig_node *orig_node, unsigned char *hna_buff,
- int hna_buff_len);
+void hna_local_free(struct bat_priv *bat_priv);
+int hna_global_init(struct bat_priv *bat_priv);
+void hna_global_add_orig(struct bat_priv *bat_priv,
+ struct orig_node *orig_node,
+ unsigned char *hna_buff, int hna_buff_len);
int hna_global_seq_print_text(struct seq_file *seq, void *offset);
-void hna_global_del_orig(struct orig_node *orig_node, char *message);
-void hna_global_free(void);
-struct orig_node *transtable_search(uint8_t *addr);
-
-extern spinlock_t hna_local_hash_lock;
-extern struct hashtable_t *hna_local_hash;
-extern atomic_t hna_local_changed;
+void hna_global_del_orig(struct bat_priv *bat_priv,
+ struct orig_node *orig_node, char *message);
+void hna_global_free(struct bat_priv *bat_priv);
+struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/drivers/staging/batman-adv/types.h b/drivers/staging/batman-adv/types.h
index 9aa9d36..f3f7366 100644
--- a/drivers/staging/batman-adv/types.h
+++ b/drivers/staging/batman-adv/types.h
@@ -36,31 +36,31 @@
struct batman_if {
struct list_head list;
int16_t if_num;
- char *dev;
char if_status;
- char addr_str[ETH_STR_LEN];
struct net_device *net_dev;
atomic_t seqno;
+ atomic_t frag_seqno;
unsigned char *packet_buff;
int packet_len;
struct kobject *hardif_obj;
- struct rcu_head rcu;
-
+ atomic_t refcnt;
+ struct packet_type batman_adv_ptype;
+ struct net_device *soft_iface;
};
/**
- * orig_node - structure for orig_list maintaining nodes of mesh
- * @primary_addr: hosts primary interface address
- * @last_valid: when last packet from this node was received
- * @bcast_seqno_reset: time when the broadcast seqno window was reset
- * @batman_seqno_reset: time when the batman seqno window was reset
- * @flags: for now only VIS_SERVER flag
- * @last_real_seqno: last and best known squence number
- * @last_ttl: ttl of last received packet
- * @last_bcast_seqno: last broadcast sequence number received by this host
- *
- * @candidates: how many candidates are available
- * @selected: next bonding candidate
+ * orig_node - structure for orig_list maintaining nodes of mesh
+ * @primary_addr: hosts primary interface address
+ * @last_valid: when last packet from this node was received
+ * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @batman_seqno_reset: time when the batman seqno window was reset
+ * @flags: for now only VIS_SERVER flag
+ * @last_real_seqno: last and best known sequence number
+ * @last_ttl: ttl of last received packet
+ * @last_bcast_seqno: last broadcast sequence number received by this host
+ *
+ * @candidates: how many candidates are available
+ * @selected: next bonding candidate
*/
struct orig_node {
uint8_t orig[ETH_ALEN];
@@ -81,6 +81,8 @@ struct orig_node {
TYPE_OF_WORD bcast_bits[NUM_WORDS];
uint32_t last_bcast_seqno;
struct list_head neigh_list;
+ struct list_head frag_list;
+ unsigned long last_frag_packet;
struct {
uint8_t candidates;
struct neigh_node *selected;
@@ -88,8 +90,8 @@ struct orig_node {
};
/**
- * neigh_node
- * @last_valid: when last packet via this neighbor was received
+ * neigh_node
+ * @last_valid: when last packet via this neighbor was received
*/
struct neigh_node {
struct list_head list;
@@ -106,25 +108,51 @@ struct neigh_node {
struct batman_if *if_incoming;
};
+
struct bat_priv {
+ atomic_t mesh_state;
struct net_device_stats stats;
atomic_t aggregation_enabled;
atomic_t bonding_enabled;
+ atomic_t frag_enabled;
atomic_t vis_mode;
atomic_t orig_interval;
atomic_t log_level;
+ atomic_t bcast_seqno;
+ atomic_t bcast_queue_left;
+ atomic_t batman_queue_left;
char num_ifaces;
struct debug_log *debug_log;
struct batman_if *primary_if;
struct kobject *mesh_obj;
struct dentry *debug_dir;
+ struct hlist_head forw_bat_list;
+ struct hlist_head forw_bcast_list;
+ struct list_head vis_send_list;
+ struct hashtable_t *orig_hash;
+ struct hashtable_t *hna_local_hash;
+ struct hashtable_t *hna_global_hash;
+ struct hashtable_t *vis_hash;
+ spinlock_t orig_hash_lock; /* protects orig_hash */
+ spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
+ spinlock_t forw_bcast_list_lock; /* protects */
+ spinlock_t hna_lhash_lock; /* protects hna_local_hash */
+ spinlock_t hna_ghash_lock; /* protects hna_global_hash */
+ spinlock_t vis_hash_lock; /* protects vis_hash */
+ spinlock_t vis_list_lock; /* protects vis_info::recv_list */
+ int16_t num_local_hna;
+ atomic_t hna_local_changed;
+ struct delayed_work hna_work;
+ struct delayed_work orig_work;
+ struct delayed_work vis_work;
+ struct vis_info *my_vis_info;
};
struct socket_client {
struct list_head queue_list;
unsigned int queue_len;
unsigned char index;
- spinlock_t lock;
+ spinlock_t lock; /* protects queue_list, queue_len, index */
wait_queue_head_t queue_wait;
struct bat_priv *bat_priv;
};
@@ -147,15 +175,14 @@ struct hna_global_entry {
};
/**
- * forw_packet - structure for forw_list maintaining packets to be
- * send/forwarded
+ * forw_packet - structure for forw_list maintaining packets to be
+ * send/forwarded
*/
struct forw_packet {
struct hlist_node list;
unsigned long send_time;
uint8_t own;
struct sk_buff *skb;
- unsigned char *packet_buff;
uint16_t packet_len;
uint32_t direct_link_flags;
uint8_t num_packets;
@@ -177,8 +204,38 @@ struct debug_log {
char log_buff[LOG_BUF_LEN];
unsigned long log_start;
unsigned long log_end;
- spinlock_t lock;
+ spinlock_t lock; /* protects log_buff, log_start and log_end */
wait_queue_head_t queue_wait;
};
+struct frag_packet_list_entry {
+ struct list_head list;
+ uint16_t seqno;
+ struct sk_buff *skb;
+};
+
+struct vis_info {
+ unsigned long first_seen;
+ struct list_head recv_list;
+ /* list of server-neighbors we received a vis-packet
+ * from. we should not reply to them. */
+ struct list_head send_list;
+ struct kref refcount;
+ struct bat_priv *bat_priv;
+ /* this packet might be part of the vis send queue. */
+ struct sk_buff *skb_packet;
+ /* vis_info may follow here*/
+} __attribute__((packed));
+
+struct vis_info_entry {
+ uint8_t src[ETH_ALEN];
+ uint8_t dest[ETH_ALEN];
+ uint8_t quality; /* quality = 0 means HNA */
+} __attribute__((packed));
+
+struct recvlist_node {
+ struct list_head list;
+ uint8_t mac[ETH_ALEN];
+};
+
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
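
Almost every field added to bat_priv above replaces a former file-scope global, and the inline comments now document which lock protects what. The access pattern repeated throughout this patch, reduced to a sketch (the function itself is illustrative only):

static struct hna_local_entry *example_find_local(struct net_device *soft_iface,
						  uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *entry;
	unsigned long flags;

	/* hna_lhash_lock protects hna_local_hash, as noted above */
	spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
	entry = hash_find(bat_priv->hna_local_hash, addr);
	spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);

	return entry;
}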
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
new file mode 100644
index 0000000..0dac50d
--- /dev/null
+++ b/drivers/staging/batman-adv/unicast.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Andreas Langer
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#include "main.h"
+#include "unicast.h"
+#include "send.h"
+#include "soft-interface.h"
+#include "hash.h"
+#include "translation-table.h"
+#include "routing.h"
+#include "hard-interface.h"
+
+
+struct sk_buff *merge_frag_packet(struct list_head *head,
+ struct frag_packet_list_entry *tfp,
+ struct sk_buff *skb)
+{
+ struct unicast_frag_packet *up =
+ (struct unicast_frag_packet *)skb->data;
+ struct sk_buff *tmp_skb;
+
+ /* set skb to the first part and tmp_skb to the second part */
+ if (up->flags & UNI_FRAG_HEAD) {
+ tmp_skb = tfp->skb;
+ } else {
+ tmp_skb = skb;
+ skb = tfp->skb;
+ }
+
+ skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
+ if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
+ /* free buffered skb, skb will be freed later */
+ kfree_skb(tfp->skb);
+ return NULL;
+ }
+
+ /* move free entry to end */
+ tfp->skb = NULL;
+ tfp->seqno = 0;
+ list_move_tail(&tfp->list, head);
+
+ memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
+ kfree_skb(tmp_skb);
+ return skb;
+}
+
+void create_frag_entry(struct list_head *head, struct sk_buff *skb)
+{
+ struct frag_packet_list_entry *tfp;
+ struct unicast_frag_packet *up =
+ (struct unicast_frag_packet *)skb->data;
+
+ /* free and oldest packets stand at the end */
+ tfp = list_entry((head)->prev, typeof(*tfp), list);
+ kfree_skb(tfp->skb);
+
+ tfp->seqno = ntohs(up->seqno);
+ tfp->skb = skb;
+ list_move(&tfp->list, head);
+ return;
+}
+
+int create_frag_buffer(struct list_head *head)
+{
+ int i;
+ struct frag_packet_list_entry *tfp;
+
+ for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
+ tfp = kmalloc(sizeof(struct frag_packet_list_entry),
+ GFP_ATOMIC);
+ if (!tfp) {
+ frag_list_free(head);
+ return -ENOMEM;
+ }
+ tfp->skb = NULL;
+ tfp->seqno = 0;
+ INIT_LIST_HEAD(&tfp->list);
+ list_add(&tfp->list, head);
+ }
+
+ return 0;
+}
+
+struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+ struct unicast_frag_packet *up)
+{
+ struct frag_packet_list_entry *tfp;
+ struct unicast_frag_packet *tmp_up = NULL;
+ uint16_t search_seqno;
+
+ if (up->flags & UNI_FRAG_HEAD)
+ search_seqno = ntohs(up->seqno)+1;
+ else
+ search_seqno = ntohs(up->seqno)-1;
+
+ list_for_each_entry(tfp, head, list) {
+
+ if (!tfp->skb)
+ continue;
+
+ if (tfp->seqno == ntohs(up->seqno))
+ goto mov_tail;
+
+ tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
+
+ if (tfp->seqno == search_seqno) {
+
+ if ((tmp_up->flags & UNI_FRAG_HEAD) !=
+ (up->flags & UNI_FRAG_HEAD))
+ return tfp;
+ else
+ goto mov_tail;
+ }
+ }
+ return NULL;
+
+mov_tail:
+ list_move_tail(&tfp->list, head);
+ return NULL;
+}
+
+void frag_list_free(struct list_head *head)
+{
+ struct frag_packet_list_entry *pf, *tmp_pf;
+
+ if (!list_empty(head)) {
+
+ list_for_each_entry_safe(pf, tmp_pf, head, list) {
+ kfree_skb(pf->skb);
+ list_del(&pf->list);
+ kfree(pf);
+ }
+ }
+ return;
+}
+
+static int unicast_send_frag_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
+ struct batman_if *batman_if, uint8_t dstaddr[],
+ struct orig_node *orig_node)
+{
+ struct unicast_frag_packet *ucast_frag1, *ucast_frag2;
+ int hdr_len = sizeof(struct unicast_frag_packet);
+ struct sk_buff *frag_skb;
+ int data_len = skb->len;
+
+ if (!bat_priv->primary_if)
+ goto dropped;
+
+ frag_skb = dev_alloc_skb(data_len - (data_len / 2) + hdr_len);
+ skb_split(skb, frag_skb, data_len / 2);
+
+ if (my_skb_head_push(frag_skb, hdr_len) < 0 ||
+ my_skb_head_push(skb, hdr_len) < 0)
+ goto drop_frag;
+
+ ucast_frag1 = (struct unicast_frag_packet *)skb->data;
+ ucast_frag2 = (struct unicast_frag_packet *)frag_skb->data;
+
+ ucast_frag1->version = COMPAT_VERSION;
+ ucast_frag1->packet_type = BAT_UNICAST_FRAG;
+ ucast_frag1->ttl = TTL;
+ memcpy(ucast_frag1->orig,
+ bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+ memcpy(ucast_frag1->dest, orig_node->orig, ETH_ALEN);
+
+ memcpy(ucast_frag2, ucast_frag1, sizeof(struct unicast_frag_packet));
+
+ ucast_frag1->flags |= UNI_FRAG_HEAD;
+ ucast_frag2->flags &= ~UNI_FRAG_HEAD;
+
+ ucast_frag1->seqno = htons((uint16_t)atomic_inc_return(
+ &batman_if->frag_seqno));
+
+ ucast_frag2->seqno = htons((uint16_t)atomic_inc_return(
+ &batman_if->frag_seqno));
+
+ send_skb_packet(skb, batman_if, dstaddr);
+ send_skb_packet(frag_skb, batman_if, dstaddr);
+ return 0;
+
+drop_frag:
+ kfree_skb(frag_skb);
+dropped:
+ kfree_skb(skb);
+ return 1;
+}
+
+int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
+{
+ struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct unicast_packet *unicast_packet;
+ struct orig_node *orig_node;
+ struct batman_if *batman_if;
+ struct neigh_node *router;
+ int data_len = skb->len;
+ uint8_t dstaddr[6];
+ unsigned long flags;
+
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+
+ /* get routing information */
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ ethhdr->h_dest));
+
+ /* check for hna host */
+ if (!orig_node)
+ orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+
+ router = find_router(orig_node, NULL);
+
+ if (!router)
+ goto unlock;
+
+ /* don't lock while sending the packets ... we therefore
+ * copy the required data before sending */
+
+ batman_if = router->if_incoming;
+ memcpy(dstaddr, router->addr, ETH_ALEN);
+
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+
+ if (batman_if->if_status != IF_ACTIVE)
+ goto dropped;
+
+ if (atomic_read(&bat_priv->frag_enabled) &&
+ data_len + sizeof(struct unicast_packet) > batman_if->net_dev->mtu)
+ return unicast_send_frag_skb(skb, bat_priv, batman_if,
+ dstaddr, orig_node);
+
+ if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
+ goto dropped;
+
+ unicast_packet = (struct unicast_packet *)skb->data;
+
+ unicast_packet->version = COMPAT_VERSION;
+ /* batman packet type: unicast */
+ unicast_packet->packet_type = BAT_UNICAST;
+ /* set unicast ttl */
+ unicast_packet->ttl = TTL;
+ /* copy the destination for faster routing */
+ memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+
+ send_skb_packet(skb, batman_if, dstaddr);
+ return 0;
+
+unlock:
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+dropped:
+ kfree_skb(skb);
+ return 1;
+}
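
unicast_send_frag_skb() splits an oversized payload in half with skb_split(), prepends a unicast_frag_packet header to each part, marks the first one with UNI_FRAG_HEAD and gives the halves consecutive per-interface sequence numbers so the receiver can pair them again. A small userspace illustration of the resulting split; the payload length is an arbitrary example:

#include <stdio.h>

int main(void)
{
	int data_len = 1401;	/* example payload that exceeds the MTU */

	/* skb_split(skb, frag_skb, data_len / 2): the original skb keeps
	 * the first half, frag_skb receives the remainder */
	printf("fragment 1 (UNI_FRAG_HEAD, seqno n):  %d payload bytes\n",
	       data_len / 2);
	printf("fragment 2 (no head flag, seqno n+1): %d payload bytes\n",
	       data_len - data_len / 2);
	return 0;
}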
diff --git a/drivers/staging/batman-adv/unicast.h b/drivers/staging/batman-adv/unicast.h
new file mode 100644
index 0000000..7973697
--- /dev/null
+++ b/drivers/staging/batman-adv/unicast.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ *
+ * Andreas Langer
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ *
+ */
+
+#ifndef _NET_BATMAN_ADV_UNICAST_H_
+#define _NET_BATMAN_ADV_UNICAST_H_
+
+#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
+#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
+
+struct sk_buff *merge_frag_packet(struct list_head *head,
+ struct frag_packet_list_entry *tfp,
+ struct sk_buff *skb);
+
+void create_frag_entry(struct list_head *head, struct sk_buff *skb);
+int create_frag_buffer(struct list_head *head);
+struct frag_packet_list_entry *search_frag_packet(struct list_head *head,
+ struct unicast_frag_packet *up);
+void frag_list_free(struct list_head *head);
+int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
+
+#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
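
On the receive side each originator keeps FRAG_BUFFER_SIZE slots (orig_node->frag_list, see types.h above): a fragment whose counterpart has not arrived yet is buffered with create_frag_entry(), otherwise the pair is reassembled. A hedged sketch of that flow; the caller, its locking and error handling are hypothetical, only the helpers come from this header:

static struct sk_buff *example_handle_frag(struct orig_node *orig_node,
					   struct sk_buff *skb)
{
	struct unicast_frag_packet *up =
		(struct unicast_frag_packet *)skb->data;
	struct frag_packet_list_entry *tfp;

	tfp = search_frag_packet(&orig_node->frag_list, up);
	if (!tfp) {
		/* counterpart not seen yet: buffer this half and wait */
		create_frag_entry(&orig_node->frag_list, skb);
		return NULL;
	}

	/* both halves available: NULL is returned if merging failed */
	return merge_frag_packet(&orig_node->frag_list, tfp, skb);
}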
diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c
index 4b6a5045..3d2c1bc 100644
--- a/drivers/staging/batman-adv/vis.c
+++ b/drivers/staging/batman-adv/vis.c
@@ -27,54 +27,55 @@
#include "hard-interface.h"
#include "hash.h"
+#define MAX_VIS_PACKET_SIZE 1000
+
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
- they handle overflows/underflows and can correctly check for a
- predecessor/successor unless the variable sequence number has grown by
- more then 2**(bitwidth(x)-1)-1.
- This means that for a uint8_t with the maximum value 255, it would think:
- * when adding nothing - it is neither a predecessor nor a successor
- * before adding more than 127 to the starting value - it is a predecessor,
- * when adding 128 - it is neither a predecessor nor a successor,
- * after adding more than 127 to the starting value - it is a successor */
+ * they handle overflows/underflows and can correctly check for a
+ * predecessor/successor unless the variable sequence number has grown by
+ * more than 2**(bitwidth(x)-1)-1.
+ * This means that for a uint8_t with the maximum value 255, it would think:
+ * - when adding nothing - it is neither a predecessor nor a successor
+ * - before adding more than 127 to the starting value - it is a predecessor,
+ * - when adding 128 - it is neither a predecessor nor a successor,
+ * - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
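
The wrap-around behaviour described in the comment above can be checked in userspace by compiling the two macros on their own (gcc extensions required; the values reproduce the uint8_t example from the comment):

#include <stdio.h>
#include <stdint.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
			   _dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)

int main(void)
{
	uint8_t x = 255;

	/* x stays a predecessor while the counter advances by up to 127 */
	printf("%d\n", seq_before(x, (uint8_t)(x + 5)));	/* 1 */
	/* at a distance of 128 it is neither predecessor nor successor */
	printf("%d\n", seq_before(x, (uint8_t)(x + 128)));	/* 0 */
	/* beyond that, x looks like a successor of the wrapped counter */
	printf("%d\n", seq_after(x, (uint8_t)(x + 200)));	/* 1 */
	return 0;
}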
-static struct hashtable_t *vis_hash;
-static DEFINE_SPINLOCK(vis_hash_lock);
-static DEFINE_SPINLOCK(recv_list_lock);
-static struct vis_info *my_vis_info;
-static struct list_head send_list; /* always locked with vis_hash_lock */
-
-static void start_vis_timer(void);
+static void start_vis_timer(struct bat_priv *bat_priv);
/* free the info */
static void free_info(struct kref *ref)
{
struct vis_info *info = container_of(ref, struct vis_info, refcount);
+ struct bat_priv *bat_priv = info->bat_priv;
struct recvlist_node *entry, *tmp;
unsigned long flags;
list_del_init(&info->send_list);
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
list_del(&entry->list);
kfree(entry);
}
- spin_unlock_irqrestore(&recv_list_lock, flags);
- kfree(info);
+
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
+ kfree_skb(info->skb_packet);
}
/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(void *data1, void *data2)
{
struct vis_info *d1, *d2;
+ struct vis_packet *p1, *p2;
d1 = data1;
d2 = data2;
- return compare_orig(d1->packet.vis_orig, d2->packet.vis_orig);
+ p1 = (struct vis_packet *)d1->skb_packet->data;
+ p2 = (struct vis_packet *)d2->skb_packet->data;
+ return compare_orig(p1->vis_orig, p2->vis_orig);
}
/* hash function to choose an entry in a hash table of given size */
@@ -82,11 +83,13 @@ static int vis_info_cmp(void *data1, void *data2)
static int vis_info_choose(void *data, int size)
{
struct vis_info *vis_info = data;
+ struct vis_packet *packet;
unsigned char *key;
uint32_t hash = 0;
size_t i;
- key = vis_info->packet.vis_orig;
+ packet = (struct vis_packet *)vis_info->skb_packet->data;
+ key = packet->vis_orig;
for (i = 0; i < ETH_ALEN; i++) {
hash += key[i];
hash += (hash << 10);
@@ -127,15 +130,13 @@ static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
struct if_list_entry *entry;
struct hlist_node *pos;
- char tmp_addr_str[ETH_STR_LEN];
size_t len = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
len += sprintf(buff + len, "PRIMARY, ");
else {
- addr_to_string(tmp_addr_str, entry->addr);
- len += sprintf(buff + len, "SEC %s, ", tmp_addr_str);
+ len += sprintf(buff + len, "SEC %pM, ", entry->addr);
}
}
@@ -162,14 +163,12 @@ static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
uint8_t *src, bool primary)
{
- char to[18];
-
/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
- addr_to_string(to, entry->dest);
if (primary && entry->quality == 0)
- return sprintf(buff, "HNA %s, ", to);
+ return sprintf(buff, "HNA %pM, ", entry->dest);
else if (compare_orig(entry->src, src))
- return sprintf(buff, "TQ %s %d, ", to, entry->quality);
+ return sprintf(buff, "TQ %pM %d, ", entry->dest,
+ entry->quality);
return 0;
}
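The two hunks above drop the fixed-size string buffers and addr_to_string() calls in favour of the kernel's %pM format extension, which prints a pointer to a 6-byte MAC address directly. A minimal kernel-context sketch of the specifier follows; the buffer size and address bytes are illustrative, not taken from the patch.

/* %pM is handled by the kernel's vsnprintf(), so it works in sprintf(),
 * seq_printf() and printk() alike; userspace printf() does not know it. */
uint8_t addr[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
char buf[32];

sprintf(buf, "SEC %pM, ", addr);	/* buf == "SEC 02:11:22:33:44:55, " */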
@@ -179,6 +178,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
HASHIT(hashit);
HASHIT(hashit_count);
struct vis_info *info;
+ struct vis_packet *packet;
struct vis_info_entry *entries;
struct net_device *net_dev = (struct net_device *)seq->private;
struct bat_priv *bat_priv = netdev_priv(net_dev);
@@ -186,7 +186,6 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
struct if_list_entry *entry;
struct hlist_node *pos, *n;
int i;
- char tmp_addr_str[ETH_STR_LEN];
unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
size_t buff_pos, buf_size;
@@ -198,25 +197,25 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buf_size = 1;
/* Estimate length */
- spin_lock_irqsave(&vis_hash_lock, flags);
- while (hash_iterate(vis_hash, &hashit_count)) {
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ while (hash_iterate(bat_priv->vis_hash, &hashit_count)) {
info = hashit_count.bucket->data;
+ packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
- ((char *)info + sizeof(struct vis_info));
+ ((char *)packet + sizeof(struct vis_packet));
- for (i = 0; i < info->packet.entries; i++) {
+ for (i = 0; i < packet->entries; i++) {
if (entries[i].quality == 0)
continue;
vis_data_insert_interface(entries[i].src, &vis_if_list,
- compare_orig(entries[i].src,
- info->packet.vis_orig));
+ compare_orig(entries[i].src, packet->vis_orig));
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buf_size += 18 + 26 * info->packet.entries;
+ buf_size += 18 + 26 * packet->entries;
/* add primary/secondary records */
- if (compare_orig(entry->addr, info->packet.vis_orig))
+ if (compare_orig(entry->addr, packet->vis_orig))
buf_size +=
vis_data_count_prim_sec(&vis_if_list);
@@ -231,38 +230,37 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
buff = kmalloc(buf_size, GFP_ATOMIC);
if (!buff) {
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
return -ENOMEM;
}
buff[0] = '\0';
buff_pos = 0;
- while (hash_iterate(vis_hash, &hashit)) {
+ while (hash_iterate(bat_priv->vis_hash, &hashit)) {
info = hashit.bucket->data;
+ packet = (struct vis_packet *)info->skb_packet->data;
entries = (struct vis_info_entry *)
- ((char *)info + sizeof(struct vis_info));
+ ((char *)packet + sizeof(struct vis_packet));
- for (i = 0; i < info->packet.entries; i++) {
+ for (i = 0; i < packet->entries; i++) {
if (entries[i].quality == 0)
continue;
vis_data_insert_interface(entries[i].src, &vis_if_list,
- compare_orig(entries[i].src,
- info->packet.vis_orig));
+ compare_orig(entries[i].src, packet->vis_orig));
}
hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- addr_to_string(tmp_addr_str, entry->addr);
- buff_pos += sprintf(buff + buff_pos, "%s,",
- tmp_addr_str);
+ buff_pos += sprintf(buff + buff_pos, "%pM,",
+ entry->addr);
- for (i = 0; i < info->packet.entries; i++)
+ for (i = 0; i < packet->entries; i++)
buff_pos += vis_data_read_entry(buff + buff_pos,
&entries[i],
entry->addr,
entry->primary);
/* add primary/secondary records */
- if (compare_orig(entry->addr, info->packet.vis_orig))
+ if (compare_orig(entry->addr, packet->vis_orig))
buff_pos +=
vis_data_read_prim_sec(buff + buff_pos,
&vis_if_list);
@@ -276,7 +274,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
}
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
seq_printf(seq, "%s", buff);
kfree(buff);
@@ -286,11 +284,11 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
/* add the info packet to the send list, if it was not
* already linked in. */
-static void send_list_add(struct vis_info *info)
+static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
- list_add_tail(&info->send_list, &send_list);
+ list_add_tail(&info->send_list, &bat_priv->vis_send_list);
}
}
@@ -305,7 +303,8 @@ static void send_list_del(struct vis_info *info)
}
/* tries to add one entry to the receive list. */
-static void recv_list_add(struct list_head *recv_list, char *mac)
+static void recv_list_add(struct bat_priv *bat_priv,
+ struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
unsigned long flags;
@@ -315,52 +314,65 @@ static void recv_list_add(struct list_head *recv_list, char *mac)
return;
memcpy(entry->mac, mac, ETH_ALEN);
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_add_tail(&entry->list, recv_list);
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
}
/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct list_head *recv_list, char *mac)
+static int recv_list_is_in(struct bat_priv *bat_priv,
+ struct list_head *recv_list, char *mac)
{
struct recvlist_node *entry;
unsigned long flags;
- spin_lock_irqsave(&recv_list_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_list_lock, flags);
list_for_each_entry(entry, recv_list, list) {
if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock,
+ flags);
return 1;
}
}
- spin_unlock_irqrestore(&recv_list_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_list_lock, flags);
return 0;
}
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
* broken.. ). vis hash must be locked outside. is_new is set when the packet
* is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct vis_packet *vis_packet,
+static struct vis_info *add_packet(struct bat_priv *bat_priv,
+ struct vis_packet *vis_packet,
int vis_info_len, int *is_new,
int make_broadcast)
{
struct vis_info *info, *old_info;
+ struct vis_packet *search_packet, *old_packet;
struct vis_info search_elem;
+ struct vis_packet *packet;
*is_new = 0;
/* sanity check */
- if (vis_hash == NULL)
+ if (!bat_priv->vis_hash)
return NULL;
/* see if the packet is already in vis_hash */
- memcpy(search_elem.packet.vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = hash_find(vis_hash, &search_elem);
+ search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
+ if (!search_elem.skb_packet)
+ return NULL;
+ search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
+ sizeof(struct vis_packet));
+
+ memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
+ old_info = hash_find(bat_priv->vis_hash, &search_elem);
+ kfree_skb(search_elem.skb_packet);
if (old_info != NULL) {
+ old_packet = (struct vis_packet *)old_info->skb_packet->data;
if (!seq_after(ntohl(vis_packet->seqno),
- ntohl(old_info->packet.seqno))) {
- if (old_info->packet.seqno == vis_packet->seqno) {
- recv_list_add(&old_info->recv_list,
+ ntohl(old_packet->seqno))) {
+ if (old_packet->seqno == vis_packet->seqno) {
+ recv_list_add(bat_priv, &old_info->recv_list,
vis_packet->sender_orig);
return old_info;
} else {
@@ -369,38 +381,48 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet,
}
}
/* remove old entry */
- hash_remove(vis_hash, old_info);
+ hash_remove(bat_priv->vis_hash, old_info);
send_list_del(old_info);
kref_put(&old_info->refcount, free_info);
}
- info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC);
- if (info == NULL)
+ info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
+ if (!info)
return NULL;
+ info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
+ vis_info_len + sizeof(struct ethhdr));
+ if (!info->skb_packet) {
+ kfree(info);
+ return NULL;
+ }
+ skb_reserve(info->skb_packet, sizeof(struct ethhdr));
+ packet = (struct vis_packet *)skb_put(info->skb_packet,
+ sizeof(struct vis_packet) +
+ vis_info_len);
+
kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
- memcpy(&info->packet, vis_packet,
- sizeof(struct vis_packet) + vis_info_len);
+ info->bat_priv = bat_priv;
+ memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);
/* initialize and add new packet. */
*is_new = 1;
/* Make it a broadcast packet, if required */
if (make_broadcast)
- memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
+ memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
/* repair if entries is longer than packet. */
- if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len)
- info->packet.entries = vis_info_len /
- sizeof(struct vis_info_entry);
+ if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
+ packet->entries = vis_info_len / sizeof(struct vis_info_entry);
- recv_list_add(&info->recv_list, info->packet.sender_orig);
+ recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
- if (hash_add(vis_hash, info) < 0) {
+ if (hash_add(bat_priv->vis_hash, info) < 0) {
/* did not work (for some reason) */
kref_put(&old_info->refcount, free_info);
info = NULL;
@@ -421,17 +443,18 @@ void receive_server_sync_packet(struct bat_priv *bat_priv,
make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
- spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast);
- if (info == NULL)
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ info = add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, make_broadcast);
+ if (!info)
goto end;
/* only if we are server ourselves and packet is newer than the one in
* hash.*/
if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- send_list_add(info);
+ send_list_add(bat_priv, info);
end:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
/* handle an incoming client update packet and schedule forward if needed. */
@@ -440,6 +463,7 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
int vis_info_len)
{
struct vis_info *info;
+ struct vis_packet *packet;
int is_new;
unsigned long flags;
int vis_server = atomic_read(&bat_priv->vis_mode);
@@ -454,45 +478,51 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
is_my_mac(vis_packet->target_orig))
are_target = 1;
- spin_lock_irqsave(&vis_hash_lock, flags);
- info = add_packet(vis_packet, vis_info_len, &is_new, are_target);
- if (info == NULL)
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ info = add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, are_target);
+
+ if (!info)
goto end;
/* note that outdated packets will be dropped at this point. */
+ packet = (struct vis_packet *)info->skb_packet->data;
/* send only if we're the target server or ... */
if (are_target && is_new) {
- info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- send_list_add(info);
+ packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
+ send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
- } else if (!is_my_mac(info->packet.target_orig)) {
- send_list_add(info);
+ } else if (!is_my_mac(packet->target_orig)) {
+ send_list_add(bat_priv, info);
}
+
end:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
* Must be called with the originator hash locked */
-static int find_best_vis_server(struct vis_info *info)
+static int find_best_vis_server(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
HASHIT(hashit);
struct orig_node *orig_node;
+ struct vis_packet *packet;
int best_tq = -1;
- while (hash_iterate(orig_hash, &hashit)) {
+ packet = (struct vis_packet *)info->skb_packet->data;
+
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
- if ((orig_node != NULL) &&
- (orig_node->router != NULL) &&
+ if ((orig_node) && (orig_node->router) &&
(orig_node->flags & VIS_SERVER) &&
(orig_node->router->tq_avg > best_tq)) {
best_tq = orig_node->router->tq_avg;
- memcpy(info->packet.target_orig, orig_node->orig,
- ETH_ALEN);
+ memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
}
}
return best_tq;
@@ -501,8 +531,11 @@ static int find_best_vis_server(struct vis_info *info)
/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
- if (info->packet.entries + 1 >
- (1000 - sizeof(struct vis_info)) / sizeof(struct vis_info_entry))
+ struct vis_packet *packet;
+ packet = (struct vis_packet *)info->skb_packet->data;
+
+ if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
+ < packet->entries + 1)
return true;
return false;
}
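With MAX_VIS_PACKET_SIZE = 1000 and the packed 13-byte vis_info_entry layout (see the struct removed from vis.h further down), the rewritten check caps a vis packet at 1000 / 13 = 76 entries: once packet->entries reaches 76, vis_packet_full() returns true and no further entry is appended. A tiny userspace check of that arithmetic (an editorial sketch, not from the patch):

#include <stdint.h>
#include <stdio.h>

struct vis_info_entry {			/* 6 + 6 + 1 = 13 bytes, as in vis.h */
	uint8_t src[6];
	uint8_t dest[6];
	uint8_t quality;
} __attribute__((packed));

int main(void)
{
	unsigned long max = 1000 / sizeof(struct vis_info_entry);

	printf("entries per packet: %lu\n", max);	/* 76 */
	return 0;
}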
@@ -514,109 +547,128 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
HASHIT(hashit_local);
HASHIT(hashit_global);
struct orig_node *orig_node;
- struct vis_info *info = (struct vis_info *)my_vis_info;
- struct vis_info_entry *entry, *entry_array;
+ struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
+ struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
+ struct vis_info_entry *entry;
struct hna_local_entry *hna_local_entry;
int best_tq = -1;
unsigned long flags;
info->first_seen = jiffies;
- info->packet.vis_type = atomic_read(&bat_priv->vis_mode);
+ packet->vis_type = atomic_read(&bat_priv->vis_mode);
- spin_lock_irqsave(&orig_hash_lock, flags);
- memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
- info->packet.ttl = TTL;
- info->packet.seqno = htonl(ntohl(info->packet.seqno) + 1);
- info->packet.entries = 0;
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
+ packet->ttl = TTL;
+ packet->seqno = htonl(ntohl(packet->seqno) + 1);
+ packet->entries = 0;
+ skb_trim(info->skb_packet, sizeof(struct vis_packet));
+
+ if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
+ best_tq = find_best_vis_server(bat_priv, info);
- if (info->packet.vis_type == VIS_TYPE_CLIENT_UPDATE) {
- best_tq = find_best_vis_server(info);
if (best_tq < 0) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock,
+ flags);
return -1;
}
}
- entry_array = (struct vis_info_entry *)
- ((char *)info + sizeof(struct vis_info));
-
- while (hash_iterate(orig_hash, &hashit_global)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit_global)) {
orig_node = hashit_global.bucket->data;
- if (orig_node->router != NULL
- && compare_orig(orig_node->router->addr,
- orig_node->orig)
- && (orig_node->router->if_incoming->if_status ==
- IF_ACTIVE)
- && orig_node->router->tq_avg > 0) {
-
- /* fill one entry into buffer. */
- entry = &entry_array[info->packet.entries];
- memcpy(entry->src,
- orig_node->router->if_incoming->net_dev->dev_addr,
- ETH_ALEN);
- memcpy(entry->dest, orig_node->orig, ETH_ALEN);
- entry->quality = orig_node->router->tq_avg;
- info->packet.entries++;
-
- if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- return 0;
- }
+
+ if (!orig_node->router)
+ continue;
+
+ if (!compare_orig(orig_node->router->addr, orig_node->orig))
+ continue;
+
+ if (orig_node->router->if_incoming->if_status != IF_ACTIVE)
+ continue;
+
+ if (orig_node->router->tq_avg < 1)
+ continue;
+
+ /* fill one entry into buffer. */
+ entry = (struct vis_info_entry *)
+ skb_put(info->skb_packet, sizeof(*entry));
+ memcpy(entry->src,
+ orig_node->router->if_incoming->net_dev->dev_addr,
+ ETH_ALEN);
+ memcpy(entry->dest, orig_node->orig, ETH_ALEN);
+ entry->quality = orig_node->router->tq_avg;
+ packet->entries++;
+
+ if (vis_packet_full(info)) {
+ spin_unlock_irqrestore(
+ &bat_priv->orig_hash_lock, flags);
+ return 0;
}
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- spin_lock_irqsave(&hna_local_hash_lock, flags);
- while (hash_iterate(hna_local_hash, &hashit_local)) {
+ spin_lock_irqsave(&bat_priv->hna_lhash_lock, flags);
+ while (hash_iterate(bat_priv->hna_local_hash, &hashit_local)) {
hna_local_entry = hashit_local.bucket->data;
- entry = &entry_array[info->packet.entries];
+ entry = (struct vis_info_entry *)skb_put(info->skb_packet,
+ sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
entry->quality = 0; /* 0 means HNA */
- info->packet.entries++;
+ packet->entries++;
if (vis_packet_full(info)) {
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock,
+ flags);
return 0;
}
}
- spin_unlock_irqrestore(&hna_local_hash_lock, flags);
+
+ spin_unlock_irqrestore(&bat_priv->hna_lhash_lock, flags);
return 0;
}
/* free old vis packets. Must be called with this vis_hash_lock
* held */
-static void purge_vis_packets(void)
+static void purge_vis_packets(struct bat_priv *bat_priv)
{
HASHIT(hashit);
struct vis_info *info;
- while (hash_iterate(vis_hash, &hashit)) {
+ while (hash_iterate(bat_priv->vis_hash, &hashit)) {
info = hashit.bucket->data;
- if (info == my_vis_info) /* never purge own data. */
+
+ /* never purge own data. */
+ if (info == bat_priv->my_vis_info)
continue;
+
if (time_after(jiffies,
info->first_seen + VIS_TIMEOUT * HZ)) {
- hash_remove_bucket(vis_hash, &hashit);
+ hash_remove_bucket(bat_priv->vis_hash, &hashit);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
}
}
-static void broadcast_vis_packet(struct vis_info *info, int packet_length)
+static void broadcast_vis_packet(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
HASHIT(hashit);
struct orig_node *orig_node;
+ struct vis_packet *packet;
+ struct sk_buff *skb;
unsigned long flags;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&orig_hash_lock, flags);
+
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ packet = (struct vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
- while (hash_iterate(orig_hash, &hashit)) {
+ while (hash_iterate(bat_priv->orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
/* if it's a vis server and reachable, send it. */
@@ -626,34 +678,40 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length)
continue;
/* don't send it if we already received the packet from
* this node. */
- if (recv_list_is_in(&info->recv_list, orig_node->orig))
+ if (recv_list_is_in(bat_priv, &info->recv_list,
+ orig_node->orig))
continue;
- memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN);
+ memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
- send_raw_packet((unsigned char *)&info->packet,
- packet_length, batman_if, dstaddr);
+ skb = skb_clone(info->skb_packet, GFP_ATOMIC);
+ if (skb)
+ send_skb_packet(skb, batman_if, dstaddr);
- spin_lock_irqsave(&orig_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
}
- spin_unlock_irqrestore(&orig_hash_lock, flags);
- memcpy(info->packet.target_orig, broadcast_addr, ETH_ALEN);
+
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
-static void unicast_vis_packet(struct vis_info *info, int packet_length)
+static void unicast_vis_packet(struct bat_priv *bat_priv,
+ struct vis_info *info)
{
struct orig_node *orig_node;
+ struct sk_buff *skb;
+ struct vis_packet *packet;
unsigned long flags;
struct batman_if *batman_if;
uint8_t dstaddr[ETH_ALEN];
- spin_lock_irqsave(&orig_hash_lock, flags);
- orig_node = ((struct orig_node *)
- hash_find(orig_hash, info->packet.target_orig));
+ spin_lock_irqsave(&bat_priv->orig_hash_lock, flags);
+ packet = (struct vis_packet *)info->skb_packet->data;
+ orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
+ packet->target_orig));
if ((!orig_node) || (!orig_node->router))
goto out;
@@ -662,129 +720,148 @@ static void unicast_vis_packet(struct vis_info *info, int packet_length)
* copy the required data before sending */
batman_if = orig_node->router->if_incoming;
memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
+
+ skb = skb_clone(info->skb_packet, GFP_ATOMIC);
+ if (skb)
+ send_skb_packet(skb, batman_if, dstaddr);
- send_raw_packet((unsigned char *)&info->packet,
- packet_length, batman_if, dstaddr);
return;
out:
- spin_unlock_irqrestore(&orig_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
}
/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct vis_info *info)
+static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
- int packet_length;
+ struct vis_packet *packet;
- if (info->packet.ttl < 2) {
- pr_warning("Error - can't send vis packet: ttl exceeded\n");
+ packet = (struct vis_packet *)info->skb_packet->data;
+ if (packet->ttl < 2) {
+ pr_debug("Error - can't send vis packet: ttl exceeded\n");
return;
}
- memcpy(info->packet.sender_orig, main_if_addr, ETH_ALEN);
- info->packet.ttl--;
-
- packet_length = sizeof(struct vis_packet) +
- info->packet.entries * sizeof(struct vis_info_entry);
+ memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
+ ETH_ALEN);
+ packet->ttl--;
- if (is_bcast(info->packet.target_orig))
- broadcast_vis_packet(info, packet_length);
+ if (is_bcast(packet->target_orig))
+ broadcast_vis_packet(bat_priv, info);
else
- unicast_vis_packet(info, packet_length);
- info->packet.ttl++; /* restore TTL */
+ unicast_vis_packet(bat_priv, info);
+ packet->ttl++; /* restore TTL */
}
/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
+ struct delayed_work *delayed_work =
+ container_of(work, struct delayed_work, work);
+ struct bat_priv *bat_priv =
+ container_of(delayed_work, struct bat_priv, vis_work);
struct vis_info *info, *temp;
unsigned long flags;
- /* FIXME: each batman_if will be attached to a softif */
- struct bat_priv *bat_priv = netdev_priv(soft_device);
- spin_lock_irqsave(&vis_hash_lock, flags);
-
- purge_vis_packets();
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
+ purge_vis_packets(bat_priv);
if (generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- send_list_add(my_vis_info);
+ send_list_add(bat_priv, bat_priv->my_vis_info);
}
- list_for_each_entry_safe(info, temp, &send_list, send_list) {
+ list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
+ send_list) {
kref_get(&info->refcount);
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
- send_vis_packet(info);
+ if (bat_priv->primary_if)
+ send_vis_packet(bat_priv, info);
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
send_list_del(info);
kref_put(&info->refcount, free_info);
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- start_vis_timer();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ start_vis_timer(bat_priv);
}
-static DECLARE_DELAYED_WORK(vis_timer_wq, send_vis_packets);
/* init the vis server. this may only be called when if_list is already
* initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(void)
+int vis_init(struct bat_priv *bat_priv)
{
+ struct vis_packet *packet;
unsigned long flags;
- if (vis_hash)
+
+ if (bat_priv->vis_hash)
return 1;
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
- vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
- if (!vis_hash) {
+ bat_priv->vis_hash = hash_new(256, vis_info_cmp, vis_info_choose);
+ if (!bat_priv->vis_hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
- my_vis_info = kmalloc(1000, GFP_ATOMIC);
- if (!my_vis_info) {
+ bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+ if (!bat_priv->my_vis_info) {
pr_err("Can't initialize vis packet\n");
goto err;
}
+ bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
+ sizeof(struct vis_packet) +
+ MAX_VIS_PACKET_SIZE +
+ sizeof(struct ethhdr));
+ if (!bat_priv->my_vis_info->skb_packet)
+ goto free_info;
+
+ skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
+ packet = (struct vis_packet *)skb_put(
+ bat_priv->my_vis_info->skb_packet,
+ sizeof(struct vis_packet));
+
/* prefill the vis info */
- my_vis_info->first_seen = jiffies - msecs_to_jiffies(VIS_INTERVAL);
- INIT_LIST_HEAD(&my_vis_info->recv_list);
- INIT_LIST_HEAD(&my_vis_info->send_list);
- kref_init(&my_vis_info->refcount);
- my_vis_info->packet.version = COMPAT_VERSION;
- my_vis_info->packet.packet_type = BAT_VIS;
- my_vis_info->packet.ttl = TTL;
- my_vis_info->packet.seqno = 0;
- my_vis_info->packet.entries = 0;
-
- INIT_LIST_HEAD(&send_list);
-
- memcpy(my_vis_info->packet.vis_orig, main_if_addr, ETH_ALEN);
- memcpy(my_vis_info->packet.sender_orig, main_if_addr, ETH_ALEN);
-
- if (hash_add(vis_hash, my_vis_info) < 0) {
+ bat_priv->my_vis_info->first_seen = jiffies -
+ msecs_to_jiffies(VIS_INTERVAL);
+ INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
+ INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
+ kref_init(&bat_priv->my_vis_info->refcount);
+ bat_priv->my_vis_info->bat_priv = bat_priv;
+ packet->version = COMPAT_VERSION;
+ packet->packet_type = BAT_VIS;
+ packet->ttl = TTL;
+ packet->seqno = 0;
+ packet->entries = 0;
+
+ INIT_LIST_HEAD(&bat_priv->vis_send_list);
+
+ if (hash_add(bat_priv->vis_hash, bat_priv->my_vis_info) < 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
- kref_put(&my_vis_info->refcount, free_info);
+ kref_put(&bat_priv->my_vis_info->refcount, free_info);
goto err;
}
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- start_vis_timer();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ start_vis_timer(bat_priv);
return 1;
+free_info:
+ kfree(bat_priv->my_vis_info);
+ bat_priv->my_vis_info = NULL;
err:
- spin_unlock_irqrestore(&vis_hash_lock, flags);
- vis_quit();
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
+ vis_quit(bat_priv);
return 0;
}
/* Decrease the reference count on a hash item info */
-static void free_info_ref(void *data)
+static void free_info_ref(void *data, void *arg)
{
struct vis_info *info = data;
@@ -793,25 +870,26 @@ static void free_info_ref(void *data)
}
/* shutdown vis-server */
-void vis_quit(void)
+void vis_quit(struct bat_priv *bat_priv)
{
unsigned long flags;
- if (!vis_hash)
+ if (!bat_priv->vis_hash)
return;
- cancel_delayed_work_sync(&vis_timer_wq);
+ cancel_delayed_work_sync(&bat_priv->vis_work);
- spin_lock_irqsave(&vis_hash_lock, flags);
+ spin_lock_irqsave(&bat_priv->vis_hash_lock, flags);
/* properly remove, kill timers ... */
- hash_delete(vis_hash, free_info_ref);
- vis_hash = NULL;
- my_vis_info = NULL;
- spin_unlock_irqrestore(&vis_hash_lock, flags);
+ hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
+ bat_priv->vis_hash = NULL;
+ bat_priv->my_vis_info = NULL;
+ spin_unlock_irqrestore(&bat_priv->vis_hash_lock, flags);
}
/* schedule packets for (re)transmission */
-static void start_vis_timer(void)
+static void start_vis_timer(struct bat_priv *bat_priv)
{
- queue_delayed_work(bat_event_workqueue, &vis_timer_wq,
- (VIS_INTERVAL * HZ) / 1000);
+ INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
+ queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
+ msecs_to_jiffies(VIS_INTERVAL));
}
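Taken together, the vis.c changes replace the kmalloc'ed vis_info with an embedded vis_packet by an sk_buff that is built once and cloned for every transmission. Below is a condensed sketch of that allocation pattern as used by add_packet(), vis_init() and generate_vis_packet() above; locking and error paths are stripped, and the helper name is invented purely for illustration.

/* Illustrative helper (hypothetical name); mirrors the skb handling in
 * the patch above, minus error handling and locking. */
static struct sk_buff *alloc_vis_skb(int vis_info_len)
{
	struct sk_buff *skb;
	struct vis_packet *packet;

	skb = dev_alloc_skb(sizeof(struct ethhdr) +
			    sizeof(struct vis_packet) + vis_info_len);
	if (!skb)
		return NULL;

	/* headroom so the ethernet header can be added at send time */
	skb_reserve(skb, sizeof(struct ethhdr));

	/* the vis header starts the data area ... */
	packet = (struct vis_packet *)skb_put(skb, sizeof(struct vis_packet));
	packet->entries = 0;

	/* ... generate_vis_packet() appends vis_info_entry records with
	 * further skb_put() calls and resets the payload with
	 * skb_trim(skb, sizeof(struct vis_packet)); each transmission works
	 * on skb_clone(skb, GFP_ATOMIC) so the stored copy stays intact. */
	return skb;
}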
diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h
index bb13bf1..2c3b330 100644
--- a/drivers/staging/batman-adv/vis.h
+++ b/drivers/staging/batman-adv/vis.h
@@ -24,29 +24,6 @@
#define VIS_TIMEOUT 200 /* timeout of vis packets in seconds */
-struct vis_info {
- unsigned long first_seen;
- struct list_head recv_list;
- /* list of server-neighbors we received a vis-packet
- * from. we should not reply to them. */
- struct list_head send_list;
- struct kref refcount;
- /* this packet might be part of the vis send queue. */
- struct vis_packet packet;
- /* vis_info may follow here*/
-} __attribute__((packed));
-
-struct vis_info_entry {
- uint8_t src[ETH_ALEN];
- uint8_t dest[ETH_ALEN];
- uint8_t quality; /* quality = 0 means HNA */
-} __attribute__((packed));
-
-struct recvlist_node {
- struct list_head list;
- uint8_t mac[ETH_ALEN];
-};
-
int vis_seq_print_text(struct seq_file *seq, void *offset);
void receive_server_sync_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
@@ -54,7 +31,7 @@ void receive_server_sync_packet(struct bat_priv *bat_priv,
void receive_client_update_packet(struct bat_priv *bat_priv,
struct vis_packet *vis_packet,
int vis_info_len);
-int vis_init(void);
-void vis_quit(void);
+int vis_init(struct bat_priv *bat_priv);
+void vis_quit(struct bat_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_VIS_H_ */
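Note: the former file-scope state (vis_hash, vis_hash_lock, recv_list_lock, send_list, my_vis_info, vis_timer_wq) now lives per mesh interface in struct bat_priv. The field set below is only inferred from the accesses in this diff; member order and the remaining fields are not shown, and the authoritative definition lives in the driver's types.h.

/* Inferred from usage above only -- see types.h for the real struct bat_priv. */
struct bat_priv {
	/* ... */
	struct hashtable_t *vis_hash;		/* was the global vis_hash */
	spinlock_t vis_hash_lock;		/* was the global vis_hash_lock */
	spinlock_t vis_list_lock;		/* was recv_list_lock */
	struct list_head vis_send_list;		/* was the global send_list */
	struct vis_info *my_vis_info;		/* this node's own vis packet */
	struct delayed_work vis_work;		/* replaces static vis_timer_wq */
	/* ... */
};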