/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;
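
		/* skip the vlan if no reference can be taken anymore - its
		 * refcount has already dropped to zero and it is about to be
		 * freed
		 */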
		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;
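
	/* one reference is kept on the vlan_list, the second one is handed
	 * back to the caller
	 */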
	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}
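
/* Minimal usage sketch (not taken from this file) of the per-originator vlan
 * handling above: look the object up (or create it), use it, then drop the
 * reference that was handed back:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return;
 *	...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */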

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);
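
	/* kick off the periodic purge of stale originators and neighbors */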
	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}
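
	/* give the routing algorithm a chance to clean up its private
	 * per-neighbor data, if it registered a handler for that
	 */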
	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
 *  and possibly free it (without rcu callback)
 * @neigh_node: the neighbor object to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_free_rcu(&neigh_node->rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
 *  and possibly free it
 * @neigh_node: the neighbor object to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
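
/* Illustrative sketch (not part of this file) of how a caller is expected to
 * use the lookup above - recv_if stands for whatever outgoing interface the
 * caller is handling:
 *
 *	router = batadv_orig_router_get(orig_node, recv_if);
 *	if (!router)
 *		return;
 *	...
 *	batadv_neigh_node_free_ref(router);
 */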

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}
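
	/* start with the sequence number reset protection window already
	 * expired for this new entry
	 */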
	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	return neigh_node;
}

/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref_now(router);
	kfree(orig_ifinfo);
}

/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
}

static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_mcast_purge_orig(orig_node);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;
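
	/* make sure the periodic purge worker is no longer running before the
	 * hash is torn down
	 */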
	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);
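
	/* initialise the per-originator fragment reassembly buffers */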
	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neighbor which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
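		/* last_bonding_candidate holds its own reference - drop it as
		 * well if it pointed to the ifinfo that was just purged
		 */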
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node_tmp,
|
2010-12-13 05:57:11 +08:00
|
|
|
&orig_node->neigh_list, list) {
|
2012-05-12 19:48:58 +08:00
|
|
|
last_seen = neigh_node->last_seen;
|
|
|
|
if_incoming = neigh_node->if_incoming;
|
|
|
|
|
2012-06-04 04:19:17 +08:00
|
|
|
if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
|
2012-06-04 04:19:19 +08:00
|
|
|
(if_incoming->if_status == BATADV_IF_INACTIVE) ||
|
|
|
|
(if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
|
|
|
|
(if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
|
|
|
|
if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
|
|
|
|
(if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
|
|
|
|
(if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
|
2012-06-04 04:19:22 +08:00
|
|
|
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
|
2012-05-12 19:48:58 +08:00
|
|
|
"neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
|
|
|
|
orig_node->orig, neigh_node->addr,
|
|
|
|
if_incoming->net_dev->name);
|
2010-12-13 19:19:28 +08:00
|
|
|
else
|
2012-06-04 04:19:22 +08:00
|
|
|
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
|
2012-05-12 19:48:58 +08:00
|
|
|
"neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
|
|
|
|
orig_node->orig, neigh_node->addr,
|
|
|
|
jiffies_to_msecs(last_seen));
|
2010-12-13 19:19:28 +08:00
|
|
|
|
|
|
|
neigh_purged = true;
|
2010-12-13 05:57:11 +08:00
|
|
|
|
2010-12-13 05:57:12 +08:00
|
|
|
hlist_del_rcu(&neigh_node->list);
|
2012-05-12 08:09:34 +08:00
|
|
|
batadv_neigh_node_free_ref(neigh_node);
|
2014-03-26 22:46:24 +08:00
|
|
|
} else {
|
|
|
|
/* only necessary if the whole neighbor is not being
|
|
|
|
* deleted, but one of its interfaces has been removed.
|
|
|
|
*/
|
|
|
|
batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
}
|
2010-12-13 05:57:12 +08:00
|
|
|
|
|
|
|
spin_unlock_bh(&orig_node->neigh_list_lock);
|
2010-12-13 19:19:28 +08:00
|
|
|
return neigh_purged;
|
|
|
|
}
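The purge loop above drops a neighbor when its last_seen timestamp has exceeded BATADV_PURGE_TIMEOUT or when the interface it was heard on is inactive, not in use, or about to be removed; otherwise only the neighbor's per-interface info is cleaned up. The following is a minimal userspace sketch of that purge predicate only; the enum, struct and helper names are illustrative stand-ins, not the kernel's, and the jiffies-based timeout check is reduced to a plain subtraction.

/* Sketch of the neighbor purge decision (illustrative names, not kernel code) */
#include <stdbool.h>
#include <stdio.h>

enum if_status { IF_ACTIVE, IF_INACTIVE, IF_NOT_IN_USE, IF_TO_BE_REMOVED };

struct neigh {
	unsigned long last_seen;	/* time of the last OGM from this neighbor */
	enum if_status if_status;	/* status of the incoming interface */
};

static bool has_timed_out(unsigned long last_seen, unsigned long now,
			  unsigned long timeout)
{
	return now - last_seen > timeout;
}

/* mirrors the if/else branch in the purge loop above */
static bool should_purge_neigh(const struct neigh *n, unsigned long now,
			       unsigned long purge_timeout)
{
	if (has_timed_out(n->last_seen, now, purge_timeout))
		return true;
	return n->if_status == IF_INACTIVE ||
	       n->if_status == IF_NOT_IN_USE ||
	       n->if_status == IF_TO_BE_REMOVED;
}

int main(void)
{
	struct neigh stale = { .last_seen = 0, .if_status = IF_ACTIVE };
	struct neigh fresh = { .last_seen = 950, .if_status = IF_ACTIVE };

	printf("stale purged: %d\n", should_purge_neigh(&stale, 1000, 200));
	printf("fresh purged: %d\n", should_purge_neigh(&fresh, 1000, 200));
	return 0;
}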
|
|
|
|
|
2013-11-14 02:14:46 +08:00
|
|
|
/**
|
|
|
|
* batadv_find_best_neighbor - finds the best neighbor after purging
|
|
|
|
* @bat_priv: the bat priv with all the soft interface information
|
|
|
|
* @orig_node: orig node which is to be checked
|
|
|
|
* @if_outgoing: the interface for which the metric should be compared
|
|
|
|
*
|
|
|
|
* Returns the current best neighbor, with refcount increased.
|
|
|
|
*/
|
|
|
|
static struct batadv_neigh_node *
|
|
|
|
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
|
|
|
|
struct batadv_orig_node *orig_node,
|
|
|
|
struct batadv_hard_iface *if_outgoing)
|
|
|
|
{
|
|
|
|
struct batadv_neigh_node *best = NULL, *neigh;
|
|
|
|
struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
|
|
|
|
if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
|
|
|
|
best, if_outgoing) <= 0))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!atomic_inc_not_zero(&neigh->refcount))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (best)
|
|
|
|
batadv_neigh_node_free_ref(best);
|
|
|
|
|
|
|
|
best = neigh;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return best;
|
|
|
|
}
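batadv_find_best_neighbor() walks the RCU-protected neighbor list, keeps whichever candidate wins the bat_neigh_cmp() comparison, and hands the caller a reference on the winner by taking a reference on each new best and dropping the one held on the previous best. The sketch below models only that select-and-swap-references pattern in plain userspace C; the refcount is a bare counter instead of an atomic, there is no RCU, and all names are illustrative.

/* Keep the best candidate while transferring a reference between old and new best */
#include <stdio.h>

struct cand {
	int metric;	/* larger is better in this toy model */
	int refcount;
};

static void cand_get(struct cand *c) { c->refcount++; }
static void cand_put(struct cand *c) { c->refcount--; }

static struct cand *find_best(struct cand *list, int n)
{
	struct cand *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		struct cand *c = &list[i];

		/* skip candidates that do not beat the current best */
		if (best && c->metric <= best->metric)
			continue;

		cand_get(c);		/* hold the new best */
		if (best)
			cand_put(best);	/* release the previous best */
		best = c;
	}
	return best;			/* returned with an extra reference held */
}

int main(void)
{
	struct cand list[] = { { 10, 1 }, { 42, 1 }, { 7, 1 } };
	struct cand *best = find_best(list, 3);

	printf("best metric: %d (refcount %d)\n", best->metric, best->refcount);
	cand_put(best);			/* caller drops its reference when done */
	return 0;
}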
|
|
|
|
|
2013-11-14 02:14:47 +08:00
|
|
|
/**
|
|
|
|
* batadv_purge_orig_node - purges obsolete information from an orig_node
|
|
|
|
* @bat_priv: the bat priv with all the soft interface information
|
|
|
|
* @orig_node: orig node which is to be checked
|
|
|
|
*
|
|
|
|
* This function checks if the orig_node or substructures of it have become
|
|
|
|
* obsolete, and purges this information if that's the case.
|
|
|
|
*
|
|
|
|
* Returns true if the orig_node is to be removed, false otherwise.
|
|
|
|
*/
|
2012-06-06 04:31:31 +08:00
|
|
|
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
|
|
|
|
struct batadv_orig_node *orig_node)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_neigh_node *best_neigh_node;
|
2013-11-14 02:14:47 +08:00
|
|
|
struct batadv_hard_iface *hard_iface;
|
2014-03-26 22:46:23 +08:00
|
|
|
bool changed_ifinfo, changed_neigh;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2012-06-04 04:19:17 +08:00
|
|
|
if (batadv_has_timed_out(orig_node->last_seen,
|
|
|
|
2 * BATADV_PURGE_TIMEOUT)) {
|
2012-06-04 04:19:22 +08:00
|
|
|
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
|
2012-05-12 19:48:58 +08:00
|
|
|
"Originator timeout: originator %pM, last_seen %u\n",
|
|
|
|
orig_node->orig,
|
|
|
|
jiffies_to_msecs(orig_node->last_seen));
|
2010-12-13 19:19:28 +08:00
|
|
|
return true;
|
|
|
|
}
|
2014-03-26 22:46:23 +08:00
|
|
|
changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
|
|
|
|
changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
|
2013-11-14 02:14:47 +08:00
|
|
|
|
2014-03-26 22:46:23 +08:00
|
|
|
if (!changed_ifinfo && !changed_neigh)
|
2013-11-14 02:14:46 +08:00
|
|
|
return false;
|
|
|
|
|
2013-11-14 02:14:47 +08:00
|
|
|
/* first for the default interface (BATADV_IF_DEFAULT) ... */
|
2013-11-14 02:14:46 +08:00
|
|
|
best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
|
|
|
|
BATADV_IF_DEFAULT);
|
2013-11-14 02:14:47 +08:00
|
|
|
batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
|
|
|
|
best_neigh_node);
|
2013-11-14 02:14:46 +08:00
|
|
|
if (best_neigh_node)
|
|
|
|
batadv_neigh_node_free_ref(best_neigh_node);
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2013-11-14 02:14:47 +08:00
|
|
|
/* ... then for all other interfaces. */
|
|
|
|
rcu_read_lock();
|
|
|
|
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
|
|
|
|
if (hard_iface->if_status != BATADV_IF_ACTIVE)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (hard_iface->soft_iface != bat_priv->soft_iface)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
best_neigh_node = batadv_find_best_neighbor(bat_priv,
|
|
|
|
orig_node,
|
|
|
|
hard_iface);
|
|
|
|
batadv_update_route(bat_priv, orig_node, hard_iface,
|
|
|
|
best_neigh_node);
|
|
|
|
if (best_neigh_node)
|
|
|
|
batadv_neigh_node_free_ref(best_neigh_node);
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2010-12-13 19:19:28 +08:00
|
|
|
return false;
|
|
|
|
}
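batadv_purge_orig_node() only removes an originator after twice the neighbor purge timeout; before that it prunes stale per-interface info and neighbors and, if anything changed, re-selects the best neighbor for the default interface and every active outgoing interface. A compact userspace model of that control flow is given below; the constants and the purge_ifinfo()/purge_neighbors()/update_routes() stand-ins are illustrative assumptions, not the kernel helpers.

/* Control-flow sketch of the originator purge decision (illustrative only) */
#include <stdbool.h>
#include <stdio.h>

#define PURGE_TIMEOUT 200UL

static bool timed_out(unsigned long last_seen, unsigned long now,
		      unsigned long timeout)
{
	return now - last_seen > timeout;
}

/* stand-ins for the ifinfo/neighbor purge helpers and the route update */
static bool purge_ifinfo(void)    { return false; }
static bool purge_neighbors(void) { return true; }
static void update_routes(void)   { puts("recomputing best neighbors"); }

/* returns true if the originator itself should be removed */
static bool purge_orig_node(unsigned long last_seen, unsigned long now)
{
	bool changed_ifinfo, changed_neigh;

	if (timed_out(last_seen, now, 2 * PURGE_TIMEOUT))
		return true;

	changed_ifinfo = purge_ifinfo();
	changed_neigh = purge_neighbors();
	if (!changed_ifinfo && !changed_neigh)
		return false;

	update_routes();
	return false;
}

int main(void)
{
	printf("remove: %d\n", purge_orig_node(0, 1000));   /* long gone */
	printf("remove: %d\n", purge_orig_node(900, 1000)); /* still alive */
	return 0;
}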
|
|
|
|
|
2012-06-06 04:31:31 +08:00
|
|
|
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-06-06 04:31:28 +08:00
|
|
|
struct batadv_hashtable *hash = bat_priv->orig_hash;
|
2013-02-28 09:06:00 +08:00
|
|
|
struct hlist_node *node_tmp;
|
2010-12-13 19:19:28 +08:00
|
|
|
struct hlist_head *head;
|
2011-01-20 04:01:40 +08:00
|
|
|
spinlock_t *list_lock; /* spinlock to protect write access */
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_orig_node *orig_node;
|
2015-05-27 00:34:26 +08:00
|
|
|
u32 i;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
|
|
|
if (!hash)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* for all origins... */
|
|
|
|
for (i = 0; i < hash->size; i++) {
|
|
|
|
head = &hash->table[i];
|
2011-01-20 04:01:40 +08:00
|
|
|
list_lock = &hash->list_locks[i];
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2011-01-20 04:01:40 +08:00
|
|
|
spin_lock_bh(list_lock);
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_safe(orig_node, node_tmp,
|
2011-02-18 20:28:09 +08:00
|
|
|
head, hash_entry) {
|
2012-05-13 00:34:00 +08:00
|
|
|
if (batadv_purge_orig_node(bat_priv, orig_node)) {
|
2013-04-23 21:39:58 +08:00
|
|
|
batadv_gw_node_delete(bat_priv, orig_node);
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_del_rcu(&orig_node->hash_entry);
|
2014-12-14 06:32:15 +08:00
|
|
|
batadv_tt_global_del_orig(orig_node->bat_priv,
|
|
|
|
orig_node, -1,
|
|
|
|
"originator timed out");
|
2012-05-12 08:09:34 +08:00
|
|
|
batadv_orig_node_free_ref(orig_node);
|
2011-01-20 04:01:40 +08:00
|
|
|
continue;
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
2013-05-23 22:53:02 +08:00
|
|
|
|
|
|
|
batadv_frag_purge_orig(orig_node,
|
|
|
|
batadv_frag_check_entry);
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
2011-01-20 04:01:40 +08:00
|
|
|
spin_unlock_bh(list_lock);
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
|
2012-05-12 08:09:29 +08:00
|
|
|
batadv_gw_election(bat_priv);
|
2010-12-13 19:19:28 +08:00
|
|
|
}
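_batadv_purge_orig() visits every bucket of the originator hash under that bucket's spinlock and uses hlist_for_each_entry_safe() so entries can be unlinked and freed while iterating. The userspace sketch below shows the same delete-while-walking idea with a plain singly linked list per bucket (the next pointer is saved before a node may be freed); locking is omitted and all names are illustrative.

/* Per-bucket walk with safe removal, modeled on a simple singly linked list */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	bool expired;
	struct node *next;
};

#define HASH_SIZE 4

static void purge_bucket(struct node **head)
{
	struct node **pprev = head;
	struct node *n = *head;
	struct node *next;

	while (n) {
		next = n->next;		/* saved before a possible free() */
		if (n->expired) {
			*pprev = next;	/* unlink, analogous to hlist_del_rcu() */
			printf("purged node %d\n", n->id);
			free(n);
		} else {
			pprev = &n->next;
		}
		n = next;
	}
}

int main(void)
{
	struct node *table[HASH_SIZE] = { NULL };
	int i;

	for (i = 0; i < 6; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->expired = (i % 2 == 0);
		n->next = table[i % HASH_SIZE];
		table[i % HASH_SIZE] = n;
	}

	for (i = 0; i < HASH_SIZE; i++)	/* one bucket at a time */
		purge_bucket(&table[i]);

	for (i = 0; i < HASH_SIZE; i++)	/* free the survivors */
		while (table[i]) {
			struct node *n = table[i];

			table[i] = n->next;
			free(n);
		}
	return 0;
}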
|
|
|
|
|
2012-05-13 00:34:00 +08:00
|
|
|
static void batadv_purge_orig(struct work_struct *work)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-06-06 04:31:31 +08:00
|
|
|
struct delayed_work *delayed_work;
|
|
|
|
struct batadv_priv *bat_priv;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2012-06-06 04:31:31 +08:00
|
|
|
delayed_work = container_of(work, struct delayed_work, work);
|
|
|
|
bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
|
2012-05-13 00:34:00 +08:00
|
|
|
_batadv_purge_orig(bat_priv);
|
2012-12-25 20:14:37 +08:00
|
|
|
queue_delayed_work(batadv_event_workqueue,
|
|
|
|
&bat_priv->orig_work,
|
|
|
|
msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
|
2010-12-13 19:19:28 +08:00
|
|
|
}
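The work callback recovers its batadv_priv from the embedded work item via container_of() and then re-queues itself with a delay, which is what turns the purge into a periodic job. The standalone snippet below illustrates just the container_of() step, using the simplified offsetof() form of the macro and placeholder struct names.

/* Recovering the enclosing structure from a pointer to an embedded member */
#include <stddef.h>
#include <stdio.h>

/* simplified form of the kernel macro, reproduced for a standalone build */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct delayed_work { int dummy; };

struct priv {
	int id;
	struct delayed_work orig_work;	/* embedded work item */
};

int main(void)
{
	struct priv p = { .id = 7 };
	struct delayed_work *dw = &p.orig_work;
	struct priv *back = container_of(dw, struct priv, orig_work);

	printf("recovered id: %d\n", back->id);
	return 0;
}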
|
|
|
|
|
2012-06-06 04:31:31 +08:00
|
|
|
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-05-13 00:34:00 +08:00
|
|
|
_batadv_purge_orig(bat_priv);
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
|
2012-05-12 08:09:34 +08:00
|
|
|
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
|
|
|
struct net_device *net_dev = (struct net_device *)seq->private;
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_priv *bat_priv = netdev_priv(net_dev);
|
|
|
|
struct batadv_hard_iface *primary_if;
|
2011-04-20 21:40:58 +08:00
|
|
|
|
2012-08-03 23:15:46 +08:00
|
|
|
primary_if = batadv_seq_print_text_primary_if_get(seq);
|
|
|
|
if (!primary_if)
|
2013-09-02 18:15:03 +08:00
|
|
|
return 0;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2013-09-02 18:15:03 +08:00
|
|
|
seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
|
2012-06-04 04:19:17 +08:00
|
|
|
BATADV_SOURCE_VERSION, primary_if->net_dev->name,
|
2013-09-02 18:15:03 +08:00
|
|
|
primary_if->net_dev->dev_addr, net_dev->name,
|
|
|
|
bat_priv->bat_algo_ops->name);
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2013-09-02 18:15:03 +08:00
|
|
|
batadv_hardif_free_ref(primary_if);
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2013-09-02 18:15:03 +08:00
|
|
|
if (!bat_priv->bat_algo_ops->bat_orig_print) {
|
|
|
|
seq_puts(seq,
|
|
|
|
"No printing function for this routing protocol\n");
|
|
|
|
return 0;
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
|
2013-11-21 18:52:16 +08:00
|
|
|
bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
|
|
|
|
BATADV_IF_DEFAULT);
|
2010-12-13 19:19:28 +08:00
|
|
|
|
2012-08-03 23:15:46 +08:00
|
|
|
return 0;
|
2010-12-13 19:19:28 +08:00
|
|
|
}
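Both debugfs dump functions treat bat_orig_print as an optional hook: the function pointer is checked before the call and a fallback message is printed when the routing algorithm does not provide it. A minimal userspace model of that optional-callback dispatch follows; the struct and function names are illustrative.

/* Optional callback dispatch with a fallback message */
#include <stdio.h>

struct algo_ops {
	const char *name;
	void (*orig_print)(void);	/* optional, may be NULL */
};

static void dump_table(void) { puts("originator table ..."); }

static void print_originators(const struct algo_ops *ops)
{
	if (!ops->orig_print) {
		puts("No printing function for this routing protocol");
		return;
	}
	ops->orig_print();
}

int main(void)
{
	struct algo_ops with_print = { "algo A", dump_table };
	struct algo_ops no_print = { "algo B", NULL };

	print_originators(&with_print);
	print_originators(&no_print);
	return 0;
}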
|
|
|
|
|
2013-11-21 18:52:16 +08:00
|
|
|
/**
|
|
|
|
* batadv_orig_hardif_seq_print_text - writes originator information for a specific
|
|
|
|
* outgoing interface
|
|
|
|
* @seq: debugfs table seq_file struct
|
|
|
|
* @offset: not used
|
|
|
|
*
|
|
|
|
* Returns 0
|
|
|
|
*/
|
|
|
|
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
|
|
|
|
{
|
|
|
|
struct net_device *net_dev = (struct net_device *)seq->private;
|
|
|
|
struct batadv_hard_iface *hard_iface;
|
|
|
|
struct batadv_priv *bat_priv;
|
|
|
|
|
|
|
|
hard_iface = batadv_hardif_get_by_netdev(net_dev);
|
|
|
|
|
|
|
|
if (!hard_iface || !hard_iface->soft_iface) {
|
|
|
|
seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
bat_priv = netdev_priv(hard_iface->soft_iface);
|
|
|
|
if (!bat_priv->bat_algo_ops->bat_orig_print) {
|
|
|
|
seq_puts(seq,
|
|
|
|
"No printing function for this routing protocol\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hard_iface->if_status != BATADV_IF_ACTIVE) {
|
|
|
|
seq_puts(seq, "Interface not active\n");
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
|
|
|
|
BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
|
|
|
|
hard_iface->net_dev->dev_addr,
|
|
|
|
hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
|
|
|
|
|
|
|
|
bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
|
|
|
|
|
|
|
|
out:
|
2014-04-24 03:44:25 +08:00
|
|
|
if (hard_iface)
|
|
|
|
batadv_hardif_free_ref(hard_iface);
|
2013-11-21 18:52:16 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-06-06 04:31:31 +08:00
|
|
|
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
|
|
|
|
int max_if_num)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
|
2013-09-03 17:10:23 +08:00
|
|
|
struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
|
2012-06-06 04:31:28 +08:00
|
|
|
struct batadv_hashtable *hash = bat_priv->orig_hash;
|
2010-12-13 19:19:28 +08:00
|
|
|
struct hlist_head *head;
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_orig_node *orig_node;
|
2015-05-27 00:34:26 +08:00
|
|
|
u32 i;
|
2011-10-05 23:05:25 +08:00
|
|
|
int ret;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
|
|
|
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
|
2012-05-12 08:09:43 +08:00
|
|
|
* if_num
|
|
|
|
*/
|
2010-12-13 19:19:28 +08:00
|
|
|
for (i = 0; i < hash->size; i++) {
|
|
|
|
head = &hash->table[i];
|
|
|
|
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_lock();
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
|
2013-09-03 17:10:23 +08:00
|
|
|
ret = 0;
|
|
|
|
if (bao->bat_orig_add_if)
|
|
|
|
ret = bao->bat_orig_add_if(orig_node,
|
|
|
|
max_if_num);
|
2012-05-05 19:27:28 +08:00
|
|
|
if (ret == -ENOMEM)
|
2010-12-13 19:19:28 +08:00
|
|
|
goto err;
|
|
|
|
}
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_unlock();
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_unlock();
|
2010-12-13 19:19:28 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
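When an interface is added, batadv_orig_hash_add_if() walks every originator and asks the routing algorithm's bat_orig_add_if() hook to grow its per-interface state, aborting the walk as soon as one resize fails with -ENOMEM. The sketch below models that grow-or-abort step for a single per-interface counter array using realloc(); the struct layout and helper name are assumptions for illustration only.

/* Grow per-originator, per-interface state; abort the walk on -ENOMEM */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct orig {
	unsigned long *bcast_own;	/* one counter per interface */
};

static int orig_add_if(struct orig *o, int max_if_num)
{
	unsigned long *grown;

	grown = realloc(o->bcast_own, max_if_num * sizeof(*grown));
	if (!grown)
		return -ENOMEM;

	grown[max_if_num - 1] = 0;	/* the new interface starts at zero */
	o->bcast_own = grown;
	return 0;
}

int main(void)
{
	struct orig origs[3] = { { NULL }, { NULL }, { NULL } };
	int max_if_num = 1;
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = orig_add_if(&origs[i], max_if_num);
		if (ret == -ENOMEM) {
			fprintf(stderr, "resize failed\n");
			return 1;
		}
	}
	printf("resized %d originators to %d interface slot(s)\n", 3,
	       max_if_num);

	for (i = 0; i < 3; i++)
		free(origs[i].bcast_own);
	return 0;
}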
|
|
|
|
|
2012-06-06 04:31:31 +08:00
|
|
|
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
|
|
|
|
int max_if_num)
|
2010-12-13 19:19:28 +08:00
|
|
|
{
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
|
2012-06-06 04:31:28 +08:00
|
|
|
struct batadv_hashtable *hash = bat_priv->orig_hash;
|
2010-12-13 19:19:28 +08:00
|
|
|
struct hlist_head *head;
|
2012-06-06 04:31:31 +08:00
|
|
|
struct batadv_hard_iface *hard_iface_tmp;
|
|
|
|
struct batadv_orig_node *orig_node;
|
2013-09-03 17:10:23 +08:00
|
|
|
struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
|
2015-05-27 00:34:26 +08:00
|
|
|
u32 i;
|
2011-10-05 23:05:25 +08:00
|
|
|
int ret;
|
2010-12-13 19:19:28 +08:00
|
|
|
|
|
|
|
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
|
2012-05-12 08:09:43 +08:00
|
|
|
* if_num
|
|
|
|
*/
|
2010-12-13 19:19:28 +08:00
|
|
|
for (i = 0; i < hash->size; i++) {
|
|
|
|
head = &hash->table[i];
|
|
|
|
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_lock();
|
2013-02-28 09:06:00 +08:00
|
|
|
hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
|
2013-09-03 17:10:23 +08:00
|
|
|
ret = 0;
|
|
|
|
if (bao->bat_orig_del_if)
|
|
|
|
ret = bao->bat_orig_del_if(orig_node,
|
|
|
|
max_if_num,
|
|
|
|
hard_iface->if_num);
|
2012-05-05 19:27:28 +08:00
|
|
|
if (ret == -ENOMEM)
|
2010-12-13 19:19:28 +08:00
|
|
|
goto err;
|
|
|
|
}
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_unlock();
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* renumber remaining batman interfaces _inside_ orig_hash_lock */
|
|
|
|
rcu_read_lock();
|
2012-05-12 08:09:42 +08:00
|
|
|
list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
|
2012-06-04 04:19:19 +08:00
|
|
|
if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
|
2010-12-13 19:19:28 +08:00
|
|
|
continue;
|
|
|
|
|
2011-02-18 20:33:20 +08:00
|
|
|
if (hard_iface == hard_iface_tmp)
|
2010-12-13 19:19:28 +08:00
|
|
|
continue;
|
|
|
|
|
2011-02-18 20:33:20 +08:00
|
|
|
if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
|
2010-12-13 19:19:28 +08:00
|
|
|
continue;
|
|
|
|
|
2011-02-18 20:33:20 +08:00
|
|
|
if (hard_iface_tmp->if_num > hard_iface->if_num)
|
|
|
|
hard_iface_tmp->if_num--;
|
2010-12-13 19:19:28 +08:00
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2011-02-18 20:33:20 +08:00
|
|
|
hard_iface->if_num = -1;
|
2010-12-13 19:19:28 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err:
|
2011-01-20 04:01:40 +08:00
|
|
|
rcu_read_unlock();
|
2010-12-13 19:19:28 +08:00
|
|
|
return -ENOMEM;
|
|
|
|
}
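After the per-originator state has been shrunk, batadv_orig_hash_del_if() keeps the if_num values dense: every remaining interface with a higher number than the removed one slides down by one, and the removed interface is marked with -1. A toy userspace model of that renumbering follows; array indices stand in for the hard interface list.

/* Renumber interfaces after one has been removed (illustrative model) */
#include <stdio.h>

#define NUM_IFACES 4

static void renumber(int if_num[], int count, int removed)
{
	int i;

	for (i = 0; i < count; i++) {
		if (i == removed)
			continue;
		if (if_num[i] > if_num[removed])
			if_num[i]--;	/* slide down past the removed slot */
	}
	if_num[removed] = -1;		/* mark the removed interface */
}

int main(void)
{
	int if_num[NUM_IFACES] = { 0, 1, 2, 3 };
	int i;

	renumber(if_num, NUM_IFACES, 1);	/* remove interface #1 */
	for (i = 0; i < NUM_IFACES; i++)
		printf("iface %d -> if_num %d\n", i, if_num[i]);
	return 0;
}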
|