mirror of https://gitee.com/openkylin/linux.git
Merge branch 'rhashtable-New-features-in-walk-and-bucket'
Tom Herbert says:

====================
rhashtable: New features in walk and bucket

This patch set contains some changes related to rhashtable:

- Allow rhashtable_walk_start to return void
- Add a function to peek at the next entry during a walk
- Abstract out a function to compute a hash for a table
- A library function to allocate a bucket array of spinlocks
- Call the above function for rhashtable lock allocation

Tested: Exercised using various operations on an ILA xlat table.

v2:
- Apply feedback from Herbert. Don't change semantics of resize
  event reporting and -EAGAIN, just simplify the API for callers
  that ignore those.
- Add end_of_table in iter to reliably tell when the iterator has
  reached the end of the table.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9944a0f2f5
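For context before the per-file hunks: the series replaces the old must-check rhashtable_walk_start() with a void variant (the checked form becomes rhashtable_walk_start_check()), so callers that ignore resize events can drop their -EAGAIN handling. A minimal sketch of the resulting caller pattern; the function and element type here are illustrative, not taken from the diff:

	/* Sketch: walking an rhashtable with the simplified API.
	 * Callers that ignore resize events no longer special-case
	 * the return value of the start call.
	 */
	static void example_walk(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		struct example_obj *obj;	/* hypothetical element type */

		rhashtable_walk_enter(ht, &iter);
		do {
			rhashtable_walk_start(&iter);	/* now returns void */

			while ((obj = rhashtable_walk_next(&iter)) && !IS_ERR(obj))
				;	/* process obj; -EAGAIN falls out of the loop */

			rhashtable_walk_stop(&iter);
		} while (obj == ERR_PTR(-EAGAIN));	/* resize: rewind and rescan */
		rhashtable_walk_exit(&iter);
	}

This is exactly the shape the cxgb4, gfs2 and tipc hunks below converge on.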
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1412,11 +1412,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
 	void *flow_node;
 	int rc, i;
 
-	rc = rhashtable_walk_start(iter);
-	if (rc && rc != -EAGAIN) {
-		i = 0;
-		goto done;
-	}
+	rhashtable_walk_start(iter);
 
 	rc = 0;
 	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -763,9 +763,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
 
 	rhashtable_walk_enter(&adap->flower_tbl, &iter);
 	do {
-		flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(flower_entry))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((flower_entry = rhashtable_walk_next(&iter)) &&
 		       !IS_ERR(flower_entry)) {
@@ -784,8 +782,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
 				spin_unlock(&flower_entry->lock);
 			}
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
+
 	} while (flower_entry == ERR_PTR(-EAGAIN));
 	rhashtable_walk_exit(&iter);
 	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
fs/gfs2/glock.c
@@ -1549,16 +1549,13 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 	rhashtable_walk_enter(&gl_hash_table, &iter);
 
 	do {
-		gl = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(gl))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
 			if (gl->gl_name.ln_sbd == sdp &&
 			    lockref_get_not_dead(&gl->gl_lockref))
 				examiner(gl);
 
-walk_stop:
 		rhashtable_walk_stop(&iter);
 	} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
 
@@ -1947,7 +1944,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
 	loff_t n = *pos;
 
 	rhashtable_walk_enter(&gl_hash_table, &gi->hti);
-	if (rhashtable_walk_start(&gi->hti) != 0)
+	if (rhashtable_walk_start_check(&gi->hti) != 0)
 		return NULL;
 
 	do {
include/linux/rhashtable.h
@@ -207,6 +207,7 @@ struct rhashtable_iter {
 	struct rhashtable_walker walker;
 	unsigned int slot;
 	unsigned int skip;
+	bool end_of_table;
 };
 
 static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
 	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
 }
 
-static inline unsigned int rht_key_hashfn(
-	struct rhashtable *ht, const struct bucket_table *tbl,
-	const void *key, const struct rhashtable_params params)
+static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
+	const void *key, const struct rhashtable_params params,
+	unsigned int hash_rnd)
 {
 	unsigned int hash;
 
 	/* params must be equal to ht->p if it isn't constant. */
 	if (!__builtin_constant_p(params.key_len))
-		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
+		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
 	else if (params.key_len) {
 		unsigned int key_len = params.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else if (key_len & (sizeof(u32) - 1))
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 		else
-			hash = jhash2(key, key_len / sizeof(u32),
-				      tbl->hash_rnd);
+			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
 	} else {
 		unsigned int key_len = ht->p.key_len;
 
 		if (params.hashfn)
-			hash = params.hashfn(key, key_len, tbl->hash_rnd);
+			hash = params.hashfn(key, key_len, hash_rnd);
 		else
-			hash = jhash(key, key_len, tbl->hash_rnd);
+			hash = jhash(key, key_len, hash_rnd);
 	}
 
 	return hash;
 }
 
+static inline unsigned int rht_key_hashfn(
+	struct rhashtable *ht, const struct bucket_table *tbl,
+	const void *key, const struct rhashtable_params params)
+{
+	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);
+
+	return rht_bucket_index(tbl, hash);
+}
+
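The point of splitting rht_key_get_hash() out of rht_key_hashfn() is that the hash computation no longer needs a bucket_table, only a hash_rnd seed, so a caller can hash a key consistently with the table's parameters but against its own seed, for example to index a separate lock array. A hedged sketch under that reading; the function name and the lock_mask parameter are illustrative, not part of this patch:

	/* Sketch: a table-independent hash for a key, used to pick a
	 * lock from a separate bucket-lock array. Same hashing
	 * decisions as rht_key_hashfn(), but with a caller-supplied
	 * seed instead of tbl->hash_rnd.
	 */
	static inline unsigned int example_lock_hash(struct rhashtable *ht,
						     const void *key,
						     const struct rhashtable_params params,
						     unsigned int hash_rnd,
						     unsigned int lock_mask)
	{
		unsigned int hash = rht_key_get_hash(ht, key, params, hash_rnd);

		return hash & lock_mask;	/* mask from alloc_bucket_spinlocks() */
	}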
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 void rhashtable_walk_enter(struct rhashtable *ht,
 			   struct rhashtable_iter *iter);
 void rhashtable_walk_exit(struct rhashtable_iter *iter);
-int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
+int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);
+
+static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
+{
+	(void)rhashtable_walk_start_check(iter);
+}
+
 void *rhashtable_walk_next(struct rhashtable_iter *iter);
+void *rhashtable_walk_peek(struct rhashtable_iter *iter);
 void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
 
 void rhashtable_free_and_destroy(struct rhashtable *ht,
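Callers that do need to observe a resize keep the old contract by switching to rhashtable_walk_start_check(), as the gfs2 seq_file hunk above does. A short sketch of that checked pattern; the function name is illustrative:

	/* Sketch: the checked variant still reports -EAGAIN on resize. */
	static void *example_seq_start(struct rhashtable_iter *iter)
	{
		if (rhashtable_walk_start_check(iter) != 0)
			return NULL;	/* resize event; iterator was rewound */

		return rhashtable_walk_next(iter);
	}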
include/linux/spinlock.h
@@ -414,4 +414,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+			   size_t max_size, unsigned int cpu_mult,
+			   gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
 #endif /* __LINUX_SPINLOCK_H */
include/net/sctp/sctp.h
@@ -116,7 +116,7 @@ extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
-int sctp_transport_walk_start(struct rhashtable_iter *iter);
+void sctp_transport_walk_start(struct rhashtable_iter *iter);
 void sctp_transport_walk_stop(struct rhashtable_iter *iter);
 struct sctp_transport *sctp_transport_get_next(struct net *net,
 					       struct rhashtable_iter *iter);
lib/Makefile
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o errseq.o
+	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
lib/bucket_locks.c (new file)
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * indicate the number of elements to allocate in the array. max_size
+ * gives the maximum number of elements to allocate. cpu_mult gives
+ * the number of locks per CPU to allocate. The size is rounded up
+ * to a power of 2 to be suitable as a hash table.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+			   size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+	spinlock_t *tlocks = NULL;
+	unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+	unsigned int nr_pcpus = 2;
+#else
+	unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+	if (cpu_mult) {
+		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+		size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+	} else {
+		size = max_size;
+	}
+
+	if (sizeof(spinlock_t) != 0) {
+		if (gfpflags_allow_blocking(gfp))
+			tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+		else
+			tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+		if (!tlocks)
+			return -ENOMEM;
+		for (i = 0; i < size; i++)
+			spin_lock_init(&tlocks[i]);
+	}
+
+	*locks = tlocks;
+	*locks_mask = size - 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+	kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
-			      gfp_t gfp)
-{
-	unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
-	unsigned int nr_pcpus = 2;
-#else
-	unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
-	nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
-	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
-	/* Never allocate more than 0.5 locks per bucket */
-	size = min_t(unsigned int, size, tbl->size >> 1);
-
-	if (tbl->nest)
-		size = min(size, 1U << tbl->nest);
-
-	if (sizeof(spinlock_t) != 0) {
-		if (gfpflags_allow_blocking(gfp))
-			tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
-		else
-			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
-						   gfp);
-		if (!tbl->locks)
-			return -ENOMEM;
-		for (i = 0; i < size; i++)
-			spin_lock_init(&tbl->locks[i]);
-	}
-	tbl->locks_mask = size - 1;
-
-	return 0;
-}
-
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 
 /**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
  * @iter:	Hash table iterator
  *
  * Start a hash table walk at the current iterator position.  Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
  * Returns -EAGAIN if resize event occured.  Note that the iterator
  * will rewind back to the beginning and you may use it immediately
  * by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
  */
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	__acquires(RCU)
 {
 	struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
 	list_del(&iter->walker.list);
 	spin_unlock(&ht->lock);
 
-	if (!iter->walker.tbl) {
+	if (!iter->walker.tbl && !iter->end_of_table) {
 		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
 		return -EAGAIN;
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
 
 /**
- * rhashtable_walk_next - Return the next object and advance the iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
+ *
  * @iter:	Hash table iterator
  *
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * Returns the found object or NULL when the end of the table is reached.
  *
- * Returns the next object or NULL when the end of the table is reached.
- *
- * Returns -EAGAIN if resize event occured.  Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
  */
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
 {
 	struct bucket_table *tbl = iter->walker.tbl;
 	struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 	struct rhash_head *p = iter->p;
 	bool rhlist = ht->rhlist;
 
-	if (p) {
-		if (!rhlist || !(list = rcu_dereference(list->next))) {
-			p = rcu_dereference(p->next);
-			list = container_of(p, struct rhlist_head, rhead);
-		}
-		goto next;
-	}
+	if (!tbl)
+		return NULL;
 
 	for (; iter->slot < tbl->size; iter->slot++) {
 		int skip = iter->skip;
@@ -836,12 +803,89 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
 		iter->slot = 0;
 		iter->skip = 0;
 		return ERR_PTR(-EAGAIN);
+	} else {
+		iter->end_of_table = true;
 	}
 
 	return NULL;
 }
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+	bool rhlist = ht->rhlist;
+
+	if (p) {
+		if (!rhlist || !(list = rcu_dereference(list->next))) {
+			p = rcu_dereference(p->next);
+			list = container_of(p, struct rhlist_head, rhead);
+		}
+		if (!rht_is_a_nulls(p)) {
+			iter->skip++;
+			iter->p = p;
+			iter->list = list;
+			return rht_obj(ht, rhlist ? &list->rhead : p);
+		}
+
+		/* At the end of this slot, switch to next one and then find
+		 * next entry from that point.
+		 */
+		iter->skip = 0;
+		iter->slot++;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
 
+/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter:	Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred.  Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+	struct rhlist_head *list = iter->list;
+	struct rhashtable *ht = iter->ht;
+	struct rhash_head *p = iter->p;
+
+	if (p)
+		return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+	/* No object found in current iter, find next one in the table. */
+
+	if (iter->skip) {
+		/* A nonzero skip value points to the next entry in the table
+		 * beyond that last one that was found. Decrement skip so
+		 * we find the current value. __rhashtable_walk_find_next
+		 * will restore the original value of skip assuming that
+		 * the table hasn't changed.
+		 */
+		iter->skip--;
+	}
+
+	return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
 /**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter:	Hash table iterator
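To make the peek semantics concrete: rhashtable_walk_peek() returns the element the iterator is currently positioned on without advancing, so repeated calls yield the same object. That suits dump routines that may run out of buffer space mid-walk and must revisit the same entry on the next pass. A hedged sketch of such a consumer; example_dump and example_emit are hypothetical, only the walk calls come from this patch:

	static int example_dump(struct rhashtable_iter *iter)
	{
		struct example_obj *obj;	/* hypothetical element type */

		while ((obj = rhashtable_walk_peek(iter))) {
			if (IS_ERR(obj)) {
				if (PTR_ERR(obj) == -EAGAIN)
					continue;	/* table resized; retry */
				break;
			}

			if (!example_emit(obj))		/* hypothetical: buffer full */
				return -EMSGSIZE;	/* same obj on the next call */

			rhashtable_walk_next(iter);	/* consumed: now advance */
		}

		return 0;
	}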
lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
 		return;
 	}
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		pr_warn("Test failed: iterator failed: %d\n", err);
-		return;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((pos = rhashtable_walk_next(&hti))) {
 		if (PTR_ERR(pos) == -EAGAIN) {
net/ipv6/ila/ila_xlat.c
@@ -512,9 +512,7 @@ static int ila_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	struct ila_map *ila;
 	int ret;
 
-	ret = rhashtable_walk_start(rhiter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(rhiter);
 
 	for (;;) {
 		ila = rhashtable_walk_next(rhiter);
net/ipv6/seg6.c
@@ -306,9 +306,7 @@ static int seg6_genl_dumphmac(struct sk_buff *skb, struct netlink_callback *cb)
 	struct seg6_hmac_info *hinfo;
 	int ret;
 
-	ret = rhashtable_walk_start(iter);
-	if (ret && ret != -EAGAIN)
-		goto done;
+	rhashtable_walk_start(iter);
 
 	for (;;) {
 		hinfo = rhashtable_walk_next(iter);
net/mac80211/mesh_pathtbl.c
@@ -257,9 +257,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 	if (ret)
 		return NULL;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto err;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -269,7 +267,6 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
 		if (i++ == idx)
 			break;
 	}
-err:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 
@@ -513,9 +510,7 @@ void mesh_plink_broken(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -535,7 +530,6 @@ void mesh_plink_broken(struct sta_info *sta)
 			    WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
 		}
 	}
-out:
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -584,9 +578,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -597,7 +589,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
 		if (rcu_access_pointer(mpath->next_hop) == sta)
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -614,9 +606,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -627,7 +617,7 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
 		if (ether_addr_equal(mpath->mpp, proxy))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -642,9 +632,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -653,7 +641,7 @@ static void table_flush_by_iface(struct mesh_table *tbl)
 			break;
 		__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
@@ -873,9 +861,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 	if (ret)
 		return;
 
-	ret = rhashtable_walk_start(&iter);
-	if (ret && ret != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&iter);
 
 	while ((mpath = rhashtable_walk_next(&iter))) {
 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
@@ -887,7 +873,7 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
 		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
 			__mesh_path_del(tbl, mpath);
 	}
-out:
+
 	rhashtable_walk_stop(&iter);
 	rhashtable_walk_exit(&iter);
 }
net/netfilter/nft_set_hash.c
@@ -251,11 +251,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
 	if (err)
 		return;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN) {
-		iter->err = err;
-		goto out;
-	}
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
@@ -306,9 +302,7 @@ static void nft_rhash_gc(struct work_struct *work)
 	if (err)
 		goto schedule;
 
-	err = rhashtable_walk_start(&hti);
-	if (err && err != -EAGAIN)
-		goto out;
+	rhashtable_walk_start(&hti);
 
 	while ((he = rhashtable_walk_next(&hti))) {
 		if (IS_ERR(he)) {
net/netlink/af_netlink.c
@@ -2478,8 +2478,9 @@ static int netlink_walk_start(struct nl_seq_iter *iter)
 		return err;
 	}
 
-	err = rhashtable_walk_start(&iter->hti);
-	return err == -EAGAIN ? 0 : err;
+	rhashtable_walk_start(&iter->hti);
+
+	return 0;
 }
 
 static void netlink_walk_stop(struct nl_seq_iter *iter)
net/netlink/diag.c
@@ -115,11 +115,7 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	if (!s_num)
 		rhashtable_walk_enter(&tbl->hash, hti);
 
-	ret = rhashtable_walk_start(hti);
-	if (ret == -EAGAIN)
-		ret = 0;
-	if (ret)
-		goto stop;
+	rhashtable_walk_start(hti);
 
 	while ((nlsk = rhashtable_walk_next(hti))) {
 		if (IS_ERR(nlsk)) {
@@ -146,8 +142,8 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 		}
 	}
 
-stop:
 	rhashtable_walk_stop(hti);
+
 	if (ret)
 		goto done;
 
net/sctp/proc.c
@@ -288,12 +288,8 @@ struct sctp_ht_iter {
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct sctp_ht_iter *iter = seq->private;
-	int err = sctp_transport_walk_start(&iter->hti);
 
-	if (err) {
-		iter->start_fail = 1;
-		return ERR_PTR(err);
-	}
-
+	sctp_transport_walk_start(&iter->hti);
+
 	iter->start_fail = 0;
 	return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
net/sctp/socket.c
@@ -4676,20 +4676,11 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
 EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
 
 /* use callback to avoid exporting the core structure */
-int sctp_transport_walk_start(struct rhashtable_iter *iter)
+void sctp_transport_walk_start(struct rhashtable_iter *iter)
 {
-	int err;
-
 	rhltable_walk_enter(&sctp_transport_hashtable, iter);
 
-	err = rhashtable_walk_start(iter);
-	if (err && err != -EAGAIN) {
-		rhashtable_walk_stop(iter);
-		rhashtable_walk_exit(iter);
-		return err;
-	}
-
-	return 0;
+	rhashtable_walk_start(iter);
 }
 
 void sctp_transport_walk_stop(struct rhashtable_iter *iter)
@@ -4780,12 +4771,10 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
 			    struct net *net, int *pos, void *p) {
 	struct rhashtable_iter hti;
 	struct sctp_transport *tsp;
-	int ret;
+	int ret = 0;
 
 again:
-	ret = sctp_transport_walk_start(&hti);
-	if (ret)
-		return ret;
+	sctp_transport_walk_start(&hti);
 
 	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
 	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
net/tipc/socket.c
@@ -2640,9 +2640,7 @@ void tipc_sk_reinit(struct net *net)
 	rhashtable_walk_enter(&tn->sk_rht, &iter);
 
 	do {
-		tsk = ERR_PTR(rhashtable_walk_start(&iter));
-		if (IS_ERR(tsk))
-			goto walk_stop;
+		rhashtable_walk_start(&iter);
 
 		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
 			spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2651,7 +2649,7 @@ void tipc_sk_reinit(struct net *net)
 			msg_set_orignode(msg, tn->own_addr);
 			spin_unlock_bh(&tsk->sk.sk_lock.slock);
 		}
-walk_stop:
+
 		rhashtable_walk_stop(&iter);
 	} while (tsk == ERR_PTR(-EAGAIN));
 }