Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Commit b67bfe0d42:
I'm not sure why, but the hlist for each entry iterators were conceived

        list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only do
they not really need it, it also prevents the iterator from looking exactly
like the list iterator, which is unfortunate.

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h
 - Fix up the declaration of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; this was
   modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch which is mostly the work of Peter Senna Tschudin is here:

@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
hlist_for_each_entry_from, hlist_for_each_entry_rcu,
hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@

-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
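To make the conversion concrete, here is a minimal sketch of what this change looks like at a single call site; the struct, list head, and use() helper below are made-up illustrations, not code taken from the patch:

        struct entry {
                int value;
                struct hlist_node node;
        };

        HLIST_HEAD(head);
        struct entry *e;
        struct hlist_node *pos;

        /* Before: the hlist iterator required a separate node cursor. */
        hlist_for_each_entry(e, pos, &head, node)
                use(e);         /* use() is a stand-in for real work */

        /* After: the cursor is gone and the call matches list_for_each_entry(). */
        hlist_for_each_entry(e, &head, node)
                use(e);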
190 lines, 5.8 KiB, C
/*
 * Statically sized hash table implementation
 * (C) 2012  Sasha Levin <levinsasha928@gmail.com>
 */

#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H

#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>

#define DEFINE_HASHTABLE(name, bits)						\
	struct hlist_head name[1 << (bits)] =					\
			{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }

#define DECLARE_HASHTABLE(name, bits)						\
	struct hlist_head name[1 << (bits)]

#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))

/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits)							\
	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))

static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);
}

/**
 * hash_init - initialize a hash table
 * @hashtable: hashtable to be initialized
 *
 * Calculates the size of the hashtable from the given parameter, otherwise
 * same as hash_init_size.
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))

/**
 * hash_add - add an object to a hashtable
 * @hashtable: hashtable to add to
 * @node: the &struct hlist_node of the object to be added
 * @key: the key of the object to be added
 */
#define hash_add(hashtable, node, key)						\
	hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])

/**
 * hash_add_rcu - add an object to a rcu enabled hashtable
 * @hashtable: hashtable to add to
 * @node: the &struct hlist_node of the object to be added
 * @key: the key of the object to be added
 */
#define hash_add_rcu(hashtable, node, key)					\
	hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])

/**
 * hash_hashed - check whether an object is in any hashtable
 * @node: the &struct hlist_node of the object to be checked
 */
static inline bool hash_hashed(struct hlist_node *node)
{
	return !hlist_unhashed(node);
}

static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		if (!hlist_empty(&ht[i]))
			return false;

	return true;
}

/**
 * hash_empty - check whether a hashtable is empty
 * @hashtable: hashtable to check
 *
 * This has to be a macro since HASH_BITS() will not work on pointers since
 * it calculates the size during preprocessing.
 */
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))

/**
 * hash_del - remove an object from a hashtable
 * @node: &struct hlist_node of the object to remove
 */
static inline void hash_del(struct hlist_node *node)
{
	hlist_del_init(node);
}

/**
 * hash_del_rcu - remove an object from a rcu enabled hashtable
 * @node: &struct hlist_node of the object to remove
 */
static inline void hash_del_rcu(struct hlist_node *node)
{
	hlist_del_init_rcu(node);
}

/**
 * hash_for_each - iterate over a hashtable
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each(name, bkt, obj, member)					\
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);	\
			(bkt)++)						\
		hlist_for_each_entry(obj, &name[bkt], member)

/**
 * hash_for_each_rcu - iterate over a rcu enabled hashtable
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each_rcu(name, bkt, obj, member)				\
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);	\
			(bkt)++)						\
		hlist_for_each_entry_rcu(obj, &name[bkt], member)

/**
 * hash_for_each_safe - iterate over a hashtable safe against removal of
 * hash entry
 * @name: hashtable to iterate
 * @bkt: integer to use as bucket loop cursor
 * @tmp: a &struct used for temporary storage
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 */
#define hash_for_each_safe(name, bkt, tmp, obj, member)				\
	for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);	\
			(bkt)++)						\
		hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)

/**
 * hash_for_each_possible - iterate over all possible objects hashing to the
 * same bucket
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 * @key: the key of the objects to iterate over
 */
#define hash_for_each_possible(name, obj, member, key)				\
	hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)

/**
 * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
 * same bucket in an rcu enabled hashtable
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @member: the name of the hlist_node within the struct
 * @key: the key of the objects to iterate over
 */
#define hash_for_each_possible_rcu(name, obj, member, key)			\
	hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],	\
		member)

/**
 * hash_for_each_possible_safe - iterate over all possible objects hashing to the
 * same bucket safe against removals
 * @name: hashtable to iterate
 * @obj: the type * to use as a loop cursor for each entry
 * @tmp: a &struct used for temporary storage
 * @member: the name of the hlist_node within the struct
 * @key: the key of the objects to iterate over
 */
#define hash_for_each_possible_safe(name, obj, tmp, member, key)		\
	hlist_for_each_entry_safe(obj, tmp,					\
		&name[hash_min(key, HASH_BITS(name))], member)

#endif
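For orientation, a minimal usage sketch of the API defined above; struct object, its fields, and the example() function are hypothetical names used only for illustration, not part of the header:

        #include <linux/hashtable.h>

        struct object {
                int id;                         /* used as the hash key */
                struct hlist_node node;         /* links the object into a bucket */
        };

        /* 2^4 = 16 buckets of struct hlist_head, statically initialized */
        static DEFINE_HASHTABLE(objects, 4);

        static void example(struct object *obj)
        {
                struct object *cur;
                int bkt;

                hash_add(objects, &obj->node, obj->id);         /* insert keyed by id */

                /* visit only the bucket that obj->id hashes to */
                hash_for_each_possible(objects, cur, node, obj->id)
                        if (cur->id == obj->id)
                                pr_info("found %d\n", cur->id);

                /* visit every entry in every bucket */
                hash_for_each(objects, bkt, cur, node)
                        pr_info("bucket %d holds %d\n", bkt, cur->id);

                hash_del(&obj->node);                           /* unlink again */
        }

Note how the bucket walks use the three-argument hlist_for_each_entry() form that the commit above introduces.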