spinlock: Add library function to allocate spinlock buckets array

Add two new library functions: alloc_bucket_spinlocks and
free_bucket_spinlocks. These allocate and free an array of spinlocks
intended to serve as per-bucket locks for a hash table. The caller
specifies the maximum number of spinlocks in the array as well as a
per-CPU multiplier from which the number of spinlocks to allocate is
derived. The number allocated is rounded up to a power of two so that
the array can be indexed with a masked hash value.
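
For context, a minimal sketch (not part of this commit) of how a hash
table might consume this API; the foo_table, foo_table_init and
foo_table_destroy names are hypothetical:

#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Hypothetical hash table that shares one spinlock among several buckets. */
struct foo_table {
        spinlock_t *bucket_locks;
        unsigned int lock_mask;         /* filled in by alloc_bucket_spinlocks() */
};

static int foo_table_init(struct foo_table *tbl)
{
        /* Ask for at most 1024 locks, scaled as 4 locks per possible CPU. */
        return alloc_bucket_spinlocks(&tbl->bucket_locks, &tbl->lock_mask,
                                      1024, 4, GFP_KERNEL);
}

static void foo_table_destroy(struct foo_table *tbl)
{
        free_bucket_spinlocks(tbl->bucket_locks);
}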

Signed-off-by: Tom Herbert <tom@quantonium.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Tom Herbert, 2017-12-04 10:31:44 -08:00
Committer: David S. Miller
commit 92f36cca57, parent 2b86093135
3 changed files, 61 additions, 1 deletion

include/linux/spinlock.h

@@ -414,4 +414,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+                          size_t max_size, unsigned int cpu_mult,
+                          gfp_t gfp);
+
+void free_bucket_spinlocks(spinlock_t *locks);
+
 #endif /* __LINUX_SPINLOCK_H */
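
A hedged continuation of the sketch above: given the lock_mask written
back by alloc_bucket_spinlocks(), a caller would pick a bucket's lock
like this (foo_bucket_lock and foo_insert are again hypothetical):

/* Hypothetical lookup of the lock guarding the bucket for @hash. */
static spinlock_t *foo_bucket_lock(struct foo_table *tbl, u32 hash)
{
        /* lock_mask == nlocks - 1, so this is equivalent to hash % nlocks. */
        return &tbl->bucket_locks[hash & tbl->lock_mask];
}

static void foo_insert(struct foo_table *tbl, u32 hash)
{
        spinlock_t *lock = foo_bucket_lock(tbl, hash);

        spin_lock(lock);
        /* ... link the entry into its hash bucket ... */
        spin_unlock(lock);
}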

lib/Makefile

@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
          gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
          bsearch.o find_bit.o llist.o memweight.o kfifo.o \
          percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-         once.o refcount.o usercopy.o errseq.o
+         once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o

lib/bucket_locks.c (new file, 54 lines)

@@ -0,0 +1,54 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
 * indicate the number of elements to allocate in the array. max_size
 * gives the maximum number of elements to allocate. cpu_mult gives
 * the number of locks per CPU to allocate. The size is rounded up
 * to a power of 2 to be suitable as a hash table.
 */

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
                           size_t max_size, unsigned int cpu_mult, gfp_t gfp)
{
        spinlock_t *tlocks = NULL;
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        if (cpu_mult) {
                nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
                size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
        } else {
                size = max_size;
        }

        if (sizeof(spinlock_t) != 0) {
                if (gfpflags_allow_blocking(gfp))
                        tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
                else
                        tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
                if (!tlocks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tlocks[i]);
        }

        *locks = tlocks;
        *locks_mask = size - 1;

        return 0;
}
EXPORT_SYMBOL(alloc_bucket_spinlocks);

void free_bucket_spinlocks(spinlock_t *locks)
{
        kvfree(locks);
}
EXPORT_SYMBOL(free_bucket_spinlocks);
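
To make the sizing arithmetic concrete, here is a worked example under
assumed values (not taken from this commit): num_possible_cpus() == 8,
lockdep disabled, cpu_mult == 4, max_size == 1024.

/*
 * Illustrative walk through alloc_bucket_spinlocks() with the assumed
 * values above:
 *
 *   nr_pcpus    = min(8, 64)       =  8
 *   size        = min(8 * 4, 1024) = 32
 *   *locks_mask = 32 - 1           = 0x1f
 *
 * A caller then indexes the array with "hash & 0x1f", selecting one of
 * the 32 locks. The mask substitutes for "hash % size" only when size
 * is a power of two, which is what the power-of-2 note in the
 * function's comment refers to. Under CONFIG_PROVE_LOCKING, nr_pcpus
 * is pinned to 2 so that only a handful of locks exist while lockdep
 * is tracking them.
 */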