modules: Use a better scheme for refcounting

Current refcounting for modules (done if CONFIG_MODULE_UNLOAD=y) is
using a lot of memory.

Each 'struct module' contains an [NR_CPUS] array of full cache lines.

This patch uses existing infrastructure (percpu_modalloc() &
percpu_modfree()) to allocate percpu space for the refcount storage.

Instead of wasting NR_CPUS*128 bytes (on i386), we now use
nr_cpu_ids*sizeof(local_t) bytes.

On a typical distro, where NR_CPUS=8, shipping 2000 modules, we reduce
size of module files by about 2 Mbytes. (1 KB per module)

Instead of having all refcounters on the same memory node - with TLB misses
because of vmalloc() - this new implementation allows better
NUMA properties, since each CPU will use storage on its preferred node,
thanks to percpu storage.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Eric Dumazet 2009-02-03 13:31:36 +10:30 committed by Linus Torvalds
parent 27421e211a
commit 720eba31f4
2 changed files with 41 additions and 19 deletions

View File

@ -219,11 +219,6 @@ void *__symbol_get_gpl(const char *symbol);
#endif #endif
struct module_ref
{
local_t count;
} ____cacheline_aligned;
enum module_state enum module_state
{ {
MODULE_STATE_LIVE, MODULE_STATE_LIVE,
@ -344,8 +339,11 @@ struct module
/* Destruction function. */ /* Destruction function. */
void (*exit)(void); void (*exit)(void);
/* Reference counts */ #ifdef CONFIG_SMP
struct module_ref ref[NR_CPUS]; char *refptr;
#else
local_t ref;
#endif
#endif #endif
}; };
#ifndef MODULE_ARCH_INIT #ifndef MODULE_ARCH_INIT
@ -395,13 +393,22 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr); void symbol_put_addr(void *addr);
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
return &mod->ref;
#endif
}
/* Sometimes we know we already have a refcount, and it's easier not /* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */ to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module) static inline void __module_get(struct module *module)
{ {
if (module) { if (module) {
BUG_ON(module_refcount(module) == 0); BUG_ON(module_refcount(module) == 0);
local_inc(&module->ref[get_cpu()].count); local_inc(__module_ref_addr(module, get_cpu()));
put_cpu(); put_cpu();
} }
} }
@ -413,7 +420,7 @@ static inline int try_module_get(struct module *module)
if (module) { if (module) {
unsigned int cpu = get_cpu(); unsigned int cpu = get_cpu();
if (likely(module_is_live(module))) if (likely(module_is_live(module)))
local_inc(&module->ref[cpu].count); local_inc(__module_ref_addr(module, cpu));
else else
ret = 0; ret = 0;
put_cpu(); put_cpu();

View File

@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
/* Init the unload section of the module. */ /* Init the unload section of the module. */
static void module_unload_init(struct module *mod) static void module_unload_init(struct module *mod)
{ {
unsigned int i; int cpu;
INIT_LIST_HEAD(&mod->modules_which_use_me); INIT_LIST_HEAD(&mod->modules_which_use_me);
for (i = 0; i < NR_CPUS; i++) for_each_possible_cpu(cpu)
local_set(&mod->ref[i].count, 0); local_set(__module_ref_addr(mod, cpu), 0);
/* Hold reference count during initialization. */ /* Hold reference count during initialization. */
local_set(&mod->ref[raw_smp_processor_id()].count, 1); local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
/* Backwards compatibility macros put refcount during init. */ /* Backwards compatibility macros put refcount during init. */
mod->waiter = current; mod->waiter = current;
} }
@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
unsigned int module_refcount(struct module *mod) unsigned int module_refcount(struct module *mod)
{ {
unsigned int i, total = 0; unsigned int total = 0;
int cpu;
for (i = 0; i < NR_CPUS; i++) for_each_possible_cpu(cpu)
total += local_read(&mod->ref[i].count); total += local_read(__module_ref_addr(mod, cpu));
return total; return total;
} }
EXPORT_SYMBOL(module_refcount); EXPORT_SYMBOL(module_refcount);
@ -894,7 +895,7 @@ void module_put(struct module *module)
{ {
if (module) { if (module) {
unsigned int cpu = get_cpu(); unsigned int cpu = get_cpu();
local_dec(&module->ref[cpu].count); local_dec(__module_ref_addr(module, cpu));
/* Maybe they're waiting for us to drop reference? */ /* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module))) if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter); wake_up_process(module->waiter);
@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
kfree(mod->args); kfree(mod->args);
if (mod->percpu) if (mod->percpu)
percpu_modfree(mod->percpu); percpu_modfree(mod->percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
if (mod->refptr)
percpu_modfree(mod->refptr);
#endif
/* Free lock-classes: */ /* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size); lockdep_free_key_range(mod->module_core, mod->core_size);
@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
if (err < 0) if (err < 0)
goto free_mod; goto free_mod;
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
mod->name);
if (!mod->refptr) {
err = -ENOMEM;
goto free_mod;
}
#endif
if (pcpuindex) { if (pcpuindex) {
/* We have a special allocation for this section. */ /* We have a special allocation for this section. */
percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
mod->name); mod->name);
if (!percpu) { if (!percpu) {
err = -ENOMEM; err = -ENOMEM;
goto free_mod; goto free_percpu;
} }
sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
mod->percpu = percpu; mod->percpu = percpu;
@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
free_percpu: free_percpu:
if (percpu) if (percpu)
percpu_modfree(percpu); percpu_modfree(percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
percpu_modfree(mod->refptr);
#endif
free_mod: free_mod:
kfree(args); kfree(args);
free_hdr: free_hdr: