CGroup API files: update cpusets to use cgroup structured file API

Many of the cpusets control files are simple integer values, which don't
require the overhead of memory allocations for reads and writes.

Move the handlers for these control files into cpuset_read_u64() and
cpuset_write_u64().

[akpm@linux-foundation.org: add missing `break']
Signed-off-by: Paul Menage <menage@google.com>
Cc: "Li Zefan" <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: "YAMAMOTO Takashi" <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Paul Menage 2008-04-29 01:00:00 -07:00 committed by Linus Torvalds
parent b7269dfc82
commit 700fe1ab99
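
For orientation before the diff: with the structured cgroup file API, a control file whose value is a single integer declares read_u64/write_u64 handlers on its struct cftype, and the cgroup core handles parsing and formatting the value, so no per-read/write buffer allocation is needed. A minimal sketch of the pattern (illustrative only: the example_* names are invented; the handler signatures and the .read_u64/.write_u64/.private fields match what the patch below uses):

static u64 example_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	/* report the current value of the flag selected by cft->private */
	return 0;
}

static int example_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	/* the cgroup core has already parsed the user's write into 'val' */
	return 0;
}

static struct cftype example_cft = {
	.name = "example_flag",
	.read_u64 = example_read_u64,
	.write_u64 = example_write_u64,
	.private = FILE_CPU_EXCLUSIVE,	/* tells the shared handlers which file this is */
};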

kernel/cpuset.c

@@ -1023,19 +1023,6 @@ int current_cpuset_is_being_rebound(void)
 	return task_cs(current) == cpuset_being_rebound;
 }
-/*
- * Call with cgroup_mutex held.
- */
-static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
-{
-	if (simple_strtoul(buf, NULL, 10) != 0)
-		cpuset_memory_pressure_enabled = 1;
-	else
-		cpuset_memory_pressure_enabled = 0;
-	return 0;
-}
 static int update_relax_domain_level(struct cpuset *cs, char *buf)
 {
 	int val = simple_strtol(buf, NULL, 10);
@@ -1063,15 +1050,13 @@ static int update_relax_domain_level(struct cpuset *cs, char *buf)
  * Call with cgroup_mutex held.
  */
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
+static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+		       int turning_on)
 {
-	int turning_on;
 	struct cpuset trialcs;
 	int err;
 	int cpus_nonempty, balance_flag_changed;
-	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
 	trialcs = *cs;
 	if (turning_on)
 		set_bit(bit, &trialcs.flags);
@@ -1289,35 +1274,9 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	case FILE_MEMLIST:
 		retval = update_nodemask(cs, buffer);
 		break;
-	case FILE_CPU_EXCLUSIVE:
-		retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
-		break;
-	case FILE_MEM_EXCLUSIVE:
-		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
-		break;
-	case FILE_SCHED_LOAD_BALANCE:
-		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
-		break;
 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 		retval = update_relax_domain_level(cs, buffer);
 		break;
-	case FILE_MEMORY_MIGRATE:
-		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
-		break;
-	case FILE_MEMORY_PRESSURE_ENABLED:
-		retval = update_memory_pressure_enabled(cs, buffer);
-		break;
-	case FILE_MEMORY_PRESSURE:
-		retval = -EACCES;
-		break;
-	case FILE_SPREAD_PAGE:
-		retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
-		cs->mems_generation = cpuset_mems_generation++;
-		break;
-	case FILE_SPREAD_SLAB:
-		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
-		cs->mems_generation = cpuset_mems_generation++;
-		break;
 	default:
 		retval = -EINVAL;
 		goto out2;
@@ -1332,6 +1291,54 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
 	return retval;
 }
+static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+	int retval = 0;
+	struct cpuset *cs = cgroup_cs(cgrp);
+	cpuset_filetype_t type = cft->private;
+	cgroup_lock();
+	if (cgroup_is_removed(cgrp)) {
+		cgroup_unlock();
+		return -ENODEV;
+	}
+	switch (type) {
+	case FILE_CPU_EXCLUSIVE:
+		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
+		break;
+	case FILE_MEM_EXCLUSIVE:
+		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
+		break;
+	case FILE_SCHED_LOAD_BALANCE:
+		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
+		break;
+	case FILE_MEMORY_MIGRATE:
+		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
+		break;
+	case FILE_MEMORY_PRESSURE_ENABLED:
+		cpuset_memory_pressure_enabled = !!val;
+		break;
+	case FILE_MEMORY_PRESSURE:
+		retval = -EACCES;
+		break;
+	case FILE_SPREAD_PAGE:
+		retval = update_flag(CS_SPREAD_PAGE, cs, val);
+		cs->mems_generation = cpuset_mems_generation++;
+		break;
+	case FILE_SPREAD_SLAB:
+		retval = update_flag(CS_SPREAD_SLAB, cs, val);
+		cs->mems_generation = cpuset_mems_generation++;
+		break;
+	default:
+		retval = -EINVAL;
+		break;
+	}
+	cgroup_unlock();
+	return retval;
+}
 /*
  * These ascii lists should be read in a single call, by using a user
  * buffer large enough to hold the entire map. If read in smaller
@@ -1390,33 +1397,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	case FILE_MEMLIST:
 		s += cpuset_sprintf_memlist(s, cs);
 		break;
-	case FILE_CPU_EXCLUSIVE:
-		*s++ = is_cpu_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_MEM_EXCLUSIVE:
-		*s++ = is_mem_exclusive(cs) ? '1' : '0';
-		break;
-	case FILE_SCHED_LOAD_BALANCE:
-		*s++ = is_sched_load_balance(cs) ? '1' : '0';
-		break;
 	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
 		s += sprintf(s, "%d", cs->relax_domain_level);
 		break;
-	case FILE_MEMORY_MIGRATE:
-		*s++ = is_memory_migrate(cs) ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE_ENABLED:
-		*s++ = cpuset_memory_pressure_enabled ? '1' : '0';
-		break;
-	case FILE_MEMORY_PRESSURE:
-		s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
-		break;
-	case FILE_SPREAD_PAGE:
-		*s++ = is_spread_page(cs) ? '1' : '0';
-		break;
-	case FILE_SPREAD_SLAB:
-		*s++ = is_spread_slab(cs) ? '1' : '0';
-		break;
 	default:
 		retval = -EINVAL;
 		goto out;
@@ -1429,8 +1412,31 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
 	return retval;
 }
+static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
+{
+	struct cpuset *cs = cgroup_cs(cont);
+	cpuset_filetype_t type = cft->private;
+	switch (type) {
+	case FILE_CPU_EXCLUSIVE:
+		return is_cpu_exclusive(cs);
+	case FILE_MEM_EXCLUSIVE:
+		return is_mem_exclusive(cs);
+	case FILE_SCHED_LOAD_BALANCE:
+		return is_sched_load_balance(cs);
+	case FILE_MEMORY_MIGRATE:
+		return is_memory_migrate(cs);
+	case FILE_MEMORY_PRESSURE_ENABLED:
+		return cpuset_memory_pressure_enabled;
+	case FILE_MEMORY_PRESSURE:
+		return fmeter_getrate(&cs->fmeter);
+	case FILE_SPREAD_PAGE:
+		return is_spread_page(cs);
+	case FILE_SPREAD_SLAB:
+		return is_spread_slab(cs);
+	default:
+		BUG();
+	}
+}
 /*
@@ -1453,22 +1459,22 @@ static struct cftype cft_mems = {
 static struct cftype cft_cpu_exclusive = {
 	.name = "cpu_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_CPU_EXCLUSIVE,
 };
 static struct cftype cft_mem_exclusive = {
 	.name = "mem_exclusive",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEM_EXCLUSIVE,
 };
 static struct cftype cft_sched_load_balance = {
 	.name = "sched_load_balance",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SCHED_LOAD_BALANCE,
 };
@@ -1481,36 +1487,36 @@ static struct cftype cft_sched_relax_domain_level = {
 static struct cftype cft_memory_migrate = {
 	.name = "memory_migrate",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_MIGRATE,
 };
 static struct cftype cft_memory_pressure_enabled = {
 	.name = "memory_pressure_enabled",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 static struct cftype cft_memory_pressure = {
 	.name = "memory_pressure",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_MEMORY_PRESSURE,
 };
 static struct cftype cft_spread_page = {
 	.name = "memory_spread_page",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SPREAD_PAGE,
 };
 static struct cftype cft_spread_slab = {
 	.name = "memory_spread_slab",
-	.read = cpuset_common_file_read,
-	.write = cpuset_common_file_write,
+	.read_u64 = cpuset_read_u64,
+	.write_u64 = cpuset_write_u64,
 	.private = FILE_SPREAD_SLAB,
 };
@@ -1643,7 +1649,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 	cpuset_update_task_memory_state();
 	if (is_sched_load_balance(cs))
-		update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 	number_of_cpusets--;
 	kfree(cs);