Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-11-01 00:48:50 +00:00)
commit a85ee6401a
The struct dbs_data embeds a struct gov_attr_set, and the struct gov_attr_set
in turn embeds a kobject. Since every kobject must have a release() method and
we can't use kfree() to free it directly, introduce cpufreq_dbs_data_release()
to release the dbs_data via the kobject's release() method. This fixes a
calltrace like the one below:
ODEBUG: free active (active state 0) object type: timer_list hint: delayed_work_timer_fn+0x0/0x34
WARNING: CPU: 12 PID: 810 at lib/debugobjects.c:505 debug_print_object+0xb8/0x100
Modules linked in:
CPU: 12 PID: 810 Comm: sh Not tainted 5.16.0-next-20220120-yocto-standard+ #536
Hardware name: Marvell OcteonTX CN96XX board (DT)
pstate: 60400009 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
pc : debug_print_object+0xb8/0x100
lr : debug_print_object+0xb8/0x100
sp : ffff80001dfcf9a0
x29: ffff80001dfcf9a0 x28: 0000000000000001 x27: ffff0001464f0000
x26: 0000000000000000 x25: ffff8000090e3f00 x24: ffff80000af60210
x23: ffff8000094dfb78 x22: ffff8000090e3f00 x21: ffff0001080b7118
x20: ffff80000aeb2430 x19: ffff800009e8f5e0 x18: 0000000000000000
x17: 0000000000000002 x16: 00004d62e58be040 x15: 013590470523aff8
x14: ffff8000090e1828 x13: 0000000001359047 x12: 00000000f5257d14
x11: 0000000000040591 x10: 0000000066c1ffea x9 : ffff8000080d15e0
x8 : ffff80000a1765a8 x7 : 0000000000000000 x6 : 0000000000000001
x5 : ffff800009e8c000 x4 : ffff800009e8c760 x3 : 0000000000000000
x2 : 0000000000000000 x1 : 0000000000000000 x0 : ffff0001474ed040
Call trace:
debug_print_object+0xb8/0x100
__debug_check_no_obj_freed+0x1d0/0x25c
debug_check_no_obj_freed+0x24/0xa0
kfree+0x11c/0x440
cpufreq_dbs_governor_exit+0xa8/0xac
cpufreq_exit_governor+0x44/0x90
cpufreq_set_policy+0x29c/0x570
store_scaling_governor+0x110/0x154
store+0xb0/0xe0
sysfs_kf_write+0x58/0x84
kernfs_fop_write_iter+0x12c/0x1c0
new_sync_write+0xf0/0x18c
vfs_write+0x1cc/0x220
ksys_write+0x74/0x100
__arm64_sys_write+0x28/0x3c
invoke_syscall.constprop.0+0x58/0xf0
do_el0_svc+0x70/0x170
el0_svc+0x54/0x190
el0t_64_sync_handler+0xa4/0x130
el0t_64_sync+0x1a0/0x1a4
irq event stamp: 189006
hardirqs last enabled at (189005): [<ffff8000080849d0>] finish_task_switch.isra.0+0xe0/0x2c0
hardirqs last disabled at (189006): [<ffff8000090667a4>] el1_dbg+0x24/0xa0
softirqs last enabled at (188966): [<ffff8000080106d0>] __do_softirq+0x4b0/0x6a0
softirqs last disabled at (188957): [<ffff80000804a618>] __irq_exit_rcu+0x108/0x1a4
[ rjw: Because dbs_data can now be freed by gov_attr_set_put() in
cpufreq_dbs_governor_exit(), it is also necessary to move the
invocation of the governor ->exit() callback into the new
cpufreq_dbs_data_release() function. ]
Fixes: c443563036 ("cpufreq: governor: New sysfs show/store callbacks for governor tunables")
Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
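
For context, here is a minimal sketch of the approach described above. It follows
the commit message and the rjw note rather than reproducing the exact upstream
diff, and it assumes the gov_attr_set helpers (to_gov_attr_set() and
gov_attr_set_put()) are visible from the governor core:

/*
 * Sketch only: the release() callback of the kobject embedded in
 * dbs_data->attr_set frees dbs_data, so the final kobject_put() (reached
 * via gov_attr_set_put()) is what actually releases the memory.
 */
static void cpufreq_dbs_data_release(struct kobject *kobj)
{
        struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
        struct dbs_governor *gov = dbs_data->gov;

        /* As noted by rjw above, the governor ->exit() callback must also
         * run here, just before the memory goes away. */
        gov->exit(dbs_data);
        kfree(dbs_data);
}

/*
 * Wiring, roughly: cpufreq_dbs_governor_init() records the governor pointer
 * (dbs_data->gov = gov) and installs the callback
 * (gov->kobj_type.release = cpufreq_dbs_data_release), while
 * cpufreq_dbs_governor_exit() merely drops its reference via
 * gov_attr_set_put(); when the last reference is gone, the kobject core
 * invokes cpufreq_dbs_data_release().
 */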
drivers/cpufreq/cpufreq_governor.h | 181 lines | 5.4 KiB | C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * drivers/cpufreq/cpufreq_governor.h
 *
 * Header file for CPUFreq governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#ifndef _CPUFREQ_GOVERNOR_H
#define _CPUFREQ_GOVERNOR_H

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/cpufreq.h>
#include <linux/sched/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>

/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};

/*
 * Abbreviations:
 * dbs: used as a shortform for demand based switching It helps to keep variable
 *      names smaller, simpler
 * cdbs: common dbs
 * od_*: On-demand governor
 * cs_*: Conservative governor
 */

/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
        struct gov_attr_set attr_set;
        struct dbs_governor *gov;
        void *tuners;
        unsigned int ignore_nice_load;
        unsigned int sampling_rate;
        unsigned int sampling_down_factor;
        unsigned int up_threshold;
        unsigned int io_is_busy;
};

static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct dbs_data, attr_set);
}

#define gov_show_one(_gov, file_name)                                  \
static ssize_t file_name##_show                                        \
(struct gov_attr_set *attr_set, char *buf)                             \
{                                                                      \
        struct dbs_data *dbs_data = to_dbs_data(attr_set);             \
        struct _gov##_dbs_tuners *tuners = dbs_data->tuners;           \
        return sprintf(buf, "%u\n", tuners->file_name);                \
}

#define gov_show_one_common(file_name)                                 \
static ssize_t file_name##_show                                        \
(struct gov_attr_set *attr_set, char *buf)                             \
{                                                                      \
        struct dbs_data *dbs_data = to_dbs_data(attr_set);             \
        return sprintf(buf, "%u\n", dbs_data->file_name);              \
}

#define gov_attr_ro(_name)                                             \
static struct governor_attr _name = __ATTR_RO(_name)

#define gov_attr_rw(_name)                                             \
static struct governor_attr _name = __ATTR_RW(_name)

/* Common to all CPUs of a policy */
struct policy_dbs_info {
        struct cpufreq_policy *policy;
        /*
         * Per policy mutex that serializes load evaluation from limit-change
         * and work-handler.
         */
        struct mutex update_mutex;

        u64 last_sample_time;
        s64 sample_delay_ns;
        atomic_t work_count;
        struct irq_work irq_work;
        struct work_struct work;
        /* dbs_data may be shared between multiple policy objects */
        struct dbs_data *dbs_data;
        struct list_head list;
        /* Multiplier for increasing sample delay temporarily. */
        unsigned int rate_mult;
        unsigned int idle_periods;      /* For conservative */
        /* Status indicators */
        bool is_shared;         /* This object is used by multiple CPUs */
        bool work_in_progress;  /* Work is being queued up or in progress */
};

static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
                                           unsigned int delay_us)
{
        policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
}

/* Per cpu structures */
struct cpu_dbs_info {
        u64 prev_cpu_idle;
        u64 prev_update_time;
        u64 prev_cpu_nice;
        /*
         * Used to keep track of load in the previous interval. However, when
         * explicitly set to zero, it is used as a flag to ensure that we copy
         * the previous load to the current interval only once, upon the first
         * wake-up from idle.
         */
        unsigned int prev_load;
        struct update_util_data update_util;
        struct policy_dbs_info *policy_dbs;
};

/* Common Governor data across policies */
struct dbs_governor {
        struct cpufreq_governor gov;
        struct kobj_type kobj_type;

        /*
         * Common data for platforms that don't set
         * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
         */
        struct dbs_data *gdbs_data;

        unsigned int (*gov_dbs_update)(struct cpufreq_policy *policy);
        struct policy_dbs_info *(*alloc)(void);
        void (*free)(struct policy_dbs_info *policy_dbs);
        int (*init)(struct dbs_data *dbs_data);
        void (*exit)(struct dbs_data *dbs_data);
        void (*start)(struct cpufreq_policy *policy);
};

static inline struct dbs_governor *dbs_governor_of(struct cpufreq_policy *policy)
{
        return container_of(policy->governor, struct dbs_governor, gov);
}

/* Governor callback routines */
int cpufreq_dbs_governor_init(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy);
int cpufreq_dbs_governor_start(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy);

#define CPUFREQ_DBS_GOVERNOR_INITIALIZER(_name_)                       \
        {                                                              \
                .name = _name_,                                        \
                .flags = CPUFREQ_GOV_DYNAMIC_SWITCHING,                \
                .owner = THIS_MODULE,                                  \
                .init = cpufreq_dbs_governor_init,                     \
                .exit = cpufreq_dbs_governor_exit,                     \
                .start = cpufreq_dbs_governor_start,                   \
                .stop = cpufreq_dbs_governor_stop,                     \
                .limits = cpufreq_dbs_governor_limits,                 \
        }

/* Governor specific operations */
struct od_ops {
        unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
                        unsigned int freq_next, unsigned int relation);
};

unsigned int dbs_update(struct cpufreq_policy *policy);
void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
                            size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
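
For orientation, a hypothetical governor skeleton (not part of the kernel tree)
showing how the declarations above are typically wired together, modeled loosely
on how ondemand and conservative use this header. Every example_* identifier,
the "example" governor name, and the chosen defaults are illustrative
placeholders:

/* Hypothetical "example" governor built on the common dbs helpers above. */
#include <linux/slab.h>

#include "cpufreq_governor.h"

/* Expose the common sampling_rate tunable: the show callback is generated by
 * gov_show_one_common(), the store callback is the shared sampling_rate_store(). */
gov_show_one_common(sampling_rate);
gov_attr_rw(sampling_rate);

static struct attribute *example_attrs[] = {
        &sampling_rate.attr,
        NULL
};
ATTRIBUTE_GROUPS(example);

static struct policy_dbs_info *example_alloc(void)
{
        /* A real governor would embed policy_dbs_info in its own struct. */
        return kzalloc(sizeof(struct policy_dbs_info), GFP_KERNEL);
}

static void example_free(struct policy_dbs_info *policy_dbs)
{
        kfree(policy_dbs);
}

static int example_init(struct dbs_data *dbs_data)
{
        dbs_data->up_threshold = 80;    /* arbitrary illustrative default */
        return 0;
}

static void example_exit(struct dbs_data *dbs_data) { }
static void example_start(struct cpufreq_policy *policy) { }

static unsigned int example_dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        unsigned int load = dbs_update(policy);

        /* Trivial policy: go to the maximum frequency when the load crosses
         * the threshold; a real governor would be more elaborate here. */
        if (load > policy_dbs->dbs_data->up_threshold)
                __cpufreq_driver_target(policy, policy->max,
                                        CPUFREQ_RELATION_H);

        /* Tell the core how long to wait before the next evaluation (us). */
        return policy_dbs->dbs_data->sampling_rate;
}

static struct dbs_governor example_dbs_gov = {
        .gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("example"),
        .kobj_type = { .default_groups = example_groups },
        .gov_dbs_update = example_dbs_update,
        .alloc = example_alloc,
        .free = example_free,
        .init = example_init,
        .exit = example_exit,
        .start = example_start,
};

static int __init example_gov_init(void)
{
        return cpufreq_register_governor(&example_dbs_gov.gov);
}
module_init(example_gov_init);

static void __exit example_gov_exit(void)
{
        cpufreq_unregister_governor(&example_dbs_gov.gov);
}
module_exit(example_gov_exit);

MODULE_LICENSE("GPL");

The sysfs glue (kobj_type.sysfs_ops and, after the fix above, kobj_type.release)
is filled in by cpufreq_dbs_governor_init(), so a governor only needs to supply
its attribute groups and the per-policy callbacks shown here.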