Merge branch 'pm-sleep'

Merge changes related to system-wide power management for 6.9-rc1:

 - Fix and clean up system suspend statistics collection (Rafael
   Wysocki).

 - Simplify device suspend and resume handling in the power management
   core code (Rafael Wysocki).

 - Add support for LZ4 compression algorithm to the hibernation image
   creation and loading code (Nikhil V).

 - Fix PCI hibernation support description (Yiwei Lin).

 - Make hibernation take set_memory_ro() return values into account as
   appropriate (Christophe Leroy).

 - Set mem_sleep_current during kernel command line setup to avoid an
   ordering issue with handling it (Maulik Shah).

 - Fix wake IRQs handling when pm_runtime_force_suspend() is used as a
   driver's system suspend callback (Qingliang Li).
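
   For reference, the last item above concerns drivers that reuse their
   runtime PM callbacks for system sleep via pm_runtime_force_suspend() and
   pm_runtime_force_resume(). A minimal sketch of that pattern follows; the
   "foo" driver name and its callbacks are placeholders, not taken from any
   of the commits below:

	#include <linux/device.h>
	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		/* Put the device into its low-power state. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* Bring the device back to full power. */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
				   foo_runtime_resume, NULL)
	};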

* pm-sleep: (21 commits)
  PM: sleep: wakeirq: fix wake irq warning in system suspend
  PM: suspend: Set mem_sleep_current during kernel command line setup
  PM: hibernate: Don't ignore return from set_memory_ro()
  PM: hibernate: Support to select compression algorithm
  Documentation: PM: Fix PCI hibernation support description
  PM: hibernate: Add support for LZ4 compression for hibernation
  PM: hibernate: Move to crypto APIs for LZO compression
  PM: hibernate: Rename lzo* to make it generic
  PM: sleep: Call dpm_async_fn() directly in each suspend phase
  PM: sleep: Move devices to new lists earlier in each suspend phase
  PM: sleep: Move some assignments from under a lock
  PM: sleep: stats: Log errors right after running suspend callbacks
  PM: sleep: stats: Use locking in dpm_save_failed_dev()
  PM: sleep: stats: Call dpm_save_failed_step() at most once per phase
  PM: sleep: stats: Define suspend_stats next to the code using it
  PM: sleep: stats: Use unsigned int for success and failure counters
  PM: sleep: stats: Use an array of step failure counters
  PM: sleep: stats: Use array of suspend step names
  PM: sleep: Relocate two device PM core functions
  PM: sleep: Simplify dpm_suspended_list walk in dpm_resume()
  ...
Rafael J. Wysocki 2024-03-11 15:10:57 +01:00
commit 86b84bdd5c
14 changed files with 570 additions and 393 deletions

View File

@ -1748,6 +1748,17 @@
(that will set all pages holding image data
during restoration read-only).
hibernate.compressor= [HIBERNATION] Compression algorithm to be
used with hibernation.
Format: { lzo | lz4 }
Default: lzo
lzo: Select LZO compression algorithm to
compress/decompress hibernation image.
lz4: Select LZ4 compression algorithm to
compress/decompress hibernation image.
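
An illustrative boot-time usage, with hypothetical device paths (editorial example, not part of the documentation patch):

	root=/dev/sda2 resume=/dev/sda3 hibernate.compressor=lz4
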
highmem=nn[KMG] [KNL,BOOT] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem

View File

@ -625,7 +625,7 @@ The PCI subsystem-level callbacks they correspond to::
pci_pm_poweroff()
pci_pm_poweroff_noirq()
work in analogy with pci_pm_suspend() and pci_pm_poweroff_noirq(), respectively,
work in analogy with pci_pm_suspend() and pci_pm_suspend_noirq(), respectively,
although they don't attempt to save the device's standard configuration
registers.

View File

@ -60,7 +60,6 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@ -578,6 +577,35 @@ bool dev_pm_skip_resume(struct device *dev)
return !dev->power.must_resume;
}
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
dev->power.async_in_progress = true;
get_device(dev);
if (async_schedule_dev_nocall(func, dev))
return true;
put_device(dev);
}
/*
* Because async_schedule_dev_nocall() above has returned false or it
* has not been called at all, func() is not running and it is safe to
* update the async_in_progress flag without extra synchronization.
*/
dev->power.async_in_progress = false;
return false;
}
/**
* device_resume_noirq - Execute a "noirq resume" callback for given device.
* @dev: Device to handle.
@ -657,42 +685,12 @@ Out:
TRACE_RESUME(error);
if (error) {
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
}
static bool is_async(struct device *dev)
{
return dev->power.async_suspend && pm_async_enabled
&& !pm_trace_is_enabled();
}
static bool dpm_async_fn(struct device *dev, async_func_t func)
{
reinit_completion(&dev->power.completion);
if (is_async(dev)) {
dev->power.async_in_progress = true;
get_device(dev);
if (async_schedule_dev_nocall(func, dev))
return true;
put_device(dev);
}
/*
* Because async_schedule_dev_nocall() above has returned false or it
* has not been called at all, func() is not running and it is safe to
* update the async_in_progress flag without extra synchronization.
*/
dev->power.async_in_progress = false;
return false;
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
@ -707,9 +705,12 @@ static void dpm_noirq_resume_devices(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
async_error = 0;
pm_transition = state;
mutex_lock(&dpm_list_mtx);
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@ -736,6 +737,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
if (async_error)
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@ -817,8 +821,7 @@ Out:
complete_all(&dev->power.completion);
if (error) {
suspend_stats.failed_resume_early++;
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
@ -842,9 +845,12 @@ void dpm_resume_early(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
mutex_lock(&dpm_list_mtx);
async_error = 0;
pm_transition = state;
mutex_lock(&dpm_list_mtx);
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@ -871,6 +877,9 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
if (async_error)
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@ -974,8 +983,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
suspend_stats.failed_resume++;
dpm_save_failed_step(SUSPEND_RESUME);
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@ -1004,10 +1012,11 @@ void dpm_resume(pm_message_t state)
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
might_sleep();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
/*
* Trigger the resume of "async" devices upfront so they don't have to
* wait for the "non-async" ones they don't depend on.
@ -1017,29 +1026,25 @@ void dpm_resume(pm_message_t state)
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
if (!dev->power.async_in_progress) {
get_device(dev);
mutex_unlock(&dpm_list_mtx);
device_resume(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
if (async_error)
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
devfreq_resume();
@ -1187,7 +1192,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
}
/**
* __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* device_suspend_noirq - Execute a "noirq suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
@ -1195,7 +1200,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -1240,6 +1245,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
}
@ -1269,54 +1276,37 @@ Complete:
static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend_noirq(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
device_suspend_noirq(dev, pm_transition, true);
put_device(dev);
}
static int device_suspend_noirq(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend_noirq))
return 0;
return __device_suspend_noirq(dev, pm_transition, false);
}
static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
list_move(&dev->power.entry, &dpm_noirq_list);
if (dpm_async_fn(dev, async_suspend_noirq))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
dpm_save_failed_dev(dev_name(dev));
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_noirq_list);
}
mutex_unlock(&dpm_list_mtx);
error = device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
@ -1324,15 +1314,16 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend_noirq++;
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
}
dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
@ -1375,14 +1366,14 @@ static void dpm_propagate_wakeup_to_parent(struct device *dev)
}
/**
* __device_suspend_late - Execute a "late suspend" callback for given device.
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -1434,6 +1425,8 @@ Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@ -1450,24 +1443,11 @@ Complete:
static void async_suspend_late(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend_late(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
device_suspend_late(dev, pm_transition, true);
put_device(dev);
}
static int device_suspend_late(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend_late))
return 0;
return __device_suspend_late(dev, pm_transition, false);
}
/**
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
@ -1478,31 +1458,27 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
wake_up_all_idle_cpus();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
wake_up_all_idle_cpus();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
list_move(&dev->power.entry, &dpm_late_early_list);
if (dpm_async_fn(dev, async_suspend_late))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
if (error) {
pm_dev_err(dev, state, " late", error);
dpm_save_failed_dev(dev_name(dev));
}
mutex_unlock(&dpm_list_mtx);
error = device_suspend_late(dev, state, false);
put_device(dev);
@ -1511,12 +1487,14 @@ int dpm_suspend_late(pm_message_t state)
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
}
@ -1597,12 +1575,12 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
}
/**
* __device_suspend - Execute "suspend" callbacks for given device.
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static int device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@ -1716,8 +1694,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_watchdog_clear(&wd);
Complete:
if (error)
if (error) {
async_error = error;
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
@ -1727,25 +1708,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
static void async_suspend(void *data, async_cookie_t cookie)
{
struct device *dev = data;
int error;
error = __device_suspend(dev, pm_transition, true);
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}
device_suspend(dev, pm_transition, true);
put_device(dev);
}
static int device_suspend(struct device *dev)
{
if (dpm_async_fn(dev, async_suspend))
return 0;
return __device_suspend(dev, pm_transition, false);
}
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@ -1761,28 +1728,24 @@ int dpm_suspend(pm_message_t state)
devfreq_suspend();
cpufreq_suspend();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);
list_move(&dev->power.entry, &dpm_suspended_list);
if (dpm_async_fn(dev, async_suspend))
continue;
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, "", error);
dpm_save_failed_dev(dev_name(dev));
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_suspended_list);
}
mutex_unlock(&dpm_list_mtx);
error = device_suspend(dev, state, false);
put_device(dev);
@ -1791,14 +1754,16 @@ int dpm_suspend(pm_message_t state)
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)
error = async_error;
if (error) {
suspend_stats.failed_suspend++;
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
}
dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
@ -1949,11 +1914,11 @@ int dpm_suspend_start(pm_message_t state)
int error;
error = dpm_prepare(state);
if (error) {
suspend_stats.failed_prepare++;
if (error)
dpm_save_failed_step(SUSPEND_PREPARE);
} else
else
error = dpm_suspend(state);
dpm_show_time(starttime, state, error, "start");
return error;
}

View File

@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
}
}
/**

View File

@ -662,8 +662,8 @@ struct pm_subsys_data {
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool can_wakeup:1;
bool async_suspend:1;
bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
@ -682,10 +682,10 @@ struct dev_pm_info {
bool syscore:1;
bool no_pm_callbacks:1; /* Owned by the PM core */
bool async_in_progress:1; /* Owned by the PM core */
unsigned int must_resume:1; /* Owned by the PM core */
unsigned int may_skip_resume:1; /* Set by subsystems */
bool must_resume:1; /* Owned by the PM core */
bool may_skip_resume:1; /* Set by subsystems */
#else
unsigned int should_wakeup:1;
bool should_wakeup:1;
#endif
#ifdef CONFIG_PM
struct hrtimer suspend_timer;
@ -696,17 +696,17 @@ struct dev_pm_info {
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
unsigned int needs_force_resume:1;
unsigned int runtime_auto:1;
bool idle_notification:1;
bool request_pending:1;
bool deferred_resume:1;
bool needs_force_resume:1;
bool runtime_auto:1;
bool ignore_children:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
bool no_callbacks:1;
bool irq_safe:1;
bool use_autosuspend:1;
bool timer_autosuspends:1;
bool memalloc_noio:1;
unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;

View File

@ -40,65 +40,6 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
enum suspend_stat_step {
SUSPEND_FREEZE = 1,
SUSPEND_PREPARE,
SUSPEND_SUSPEND,
SUSPEND_SUSPEND_LATE,
SUSPEND_SUSPEND_NOIRQ,
SUSPEND_RESUME_NOIRQ,
SUSPEND_RESUME_EARLY,
SUSPEND_RESUME
};
struct suspend_stats {
int success;
int fail;
int failed_freeze;
int failed_prepare;
int failed_suspend;
int failed_suspend_late;
int failed_suspend_noirq;
int failed_resume;
int failed_resume_early;
int failed_resume_noirq;
#define REC_FAILED_NUM 2
int last_failed_dev;
char failed_devs[REC_FAILED_NUM][40];
int last_failed_errno;
int errno[REC_FAILED_NUM];
int last_failed_step;
u64 last_hw_sleep;
u64 total_hw_sleep;
u64 max_hw_sleep;
enum suspend_stat_step failed_steps[REC_FAILED_NUM];
};
extern struct suspend_stats suspend_stats;
static inline void dpm_save_failed_dev(const char *name)
{
strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
name,
sizeof(suspend_stats.failed_devs[0]));
suspend_stats.last_failed_dev++;
suspend_stats.last_failed_dev %= REC_FAILED_NUM;
}
static inline void dpm_save_failed_errno(int err)
{
suspend_stats.errno[suspend_stats.last_failed_errno] = err;
suspend_stats.last_failed_errno++;
suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}
static inline void dpm_save_failed_step(enum suspend_stat_step step)
{
suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
suspend_stats.last_failed_step++;
suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@ -626,4 +567,19 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
enum suspend_stat_step {
SUSPEND_WORKING = 0,
SUSPEND_FREEZE,
SUSPEND_PREPARE,
SUSPEND_SUSPEND,
SUSPEND_SUSPEND_LATE,
SUSPEND_SUSPEND_NOIRQ,
SUSPEND_RESUME_NOIRQ,
SUSPEND_RESUME_EARLY,
SUSPEND_RESUME
};
void dpm_save_failed_dev(const char *name);
void dpm_save_failed_step(enum suspend_stat_step step);
#endif /* _LINUX_SUSPEND_H */

View File

@ -39,9 +39,9 @@ config HIBERNATION
bool "Hibernation (aka 'suspend to disk')"
depends on SWAP && ARCH_HIBERNATION_POSSIBLE
select HIBERNATE_CALLBACKS
select LZO_COMPRESS
select LZO_DECOMPRESS
select CRC32
select CRYPTO
select CRYPTO_LZO
help
Enable the suspend to disk (STD) functionality, which is usually
called "hibernation" in user interfaces. STD checkpoints the
@ -92,6 +92,28 @@ config HIBERNATION_SNAPSHOT_DEV
If in doubt, say Y.
choice
prompt "Default compressor"
default HIBERNATION_COMP_LZO
depends on HIBERNATION
config HIBERNATION_COMP_LZO
bool "lzo"
depends on CRYPTO_LZO
config HIBERNATION_COMP_LZ4
bool "lz4"
depends on CRYPTO_LZ4
endchoice
config HIBERNATION_DEF_COMP
string
default "lzo" if HIBERNATION_COMP_LZO
default "lz4" if HIBERNATION_COMP_LZ4
help
Default compressor to be used for hibernation.
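
A sketch of the resulting configuration fragment when LZ4 is chosen as the default compressor (assuming CRYPTO_LZ4 is enabled; illustrative, not part of the patch):

	CONFIG_HIBERNATION=y
	CONFIG_CRYPTO_LZ4=y
	# CONFIG_HIBERNATION_COMP_LZO is not set
	CONFIG_HIBERNATION_COMP_LZ4=y
	CONFIG_HIBERNATION_DEF_COMP="lz4"
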
config PM_STD_PARTITION
string "Default resume partition"
depends on HIBERNATION

View File

@ -47,6 +47,15 @@ dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
__visible int in_suspend __nosavedata;
static char hibernate_compressor[CRYPTO_MAX_ALG_NAME] = CONFIG_HIBERNATION_DEF_COMP;
/*
* Compression/decompression algorithm to be used while saving/loading
* image to/from disk. This would later be used in 'kernel/power/swap.c'
* to allocate comp streams.
*/
char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
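/*
 * Illustrative sketch (editorial, not part of the patch): how hib_comp_algo
 * feeds the crypto "comp" API that kernel/power/swap.c uses for the image
 * data. The helper below and its name are hypothetical (needs <linux/crypto.h>).
 */
static int hib_compress_buf_example(const u8 *src, unsigned int slen,
				    u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
	int ret;

	if (IS_ERR_OR_NULL(cc))
		return -EFAULT;

	/* On entry *dlen holds the capacity of @dst, on success the output size. */
	ret = crypto_comp_compress(cc, src, slen, dst, dlen);
	crypto_free_comp(cc);
	return ret;
}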
enum {
HIBERNATION_INVALID,
HIBERNATION_PLATFORM,
@ -718,6 +727,9 @@ static int load_image_and_restore(void)
return error;
}
#define COMPRESSION_ALGO_LZO "lzo"
#define COMPRESSION_ALGO_LZ4 "lz4"
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
@ -732,6 +744,17 @@ int hibernate(void)
return -EPERM;
}
/*
* Query for the compression algorithm support if compression is enabled.
*/
if (!nocompress) {
strscpy(hib_comp_algo, hibernate_compressor, sizeof(hib_comp_algo));
if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
pr_err("%s compression is not available\n", hib_comp_algo);
return -EOPNOTSUPP;
}
}
sleep_flags = lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
@ -766,11 +789,24 @@ int hibernate(void)
if (hibernation_mode == HIBERNATION_PLATFORM)
flags |= SF_PLATFORM_MODE;
if (nocompress)
if (nocompress) {
flags |= SF_NOCOMPRESS_MODE;
else
} else {
flags |= SF_CRC32_MODE;
/*
* By default, LZO compression is enabled. Use SF_COMPRESSION_ALG_LZ4
* to override this behaviour and use LZ4.
*
* Refer to kernel/power/power.h for more details
*/
if (!strcmp(hib_comp_algo, COMPRESSION_ALGO_LZ4))
flags |= SF_COMPRESSION_ALG_LZ4;
else
flags |= SF_COMPRESSION_ALG_LZO;
}
pm_pr_dbg("Writing hibernation image.\n");
error = swsusp_write(flags);
swsusp_free();
@ -955,6 +991,22 @@ static int software_resume(void)
if (error)
goto Unlock;
/*
* Check if the hibernation image is compressed. If so, query for
* the algorithm support.
*/
if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
if (swsusp_header_flags & SF_COMPRESSION_ALG_LZ4)
strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4, sizeof(hib_comp_algo));
else
strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO, sizeof(hib_comp_algo));
if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) {
pr_err("%s compression is not available\n", hib_comp_algo);
error = -EOPNOTSUPP;
goto Unlock;
}
}
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
@ -1370,6 +1422,57 @@ static int __init nohibernate_setup(char *str)
return 1;
}
static const char * const comp_alg_enabled[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
COMPRESSION_ALGO_LZO,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
COMPRESSION_ALGO_LZ4,
#endif
};
static int hibernate_compressor_param_set(const char *compressor,
const struct kernel_param *kp)
{
unsigned int sleep_flags;
int index, ret;
sleep_flags = lock_system_sleep();
index = sysfs_match_string(comp_alg_enabled, compressor);
if (index >= 0) {
ret = param_set_copystring(comp_alg_enabled[index], kp);
if (!ret)
strscpy(hib_comp_algo, comp_alg_enabled[index],
sizeof(hib_comp_algo));
} else {
ret = index;
}
unlock_system_sleep(sleep_flags);
if (ret)
pr_debug("Cannot set specified compressor %s\n",
compressor);
return ret;
}
static const struct kernel_param_ops hibernate_compressor_param_ops = {
.set = hibernate_compressor_param_set,
.get = param_get_string,
};
static struct kparam_string hibernate_compressor_param_string = {
.maxlen = sizeof(hibernate_compressor),
.string = hibernate_compressor,
};
module_param_cb(compressor, &hibernate_compressor_param_ops,
&hibernate_compressor_param_string, 0644);
MODULE_PARM_DESC(compressor,
"Compression algorithm to be used with hibernation");
__setup("noresume", noresume_setup);
__setup("resume_offset=", resume_offset_setup);
__setup("resume=", resume_setup);

View File

@ -95,19 +95,6 @@ int unregister_pm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
void pm_report_hw_sleep_time(u64 t)
{
suspend_stats.last_hw_sleep = t;
suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
void pm_report_max_hw_sleep(u64 t)
{
suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
int ret;
@ -319,26 +306,86 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP_DEBUG */
static char *suspend_step_name(enum suspend_stat_step step)
#define SUSPEND_NR_STEPS SUSPEND_RESUME
#define REC_FAILED_NUM 2
struct suspend_stats {
unsigned int step_failures[SUSPEND_NR_STEPS];
unsigned int success;
unsigned int fail;
int last_failed_dev;
char failed_devs[REC_FAILED_NUM][40];
int last_failed_errno;
int errno[REC_FAILED_NUM];
int last_failed_step;
u64 last_hw_sleep;
u64 total_hw_sleep;
u64 max_hw_sleep;
enum suspend_stat_step failed_steps[REC_FAILED_NUM];
};
static struct suspend_stats suspend_stats;
static DEFINE_MUTEX(suspend_stats_lock);
void dpm_save_failed_dev(const char *name)
{
switch (step) {
case SUSPEND_FREEZE:
return "freeze";
case SUSPEND_PREPARE:
return "prepare";
case SUSPEND_SUSPEND:
return "suspend";
case SUSPEND_SUSPEND_NOIRQ:
return "suspend_noirq";
case SUSPEND_RESUME_NOIRQ:
return "resume_noirq";
case SUSPEND_RESUME:
return "resume";
default:
return "";
}
mutex_lock(&suspend_stats_lock);
strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
name, sizeof(suspend_stats.failed_devs[0]));
suspend_stats.last_failed_dev++;
suspend_stats.last_failed_dev %= REC_FAILED_NUM;
mutex_unlock(&suspend_stats_lock);
}
void dpm_save_failed_step(enum suspend_stat_step step)
{
suspend_stats.step_failures[step-1]++;
suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
suspend_stats.last_failed_step++;
suspend_stats.last_failed_step %= REC_FAILED_NUM;
}
void dpm_save_errno(int err)
{
if (!err) {
suspend_stats.success++;
return;
}
suspend_stats.fail++;
suspend_stats.errno[suspend_stats.last_failed_errno] = err;
suspend_stats.last_failed_errno++;
suspend_stats.last_failed_errno %= REC_FAILED_NUM;
}
void pm_report_hw_sleep_time(u64 t)
{
suspend_stats.last_hw_sleep = t;
suspend_stats.total_hw_sleep += t;
}
EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
void pm_report_max_hw_sleep(u64 t)
{
suspend_stats.max_hw_sleep = t;
}
EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
static const char * const suspend_step_names[] = {
[SUSPEND_WORKING] = "",
[SUSPEND_FREEZE] = "freeze",
[SUSPEND_PREPARE] = "prepare",
[SUSPEND_SUSPEND] = "suspend",
[SUSPEND_SUSPEND_LATE] = "suspend_late",
[SUSPEND_SUSPEND_NOIRQ] = "suspend_noirq",
[SUSPEND_RESUME_NOIRQ] = "resume_noirq",
[SUSPEND_RESUME_EARLY] = "resume_early",
[SUSPEND_RESUME] = "resume",
};
#define suspend_attr(_name, format_str) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
@ -347,20 +394,30 @@ static ssize_t _name##_show(struct kobject *kobj, \
} \
static struct kobj_attribute _name = __ATTR_RO(_name)
suspend_attr(success, "%d\n");
suspend_attr(fail, "%d\n");
suspend_attr(failed_freeze, "%d\n");
suspend_attr(failed_prepare, "%d\n");
suspend_attr(failed_suspend, "%d\n");
suspend_attr(failed_suspend_late, "%d\n");
suspend_attr(failed_suspend_noirq, "%d\n");
suspend_attr(failed_resume, "%d\n");
suspend_attr(failed_resume_early, "%d\n");
suspend_attr(failed_resume_noirq, "%d\n");
suspend_attr(success, "%u\n");
suspend_attr(fail, "%u\n");
suspend_attr(last_hw_sleep, "%llu\n");
suspend_attr(total_hw_sleep, "%llu\n");
suspend_attr(max_hw_sleep, "%llu\n");
#define suspend_step_attr(_name, step) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", \
suspend_stats.step_failures[step-1]); \
} \
static struct kobj_attribute _name = __ATTR_RO(_name)
suspend_step_attr(failed_freeze, SUSPEND_FREEZE);
suspend_step_attr(failed_prepare, SUSPEND_PREPARE);
suspend_step_attr(failed_suspend, SUSPEND_SUSPEND);
suspend_step_attr(failed_suspend_late, SUSPEND_SUSPEND_LATE);
suspend_step_attr(failed_suspend_noirq, SUSPEND_SUSPEND_NOIRQ);
suspend_step_attr(failed_resume, SUSPEND_RESUME);
suspend_step_attr(failed_resume_early, SUSPEND_RESUME_EARLY);
suspend_step_attr(failed_resume_noirq, SUSPEND_RESUME_NOIRQ);
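/*
 * Usage note (editorial, not part of the patch): the per-step failure counters
 * above are exposed through sysfs, conventionally under
 * /sys/power/suspend_stats/, so after a failed transition one can run, e.g.,
 * "cat /sys/power/suspend_stats/failed_suspend".
 */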
static ssize_t last_failed_dev_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@ -392,16 +449,14 @@ static struct kobj_attribute last_failed_errno = __ATTR_RO(last_failed_errno);
static ssize_t last_failed_step_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
int index;
enum suspend_stat_step step;
char *last_failed_step = NULL;
int index;
index = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
index %= REC_FAILED_NUM;
step = suspend_stats.failed_steps[index];
last_failed_step = suspend_step_name(step);
return sprintf(buf, "%s\n", last_failed_step);
return sprintf(buf, "%s\n", suspend_step_names[step]);
}
static struct kobj_attribute last_failed_step = __ATTR_RO(last_failed_step);
@ -449,6 +504,7 @@ static const struct attribute_group suspend_attr_group = {
static int suspend_stats_show(struct seq_file *s, void *unused)
{
int i, index, last_dev, last_errno, last_step;
enum suspend_stat_step step;
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
@ -456,47 +512,35 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
last_errno %= REC_FAILED_NUM;
last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
last_step %= REC_FAILED_NUM;
seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
"success", suspend_stats.success,
"fail", suspend_stats.fail,
"failed_freeze", suspend_stats.failed_freeze,
"failed_prepare", suspend_stats.failed_prepare,
"failed_suspend", suspend_stats.failed_suspend,
"failed_suspend_late",
suspend_stats.failed_suspend_late,
"failed_suspend_noirq",
suspend_stats.failed_suspend_noirq,
"failed_resume", suspend_stats.failed_resume,
"failed_resume_early",
suspend_stats.failed_resume_early,
"failed_resume_noirq",
suspend_stats.failed_resume_noirq);
seq_printf(s, "success: %u\nfail: %u\n",
suspend_stats.success, suspend_stats.fail);
for (step = SUSPEND_FREEZE; step <= SUSPEND_NR_STEPS; step++)
seq_printf(s, "failed_%s: %u\n", suspend_step_names[step],
suspend_stats.step_failures[step-1]);
seq_printf(s, "failures:\n last_failed_dev:\t%-s\n",
suspend_stats.failed_devs[last_dev]);
suspend_stats.failed_devs[last_dev]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_dev + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_stats.failed_devs[index]);
seq_printf(s, "\t\t\t%-s\n", suspend_stats.failed_devs[index]);
}
seq_printf(s, " last_failed_errno:\t%-d\n",
suspend_stats.errno[last_errno]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_errno + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-d\n",
suspend_stats.errno[index]);
seq_printf(s, "\t\t\t%-d\n", suspend_stats.errno[index]);
}
seq_printf(s, " last_failed_step:\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[last_step]));
suspend_step_names[suspend_stats.failed_steps[last_step]]);
for (i = 1; i < REC_FAILED_NUM; i++) {
index = last_step + REC_FAILED_NUM - i;
index %= REC_FAILED_NUM;
seq_printf(s, "\t\t\t%-s\n",
suspend_step_name(
suspend_stats.failed_steps[index]));
suspend_step_names[suspend_stats.failed_steps[index]]);
}
return 0;

View File

@ -6,6 +6,7 @@
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/crypto.h>
struct swsusp_info {
struct new_utsname uts;
@ -54,6 +55,10 @@ asmlinkage int swsusp_save(void);
/* kernel/power/hibernate.c */
extern bool freezer_test_done;
extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];
/* kernel/power/swap.c */
extern unsigned int swsusp_header_flags;
extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
@ -148,7 +153,7 @@ extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
extern bool hibernate_acquire(void);
@ -162,11 +167,25 @@ extern int swsusp_swap_in_use(void);
* Flags that can be passed from the hibernating kernel to the "boot" kernel in
* the image header.
*/
#define SF_COMPRESSION_ALG_LZO 0 /* dummy, details given below */
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
#define SF_CRC32_MODE 4
#define SF_HW_SIG 8
/*
* Bit to indicate the compression algorithm to be used (for LZ4). The same
* can be checked while saving/loading the image to/from disk to use the
* corresponding algorithm.
*
* By default, LZO compression is enabled if SF_CRC32_MODE is set. Use
* SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
*
* SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO(dummy) -> Compression, LZO
* SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4 -> Compression, LZ4
*/
#define SF_COMPRESSION_ALG_LZ4 16
/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
@ -327,3 +346,5 @@ static inline void pm_sleep_enable_secondary_cpus(void)
suspend_enable_secondary_cpus();
cpuidle_resume();
}
void dpm_save_errno(int err);

View File

@ -58,22 +58,24 @@ static inline void hibernate_restore_protection_end(void)
hibernate_restore_protection_active = false;
}
static inline void hibernate_restore_protect_page(void *page_address)
static inline int __must_check hibernate_restore_protect_page(void *page_address)
{
if (hibernate_restore_protection_active)
set_memory_ro((unsigned long)page_address, 1);
return set_memory_ro((unsigned long)page_address, 1);
return 0;
}
static inline void hibernate_restore_unprotect_page(void *page_address)
static inline int hibernate_restore_unprotect_page(void *page_address)
{
if (hibernate_restore_protection_active)
set_memory_rw((unsigned long)page_address, 1);
return set_memory_rw((unsigned long)page_address, 1);
return 0;
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
static inline int __must_check hibernate_restore_protect_page(void *page_address) {return 0; }
static inline int hibernate_restore_unprotect_page(void *page_address) {return 0; }
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
@ -2832,7 +2834,9 @@ next:
}
} else {
copy_last_highmem_page();
hibernate_restore_protect_page(handle->buffer);
error = hibernate_restore_protect_page(handle->buffer);
if (error)
return error;
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
@ -2858,15 +2862,18 @@ next:
* stored in highmem. Additionally, it recycles bitmap memory that's not
* necessary any more.
*/
void snapshot_write_finalize(struct snapshot_handle *handle)
int snapshot_write_finalize(struct snapshot_handle *handle)
{
int error;
copy_last_highmem_page();
hibernate_restore_protect_page(handle->buffer);
error = hibernate_restore_protect_page(handle->buffer);
/* Do that only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
memory_bm_recycle(&orig_bm);
free_highmem_data();
}
return error;
}
int snapshot_image_loaded(struct snapshot_handle *handle)

View File

@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str)
if (mem_sleep_labels[state] &&
!strcmp(str, mem_sleep_labels[state])) {
mem_sleep_default = state;
mem_sleep_current = state;
break;
}
@ -367,7 +368,6 @@ static int suspend_prepare(suspend_state_t state)
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
pm_notifier_call_chain(PM_POST_SUSPEND);
Restore:
@ -617,12 +617,7 @@ int pm_suspend(suspend_state_t state)
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else {
suspend_stats.success++;
}
dpm_save_errno(error);
pr_info("suspend exit\n");
return error;
}

View File

@ -23,7 +23,6 @@
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
@ -339,6 +338,13 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
return error;
}
/*
* Holds the swsusp_header flags. This is used in software_resume() in
* 'kernel/power/hibernate.c' to check if the image is compressed and, if so,
* to query for the compression algorithm support.
*/
unsigned int swsusp_header_flags;
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index (if so)
@ -514,25 +520,30 @@ static int swap_writer_finish(struct swap_map_handle *handle,
return error;
}
/*
* Bytes we need for compressed data in the worst case. We assume (as a
* limitation) that this is the worst case across all the compression algorithms.
*/
#define bytes_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER sizeof(size_t)
#define CMP_HEADER sizeof(size_t)
/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
#define UNC_PAGES 32
#define UNC_SIZE (UNC_PAGES * PAGE_SIZE)
/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
/* Number of pages we need for compressed data (worst case). */
#define CMP_PAGES DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
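/*
 * Worked example (editorial note, assuming 4 KiB pages and a 64-bit size_t):
 * UNC_SIZE = 32 * 4096 = 131072 bytes, bytes_worst_compress(UNC_SIZE) =
 * 131072 + 8192 + 64 + 3 + 2 = 139333 bytes, so CMP_PAGES =
 * DIV_ROUND_UP(139333 + 8, 4096) = 35 and CMP_SIZE = 35 * 4096 = 143360 bytes.
 */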
/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS 3
#define CMP_THREADS 3
/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES 1024
#define LZO_MAX_RD_PAGES 8192
#define CMP_MIN_RD_PAGES 1024
#define CMP_MAX_RD_PAGES 8192
/**
* save_image - save the suspend image data
@ -593,8 +604,8 @@ struct crc_data {
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
unsigned char *unc[LZO_THREADS]; /* uncompressed data */
size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */
unsigned char *unc[CMP_THREADS]; /* uncompressed data */
};
/*
@ -625,10 +636,11 @@ static int crc32_threadfn(void *data)
return 0;
}
/*
* Structure used for LZO data compression.
* Structure used for data compression.
*/
struct cmp_data {
struct task_struct *thr; /* thread */
struct crypto_comp *cc; /* crypto compressor stream */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
@ -636,17 +648,20 @@ struct cmp_data {
wait_queue_head_t done; /* compression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
unsigned char unc[UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[CMP_SIZE]; /* compressed buffer */
};
/* Indicates the image size after compression */
static atomic_t compressed_size = ATOMIC_INIT(0);
/*
* Compression function that runs in its own thread.
*/
static int lzo_compress_threadfn(void *data)
static int compress_threadfn(void *data)
{
struct cmp_data *d = data;
unsigned int cmp_len = 0;
while (1) {
wait_event(d->go, atomic_read_acquire(&d->ready) ||
@ -660,9 +675,13 @@ static int lzo_compress_threadfn(void *data)
}
atomic_set(&d->ready, 0);
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
cmp_len = CMP_SIZE - CMP_HEADER;
d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len,
d->cmp + CMP_HEADER,
&cmp_len);
d->cmp_len = cmp_len;
atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len);
atomic_set_release(&d->stop, 1);
wake_up(&d->done);
}
@ -670,14 +689,14 @@ static int lzo_compress_threadfn(void *data)
}
/**
* save_image_lzo - Save the suspend image data compressed with LZO.
* save_compressed_image - Save the suspend image data after compression.
* @handle: Swap map handle to use for saving the image.
* @snapshot: Image to read data from.
* @nr_to_write: Number of pages to save.
*/
static int save_image_lzo(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
static int save_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
{
unsigned int m;
int ret = 0;
@ -694,23 +713,25 @@ static int save_image_lzo(struct swap_map_handle *handle,
hib_init_batch(&hb);
atomic_set(&compressed_size, 0);
/*
* We'll limit the number of threads for compression to limit memory
* footprint.
*/
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) {
pr_err("Failed to allocate LZO page\n");
pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
@ -729,7 +750,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
data[thr].thr = kthread_run(lzo_compress_threadfn,
data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
if (IS_ERR_OR_NULL(data[thr].cc)) {
pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
ret = -EFAULT;
goto out_clean;
}
data[thr].thr = kthread_run(compress_threadfn,
&data[thr],
"image_compress/%u", thr);
if (IS_ERR(data[thr].thr)) {
@ -767,7 +795,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
*/
handle->reqd_free_pages = reqd_free_pages();
pr_info("Using %u thread(s) for compression\n", nr_threads);
pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);
pr_info("Compressing and saving image data (%u pages)...\n",
nr_to_write);
m = nr_to_write / 10;
@ -777,7 +805,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
start = ktime_get();
for (;;) {
for (thr = 0; thr < nr_threads; thr++) {
for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
ret = snapshot_read_next(snapshot);
if (ret < 0)
goto out_finish;
@ -817,14 +845,14 @@ static int save_image_lzo(struct swap_map_handle *handle,
ret = data[thr].ret;
if (ret < 0) {
pr_err("LZO compression failed\n");
pr_err("%s compression failed\n", hib_comp_algo);
goto out_finish;
}
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(data[thr].unc_len))) {
pr_err("Invalid LZO compressed length\n");
bytes_worst_compress(data[thr].unc_len))) {
pr_err("Invalid %s compressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
@ -840,7 +868,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
* read it.
*/
for (off = 0;
off < LZO_HEADER + data[thr].cmp_len;
off < CMP_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(page, data[thr].cmp + off, PAGE_SIZE);
@ -862,6 +890,9 @@ out_finish:
if (!ret)
pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
pr_info("Image size after compression: %d kbytes\n",
(atomic_read(&compressed_size) / 1024));
out_clean:
hib_finish_batch(&hb);
if (crc) {
@ -870,9 +901,12 @@ out_clean:
kfree(crc);
}
if (data) {
for (thr = 0; thr < nr_threads; thr++)
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
if (data[thr].cc)
crypto_free_comp(data[thr].cc);
}
vfree(data);
}
if (page) free_page((unsigned long)page);
@ -942,7 +976,7 @@ int swsusp_write(unsigned int flags)
if (!error) {
error = (flags & SF_NOCOMPRESS_MODE) ?
save_image(&handle, &snapshot, pages - 1) :
save_image_lzo(&handle, &snapshot, pages - 1);
save_compressed_image(&handle, &snapshot, pages - 1);
}
out_finish:
error = swap_writer_finish(&handle, flags, error);
@ -1100,8 +1134,8 @@ static int load_image(struct swap_map_handle *handle,
ret = err2;
if (!ret) {
pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = snapshot_write_finalize(snapshot);
if (!ret && !snapshot_image_loaded(snapshot))
ret = -ENODATA;
}
swsusp_show_speed(start, stop, nr_to_read, "Read");
@ -1109,10 +1143,11 @@ static int load_image(struct swap_map_handle *handle,
}
/*
* Structure used for LZO data decompression.
* Structure used for data decompression.
*/
struct dec_data {
struct task_struct *thr; /* thread */
struct crypto_comp *cc; /* crypto compressor stream */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
@ -1120,16 +1155,17 @@ struct dec_data {
wait_queue_head_t done; /* decompression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
unsigned char unc[UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[CMP_SIZE]; /* compressed buffer */
};
/*
* Decompression function that runs in its own thread.
*/
static int lzo_decompress_threadfn(void *data)
static int decompress_threadfn(void *data)
{
struct dec_data *d = data;
unsigned int unc_len = 0;
while (1) {
wait_event(d->go, atomic_read_acquire(&d->ready) ||
@ -1143,9 +1179,11 @@ static int lzo_decompress_threadfn(void *data)
}
atomic_set(&d->ready, 0);
d->unc_len = LZO_UNC_SIZE;
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len);
unc_len = UNC_SIZE;
d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len,
d->unc, &unc_len);
d->unc_len = unc_len;
if (clean_pages_on_decompress)
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
@ -1157,14 +1195,14 @@ static int lzo_decompress_threadfn(void *data)
}
/**
* load_image_lzo - Load compressed image data and decompress them with LZO.
* load_compressed_image - Load compressed image data and decompress it.
* @handle: Swap map handle to use for loading data.
* @snapshot: Image to copy uncompressed data into.
* @nr_to_read: Number of pages to load.
*/
static int load_image_lzo(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
static int load_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
{
unsigned int m;
int ret = 0;
@ -1189,18 +1227,18 @@ static int load_image_lzo(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
if (!page) {
pr_err("Failed to allocate LZO page\n");
pr_err("Failed to allocate %s page\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
data = vzalloc(array_size(nr_threads, sizeof(*data)));
if (!data) {
pr_err("Failed to allocate LZO data\n");
pr_err("Failed to allocate %s data\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
}
@ -1221,7 +1259,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
data[thr].thr = kthread_run(lzo_decompress_threadfn,
data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
if (IS_ERR_OR_NULL(data[thr].cc)) {
pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
ret = -EFAULT;
goto out_clean;
}
data[thr].thr = kthread_run(decompress_threadfn,
&data[thr],
"image_decompress/%u", thr);
if (IS_ERR(data[thr].thr)) {
@ -1262,18 +1307,18 @@ static int load_image_lzo(struct swap_map_handle *handle,
*/
if (low_free_pages() > snapshot_get_image_size())
read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES);
for (i = 0; i < read_pages; i++) {
page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
page[i] = (void *)__get_free_page(i < CMP_PAGES ?
GFP_NOIO | __GFP_HIGH :
GFP_NOIO | __GFP_NOWARN |
__GFP_NORETRY);
if (!page[i]) {
if (i < LZO_CMP_PAGES) {
if (i < CMP_PAGES) {
ring_size = i;
pr_err("Failed to allocate LZO pages\n");
pr_err("Failed to allocate %s pages\n", hib_comp_algo);
ret = -ENOMEM;
goto out_clean;
} else {
@ -1283,7 +1328,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
want = ring_size = i;
pr_info("Using %u thread(s) for decompression\n", nr_threads);
pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo);
pr_info("Loading and decompressing image data (%u pages)...\n",
nr_to_read);
m = nr_to_read / 10;
@ -1344,13 +1389,13 @@ static int load_image_lzo(struct swap_map_handle *handle,
data[thr].cmp_len = *(size_t *)page[pg];
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(LZO_UNC_SIZE))) {
pr_err("Invalid LZO compressed length\n");
bytes_worst_compress(UNC_SIZE))) {
pr_err("Invalid %s compressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
PAGE_SIZE);
if (need > have) {
if (eof > 1) {
@ -1361,7 +1406,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
for (off = 0;
off < LZO_HEADER + data[thr].cmp_len;
off < CMP_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(data[thr].cmp + off,
page[pg], PAGE_SIZE);
@ -1378,7 +1423,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
/*
* Wait for more data while we are decompressing.
*/
if (have < LZO_CMP_PAGES && asked) {
if (have < CMP_PAGES && asked) {
ret = hib_wait_io(&hb);
if (ret)
goto out_finish;
@ -1396,14 +1441,14 @@ static int load_image_lzo(struct swap_map_handle *handle,
ret = data[thr].ret;
if (ret < 0) {
pr_err("LZO decompression failed\n");
pr_err("%s decompression failed\n", hib_comp_algo);
goto out_finish;
}
if (unlikely(!data[thr].unc_len ||
data[thr].unc_len > LZO_UNC_SIZE ||
data[thr].unc_len & (PAGE_SIZE - 1))) {
pr_err("Invalid LZO uncompressed length\n");
data[thr].unc_len > UNC_SIZE ||
data[thr].unc_len & (PAGE_SIZE - 1))) {
pr_err("Invalid %s uncompressed length\n", hib_comp_algo);
ret = -1;
goto out_finish;
}
@ -1441,8 +1486,8 @@ out_finish:
stop = ktime_get();
if (!ret) {
pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = snapshot_write_finalize(snapshot);
if (!ret && !snapshot_image_loaded(snapshot))
ret = -ENODATA;
if (!ret) {
if (swsusp_header->flags & SF_CRC32_MODE) {
@ -1464,9 +1509,12 @@ out_clean:
kfree(crc);
}
if (data) {
for (thr = 0; thr < nr_threads; thr++)
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
kthread_stop(data[thr].thr);
if (data[thr].cc)
crypto_free_comp(data[thr].cc);
}
vfree(data);
}
vfree(page);
@ -1500,7 +1548,7 @@ int swsusp_read(unsigned int *flags_p)
if (!error) {
error = (*flags_p & SF_NOCOMPRESS_MODE) ?
load_image(&handle, &snapshot, header->pages - 1) :
load_image_lzo(&handle, &snapshot, header->pages - 1);
load_compressed_image(&handle, &snapshot, header->pages - 1);
}
swap_reader_finish(&handle);
end:
@ -1535,6 +1583,7 @@ int swsusp_check(bool exclusive)
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
swsusp_header_flags = swsusp_header->flags;
/* Reset swap signature now */
error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block,

View File

@ -317,7 +317,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;
case SNAPSHOT_ATOMIC_RESTORE:
snapshot_write_finalize(&data->handle);
error = snapshot_write_finalize(&data->handle);
if (error)
break;
if (data->mode != O_WRONLY || !data->frozen ||
!snapshot_image_loaded(&data->handle)) {
error = -EPERM;