Merge back material related to system sleep for 6.19

Rafael J. Wysocki
2025-11-20 22:28:23 +01:00
20 changed files with 285 additions and 159 deletions

View File

@@ -454,3 +454,19 @@ Description:
disables it. Reads from the file return the current value.
The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config
flag is unset, or "0" otherwise.
What: /sys/power/hibernate_compression_threads
Date: October 2025
Contact: <luoxueqin@kylinos.cn>
Description:
Controls the number of threads used for compression
and decompression of hibernation images.
The value can be adjusted at runtime to balance
performance and CPU utilization.
The change takes effect on the next hibernation or
resume operation.
Minimum value: 1
Default value: 3
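
For illustration (not part of this commit), the new knob behaves like any other
sysfs attribute under /sys/power; a minimal C sketch that selects four threads
for the next hibernation or resume cycle:

#include <stdio.h>

/* Hypothetical helper: write the thread count to the sysfs attribute
 * documented above. Returns 0 on success, -1 on failure.
 */
static int set_hibernate_compression_threads(unsigned int n)
{
	FILE *f = fopen("/sys/power/hibernate_compression_threads", "w");

	if (!f)
		return -1;
	if (fprintf(f, "%u\n", n) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f) == 0 ? 0 : -1;
}

int main(void)
{
	/* Takes effect on the next hibernation or resume operation. */
	return set_hibernate_compression_threads(4) ? 1 : 0;
}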

View File

@@ -1907,6 +1907,16 @@
/sys/power/pm_test). Only available when CONFIG_PM_DEBUG
is set. Default value is 5.
hibernate_compression_threads=
[HIBERNATION]
Set the number of threads used for compressing or decompressing
hibernation images.
Format: <integer>
Default: 3
Minimum: 1
Example: hibernate_compression_threads=4
highmem=nn[KMG] [KNL,BOOT,EARLY] forces the highmem zone to have an exact
size of <nn>. This works even on boxes that have no
highmem otherwise. This also works to reduce highmem

View File

@@ -19,6 +19,7 @@ Power Management
power_supply_class
runtime_pm
s2ram
shutdown-debugging
suspend-and-cpuhotplug
suspend-and-interrupts
swsusp-and-swap-files

View File

@@ -0,0 +1,53 @@
.. SPDX-License-Identifier: GPL-2.0
Debugging Kernel Shutdown Hangs with pstore
+++++++++++++++++++++++++++++++++++++++++++
Overview
========
If the system hangs while shutting down, the kernel logs may need to be
retrieved to debug the issue.
On systems that have a UART available, it is best to configure the kernel to use
this UART for kernel console output.
If a UART isn't available, the ``pstore`` subsystem provides a mechanism to
persist this data across a system reset, allowing it to be retrieved on the next
boot.
Kernel Configuration
====================
To enable ``pstore`` and the saving of kernel ring buffer logs, set the
following kernel configuration options:
* ``CONFIG_PSTORE=y``
* ``CONFIG_PSTORE_CONSOLE=y``
Additionally, enable a backend to store the data. Depending upon your platform,
some potential options include:
* ``CONFIG_EFI_VARS_PSTORE=y``
* ``CONFIG_PSTORE_RAM=y``
* ``CONFIG_CHROMEOS_PSTORE=y``
* ``CONFIG_PSTORE_BLK=y``
Kernel Command-line Parameters
==============================
Add these parameters to your kernel command line:
* ``printk.always_kmsg_dump=Y``

  * Forces the kernel to dump the entire message buffer to pstore during
    shutdown.

* ``efi_pstore.pstore_disable=N``

  * For EFI-based systems, ensures the EFI backend is active.
Userspace Interaction and Log Retrieval
=======================================
On the next boot after a hang, pstore logs will be available in the pstore
filesystem (``/sys/fs/pstore``) and can be retrieved by userspace.
On systemd systems, the ``systemd-pstore`` service will do the following:
#. Locate pstore data in ``/sys/fs/pstore``
#. Read and save it to ``/var/lib/systemd/pstore``
#. Clear pstore data for the next event
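
For illustration (not part of this commit), the retrieval step can also be done
without systemd; a minimal C sketch that dumps every record, assuming pstore is
mounted at the usual /sys/fs/pstore location:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir("/sys/fs/pstore");
	struct dirent *de;
	char path[512];
	char buf[4096];
	size_t n;

	if (!dir) {
		perror("/sys/fs/pstore");
		return 1;
	}
	/* Print each pstore record with a header naming the file. */
	while ((de = readdir(dir))) {
		FILE *f;

		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "/sys/fs/pstore/%s", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		printf("==== %s ====\n", de->d_name);
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
	}
	closedir(dir);
	return 0;
}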

View File

@@ -8,6 +8,13 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
#define CALL_PM_OP(dev, op) \
({ \
struct device *_dev = (dev); \
const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
pm && pm->op ? pm->op(_dev) : 0; \
})
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
*/
int pm_generic_runtime_suspend(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*/
int pm_generic_runtime_resume(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_resume);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
*/
int pm_generic_suspend_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+	return CALL_PM_OP(dev, suspend_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend_late(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+	return CALL_PM_OP(dev, suspend_late);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
*/
int pm_generic_suspend(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend ? pm->suspend(dev) : 0;
+	return CALL_PM_OP(dev, suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+	return CALL_PM_OP(dev, freeze_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
*/
int pm_generic_freeze(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze ? pm->freeze(dev) : 0;
+	return CALL_PM_OP(dev, freeze);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
*/
int pm_generic_poweroff_late(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_late);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
*/
int pm_generic_poweroff(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+	return CALL_PM_OP(dev, poweroff);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+	return CALL_PM_OP(dev, thaw_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
*/
int pm_generic_thaw(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw ? pm->thaw(dev) : 0;
+	return CALL_PM_OP(dev, thaw);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+	return CALL_PM_OP(dev, resume_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume_early(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+	return CALL_PM_OP(dev, resume_early);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
*/
int pm_generic_resume(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume ? pm->resume(dev) : 0;
+	return CALL_PM_OP(dev, resume);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+	return CALL_PM_OP(dev, restore_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore_early(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+	return CALL_PM_OP(dev, restore_early);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
*/
int pm_generic_restore(struct device *dev)
{
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore ? pm->restore(dev) : 0;
+	return CALL_PM_OP(dev, restore);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
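
All of the helpers above now share a single definition; for reference (not part
of the diff itself), a call such as CALL_PM_OP(dev, suspend) expands roughly to
the open-coded pattern this commit removes:

({
	struct device *_dev = (dev);
	const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL;

	pm && pm->suspend ? pm->suspend(_dev) : 0;
})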

View File

@@ -34,6 +34,7 @@
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
@@ -95,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
case PM_EVENT_POWEROFF:
return "poweroff";
default:
return "(unknown PM event)";
}
@@ -367,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
@@ -401,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -435,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -515,6 +521,11 @@ struct dpm_watchdog {
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
"Backtrace all CPUs on DPM watchdog timeout");
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
@@ -530,8 +541,12 @@ static void dpm_watchdog_handler(struct timer_list *t)
unsigned int time_left;
if (wd->fatal) {
unsigned int this_cpu = smp_processor_id();
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL, KERN_EMERG);
if (dpm_watchdog_all_cpu_backtrace)
trigger_allbutcpu_cpu_backtrace(this_cpu);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}

View File

@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
-			int len = snprintf(buf, size, "%s\n",
+			int len = scnprintf(buf, size, "%s\n",
					    dev_driver_string(dev));
-			if (len > size)
-				len = size;
buf += len;
ret += len;
size -= len;

View File

@@ -189,17 +189,16 @@ static void wakeup_source_remove(struct wakeup_source *ws)
if (WARN_ON(!ws))
return;
+	/*
+	 * After shutting down the timer, wakeup_source_activate() will warn if
+	 * the given wakeup source is passed to it.
+	 */
+	timer_shutdown_sync(&ws->timer);
+
	raw_spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	raw_spin_unlock_irqrestore(&events_lock, flags);
	synchronize_srcu(&wakeup_srcu);
-	timer_delete_sync(&ws->timer);
-
-	/*
-	 * Clear timer.function to make wakeup_source_not_registered() treat
-	 * this wakeup source as not registered.
-	 */
-	ws->timer.function = NULL;
}
/**
@@ -506,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
 * @ws: Wakeup source to be validated.
 */
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
	/*
-	 * Use timer struct to check if the given source is initialized
-	 * by wakeup_source_add.
+	 * Use the timer struct to check if the given wakeup source has been
+	 * initialized by wakeup_source_add() and it is not going away.
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
@@ -558,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
-	if (WARN_ONCE(wakeup_source_not_registered(ws),
-		      "unregistered wakeup source\n"))
+	if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
return;
ws->active = true;

View File

@@ -1762,6 +1762,7 @@ static int mesh_suspend(struct macio_dev *mdev, pm_message_t mesg)
case PM_EVENT_SUSPEND:
case PM_EVENT_HIBERNATE:
case PM_EVENT_FREEZE:
case PM_EVENT_POWEROFF:
break;
default:
return 0;

View File

@@ -1965,6 +1965,7 @@ static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
case PM_EVENT_SUSPEND:
return ST_S3;
case PM_EVENT_HIBERNATE:
case PM_EVENT_POWEROFF:
hba->msi_lock = 0;
return ST_S4;
default:

View File

@@ -1748,6 +1748,7 @@ sl811h_suspend(struct platform_device *dev, pm_message_t state)
break;
case PM_EVENT_SUSPEND:
case PM_EVENT_HIBERNATE:
case PM_EVENT_POWEROFF:
case PM_EVENT_PRETHAW: /* explicitly discard hw state */
port_power(sl811, 0);
break;

View File

@@ -22,14 +22,18 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
extern unsigned int freeze_timeout_msecs;
/*
- * Check if a process has been frozen
+ * Check if a process has been frozen for PM or cgroup1 freezer. Note that
+ * cgroup2 freezer uses the job control mechanism and does not interact with
+ * the PM freezer.
*/
extern bool frozen(struct task_struct *p);
extern bool freezing_slow_path(struct task_struct *p);
/*
- * Check if there is a request to freeze a process
+ * Check if there is a request to freeze a task from PM or cgroup1 freezer.
+ * Note that cgroup2 freezer uses the job control mechanism and does not
+ * interact with the PM freezer.
*/
static inline bool freezing(struct task_struct *p)
{
@@ -63,9 +67,9 @@ extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
#ifdef CONFIG_CGROUP_FREEZER
-extern bool cgroup_freezing(struct task_struct *task);
+extern bool cgroup1_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline bool cgroup_freezing(struct task_struct *task)
+static inline bool cgroup1_freezing(struct task_struct *task)
{
return false;
}

View File

@@ -25,11 +25,12 @@ extern void (*pm_power_off)(void);
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
-extern void pm_vt_switch_required(struct device *dev, bool required);
+extern int pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
-static inline void pm_vt_switch_required(struct device *dev, bool required)
+static inline int pm_vt_switch_required(struct device *dev, bool required)
{
+	return 0;
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
@@ -507,6 +508,7 @@ const struct dev_pm_ops name = { \
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
* POWEROFF System will poweroff, call ->poweroff() for all devices.
*
* The following PM_EVENT_ messages are defined for internal use by
* kernel subsystems. They are never issued by the PM core.
@@ -537,6 +539,7 @@ const struct dev_pm_ops name = { \
#define PM_EVENT_USER 0x0100
#define PM_EVENT_REMOTE 0x0200
#define PM_EVENT_AUTO 0x0400
#define PM_EVENT_POWEROFF 0x0800
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
@@ -551,6 +554,7 @@ const struct dev_pm_ops name = { \
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_POWEROFF ((struct pm_message){ .event = PM_EVENT_POWEROFF, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
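
A driver needs no new code to participate: per the pm_op() changes above, the
PM core routes PM_EVENT_POWEROFF to the existing ->poweroff callbacks. A sketch
of a driver that would now also run for the poweroff transition (foo_poweroff
is hypothetical, not from the patch):

#include <linux/pm.h>

/* Hypothetical driver callback: with this commit, the PM core maps
 * PM_EVENT_POWEROFF (like PM_EVENT_HIBERNATE) to ->poweroff.
 */
static int foo_poweroff(struct device *dev)
{
	/* quiesce the device before the system powers off */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.poweroff = foo_poweroff,
};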

View File

@@ -179,7 +179,8 @@ TRACE_EVENT(pstate_sample,
{ PM_EVENT_HIBERNATE, "hibernate" }, \
{ PM_EVENT_THAW, "thaw" }, \
{ PM_EVENT_RESTORE, "restore" }, \
-		{ PM_EVENT_RECOVER, "recover" })
+		{ PM_EVENT_RECOVER, "recover" }, \
+		{ PM_EVENT_POWEROFF, "poweroff" })
DEFINE_EVENT(cpu, cpu_frequency,

View File

@@ -63,7 +63,7 @@ static struct freezer *parent_freezer(struct freezer *freezer)
return css_freezer(freezer->css.parent);
}
-bool cgroup_freezing(struct task_struct *task)
+bool cgroup1_freezing(struct task_struct *task)
{
bool ret;

View File

@@ -44,7 +44,7 @@ bool freezing_slow_path(struct task_struct *p)
if (tsk_is_oom_victim(p))
return false;
-	if (pm_nosig_freezing || cgroup_freezing(p))
+	if (pm_nosig_freezing || cgroup1_freezing(p))
return true;
if (pm_freezing && !(p->flags & PF_KTHREAD))

View File

@@ -44,9 +44,10 @@ static LIST_HEAD(pm_vt_switch_list);
* no_console_suspend argument has been passed on the command line, VT
* switches will occur.
*/
-void pm_vt_switch_required(struct device *dev, bool required)
+int pm_vt_switch_required(struct device *dev, bool required)
{
	struct pm_vt_switch *entry, *tmp;
+	int ret = 0;
mutex_lock(&vt_switch_mutex);
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
@@ -58,8 +59,10 @@ void pm_vt_switch_required(struct device *dev, bool required)
}
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
-	if (!entry)
+	if (!entry) {
+		ret = -ENOMEM;
		goto out;
+	}
entry->required = required;
entry->dev = dev;
@@ -67,6 +70,7 @@ void pm_vt_switch_required(struct device *dev, bool required)
list_add(&entry->head, &pm_vt_switch_list);
out:
mutex_unlock(&vt_switch_mutex);
return ret;
}
EXPORT_SYMBOL(pm_vt_switch_required);
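
With the int return type, callers can now propagate registration failures; a
minimal sketch (example_probe is hypothetical, not from the patch):

#include <linux/pm.h>

static int example_probe(struct device *dev)
{
	int ret = pm_vt_switch_required(dev, true);

	/* -ENOMEM if the tracking entry could not be allocated */
	if (ret)
		return ret;

	/* ... rest of probe ... */
	return 0;
}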

View File

@@ -2110,22 +2110,20 @@ asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
pr_info("Creating image:\n");
pm_deferred_pr_dbg("Creating image\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
pm_deferred_pr_dbg("Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
pr_err("Not enough free memory\n");
pm_deferred_pr_dbg("Not enough free memory for image creation\n");
return -ENOMEM;
}
-	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
-		pr_err("Memory allocation failed\n");
+	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem))
		return -ENOMEM;
-	}
/*
* During allocating of suspend pagedir, new cold pages may appear.
@@ -2144,7 +2142,8 @@ asmlinkage __visible int swsusp_save(void)
nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
pm_deferred_pr_dbg("Image created (%d pages copied, %d zero pages)\n",
nr_copy_pages, nr_zero_pages);
return 0;
}

View File

@@ -344,10 +344,14 @@ MODULE_PARM_DESC(pm_test_delay,
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
+	int i;
if (pm_test_level == level) {
pr_info("suspend debug: Waiting for %d second(s).\n",
pm_test_delay);
-		mdelay(pm_test_delay * 1000);
+		for (i = 0; i < pm_test_delay && !pm_wakeup_pending(); i++)
+			msleep(1000);
return 1;
}
#endif /* !CONFIG_PM_DEBUG */

View File

@@ -336,16 +336,14 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
*/
unsigned int swsusp_header_flags;
-/**
- * swsusp_swap_check - check if the resume device is a swap device
- * and get its index (if so)
- *
- * This is called before saving image
- */
static int swsusp_swap_check(void)
{
	int res;

+	/*
+	 * Check if the resume device is a swap device and get its index (if so).
+	 * This is called before saving the image.
+	 */
if (swsusp_resume_device)
res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
else
@@ -362,13 +360,6 @@ static int swsusp_swap_check(void)
return 0;
}
-/**
- * write_page - Write one page to given swap location.
- * @buf: Address we're writing.
- * @offset: Offset of the swap page we're writing to.
- * @hb: bio completion batch
- */
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
@@ -519,17 +510,14 @@ static int swap_writer_finish(struct swap_map_handle *handle,
CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE (CMP_PAGES * PAGE_SIZE)
-/* Maximum number of threads for compression/decompression. */
-#define CMP_THREADS 3
+/* Default number of threads for compression/decompression. */
+#define CMP_THREADS 3
+
+static unsigned int hibernate_compression_threads = CMP_THREADS;
/* Minimum/maximum number of pages for read buffering. */
#define CMP_MIN_RD_PAGES 1024
#define CMP_MAX_RD_PAGES 8192
-/**
- * save_image - save the suspend image data
- */
static int save_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
@@ -585,10 +573,48 @@ struct crc_data {
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
-	size_t *unc_len[CMP_THREADS];     /* uncompressed lengths */
-	unsigned char *unc[CMP_THREADS];  /* uncompressed data */
+	size_t **unc_len;                 /* uncompressed lengths */
+	unsigned char **unc;              /* uncompressed data */
};
static struct crc_data *alloc_crc_data(int nr_threads)
{
struct crc_data *crc;
crc = kzalloc(sizeof(*crc), GFP_KERNEL);
if (!crc)
return NULL;
crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
if (!crc->unc)
goto err_free_crc;
crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
if (!crc->unc_len)
goto err_free_unc;
return crc;
err_free_unc:
kfree(crc->unc);
err_free_crc:
kfree(crc);
return NULL;
}
static void free_crc_data(struct crc_data *crc)
{
if (!crc)
return;
if (crc->thr)
kthread_stop(crc->thr);
kfree(crc->unc_len);
kfree(crc->unc);
kfree(crc);
}
/*
* CRC32 update function that runs in its own thread.
*/
@@ -671,12 +697,6 @@ static int compress_threadfn(void *data)
return 0;
}
-/**
- * save_compressed_image - Save the suspend image data after compression.
- * @handle: Swap map handle to use for saving the image.
- * @snapshot: Image to read data from.
- * @nr_to_write: Number of pages to save.
- */
static int save_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
@@ -703,7 +723,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
-	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+	nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
if (!page) {
@@ -719,7 +739,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -888,11 +908,7 @@ out_finish:
out_clean:
hib_finish_batch(&hb);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -908,13 +924,6 @@ out_clean:
return ret;
}
-/**
- * enough_swap - Make sure we have enough swap to save the image.
- *
- * Returns TRUE or FALSE after checking the total amount of swap
- * space available from the resume partition.
- */
static int enough_swap(unsigned int nr_pages)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
@@ -934,8 +943,9 @@ static int enough_swap(unsigned int nr_pages)
* them synced (in case something goes wrong) but we DO not want to mark
* filesystem clean: it is not. (And it does not matter, if we resume
* correctly, we'll mark system clean, anyway.)
*
* Return: 0 on success, negative error code on failure.
*/
int swsusp_write(unsigned int flags)
{
struct swap_map_handle handle;
@@ -1081,12 +1091,6 @@ static int swap_reader_finish(struct swap_map_handle *handle)
return 0;
}
-/**
- * load_image - load the image using the swap map handle
- * @handle and the snapshot handle @snapshot
- * (assume there are @nr_pages pages to load)
- */
static int load_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
@@ -1194,12 +1198,6 @@ static int decompress_threadfn(void *data)
return 0;
}
-/**
- * load_compressed_image - Load compressed image data and decompress it.
- * @handle: Swap map handle to use for loading data.
- * @snapshot: Image to copy uncompressed data into.
- * @nr_to_read: Number of pages to load.
- */
static int load_compressed_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
@@ -1227,7 +1225,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
* footprint.
*/
nr_threads = num_online_cpus() - 1;
-	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+	nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);
page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
if (!page) {
@@ -1243,7 +1241,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
goto out_clean;
}
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
if (!crc) {
pr_err("Failed to allocate crc\n");
ret = -ENOMEM;
@@ -1510,11 +1508,7 @@ out_clean:
hib_finish_batch(&hb);
for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
if (data) {
for (thr = 0; thr < nr_threads; thr++) {
if (data[thr].thr)
@@ -1533,8 +1527,9 @@ out_clean:
* swsusp_read - read the hibernation image.
* @flags_p: flags passed by the "frozen" kernel in the image header should
* be written into this memory location
*
* Return: 0 on success, negative error code on failure.
*/
int swsusp_read(unsigned int *flags_p)
{
int error;
@@ -1571,8 +1566,9 @@ static void *swsusp_holder;
/**
* swsusp_check - Open the resume device and check for the swsusp signature.
* @exclusive: Open the resume device exclusively.
*
* Return: 0 if a valid image is found, negative error code otherwise.
*/
int swsusp_check(bool exclusive)
{
void *holder = exclusive ? &swsusp_holder : NULL;
@@ -1635,8 +1631,9 @@ void swsusp_close(void)
/**
* swsusp_unmark - Unmark swsusp signature in the resume device
*
* Return: 0 on success, negative error code on failure.
*/
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
@@ -1662,8 +1659,46 @@ int swsusp_unmark(void)
}
#endif
static ssize_t hibernate_compression_threads_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%d\n", hibernate_compression_threads);
}
static ssize_t hibernate_compression_threads_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val < 1)
return -EINVAL;
hibernate_compression_threads = val;
return n;
}
power_attr(hibernate_compression_threads);
static struct attribute *g[] = {
&hibernate_compression_threads_attr.attr,
NULL,
};
static const struct attribute_group attr_group = {
.attrs = g,
};
static int __init swsusp_header_init(void)
{
int error;
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
return -ENOMEM;
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
panic("Could not allocate memory for swsusp_header\n");
@@ -1671,3 +1706,19 @@ static int __init swsusp_header_init(void)
}
core_initcall(swsusp_header_init);
static int __init hibernate_compression_threads_setup(char *str)
{
int rc = kstrtouint(str, 0, &hibernate_compression_threads);
if (rc)
return rc;
if (hibernate_compression_threads < 1)
hibernate_compression_threads = CMP_THREADS;
return 1;
}
__setup("hibernate_compression_threads=", hibernate_compression_threads_setup);