Merge branch 'acpi-pm'

* acpi-pm:
  ACPI / PM: Fix acpi_pm_notifier_lock vs flush_workqueue() deadlock
  ACPI / LPSS: Consolidate runtime PM and system sleep handling
  ACPI / PM: Combine device suspend routines
  ACPI / LPIT: Add Low Power Idle Table (LPIT) support
  ACPI / PM: Split code validating need for runtime resume in ->prepare()
  ACPI / PM: Restore acpi_subsys_complete()
  ACPI / PM: Combine two identical device resume routines
  ACPI / PM: Remove stale function header
Rafael J. Wysocki 2017-11-13 01:41:11 +01:00
commit 794c33555f
11 changed files with 348 additions and 167 deletions

Documentation/acpi/lpit.txt (new file)

@@ -0,0 +1,25 @@
To enumerate platform Low Power Idle states, Intel platforms use the
“Low Power Idle Table” (LPIT). More details about this table can be
downloaded from:
http://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf
Residencies for each low power state can be read via FFH
(Functional Fixed Hardware) or a memory-mapped interface.
On platforms supporting S0ix sleep states, there can be two types of
residencies:
- CPU PKG C10 (Read via FFH interface)
- Platform Controller Hub (PCH) SLP_S0 (Read via memory mapped interface)
The following attributes are added dynamically to the cpuidle
sysfs attribute group:
/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
The "low_power_idle_cpu_residency_us" attribute shows time spent
by the CPU package in PKG C10.
The "low_power_idle_system_residency_us" attribute shows SLP_S0
residency, or system time spent with the SLP_S0# signal asserted.
This is the lowest possible system power state, achieved only when the CPU is
in PKG C10 and all functional blocks in the PCH are in a low power state.
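
As a quick illustration (not part of the table description or of this series), a
userspace program could read the two counters from the sysfs paths listed above.
Only the paths are taken from this document; the rest of the sketch is invented
for the example:

/* Sketch: read the LPIT residency counters exposed via cpuidle sysfs. */
#include <stdio.h>

static long long read_residency_us(const char *path)
{
	FILE *f = fopen(path, "r");
	long long us;

	if (!f || fscanf(f, "%lld", &us) != 1)
		us = -1;	/* attribute missing or unreadable */
	if (f)
		fclose(f);
	return us;
}

int main(void)
{
	printf("PKG C10 residency: %lld us\n", read_residency_us(
	       "/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us"));
	printf("SLP_S0 residency:  %lld us\n", read_residency_us(
	       "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us"));
	return 0;
}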

drivers/acpi/Kconfig

@@ -81,6 +81,11 @@ endif
config ACPI_SPCR_TABLE
	bool

config ACPI_LPIT
	bool
	depends on X86_64
	default y

config ACPI_SLEEP
	bool
	depends on SUSPEND || HIBERNATION

drivers/acpi/Makefile

@@ -57,6 +57,7 @@ acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o

drivers/acpi/acpi_lpit.c (new file, 162 lines)

@@ -0,0 +1,162 @@
/*
* acpi_lpit.c - LPIT table processing functions
*
* Copyright (C) 2017 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/

#include <linux/cpu.h>
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/tsc.h>

struct lpit_residency_info {
	struct acpi_generic_address gaddr;
	u64 frequency;
	void __iomem *iomem_addr;
};

/* Storage for memory-mapped and FFH-based entries */
static struct lpit_residency_info residency_info_mem;
static struct lpit_residency_info residency_info_ffh;

static int lpit_read_residency_counter_us(u64 *counter, bool io_mem)
{
	int err;

	if (io_mem) {
		u64 count = 0;
		int error;

		error = acpi_os_read_iomem(residency_info_mem.iomem_addr, &count,
					   residency_info_mem.gaddr.bit_width);
		if (error)
			return error;

		*counter = div64_u64(count * 1000000ULL, residency_info_mem.frequency);
		return 0;
	}

	err = rdmsrl_safe(residency_info_ffh.gaddr.address, counter);
	if (!err) {
		u64 mask = GENMASK_ULL(residency_info_ffh.gaddr.bit_offset +
				       residency_info_ffh.gaddr.bit_width - 1,
				       residency_info_ffh.gaddr.bit_offset);

		*counter &= mask;
		*counter >>= residency_info_ffh.gaddr.bit_offset;
		*counter = div64_u64(*counter * 1000000ULL, residency_info_ffh.frequency);
		return 0;
	}

	return -ENODATA;
}

static ssize_t low_power_idle_system_residency_us_show(struct device *dev,
							struct device_attribute *attr,
							char *buf)
{
	u64 counter;
	int ret;

	ret = lpit_read_residency_counter_us(&counter, true);
	if (ret)
		return ret;

	return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_system_residency_us);

static ssize_t low_power_idle_cpu_residency_us_show(struct device *dev,
						    struct device_attribute *attr,
						    char *buf)
{
	u64 counter;
	int ret;

	ret = lpit_read_residency_counter_us(&counter, false);
	if (ret)
		return ret;

	return sprintf(buf, "%llu\n", counter);
}
static DEVICE_ATTR_RO(low_power_idle_cpu_residency_us);

int lpit_read_residency_count_address(u64 *address)
{
	if (!residency_info_mem.gaddr.address)
		return -EINVAL;

	*address = residency_info_mem.gaddr.address;
	return 0;
}

static void lpit_update_residency(struct lpit_residency_info *info,
				  struct acpi_lpit_native *lpit_native)
{
	info->frequency = lpit_native->counter_frequency ?
				lpit_native->counter_frequency : tsc_khz * 1000;
	if (!info->frequency)
		info->frequency = 1;

	info->gaddr = lpit_native->residency_counter;
	if (info->gaddr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		info->iomem_addr = ioremap_nocache(info->gaddr.address,
						   info->gaddr.bit_width / 8);
		if (!info->iomem_addr)
			return;

		/* Silently fail, if cpuidle attribute group is not present */
		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
					&dev_attr_low_power_idle_system_residency_us.attr,
					"cpuidle");
	} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/* Silently fail, if cpuidle attribute group is not present */
		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
					&dev_attr_low_power_idle_cpu_residency_us.attr,
					"cpuidle");
	}
}

static void lpit_process(u64 begin, u64 end)
{
	while (begin + sizeof(struct acpi_lpit_native) < end) {
		struct acpi_lpit_native *lpit_native = (struct acpi_lpit_native *)begin;

		if (!lpit_native->header.type && !lpit_native->header.flags) {
			if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY &&
			    !residency_info_mem.gaddr.address) {
				lpit_update_residency(&residency_info_mem, lpit_native);
			} else if (lpit_native->residency_counter.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
				   !residency_info_ffh.gaddr.address) {
				lpit_update_residency(&residency_info_ffh, lpit_native);
			}
		}
		begin += lpit_native->header.length;
	}
}

void acpi_init_lpit(void)
{
	acpi_status status;
	u64 lpit_begin;
	struct acpi_table_lpit *lpit;

	status = acpi_get_table(ACPI_SIG_LPIT, 0, (struct acpi_table_header **)&lpit);

	if (ACPI_FAILURE(status))
		return;

	lpit_begin = (u64)lpit + sizeof(*lpit);
	lpit_process(lpit_begin, lpit_begin + lpit->header.length);
}
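
For context, lpit_read_residency_count_address() above is the hook other kernel
code can use to locate the memory-mapped SLP_S0 residency counter; it returns
-EINVAL until lpit_process() has recorded a memory-mapped entry. A minimal
sketch of a hypothetical in-kernel caller (example_report_slp_s0_counter() is
invented for illustration and is not part of this patch):

/* Hypothetical consumer of the helper exported above. */
static int example_report_slp_s0_counter(void)
{
	u64 addr;
	int ret;

	ret = lpit_read_residency_count_address(&addr);
	if (ret)
		return ret;	/* no memory-mapped LPIT entry found */

	pr_info("SLP_S0 residency counter at %#llx\n", (unsigned long long)addr);
	return 0;
}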

drivers/acpi/acpi_lpss.c

@@ -693,7 +693,7 @@ static int acpi_lpss_activate(struct device *dev)
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = acpi_dev_runtime_resume(dev);
ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -713,43 +713,9 @@ static int acpi_lpss_activate(struct device *dev)
static void acpi_lpss_dismiss(struct device *dev)
{
acpi_dev_runtime_suspend(dev);
acpi_dev_suspend(dev, false);
}
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = pm_generic_suspend_late(dev);
if (ret)
return ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
return acpi_dev_suspend_late(dev);
}
static int acpi_lpss_resume_early(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = acpi_dev_resume_early(dev);
if (ret)
return ret;
acpi_lpss_d3_to_d0_delay(pdata);
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_restore_ctx(dev, pdata);
return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP 0xA0
#define LPSS_IOSF_UNIT_LPIO1 0xAB
@@ -835,19 +801,15 @@ static void lpss_iosf_exit_d3_state(void)
mutex_unlock(&lpss_iosf_mutex);
}
static int acpi_lpss_runtime_suspend(struct device *dev)
static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = pm_generic_runtime_suspend(dev);
if (ret)
return ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_save_ctx(dev, pdata);
ret = acpi_dev_runtime_suspend(dev);
ret = acpi_dev_suspend(dev, wakeup);
/*
* This call must be last in the sequence, otherwise PMC will return
@@ -860,7 +822,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
return ret;
}
static int acpi_lpss_runtime_resume(struct device *dev)
static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -872,7 +834,7 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_runtime_resume(dev);
ret = acpi_dev_resume(dev);
if (ret)
return ret;
@@ -881,7 +843,37 @@ static int acpi_lpss_runtime_resume(struct device *dev)
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
acpi_lpss_restore_ctx(dev, pdata);
return pm_generic_runtime_resume(dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
int ret = pm_generic_suspend_late(dev);
return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_resume_early(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
return ret ? ret : acpi_lpss_suspend(dev, true);
}
static int acpi_lpss_runtime_resume(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
@@ -894,7 +886,7 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
.complete = pm_complete_with_resume_check,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
.resume_early = acpi_lpss_resume_early,

drivers/acpi/device_pm.c

@@ -387,6 +387,7 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
void acpi_pm_wakeup_event(struct device *dev)
{
@@ -443,24 +444,25 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
if (!dev && !func)
return AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_lock);
mutex_lock(&acpi_pm_notifier_install_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
adev->wakeup.context.dev = dev;
adev->wakeup.context.func = func;
adev->wakeup.flags.notifier_present = true;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_lock);
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -472,7 +474,7 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
mutex_lock(&acpi_pm_notifier_lock);
mutex_lock(&acpi_pm_notifier_install_lock);
if (!adev->wakeup.flags.notifier_present)
goto out;
@@ -483,14 +485,15 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
if (ACPI_FAILURE(status))
goto out;
mutex_lock(&acpi_pm_notifier_lock);
adev->wakeup.context.func = NULL;
adev->wakeup.context.dev = NULL;
wakeup_source_unregister(adev->wakeup.ws);
adev->wakeup.flags.notifier_present = false;
mutex_unlock(&acpi_pm_notifier_lock);
out:
mutex_unlock(&acpi_pm_notifier_lock);
mutex_unlock(&acpi_pm_notifier_install_lock);
return status;
}
@@ -847,47 +850,48 @@ static int acpi_dev_pm_full_power(struct acpi_device *adev)
}
/**
* acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
* acpi_dev_suspend - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
* @wakeup: Whether or not to enable wakeup for the device.
*
* Put the given device into a runtime low-power state using the standard ACPI
* Put the given device into a low-power state using the standard ACPI
* mechanism. Set up remote wakeup if desired, choose the state to put the
* device into (this checks if remote wakeup is expected to work too), and set
* the power state of the device.
*/
int acpi_dev_runtime_suspend(struct device *dev)
int acpi_dev_suspend(struct device *dev, bool wakeup)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
bool remote_wakeup;
u32 target_state = acpi_target_system_state();
int error;
if (!adev)
return 0;
remote_wakeup = acpi_device_can_wakeup(adev);
if (remote_wakeup) {
error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
if (wakeup && acpi_device_can_wakeup(adev)) {
error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return -EAGAIN;
} else {
wakeup = false;
}
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
if (error && remote_wakeup)
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error && wakeup)
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
EXPORT_SYMBOL_GPL(acpi_dev_suspend);
/**
* acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
* acpi_dev_resume - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
* mechanism at run time. Set the power state of the device to ACPI D0 and
* disable remote wakeup.
* mechanism. Set the power state of the device to ACPI D0 and disable wakeup.
*/
int acpi_dev_runtime_resume(struct device *dev)
int acpi_dev_resume(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
@@ -899,7 +903,7 @@ int acpi_dev_runtime_resume(struct device *dev)
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
EXPORT_SYMBOL_GPL(acpi_dev_resume);
/**
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
@@ -911,7 +915,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
int acpi_subsys_runtime_suspend(struct device *dev)
{
int ret = pm_generic_runtime_suspend(dev);
return ret ? ret : acpi_dev_runtime_suspend(dev);
return ret ? ret : acpi_dev_suspend(dev, true);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
@@ -924,68 +928,32 @@ EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
*/
int acpi_subsys_runtime_resume(struct device *dev)
{
int ret = acpi_dev_runtime_resume(dev);
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_runtime_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
#ifdef CONFIG_PM_SLEEP
/**
* acpi_dev_suspend_late - Put device into a low-power state using ACPI.
* @dev: Device to put into a low-power state.
*
* Put the given device into a low-power state during system transition to a
* sleep state using the standard ACPI mechanism. Set up system wakeup if
* desired, choose the state to put the device into (this checks if system
* wakeup is expected to work too), and set the power state of the device.
*/
int acpi_dev_suspend_late(struct device *dev)
static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u32 target_state;
bool wakeup;
int error;
u32 sys_target = acpi_target_system_state();
int ret, state;
if (!adev)
return 0;
if (device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
return true;
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
if (wakeup) {
error = acpi_device_wakeup_enable(adev, target_state);
if (error)
return error;
}
if (sys_target == ACPI_STATE_S0)
return false;
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error && wakeup)
acpi_device_wakeup_disable(adev);
if (adev->power.flags.dsw_present)
return true;
return error;
ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
if (ret)
return true;
return state != adev->power.state;
}
EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
/**
* acpi_dev_resume_early - Put device into the full-power state using ACPI.
* @dev: Device to put into the full-power state.
*
* Put the given device into the full-power state using the standard ACPI
* mechanism during system transition to the working state. Set the power
* state of the device to ACPI D0 and disable remote wakeup.
*/
int acpi_dev_resume_early(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int error;
if (!adev)
return 0;
error = acpi_dev_pm_full_power(adev);
acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
/**
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
@@ -994,29 +962,36 @@ EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
u32 sys_target;
int ret, state;
int ret;
ret = pm_generic_prepare(dev);
if (ret < 0)
return ret;
if (!adev || !pm_runtime_suspended(dev)
|| device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
if (!adev || !pm_runtime_suspended(dev))
return 0;
sys_target = acpi_target_system_state();
if (sys_target == ACPI_STATE_S0)
return 1;
if (adev->power.flags.dsw_present)
return 0;
ret = acpi_dev_pm_get_state(dev, adev, sys_target, NULL, &state);
return !ret && state == adev->power.state;
return !acpi_dev_needs_resume(dev, adev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
/**
* acpi_subsys_complete - Finalize device's resume during system resume.
* @dev: Device to handle.
*/
void acpi_subsys_complete(struct device *dev)
{
pm_generic_complete(dev);
/*
* If the device had been runtime-suspended before the system went into
* the sleep state it is going out of and it has never been resumed till
* now, resume it in case the firmware powered it up.
*/
if (dev->power.direct_complete && pm_resume_via_firmware())
pm_request_resume(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
/**
* acpi_subsys_suspend - Run the device driver's suspend callback.
* @dev: Device to handle.
@@ -1041,7 +1016,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend);
int acpi_subsys_suspend_late(struct device *dev)
{
int ret = pm_generic_suspend_late(dev);
return ret ? ret : acpi_dev_suspend_late(dev);
return ret ? ret : acpi_dev_suspend(dev, device_may_wakeup(dev));
}
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
@@ -1055,7 +1030,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
*/
int acpi_subsys_resume_early(struct device *dev)
{
int ret = acpi_dev_resume_early(dev);
int ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
@@ -1085,7 +1060,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.runtime_resume = acpi_subsys_runtime_resume,
#ifdef CONFIG_PM_SLEEP
.prepare = acpi_subsys_prepare,
.complete = pm_complete_with_resume_check,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_subsys_suspend_late,
.resume_early = acpi_subsys_resume_early,

drivers/acpi/internal.h

@@ -248,4 +248,10 @@ void acpi_watchdog_init(void);
static inline void acpi_watchdog_init(void) {}
#endif
#ifdef CONFIG_ACPI_LPIT
void acpi_init_lpit(void);
#else
static inline void acpi_init_lpit(void) { }
#endif
#endif /* _ACPI_INTERNAL_H_ */

drivers/acpi/osl.c

@@ -663,26 +663,8 @@ acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
bool unmap = false;
u64 dummy;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
if (!virt_addr) {
rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
if (!virt_addr)
return AE_BAD_ADDRESS;
unmap = true;
}
if (!value)
value = &dummy;
switch (width) {
case 8:
@@ -698,9 +680,37 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
*(u64 *) value = readq(virt_addr);
break;
default:
BUG();
return -EINVAL;
}
return 0;
}
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
void __iomem *virt_addr;
unsigned int size = width / 8;
bool unmap = false;
u64 dummy;
int error;
rcu_read_lock();
virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
if (!virt_addr) {
rcu_read_unlock();
virt_addr = acpi_os_ioremap(phys_addr, size);
if (!virt_addr)
return AE_BAD_ADDRESS;
unmap = true;
}
if (!value)
value = &dummy;
error = acpi_os_read_iomem(virt_addr, value, width);
BUG_ON(error);
if (unmap)
iounmap(virt_addr);
else

drivers/acpi/scan.c

@@ -2122,6 +2122,7 @@ int __init acpi_scan_init(void)
acpi_int340x_thermal_init();
acpi_amba_init();
acpi_watchdog_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);

include/acpi/acpiosxf.h

@@ -287,6 +287,8 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
/*
* Platform and hardware-independent physical memory interfaces
*/
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width);
#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_read_memory
acpi_status
acpi_os_read_memory(acpi_physical_address address, u64 *value, u32 width);

include/linux/acpi.h

@@ -864,21 +864,16 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
#endif
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
int acpi_dev_runtime_suspend(struct device *dev);
int acpi_dev_runtime_resume(struct device *dev);
int acpi_dev_suspend(struct device *dev, bool wakeup);
int acpi_dev_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
#else
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
return NULL;
}
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return -ENODEV;
@@ -887,7 +882,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
int acpi_dev_suspend_late(struct device *dev);
int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
void acpi_subsys_complete(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
@@ -895,7 +889,6 @@ int acpi_subsys_resume_early(struct device *dev);
int acpi_subsys_suspend(struct device *dev);
int acpi_subsys_freeze(struct device *dev);
#else
static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline void acpi_subsys_complete(struct device *dev) {}
@@ -1254,4 +1247,13 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
#endif
#ifdef CONFIG_ACPI_LPIT
int lpit_read_residency_count_address(u64 *address);
#else
static inline int lpit_read_residency_count_address(u64 *address)
{
return -EINVAL;
}
#endif
#endif /*_LINUX_ACPI_H*/