Merge branches 'acpi-pm', 'acpi-processor' and 'acpi-resources'

* acpi-pm:
  ACPI: PM: postpone bringing devices to D0 unless we need them
  ACPI: PM: Adjust behavior for field problems on AMD systems
  ACPI: PM: s2idle: Add support for new Microsoft UUID
  ACPI: PM: s2idle: Add support for multiple func mask
  ACPI: PM: s2idle: Refactor common code
  ACPI: PM: s2idle: Use correct revision id
  ACPI: power: Use dev_dbg() to print some messages
  ACPI: sleep: Fix acpi_pm_pre_suspend() kernel-doc
  ACPI: power: Rework turning off unused power resources
  ACPI: power: Save the last known state of each power resource
  ACPI: power: Use u8 as the power resource state data type
  ACPI: PM / fan: Put fan device IDs into separate header file
  ACPI: PM: s2idle: Add missing LPS0 functions for AMD

* acpi-processor:
  ACPI: processor_throttling: Fix several coding style issues
  ACPI: processor_throttling: Remove redundant initialization of 'obj'
  ACPI: processor idle: Fix up C-state latency if not ordered

* acpi-resources:
  ACPI: resources: Add checks for ACPI IRQ override
Rafael J. Wysocki 2021-06-29 15:47:29 +02:00
commit 2f4edfadbc
11 changed files with 266 additions and 131 deletions

drivers/acpi/device_pm.c

@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include "fan.h"
#include "internal.h"
/**
@@ -1133,19 +1134,48 @@ static int acpi_subsys_resume_noirq(struct device *dev)
*
* Use ACPI to put the given device into the full-power state and carry out the
* generic early resume procedure for it during system transition into the
* working state.
* working state, but only do that if device either defines early resume
* handler, or does not define power operations at all. Otherwise powering up
* of the device is postponed to the normal resume phase.
*/
static int acpi_subsys_resume_early(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret;
if (dev_pm_skip_resume(dev))
return 0;
if (pm && !pm->resume_early) {
dev_dbg(dev, "postponing D0 transition to normal resume stage\n");
return 0;
}
ret = acpi_dev_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
/**
* acpi_subsys_resume - Resume device using ACPI.
* @dev: Device to Resume.
*
* Use ACPI to put the given device into the full-power state if it has not been
* powered up during early resume phase, and carry out the generic resume
* procedure for it during system transition into the working state.
*/
static int acpi_subsys_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int ret = 0;
if (!dev_pm_skip_resume(dev) && pm && !pm->resume_early) {
dev_dbg(dev, "executing postponed D0 transition\n");
ret = acpi_dev_resume(dev);
}
return ret ? ret : pm_generic_resume(dev);
}
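For illustration, a minimal hypothetical driver (names are placeholders, not part of this patch) whose dev_pm_ops provides system-sleep callbacks but no ->resume_early: with the change above, acpi_subsys_resume_early() leaves such a device in its low-power state and acpi_subsys_resume() performs the D0 transition just before the driver's ->resume callback runs.

#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

/* No ->resume_early here, so the D0 transition is postponed to the
 * normal resume phase and happens right before foo_resume(). */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};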
/**
* acpi_subsys_freeze - Run the device driver's freeze callback.
* @dev: Device to handle.
@@ -1239,6 +1269,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.prepare = acpi_subsys_prepare,
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.resume = acpi_subsys_resume,
.suspend_late = acpi_subsys_suspend_late,
.suspend_noirq = acpi_subsys_suspend_noirq,
.resume_noirq = acpi_subsys_resume_noirq,
@@ -1310,10 +1341,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
* with the generic ACPI PM domain.
*/
static const struct acpi_device_id special_pm_ids[] = {
{"PNP0C0B", }, /* Generic ACPI fan */
{"INT3404", }, /* Fan */
{"INTC1044", }, /* Fan for Tiger Lake generation */
{"INTC1048", }, /* Fan for Alder Lake generation */
ACPI_FAN_DEVICE_IDS,
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);

drivers/acpi/fan.c

@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/sort.h>
#include "fan.h"
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
@@ -24,10 +26,7 @@ static int acpi_fan_probe(struct platform_device *pdev);
static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
{"INT3404", 0},
{"INTC1044", 0},
{"INTC1048", 0},
ACPI_FAN_DEVICE_IDS,
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);

drivers/acpi/fan.h (new file, 13 lines)

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ACPI fan device IDs are shared between the fan driver and the device power
* management code.
*
* Add new device IDs before the generic ACPI fan one.
*/
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */

drivers/acpi/internal.h

@@ -142,7 +142,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
void acpi_turn_off_unused_power_resources(bool init);
void acpi_turn_off_unused_power_resources(void);
/* --------------------------------------------------------------------------
Device Power Management

drivers/acpi/power.c

@@ -52,7 +52,7 @@ struct acpi_power_resource {
u32 system_level;
u32 order;
unsigned int ref_count;
unsigned int users;
u8 state;
bool wakeup_enabled;
struct mutex resource_lock;
struct list_head dependents;
@@ -173,8 +173,6 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
err = acpi_power_resources_list_add(rhandle, list);
if (err)
break;
to_power_resource(rdev)->users++;
}
if (err)
acpi_power_resources_list_free(list);
@@ -182,44 +180,54 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
return err;
}
static int acpi_power_get_state(acpi_handle handle, int *state)
static int __get_state(acpi_handle handle, u8 *state)
{
acpi_status status = AE_OK;
unsigned long long sta = 0;
if (!handle || !state)
return -EINVAL;
u8 cur_state;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
*state = (sta & 0x01)?ACPI_POWER_RESOURCE_STATE_ON:
ACPI_POWER_RESOURCE_STATE_OFF;
cur_state = sta & ACPI_POWER_RESOURCE_STATE_ON;
acpi_handle_debug(handle, "Power resource is %s\n",
*state ? "on" : "off");
cur_state ? "on" : "off");
*state = cur_state;
return 0;
}
static int acpi_power_get_list_state(struct list_head *list, int *state)
static int acpi_power_get_state(struct acpi_power_resource *resource, u8 *state)
{
if (resource->state == ACPI_POWER_RESOURCE_STATE_UNKNOWN) {
int ret;
ret = __get_state(resource->device.handle, &resource->state);
if (ret)
return ret;
}
*state = resource->state;
return 0;
}
static int acpi_power_get_list_state(struct list_head *list, u8 *state)
{
struct acpi_power_resource_entry *entry;
int cur_state;
u8 cur_state = ACPI_POWER_RESOURCE_STATE_OFF;
if (!list || !state)
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
int result;
mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(handle, &cur_state);
result = acpi_power_get_state(resource, &cur_state);
mutex_unlock(&resource->resource_lock);
if (result)
return result;
@@ -352,8 +360,12 @@ static int __acpi_power_on(struct acpi_power_resource *resource)
acpi_status status = AE_OK;
status = acpi_evaluate_object(resource->device.handle, "_ON", NULL, NULL);
if (ACPI_FAILURE(status))
if (ACPI_FAILURE(status)) {
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
}
resource->state = ACPI_POWER_RESOURCE_STATE_ON;
pr_debug("Power resource [%s] turned on\n", resource->name);
@@ -405,8 +417,12 @@ static int __acpi_power_off(struct acpi_power_resource *resource)
status = acpi_evaluate_object(resource->device.handle, "_OFF",
NULL, NULL);
if (ACPI_FAILURE(status))
if (ACPI_FAILURE(status)) {
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
return -ENODEV;
}
resource->state = ACPI_POWER_RESOURCE_STATE_OFF;
pr_debug("Power resource [%s] turned off\n", resource->name);
@@ -590,13 +606,12 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
int result;
int state;
u8 state;
mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(handle, &state);
result = acpi_power_get_state(resource, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
return result;
@@ -789,8 +804,8 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
{
u8 list_state = ACPI_POWER_RESOURCE_STATE_OFF;
int result = 0;
int list_state = 0;
int i = 0;
if (!device || !state)
@@ -919,7 +934,7 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
int state, result = -ENODEV;
int result;
acpi_bus_get_device(handle, &device);
if (device)
@@ -946,13 +961,9 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
resource->system_level = acpi_object.power_resource.system_level;
resource->order = acpi_object.power_resource.resource_order;
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
result = acpi_power_get_state(handle, &state);
if (result)
goto err;
pr_info("%s [%s] (%s)\n", acpi_device_name(device),
acpi_device_bid(device), state ? "on" : "off");
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
device->flags.match_driver = true;
result = acpi_device_add(device, acpi_release_power_resource);
@@ -979,11 +990,13 @@ void acpi_resume_power_resources(void)
mutex_lock(&power_resource_list_lock);
list_for_each_entry(resource, &acpi_power_resource_list, list_node) {
int result, state;
int result;
u8 state;
mutex_lock(&resource->resource_lock);
result = acpi_power_get_state(resource->device.handle, &state);
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
result = acpi_power_get_state(resource, &state);
if (result) {
mutex_unlock(&resource->resource_lock);
continue;
@@ -991,7 +1004,7 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
dev_info(&resource->device.dev, "Turning ON\n");
dev_dbg(&resource->device.dev, "Turning ON\n");
__acpi_power_on(resource);
}
@@ -1002,38 +1015,10 @@ void acpi_resume_power_resources(void)
}
#endif
static void acpi_power_turn_off_if_unused(struct acpi_power_resource *resource,
bool init)
{
if (resource->ref_count > 0)
return;
if (init) {
if (resource->users > 0)
return;
} else {
int result, state;
result = acpi_power_get_state(resource->device.handle, &state);
if (result || state == ACPI_POWER_RESOURCE_STATE_OFF)
return;
}
dev_info(&resource->device.dev, "Turning OFF\n");
__acpi_power_off(resource);
}
/**
* acpi_turn_off_unused_power_resources - Turn off power resources not in use.
* @init: Control switch.
*
* If @ainit is set, unconditionally turn off all of the ACPI power resources
* without any users.
*
* Otherwise, turn off all ACPI power resources without active references (that
* is, the ones that should be "off" at the moment) that are "on".
*/
void acpi_turn_off_unused_power_resources(bool init)
void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1042,7 +1027,16 @@ void acpi_turn_off_unused_power_resources(bool init)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
acpi_power_turn_off_if_unused(resource, init);
/*
* Turn off power resources in an unknown state too, because the
* platform firmware on some system expects the OS to turn off
* power resources without any users unconditionally.
*/
if (!resource->ref_count &&
resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
dev_dbg(&resource->device.dev, "Turning OFF\n");
__acpi_power_off(resource);
}
mutex_unlock(&resource->resource_lock);
}

drivers/acpi/processor_idle.c

@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@@ -384,10 +385,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
return;
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
const struct acpi_processor_cx *x = a, *y = b;
if (!(x->valid && y->valid))
return 0;
if (x->latency > y->latency)
return 1;
if (x->latency < y->latency)
return -1;
return 0;
}
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
struct acpi_processor_cx *x = a, *y = b;
u32 tmp;
if (!(x->valid && y->valid))
return;
tmp = x->latency;
x->latency = y->latency;
y->latency = tmp;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
unsigned int i;
unsigned int working = 0;
unsigned int last_latency = 0;
unsigned int last_type = 0;
bool buggy_latency = false;
pr->power.timer_broadcast_on_state = INT_MAX;
@@ -411,12 +439,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
}
if (!cx->valid)
continue;
if (cx->type >= last_type && cx->latency < last_latency)
buggy_latency = true;
last_latency = cx->latency;
last_type = cx->type;
lapic_timer_check_state(i, pr, cx);
tsc_check_state(cx->type);
working++;
}
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
sort(&pr->power.states[1], max_cstate,
sizeof(struct acpi_processor_cx),
acpi_cst_latency_cmp,
acpi_cst_latency_swap);
}
lapic_timer_propagate_broadcast(pr);
return (working);
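As a side note, here is a standalone user-space sketch (hypothetical latency values, not kernel code, and ignoring the ->valid handling) of what the acpi_cst_latency_cmp()/acpi_cst_latency_swap() pair above achieves: only the latency fields are exchanged, so each C-state keeps its type and slot while the latencies end up non-decreasing.

#include <stdio.h>

struct cx { int type; unsigned int latency; };

static void fixup_latencies(struct cx *s, int n)
{
	/* insertion sort over the latency field only, mirroring the
	 * cmp/swap pair passed to sort() in the hunk above */
	for (int i = 1; i < n; i++)
		for (int j = i; j > 0 && s[j - 1].latency > s[j].latency; j--) {
			unsigned int tmp = s[j - 1].latency;

			s[j - 1].latency = s[j].latency;
			s[j].latency = tmp;
		}
}

int main(void)
{
	/* hypothetical firmware table where C2 reports a larger latency than C3 */
	struct cx states[] = { { 1, 2 }, { 2, 120 }, { 3, 80 } };

	fixup_latencies(states, 3);
	for (int i = 0; i < 3; i++)
		printf("C%d latency %u us\n", states[i].type, states[i].latency);
	return 0;
}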

drivers/acpi/processor_throttling.c

@@ -6,7 +6,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
* Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* - Added processor hotplug support
* - Added processor hotplug support
*/
#include <linux/kernel.h>
@@ -195,15 +195,13 @@ void acpi_processor_throttling_init(void)
{
if (acpi_processor_update_tsd_coord())
pr_debug("Assume no T-state coordination\n");
return;
}
static int acpi_processor_throttling_notifier(unsigned long event, void *data)
{
struct throttling_tstate *p_tstate = data;
struct acpi_processor *pr;
unsigned int cpu ;
unsigned int cpu;
int target_state;
struct acpi_processor_limit *p_limit;
struct acpi_processor_throttling *p_throttling;
@@ -408,7 +406,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *ptc = NULL;
union acpi_object obj = { 0 };
union acpi_object obj;
struct acpi_processor_throttling *throttling;
status = acpi_evaluate_object(pr->handle, "_PTC", NULL, &buffer);
@@ -477,7 +475,7 @@ static int acpi_processor_get_throttling_control(struct acpi_processor *pr)
goto end;
}
end:
end:
kfree(buffer.pointer);
return result;
@@ -554,7 +552,7 @@ static int acpi_processor_get_throttling_states(struct acpi_processor *pr)
}
}
end:
end:
kfree(buffer.pointer);
return result;
@@ -639,7 +637,7 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
}
end:
end:
kfree(buffer.pointer);
return result;
}
@@ -717,7 +715,7 @@ static int acpi_throttling_rdmsr(u64 *value)
msr_low = 0;
msr_high = 0;
rdmsr_safe(MSR_IA32_THERM_CONTROL,
(u32 *)&msr_low , (u32 *) &msr_high);
(u32 *)&msr_low, (u32 *) &msr_high);
msr = (msr_high << 32) | msr_low;
*value = (u64) msr;
ret = 0;
@@ -1185,8 +1183,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
*/
if (acpi_processor_get_throttling_control(pr) ||
acpi_processor_get_throttling_states(pr) ||
acpi_processor_get_platform_limit(pr))
{
acpi_processor_get_platform_limit(pr)) {
pr->throttling.acpi_processor_get_throttling =
&acpi_processor_get_throttling_fadt;
pr->throttling.acpi_processor_set_throttling =
@@ -1246,7 +1243,7 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
goto end;
}
end:
end:
if (result)
pr->flags.throttling = 0;

drivers/acpi/resource.c

@@ -423,6 +423,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
static bool irq_is_legacy(struct acpi_resource_irq *irq)
{
return irq->triggering == ACPI_EDGE_SENSITIVE &&
irq->polarity == ACPI_ACTIVE_HIGH &&
irq->shareable == ACPI_EXCLUSIVE;
}
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@@ -461,7 +468,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
irq->shareable, true);
irq->shareable, irq_is_legacy(irq));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;

drivers/acpi/scan.c

@@ -2485,7 +2485,7 @@ int __init acpi_scan_init(void)
}
}
acpi_turn_off_unused_power_resources(true);
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;

drivers/acpi/sleep.c

@@ -406,7 +406,7 @@ static int acpi_pm_freeze(void)
}
/**
* acpi_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
* acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
*/
static int acpi_pm_pre_suspend(void)
{
@@ -504,7 +504,7 @@ static void acpi_pm_start(u32 acpi_state)
*/
static void acpi_pm_end(void)
{
acpi_turn_off_unused_power_resources(false);
acpi_turn_off_unused_power_resources();
acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a

drivers/acpi/x86/s2idle.c

@@ -32,6 +32,9 @@ static const struct acpi_device_id lps0_device_ids[] = {
{"", },
};
/* Microsoft platform agnostic UUID */
#define ACPI_LPS0_DSM_UUID_MICROSOFT "11e00d56-ce64-47ce-837b-1f898f9aa461"
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
@@ -39,15 +42,22 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
#define ACPI_LPS0_EXIT 6
#define ACPI_LPS0_MS_ENTRY 7
#define ACPI_LPS0_MS_EXIT 8
/* AMD */
#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
#define ACPI_LPS0_ENTRY_AMD 2
#define ACPI_LPS0_EXIT_AMD 3
#define ACPI_LPS0_SCREEN_OFF_AMD 4
#define ACPI_LPS0_SCREEN_ON_AMD 5
static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
static int lps0_dsm_func_mask;
static guid_t lps0_dsm_guid_microsoft;
static int lps0_dsm_func_mask_microsoft;
/* Device constraint entry structure */
struct lpi_device_info {
@@ -68,15 +78,7 @@ struct lpi_constraints {
int min_dstate;
};
/* AMD */
/* Device constraint entry structure */
struct lpi_device_info_amd {
int revision;
int count;
union acpi_object *package;
};
/* Constraint package structure */
/* AMD Constraint package structure */
struct lpi_device_constraint_amd {
char *name;
int enabled;
@@ -94,15 +96,15 @@ static void lpi_device_get_constraints_amd(void)
int i, j, k;
out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
rev_id, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
NULL, ACPI_TYPE_PACKAGE);
if (!out_obj)
return;
acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
out_obj ? "successful" : "failed");
if (!out_obj)
return;
for (i = 0; i < out_obj->package.count; i++) {
union acpi_object *package = &out_obj->package.elements[i];
@@ -315,14 +317,15 @@ static void lpi_check_constraints(void)
}
}
static void acpi_sleep_run_lps0_dsm(unsigned int func)
static void acpi_sleep_run_lps0_dsm(unsigned int func, unsigned int func_mask, guid_t dsm_guid)
{
union acpi_object *out_obj;
if (!(lps0_dsm_func_mask & (1 << func)))
if (!(func_mask & (1 << func)))
return;
out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
out_obj = acpi_evaluate_dsm(lps0_device_handle, &dsm_guid,
rev_id, func, NULL);
ACPI_FREE(out_obj);
acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
@@ -334,11 +337,33 @@ static bool acpi_s2idle_vendor_amd(void)
return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
}
static int validate_dsm(acpi_handle handle, const char *uuid, int rev, guid_t *dsm_guid)
{
union acpi_object *obj;
int ret = -EINVAL;
guid_parse(uuid, dsm_guid);
obj = acpi_evaluate_dsm(handle, dsm_guid, rev, 0, NULL);
/* Check if the _DSM is present and as expected. */
if (!obj || obj->type != ACPI_TYPE_BUFFER || obj->buffer.length == 0 ||
obj->buffer.length > sizeof(u32)) {
acpi_handle_debug(handle,
"_DSM UUID %s rev %d function 0 evaluation failed\n", uuid, rev);
goto out;
}
ret = *(int *)obj->buffer.pointer;
acpi_handle_debug(handle, "_DSM UUID %s rev %d function mask: 0x%x\n", uuid, rev, ret);
out:
ACPI_FREE(obj);
return ret;
}
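The value returned here is the bitmask from _DSM function 0; acpi_sleep_run_lps0_dsm() above only invokes function func when func_mask & (1 << func) is set. As a worked example with a hypothetical mask, 0x60 (binary 0110 0000) would allow only ACPI_LPS0_ENTRY (function 5, bit 5 = 0x20) and ACPI_LPS0_EXIT (function 6, bit 6 = 0x40).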
static int lps0_device_attach(struct acpi_device *adev,
const struct acpi_device_id *not_used)
{
union acpi_object *out_obj;
if (lps0_device_handle)
return 0;
@@ -346,28 +371,36 @@ static int lps0_device_attach(struct acpi_device *adev,
return 0;
if (acpi_s2idle_vendor_amd()) {
guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
/* AMD0004, AMDI0005:
* - Should use rev_id 0x0
* - function mask > 0x3: Should use AMD method, but has off by one bug
* - function mask = 0x3: Should use Microsoft method
* AMDI0006:
* - should use rev_id 0x0
* - function mask = 0x3: Should use Microsoft method
*/
const char *hid = acpi_device_hid(adev);
rev_id = 0;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
lps0_dsm_func_mask_microsoft = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID_MICROSOFT, rev_id,
&lps0_dsm_guid_microsoft);
if (lps0_dsm_func_mask > 0x3 && (!strcmp(hid, "AMD0004") ||
!strcmp(hid, "AMDI0005"))) {
lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
}
} else {
guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
rev_id = 1;
lps0_dsm_func_mask = validate_dsm(adev->handle,
ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
lps0_dsm_func_mask_microsoft = -EINVAL;
}
/* Check if the _DSM is present and as expected. */
if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
acpi_handle_debug(adev->handle,
"_DSM function 0 evaluation failed\n");
return 0;
}
lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
ACPI_FREE(out_obj);
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
lps0_dsm_func_mask);
if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
return 0; //function evaluation failed
lps0_device_handle = adev->handle;
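To make the AMD0004/AMDI0005 off-by-one workaround above concrete with a hypothetical value: a reported function mask of 0x3f becomes (0x3f << 1) | 0x1 == 0x7f, i.e. every advertised function bit moves up by one position and bit 0 stays set.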
@@ -406,11 +439,23 @@ int acpi_s2idle_prepare_late(void)
if (pm_debug_messages_on)
lpi_check_constraints();
if (acpi_s2idle_vendor_amd()) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
} else if (acpi_s2idle_vendor_amd()) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
} else {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF,
lps0_dsm_func_mask, lps0_dsm_guid);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
lps0_dsm_func_mask, lps0_dsm_guid);
}
return 0;
@@ -421,11 +466,23 @@ void acpi_s2idle_restore_early(void)
if (!lps0_device_handle || sleep_no_lps0)
return;
if (acpi_s2idle_vendor_amd()) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
if (lps0_dsm_func_mask_microsoft > 0) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
} else if (acpi_s2idle_vendor_amd()) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD,
lps0_dsm_func_mask, lps0_dsm_guid);
} else {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
lps0_dsm_func_mask, lps0_dsm_guid);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
lps0_dsm_func_mask, lps0_dsm_guid);
}
}