// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: exregion - ACPI default op_region (address space) handlers
 *
 * Copyright (C) 2000 - 2022, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exregion")

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_memory_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the System Memory address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_system_memory_space_handler(u32 function,
				    acpi_physical_address address,
				    u32 bit_width,
				    u64 *value,
				    void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	void *logical_addr_ptr = NULL;
	struct acpi_mem_space_context *mem_info = region_context;
	struct acpi_mem_mapping *mm = mem_info->cur_mm;
	u32 length;
	acpi_size map_length;
	acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
	u32 remainder;
#endif

	ACPI_FUNCTION_TRACE(ex_system_memory_space_handler);

	/* Validate and translate the bit width */

	switch (bit_width) {
	case 8:

		length = 1;
		break;

	case 16:

		length = 2;
		break;

	case 32:

		length = 4;
		break;

	case 64:

		length = 8;
		break;

	default:

		ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
			    bit_width));
		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
	}

#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
	/*
	 * Hardware does not support non-aligned data transfers, we must verify
	 * the request.
	 */
	(void)acpi_ut_short_divide((u64) address, length, NULL, &remainder);
	if (remainder != 0) {
		return_ACPI_STATUS(AE_AML_ALIGNMENT);
	}
#endif
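
	/*
	 * Memory op-regions are not mapped in their entirety: at most one
	 * page is mapped per access (see below). All mappings created here
	 * are kept on the region's list (first_mm) until the region goes
	 * away, with cur_mm caching the most recently used mapping.
	 */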

	/*
	 * Does the request fit into the cached memory mapping?
	 * Is 1) Address below the current mapping? OR
	 *    2) Address beyond the current mapping?
	 */
	if (!mm || (address < mm->physical_address) ||
	    ((u64) address + length > (u64) mm->physical_address + mm->length)) {
		/*
		 * The request cannot be resolved by the current memory mapping.
		 *
		 * Look for an existing saved mapping covering the address range
		 * at hand. If found, save it as the current one and carry out
		 * the access.
		 */
		for (mm = mem_info->first_mm; mm; mm = mm->next_mm) {
			if (mm == mem_info->cur_mm)
				continue;

			if (address < mm->physical_address)
				continue;

			if ((u64) address + length >
			    (u64) mm->physical_address + mm->length)
				continue;

			mem_info->cur_mm = mm;
			goto access;
		}

		/* Create a new mappings list entry */

		mm = ACPI_ALLOCATE_ZEROED(sizeof(*mm));
		if (!mm) {
			ACPI_ERROR((AE_INFO,
				    "Unable to save memory mapping at 0x%8.8X%8.8X, size %u",
				    ACPI_FORMAT_UINT64(address), length));
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/*
		 * October 2009: Attempt to map from the requested address to the
		 * end of the region. However, we will never map more than one
		 * page, nor will we cross a page boundary.
		 */
		map_length = (acpi_size)
		    ((mem_info->address + mem_info->length) - address);

		/*
		 * If mapping the entire remaining portion of the region will cross
		 * a page boundary, just map up to the page boundary, do not cross.
		 * On some systems, crossing a page boundary while mapping regions
		 * can cause warnings if the pages have different attributes
		 * due to resource management.
		 *
		 * This has the added benefit of constraining a single mapping to
		 * one page, which is similar to the original code that used a 4k
		 * maximum window.
		 */
		page_boundary_map_length = (acpi_size)
		    (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
		if (page_boundary_map_length == 0) {
			page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
		}

		if (map_length > page_boundary_map_length) {
			map_length = page_boundary_map_length;
		}

		/* Create a new mapping starting at the address given */

		logical_addr_ptr = acpi_os_map_memory(address, map_length);
		if (!logical_addr_ptr) {
			ACPI_ERROR((AE_INFO,
				    "Could not map memory at 0x%8.8X%8.8X, size %u",
				    ACPI_FORMAT_UINT64(address),
				    (u32)map_length));
			ACPI_FREE(mm);
			return_ACPI_STATUS(AE_NO_MEMORY);
		}

		/* Save the physical address and mapping size */

		mm->logical_address = logical_addr_ptr;
		mm->physical_address = address;
		mm->length = map_length;

		/*
		 * Add the new entry to the mappings list and save it as the
		 * current mapping.
		 */
		mm->next_mm = mem_info->first_mm;
		mem_info->first_mm = mm;

		mem_info->cur_mm = mm;
	}

access:
	/*
	 * Generate a logical pointer corresponding to the address we want to
	 * access
	 */
	logical_addr_ptr = mm->logical_address +
	    ((u64) address - (u64) mm->physical_address);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
			  bit_width, function, ACPI_FORMAT_UINT64(address)));

	/*
	 * Perform the memory read or write
	 *
	 * Note: For machines that do not support non-aligned transfers, the target
	 * address was checked for alignment above. We do not attempt to break the
	 * transfer up into smaller (byte-size) chunks because the AML specifically
	 * asked for a transfer width that the hardware may require.
	 */
	switch (function) {
	case ACPI_READ:

		*value = 0;
		switch (bit_width) {
		case 8:

			*value = (u64)ACPI_GET8(logical_addr_ptr);
			break;

		case 16:

			*value = (u64)ACPI_GET16(logical_addr_ptr);
			break;

		case 32:

			*value = (u64)ACPI_GET32(logical_addr_ptr);
			break;

		case 64:

			*value = (u64)ACPI_GET64(logical_addr_ptr);
			break;

		default:

			/* bit_width was already validated */

			break;
		}
		break;

	case ACPI_WRITE:

		switch (bit_width) {
		case 8:

			ACPI_SET8(logical_addr_ptr, *value);
			break;

		case 16:

			ACPI_SET16(logical_addr_ptr, *value);
			break;

		case 32:

			ACPI_SET32(logical_addr_ptr, *value);
			break;

		case 64:

			ACPI_SET64(logical_addr_ptr, *value);
			break;

		default:

			/* bit_width was already validated */

			break;
		}
		break;

	default:

		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_system_io_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the System IO address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_system_io_space_handler(u32 function,
				acpi_physical_address address,
				u32 bit_width,
				u64 *value,
				void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	u32 value32;

	ACPI_FUNCTION_TRACE(ex_system_io_space_handler);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
			  bit_width, function, ACPI_FORMAT_UINT64(address)));

	/* Decode the function parameter */

	switch (function) {
	case ACPI_READ:

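		/* acpi_hw_read_port() reads at most 32 bits; widen into the 64-bit *value */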
		status = acpi_hw_read_port((acpi_io_address)address,
					   &value32, bit_width);
		*value = value32;
		break;

	case ACPI_WRITE:

		status = acpi_hw_write_port((acpi_io_address)address,
					    (u32)*value, bit_width);
		break;

	default:

		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}

#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_pci_config_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the PCI Config address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_pci_config_space_handler(u32 function,
				 acpi_physical_address address,
				 u32 bit_width,
				 u64 *value,
				 void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;
	struct acpi_pci_id *pci_id;
	u16 pci_register;

	ACPI_FUNCTION_TRACE(ex_pci_config_space_handler);

	/*
	 * The arguments to acpi_os(Read|Write)pci_configuration are:
	 *
	 * pci_segment is the PCI bus segment range 0-31
	 * pci_bus is the PCI bus number range 0-255
	 * pci_device is the PCI device number range 0-31
	 * pci_function is the PCI device function number
	 * pci_register is the Config space register range 0-255 bytes
	 *
	 * value - input value for write, output address for read
	 *
	 */
	pci_id = (struct acpi_pci_id *)region_context;
	pci_register = (u16) (u32) address;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Pci-Config %u (%u) Seg(%04x) Bus(%04x) "
			  "Dev(%04x) Func(%04x) Reg(%04x)\n",
			  function, bit_width, pci_id->segment, pci_id->bus,
			  pci_id->device, pci_id->function, pci_register));

	switch (function) {
	case ACPI_READ:

		*value = 0;
		status =
		    acpi_os_read_pci_configuration(pci_id, pci_register, value,
						   bit_width);
		break;

	case ACPI_WRITE:

		status =
		    acpi_os_write_pci_configuration(pci_id, pci_register,
						    *value, bit_width);
		break;

	default:

		status = AE_BAD_PARAMETER;
		break;
	}

	return_ACPI_STATUS(status);
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_cmos_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the CMOS address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_cmos_space_handler(u32 function,
			   acpi_physical_address address,
			   u32 bit_width,
			   u64 *value,
			   void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_cmos_space_handler);

	return_ACPI_STATUS(status);
}

#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_pci_bar_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the PCI bar_target address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_pci_bar_space_handler(u32 function,
			      acpi_physical_address address,
			      u32 bit_width,
			      u64 *value,
			      void *handler_context, void *region_context)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ex_pci_bar_space_handler);

	return_ACPI_STATUS(status);
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_ex_data_table_space_handler
 *
 * PARAMETERS:  function            - Read or Write operation
 *              address             - Where in the space to read or write
 *              bit_width           - Field width in bits (8, 16, or 32)
 *              value               - Pointer to in or out value
 *              handler_context     - Pointer to Handler's context
 *              region_context      - Pointer to context specific to the
 *                                    accessed region
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Handler for the Data Table address space (Op Region)
 *
 ******************************************************************************/

acpi_status
acpi_ex_data_table_space_handler(u32 function,
				 acpi_physical_address address,
				 u32 bit_width,
				 u64 *value,
				 void *handler_context, void *region_context)
{
	struct acpi_data_table_space_context *mapping;
	char *pointer;

	ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
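
	/* Compute a pointer into the in-memory table corresponding to 'address' */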
	mapping = (struct acpi_data_table_space_context *) region_context;
	pointer = ACPI_CAST_PTR(char, mapping->pointer) +
	    (address - ACPI_PTR_TO_PHYSADDR(mapping->pointer));

	/*
	 * Perform the memory read or write. The bit_width was already
	 * validated.
	 */
	switch (function) {
	case ACPI_READ:

		memcpy(ACPI_CAST_PTR(char, value), pointer,
		       ACPI_DIV_8(bit_width));
		break;

	case ACPI_WRITE:

		memcpy(pointer, ACPI_CAST_PTR(char, value),
		       ACPI_DIV_8(bit_width));
		break;

	default:

		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}