mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-14 06:35:12 +00:00
fbaa38214c
The CDAT exposed in sysfs differs between little endian and big endian
arches: On big endian, every 4 bytes are byte-swapped.
PCI Configuration Space is little endian (PCI r3.0 sec 6.1). Accessors
such as pci_read_config_dword() implicitly swap bytes on big endian.
That way, the macros in include/uapi/linux/pci_regs.h work regardless of
the arch's endianness. For an example of implicit byte-swapping, see
ppc4xx_pciex_read_config(), which calls in_le32(), which uses lwbrx
(Load Word Byte-Reverse Indexed).
DOE Read/Write Data Mailbox Registers are unlike other registers in
Configuration Space in that they contain or receive a 4 byte portion of
an opaque byte stream (a "Data Object" per PCIe r6.0 sec 7.9.24.5f).
They need to be copied to or from the request/response buffer verbatim.
So amend pci_doe_send_req() and pci_doe_recv_resp() to undo the implicit
byte-swapping.
The CXL_DOE_TABLE_ACCESS_* and PCI_DOE_DATA_OBJECT_DISC_* macros assume
implicit byte-swapping. Byte-swap requests after constructing them with
those macros and byte-swap responses before parsing them.
Change the request and response type to __le32 to avoid sparse warnings.
Per a request from Jonathan, replace sizeof(u32) with sizeof(__le32) for
consistency.
Fixes: c97006046c ("cxl/port: Read CDAT table")
Tested-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Lukas Wunner <lukas@wunner.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Cc: stable@vger.kernel.org # v6.0+
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/3051114102f41d19df3debbee123129118fc5e6d.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
81 lines · 2.4 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Object Exchange
 * PCIe r6.0, sec 6.30 DOE
 *
 * Copyright (C) 2021 Huawei
 *	Jonathan Cameron <Jonathan.Cameron@huawei.com>
 *
 * Copyright (C) 2022 Intel Corporation
 *	Ira Weiny <ira.weiny@intel.com>
 */

#ifndef LINUX_PCI_DOE_H
#define LINUX_PCI_DOE_H
|
struct pci_doe_protocol {
|
|
u16 vid;
|
|
u8 type;
|
|
};
|
|
|
|
struct pci_doe_mb;
|
|
|
|
/**
|
|
* struct pci_doe_task - represents a single query/response
|
|
*
|
|
* @prot: DOE Protocol
|
|
* @request_pl: The request payload
|
|
* @request_pl_sz: Size of the request payload (bytes)
|
|
* @response_pl: The response payload
|
|
* @response_pl_sz: Size of the response payload (bytes)
|
|
* @rv: Return value. Length of received response or error (bytes)
|
|
* @complete: Called when task is complete
|
|
* @private: Private data for the consumer
|
|
* @work: Used internally by the mailbox
|
|
* @doe_mb: Used internally by the mailbox
|
|
*
|
|
* Payloads are treated as opaque byte streams which are transmitted verbatim,
|
|
* without byte-swapping. If payloads contain little-endian register values,
|
|
* the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
|
|
*
|
|
* The payload sizes and rv are specified in bytes with the following
|
|
* restrictions concerning the protocol.
|
|
*
|
|
* 1) The request_pl_sz must be a multiple of double words (4 bytes)
|
|
* 2) The response_pl_sz must be >= a single double word (4 bytes)
|
|
* 3) rv is returned as bytes but it will be a multiple of double words
|
|
*
|
|
* NOTE there is no need for the caller to initialize work or doe_mb.
|
|
*/
|
|
struct pci_doe_task {
|
|
struct pci_doe_protocol prot;
|
|
__le32 *request_pl;
|
|
size_t request_pl_sz;
|
|
__le32 *response_pl;
|
|
size_t response_pl_sz;
|
|
int rv;
|
|
void (*complete)(struct pci_doe_task *task);
|
|
void *private;
|
|
|
|
/* No need for the user to initialize these fields */
|
|
struct work_struct work;
|
|
struct pci_doe_mb *doe_mb;
|
|
};
|
|
|
|
/**
 * pci_doe_for_each_off - Iterate each DOE capability
 * @pdev: struct pci_dev to iterate
 * @off: u16 of config space offset of each mailbox capability found
 *
 * Walks every DOE extended capability in @pdev's config space; the loop
 * body runs once per mailbox with @off set to that capability's offset.
 */
#define pci_doe_for_each_off(pdev, off) \
	for (off = pci_find_next_ext_capability(pdev, off, \
					PCI_EXT_CAP_ID_DOE); \
		off > 0; \
		off = pci_find_next_ext_capability(pdev, off, \
					PCI_EXT_CAP_ID_DOE))
|
|
|
|
struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
|
|
bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
|
|
int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
|
|
|
|
#endif
|