From 7701c8bef4f14bd9f7940c6ed0e6a73584115a96 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 14 Apr 2023 11:53:55 -0700
Subject: [PATCH 01/38] cxl/hdm: Fail upon detecting 0-sized decoders

Decoders committed with 0-size lead to later crashes on shutdown as
__cxl_dpa_release() assumes a 'struct resource' has been established in
'cxlds->dpa_res'. Just fail the driver load in this instance since there
are deeper problems with the enumeration or the setup when this happens.

Fixes: 9c57cde0dcbd ("cxl/hdm: Enumerate allocated DPA")
Cc:
Reviewed-by: Dave Jiang
Reviewed-by: Alison Schofield
Link: https://lore.kernel.org/r/168149843516.792294.11872242648319572632.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/hdm.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 02cc2c38b44b..35b338b716fe 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -269,8 +269,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 
 	lockdep_assert_held_write(&cxl_dpa_rwsem);
 
-	if (!len)
-		goto success;
+	if (!len) {
+		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
+			 port->id, cxled->cxld.id);
+		return -EINVAL;
+	}
 
 	if (cxled->dpa_res) {
 		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
@@ -323,7 +326,6 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
 		cxled->mode = CXL_DECODER_MIXED;
 	}
 
-success:
 	port->hdm_end++;
 	get_device(&cxled->cxld.dev);
 	return 0;
@@ -833,6 +835,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 				port->id, cxld->id);
 			return -ENXIO;
 		}
+
+		if (size == 0) {
+			dev_warn(&port->dev,
+				 "decoder%d.%d: Committed with zero size\n",
+				 port->id, cxld->id);
+			return -ENXIO;
+		}
 		port->commit_end = cxld->id;
 	} else {
 		/* unless / until type-2 drivers arrive, assume type-3 */

From 1423885c84a5b3a53b79bcf241b18124d0d7cba6 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 14 Apr 2023 11:54:00 -0700
Subject: [PATCH 02/38] cxl/hdm: Use 4-byte reads to retrieve HDM decoder
 base+limit

The CXL specification mandates that 4-byte registers must be accessed
with 4-byte access cycles. CXL 3.0 8.2.3 "Component Register Layout and
Definition" states that the behavior is undefined if (2) 32-bit
registers are accessed as an 8-byte quantity. It turns out that at least
one hardware implementation is sensitive to this in practice. The @size
variable results in zero with:

    size = readq(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));

...and the correct size with:

    lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
    hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
    size = (hi << 32) + lo;

Fixes: d17d0540a0db ("cxl/core/hdm: Add CXL standard decoder enumeration to the core")
Cc:
Reviewed-by: Dave Jiang
Reviewed-by: Alison Schofield
Link: https://lore.kernel.org/r/168149844056.792294.8224490474529733736.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/hdm.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 35b338b716fe..6fdf7981ddc7 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-hi-lo.h>
 #include
 #include
 #include
@@ -785,8 +784,8 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			    int *target_map, void __iomem *hdm, int which,
 			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
 {
+	u64 size, base, skip, dpa_size, lo, hi;
 	struct cxl_endpoint_decoder *cxled;
-	u64 size, base, skip, dpa_size;
 	bool committed;
 	u32 remainder;
 	int i, rc;
@@ -801,8 +800,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			which, info);
 
 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
-	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
-	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
+	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
+	base = (hi << 32) + lo;
+	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
+	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
+	size = (hi << 32) + lo;
 	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
 	cxld->commit = cxl_decoder_commit;
 	cxld->reset = cxl_decoder_reset;
@@ -865,8 +868,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		return rc;
 
 	if (!info) {
-		target_list.value =
-			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
+		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
+		target_list.value = (hi << 32) + lo;
 		for (i = 0; i < cxld->interleave_ways; i++)
 			target_map[i] = target_list.target_id[i];
 
@@ -883,7 +887,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			port->id, cxld->id, size, cxld->interleave_ways);
 		return -ENXIO;
 	}
-	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
+	skip = (hi << 32) + lo;
 	cxled = to_cxl_endpoint_decoder(&cxld->dev);
 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
 	if (rc) {

From 104087a8aaf0f46d89376917eca977fad972cc93 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 14 Apr 2023 11:54:06 -0700
Subject: [PATCH 03/38] cxl/core: Drop unused io-64-nonatomic-lo-hi.h

After the discovery of a case where an implementation misbehaves with
register reads larger than the definition of the register, the other
usages of readq() were audited and found to be correct. However, some
cases were discovered where the io-64-nonatomic-lo-hi.h include is not
needed; delete those includes.

Reviewed-by: Dave Jiang
Reviewed-by: Alison Schofield
Link: https://lore.kernel.org/r/168149844596.792294.8273108394688012953.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/mbox.c | 1 -
 drivers/cxl/core/port.c | 1 -
 2 files changed, 2 deletions(-)

diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index f2addb457172..6198637cb0bb 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include
 #include
 #include
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 4d1f9c5b5029..b060757c4000 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright(c) 2020 Intel Corporation. All rights reserved. */
-#include <linux/io-64-nonatomic-lo-hi.h>
 #include
 #include
 #include

From 7bba261e0aa6e8e5f28a3a3def8338b6512534ee Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 14 Apr 2023 11:54:11 -0700
Subject: [PATCH 04/38] cxl/port: Scan single-target ports for decoders

Do not assume that a single-target port falls back to a passthrough
decoder configuration. Scan for decoders and only fall back after
probing that the HDM decoder capability is not present.

One user-visible effect of this bug is the inability to enumerate
present CXL regions, as the decoder settings for the present decoders
are skipped.

Fixes: d17d0540a0db ("cxl/core/hdm: Add CXL standard decoder enumeration to the core")
Reported-by: Jonathan Cameron
Link: http://lore.kernel.org/r/20230227153128.8164-1-Jonathan.Cameron@huawei.com
Cc:
Reviewed-by: Jonathan Cameron
Reviewed-by: Dave Jiang
Reviewed-by: Alison Schofield
Link: https://lore.kernel.org/r/168149845130.792294.3210421233937427962.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/hdm.c |  5 +++--
 drivers/cxl/port.c     | 20 ++++++++++++++------
 2 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 6fdf7981ddc7..abe3877cfa63 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -92,8 +92,9 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
 
 	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
 	if (!map.component_map.hdm_decoder.valid) {
-		dev_err(&port->dev, "HDM decoder registers invalid\n");
-		return -ENXIO;
+		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+		/* unique error code to indicate no HDM decoder capability */
+		return -ENODEV;
 	}
 
 	return cxl_map_component_regs(&port->dev, regs, &map,
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 22a7ab2bae7c..eb57324c4ad4 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -66,14 +66,22 @@ static int cxl_switch_port_probe(struct cxl_port *port)
 	if (rc < 0)
 		return rc;
 
-	if (rc == 1)
-		return devm_cxl_add_passthrough_decoder(port);
-
 	cxlhdm = devm_cxl_setup_hdm(port, NULL);
-	if (IS_ERR(cxlhdm))
-		return PTR_ERR(cxlhdm);
+	if (!IS_ERR(cxlhdm))
+		return devm_cxl_enumerate_decoders(cxlhdm, NULL);
 
-	return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+	if (PTR_ERR(cxlhdm) != -ENODEV) {
+		dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+		return PTR_ERR(cxlhdm);
+	}
+
+	if (rc == 1) {
+		dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+		return devm_cxl_add_passthrough_decoder(port);
+	}
+
+	dev_err(&port->dev, "HDM decoder capability not found\n");
+	return -ENXIO;
 }
 
 static int cxl_endpoint_port_probe(struct cxl_port *port)

From c841ecd8277154c9297dd9ac959494f6deb61e76 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Fri, 14 Apr 2023 11:54:16 -0700
Subject: [PATCH 05/38] cxl/hdm: Add more HDM decoder debug messages at
 startup

A recent debug session yielded a couple of debug messages that were
useful for determining the reason why the driver was or was not falling
back to CXL range register emulation, and for identifying decoder
setting enumeration problems.

Reviewed-by: Dave Jiang
Reviewed-by: Alison Schofield
Link: https://lore.kernel.org/r/168149845668.792294.11814353796371419167.stgit@dwillia2-xfh.jf.intel.com
Signed-off-by: Dan Williams
---
 drivers/cxl/core/hdm.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index abe3877cfa63..7889ff203a34 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -130,6 +130,14 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 	 */
 	for (i = 0; i < cxlhdm->decoder_count; i++) {
 		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+		dev_dbg(&info->port->dev,
+			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
+			info->port->id, i,
+			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
+			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
+			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
+			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
+			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
 		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
 			return false;
 	}
@@ -868,6 +876,10 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 	if (rc)
 		return rc;
 
+	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
+		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
+		cxld->interleave_ways, cxld->interleave_granularity);
+
 	if (!info) {
 		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
 		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));

From 62e8b17ffc2ff0b0e29d5e05a18570c3e70b35ff Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:07 +0100
Subject: [PATCH 06/38] PCI/DOE: Provide synchronous API and use it internally

The DOE API only allows asynchronous exchanges and forces callers to
provide a completion callback. Yet all existing callers only perform
synchronous exchanges. Upcoming commits for CMA (Component Measurement
and Authentication, PCIe r6.0 sec 6.31) likewise require only
synchronous DOE exchanges.

Provide a synchronous pci_doe() API call which builds on the internal
asynchronous machinery. Convert the internal pci_doe_discovery() to the
new call.

The new API allows submission of const-declared requests, necessitating
the addition of a const qualifier in struct pci_doe_task.
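For illustration, a minimal sketch of how a caller performs a synchronous
exchange with the new API (example only, not part of the patch; the
function name is hypothetical and the payload mimics the discovery
protocol converted below):

    /*
     * Example: synchronous DOE discovery exchange.  "doe_mb" is assumed
     * to be a previously created mailbox; data object type 0 is the
     * PCI-SIG discovery protocol used internally by pci_doe_discovery().
     */
    static int example_doe_discovery(struct pci_doe_mb *doe_mb, u32 *payload)
    {
    	__le32 request = cpu_to_le32(0);	/* discovery index 0 */
    	__le32 response;
    	int rc;

    	/* Sleeps until the exchange completes; no callback needed */
    	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, 0,
    		     &request, sizeof(request),
    		     &response, sizeof(response));
    	if (rc < 0)
    		return rc;

    	/* rc is the number of response bytes actually received */
    	if (rc != sizeof(response))
    		return -EIO;

    	*payload = le32_to_cpu(response);
    	return 0;
    }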
Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Ira Weiny
Reviewed-by: Davidlohr Bueso
Reviewed-by: Dan Williams
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/0f444206da9615c56301fbaff459c0f45d27f122.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/pci/doe.c       | 69 ++++++++++++++++++++++++++++++++---------
 include/linux/pci-doe.h |  6 +++-
 2 files changed, 59 insertions(+), 16 deletions(-)

diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index e5e9b287b976..adde8c66499c 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -321,26 +321,15 @@ static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid,
 	__le32 request_pl_le = cpu_to_le32(request_pl);
 	__le32 response_pl_le;
 	u32 response_pl;
-	DECLARE_COMPLETION_ONSTACK(c);
-	struct pci_doe_task task = {
-		.prot.vid = PCI_VENDOR_ID_PCI_SIG,
-		.prot.type = PCI_DOE_PROTOCOL_DISCOVERY,
-		.request_pl = &request_pl_le,
-		.request_pl_sz = sizeof(request_pl),
-		.response_pl = &response_pl_le,
-		.response_pl_sz = sizeof(response_pl),
-		.complete = pci_doe_task_complete,
-		.private = &c,
-	};
 	int rc;
 
-	rc = pci_doe_submit_task(doe_mb, &task);
+	rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY,
+		     &request_pl_le, sizeof(request_pl_le),
+		     &response_pl_le, sizeof(response_pl_le));
 	if (rc < 0)
 		return rc;
 
-	wait_for_completion(&c);
-
-	if (task.rv != sizeof(response_pl))
+	if (rc != sizeof(response_pl_le))
 		return -EIO;
 
 	response_pl = le32_to_cpu(response_pl_le);
@@ -552,3 +541,53 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_doe_submit_task);
+
+/**
+ * pci_doe() - Perform Data Object Exchange
+ *
+ * @doe_mb: DOE Mailbox
+ * @vendor: Vendor ID
+ * @type: Data Object Type
+ * @request: Request payload
+ * @request_sz: Size of request payload (bytes)
+ * @response: Response payload
+ * @response_sz: Size of response payload (bytes)
+ *
+ * Submit @request to @doe_mb and store the @response.
+ * The DOE exchange is performed synchronously and may therefore sleep.
+ *
+ * Payloads are treated as opaque byte streams which are transmitted verbatim,
+ * without byte-swapping. If payloads contain little-endian register values,
+ * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
+ *
+ * RETURNS: Length of received response or negative errno.
+ * Received data in excess of @response_sz is discarded.
+ * The length may be smaller than @response_sz and the caller
+ * is responsible for checking that.
+ */
+int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
+	    const void *request, size_t request_sz,
+	    void *response, size_t response_sz)
+{
+	DECLARE_COMPLETION_ONSTACK(c);
+	struct pci_doe_task task = {
+		.prot.vid = vendor,
+		.prot.type = type,
+		.request_pl = request,
+		.request_pl_sz = request_sz,
+		.response_pl = response,
+		.response_pl_sz = response_sz,
+		.complete = pci_doe_task_complete,
+		.private = &c,
+	};
+	int rc;
+
+	rc = pci_doe_submit_task(doe_mb, &task);
+	if (rc)
+		return rc;
+
+	wait_for_completion(&c);
+
+	return task.rv;
+}
+EXPORT_SYMBOL_GPL(pci_doe);
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index 43765eaf2342..5dcd54f892e5 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -49,7 +49,7 @@ struct pci_doe_mb;
  */
 struct pci_doe_task {
 	struct pci_doe_protocol prot;
-	__le32 *request_pl;
+	const __le32 *request_pl;
 	size_t request_pl_sz;
 	__le32 *response_pl;
 	size_t response_pl_sz;
@@ -78,4 +78,8 @@ struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
 bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
 int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
 
+int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
+	    const void *request, size_t request_sz,
+	    void *response, size_t response_sz);
+
 #endif

From 58709b924ea5911b7d500bba9ed36b71e1e76598 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:08 +0100
Subject: [PATCH 07/38] cxl/pci: Use synchronous API for DOE

A synchronous API for DOE has just been introduced. Convert CXL CDAT
retrieval over to it.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ira Weiny
Reviewed-by: Jonathan Cameron
Cc: Dan Williams
Link: https://lore.kernel.org/r/c329c0a21c11c3b524ce2336b0bbb3c80a28c415.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/cxl/core/pci.c | 66 ++++++++++++++----------------------------
 1 file changed, 22 insertions(+), 44 deletions(-)

diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 523d5b9fd7fc..8575eaadd522 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -469,51 +469,26 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
 		     CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) |		\
 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
 
-static void cxl_doe_task_complete(struct pci_doe_task *task)
-{
-	complete(task->private);
-}
-
-struct cdat_doe_task {
-	__le32 request_pl;
-	__le32 response_pl[32];
-	struct completion c;
-	struct pci_doe_task task;
-};
-
-#define DECLARE_CDAT_DOE_TASK(req, cdt)                       \
-struct cdat_doe_task cdt = {                                  \
-	.c = COMPLETION_INITIALIZER_ONSTACK(cdt.c),           \
-	.request_pl = req,                                    \
-	.task = {                                             \
-		.prot.vid = PCI_DVSEC_VENDOR_ID_CXL,          \
-		.prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS,   \
-		.request_pl = &cdt.request_pl,                \
-		.request_pl_sz = sizeof(cdt.request_pl),      \
-		.response_pl = cdt.response_pl,               \
-		.response_pl_sz = sizeof(cdt.response_pl),    \
-		.complete = cxl_doe_task_complete,            \
-		.private = &cdt.c,                            \
-	}                                                     \
-}
-
 static int cxl_cdat_get_length(struct device *dev,
 			       struct pci_doe_mb *cdat_doe,
 			       size_t *length)
 {
-	DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t);
+	__le32 request = CDAT_DOE_REQ(0);
+	__le32 response[32];
 	int rc;
 
-	rc = pci_doe_submit_task(cdat_doe, &t.task);
+	rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
+		     &request, sizeof(request),
+		     &response, sizeof(response));
 	if (rc < 0) {
-		dev_err(dev, "DOE submit failed: %d", rc);
+		dev_err(dev, "DOE failed: %d", rc);
 		return rc;
 	}
-	wait_for_completion(&t.c);
-	if (t.task.rv < 2 * sizeof(__le32))
+	if (rc < 2 * sizeof(__le32))
 		return -EIO;
 
-	*length = le32_to_cpu(t.response_pl[1]);
+	*length = le32_to_cpu(response[1]);
 	dev_dbg(dev, "CDAT length %zu\n", *length);
 
 	return 0;
@@ -528,31 +503,34 @@ static int cxl_cdat_read_table(struct device *dev,
 	int entry_handle = 0;
 
 	do {
-		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+		__le32 request = CDAT_DOE_REQ(entry_handle);
 		struct cdat_entry_header *entry;
+		__le32 response[32];
 		size_t entry_dw;
 		int rc;
 
-		rc = pci_doe_submit_task(cdat_doe, &t.task);
+		rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
+			     &request, sizeof(request),
+			     &response, sizeof(response));
 		if (rc < 0) {
-			dev_err(dev, "DOE submit failed: %d", rc);
+			dev_err(dev, "DOE failed: %d", rc);
 			return rc;
 		}
-		wait_for_completion(&t.c);
 
 		/* 1 DW Table Access Response Header + CDAT entry */
-		entry = (struct cdat_entry_header *)(t.response_pl + 1);
+		entry = (struct cdat_entry_header *)(response + 1);
 		if ((entry_handle == 0 &&
-		     t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+		     rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
 		    (entry_handle > 0 &&
-		     (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
-		      t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
+		     (rc < sizeof(__le32) + sizeof(*entry) ||
+		      rc != sizeof(__le32) + le16_to_cpu(entry->length))))
 			return -EIO;
 
 		/* Get the CXL table access header entry handle */
 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
-					 le32_to_cpu(t.response_pl[0]));
-		entry_dw = t.task.rv / sizeof(__le32);
+					 le32_to_cpu(response[0]));
+		entry_dw = rc / sizeof(__le32);
 		/* Skip Header */
 		entry_dw -= 1;
 		entry_dw = min(length / sizeof(__le32), entry_dw);

From 0821ff8ed059d38dfc9bec2106c5cc53bcaa15b1 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:09 +0100
Subject: [PATCH 08/38] PCI/DOE: Make asynchronous API private

A synchronous API for DOE has just been introduced. CXL (the only
in-tree DOE user so far) was converted to use it instead of the
asynchronous API.

Consequently, pci_doe_submit_task() as well as the pci_doe_task struct
are only used internally, so make them private.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Ira Weiny
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/cc19544068483681e91dfe27545c2180cd09f931.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/pci/doe.c       | 45 ++++++++++++++++++++++++++++++++--
 include/linux/pci-doe.h | 48 -----------------------------------
 2 files changed, 43 insertions(+), 50 deletions(-)

diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index adde8c66499c..dfdec73f6abc 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -56,6 +56,47 @@ struct pci_doe_mb {
 	unsigned long flags;
 };
 
+struct pci_doe_protocol {
+	u16 vid;
+	u8 type;
+};
+
+/**
+ * struct pci_doe_task - represents a single query/response
+ *
+ * @prot: DOE Protocol
+ * @request_pl: The request payload
+ * @request_pl_sz: Size of the request payload (bytes)
+ * @response_pl: The response payload
+ * @response_pl_sz: Size of the response payload (bytes)
+ * @rv: Return value.  Length of received response or error (bytes)
+ * @complete: Called when task is complete
+ * @private: Private data for the consumer
+ * @work: Used internally by the mailbox
+ * @doe_mb: Used internally by the mailbox
+ *
+ * The payload sizes and rv are specified in bytes with the following
+ * restrictions concerning the protocol.
+ *
+ *  1) The request_pl_sz must be a multiple of double words (4 bytes)
+ *  2) The response_pl_sz must be >= a single double word (4 bytes)
+ *  3) rv is returned as bytes but it will be a multiple of double words
+ */
+struct pci_doe_task {
+	struct pci_doe_protocol prot;
+	const __le32 *request_pl;
+	size_t request_pl_sz;
+	__le32 *response_pl;
+	size_t response_pl_sz;
+	int rv;
+	void (*complete)(struct pci_doe_task *task);
+	void *private;
+
+	/* initialized by pci_doe_submit_task() */
+	struct work_struct work;
+	struct pci_doe_mb *doe_mb;
+};
+
 static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout)
 {
 	if (wait_event_timeout(doe_mb->wq,
@@ -519,7 +560,8 @@ EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
 *
 * RETURNS: 0 when task has been successfully queued, -ERRNO on error
 */
-int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
+static int pci_doe_submit_task(struct pci_doe_mb *doe_mb,
+			       struct pci_doe_task *task)
 {
 	if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type))
 		return -EINVAL;
@@ -540,7 +582,6 @@ int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task)
 	queue_work(doe_mb->work_queue, &task->work);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(pci_doe_submit_task);
 
 /**
  * pci_doe() - Perform Data Object Exchange
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index 5dcd54f892e5..7f16749c6aa3 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -13,55 +13,8 @@
 #ifndef LINUX_PCI_DOE_H
 #define LINUX_PCI_DOE_H
 
-struct pci_doe_protocol {
-	u16 vid;
-	u8 type;
-};
-
 struct pci_doe_mb;
 
-/**
- * struct pci_doe_task - represents a single query/response
- *
- * @prot: DOE Protocol
- * @request_pl: The request payload
- * @request_pl_sz: Size of the request payload (bytes)
- * @response_pl: The response payload
- * @response_pl_sz: Size of the response payload (bytes)
- * @rv: Return value.  Length of received response or error (bytes)
- * @complete: Called when task is complete
- * @private: Private data for the consumer
- * @work: Used internally by the mailbox
- * @doe_mb: Used internally by the mailbox
- *
- * Payloads are treated as opaque byte streams which are transmitted verbatim,
- * without byte-swapping. If payloads contain little-endian register values,
- * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu().
- *
- * The payload sizes and rv are specified in bytes with the following
- * restrictions concerning the protocol.
- *
- *  1) The request_pl_sz must be a multiple of double words (4 bytes)
- *  2) The response_pl_sz must be >= a single double word (4 bytes)
- *  3) rv is returned as bytes but it will be a multiple of double words
- *
- * NOTE there is no need for the caller to initialize work or doe_mb.
- */
-struct pci_doe_task {
-	struct pci_doe_protocol prot;
-	const __le32 *request_pl;
-	size_t request_pl_sz;
-	__le32 *response_pl;
-	size_t response_pl_sz;
-	int rv;
-	void (*complete)(struct pci_doe_task *task);
-	void *private;
-
-	/* No need for the user to initialize these fields */
-	struct work_struct work;
-	struct pci_doe_mb *doe_mb;
-};
-
 /**
  * pci_doe_for_each_off - Iterate each DOE capability
 * @pdev: struct pci_dev to iterate
@@ -76,7 +29,6 @@ struct pci_doe_task {
 
 struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
 bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
-int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task);
 
 int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
 	    const void *request, size_t request_sz,
 	    void *response, size_t response_sz);

From c8fc07abeba5cd14e88a2d77f4c4fed670da9492 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:10 +0100
Subject: [PATCH 09/38] PCI/DOE: Deduplicate mailbox flushing

When a DOE mailbox is torn down, its workqueue is flushed once in
pci_doe_flush_mb() through a call to flush_workqueue() and subsequently
flushed once more in pci_doe_destroy_workqueue() through a call to
destroy_workqueue().

Deduplicate by dropping flush_workqueue() from pci_doe_flush_mb().

Rename pci_doe_flush_mb() to pci_doe_cancel_tasks() to more aptly
describe what it now does.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/1f009f60b326d1c6d776641d4b20aff27de0c234.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/pci/doe.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index dfdec73f6abc..d9f3676bce29 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -429,7 +429,7 @@ static void pci_doe_destroy_workqueue(void *mb)
 	destroy_workqueue(doe_mb->work_queue);
 }
 
-static void pci_doe_flush_mb(void *mb)
+static void pci_doe_cancel_tasks(void *mb)
 {
 	struct pci_doe_mb *doe_mb = mb;
 
@@ -439,9 +439,6 @@ static void pci_doe_flush_mb(void *mb)
 	/* Cancel an in progress work item, if necessary */
 	set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags);
 	wake_up(&doe_mb->wq);
-
-	/* Flush all work items */
-	flush_workqueue(doe_mb->work_queue);
 }
 
 /**
@@ -498,9 +495,9 @@ struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
 
 	/*
	 * The state machine and the mailbox should be in sync now;
-	 * Set up mailbox flush prior to using the mailbox to query protocols.
+	 * Set up cancel tasks prior to using the mailbox to query protocols.
	 */
-	rc = devm_add_action_or_reset(dev, pci_doe_flush_mb, doe_mb);
+	rc = devm_add_action_or_reset(dev, pci_doe_cancel_tasks, doe_mb);
 	if (rc)
 		return ERR_PTR(rc);

From 022b66f38195f6760c193ceee18e0b676d9c9070 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:11 +0100
Subject: [PATCH 10/38] PCI/DOE: Allow mailbox creation without devres
 management

DOE mailbox creation is currently only possible through a devres-managed
API. The lifetime of mailboxes thus ends with driver unbinding.

An upcoming commit will create DOE mailboxes upon device enumeration by
the PCI core. Their lifetime shall not be limited by a driver.

Therefore rework pcim_doe_create_mb() into the non-devres-managed
pci_doe_create_mb(). Add pci_doe_destroy_mb() for mailbox destruction on
device removal.

Provide a devres-managed wrapper under the existing pcim_doe_create_mb()
name.
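In outline, the new split follows the usual devres wrapper idiom (a
condensed sketch of the wrapper added by this patch; the full version
appears in the diff below):

    struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
    {
    	struct pci_doe_mb *doe_mb;
    	int rc;

    	/* plain constructor, usable independently of driver binding */
    	doe_mb = pci_doe_create_mb(pdev, cap_offset);
    	if (IS_ERR(doe_mb))
    		return doe_mb;

    	/* destructor runs automatically on driver unbinding */
    	rc = devm_add_action_or_reset(&pdev->dev, pci_doe_destroy_mb, doe_mb);
    	if (rc)
    		return ERR_PTR(rc);

    	return doe_mb;
    }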
The error path of pcim_doe_create_mb() previously called xa_destroy() if
alloc_ordered_workqueue() failed. That's unnecessary because the xarray
is still empty at that point. It doesn't need to be destroyed until it's
been populated by pci_doe_cache_protocols(). Arrange the error path of
the new pci_doe_create_mb() accordingly.

pci_doe_cancel_tasks() is no longer used as callback for
devm_add_action(), so refactor it to accept a struct pci_doe_mb pointer
instead of a generic void pointer.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/7c9a63867d70233c5e9d26cd8bf956742cd6d650.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/pci/doe.c | 103 +++++++++++++++++++++++++++++-----------------
 1 file changed, 66 insertions(+), 37 deletions(-)

diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index d9f3676bce29..7539e72db5cb 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -37,7 +37,7 @@
 *
 * This state is used to manage a single DOE mailbox capability. All fields
 * should be considered opaque to the consumers and the structure passed into
- * the helpers below after being created by devm_pci_doe_create()
+ * the helpers below after being created by pci_doe_create_mb().
 *
 * @pdev: PCI device this mailbox belongs to
 * @cap_offset: Capability offset
@@ -415,24 +415,8 @@ static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb)
 	return 0;
 }
 
-static void pci_doe_xa_destroy(void *mb)
+static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb)
 {
-	struct pci_doe_mb *doe_mb = mb;
-
-	xa_destroy(&doe_mb->prots);
-}
-
-static void pci_doe_destroy_workqueue(void *mb)
-{
-	struct pci_doe_mb *doe_mb = mb;
-
-	destroy_workqueue(doe_mb->work_queue);
-}
-
-static void pci_doe_cancel_tasks(void *mb)
-{
-	struct pci_doe_mb *doe_mb = mb;
-
 	/* Stop all pending work items from starting */
 	set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags);
 
@@ -442,7 +426,7 @@ static void pci_doe_cancel_tasks(void *mb)
 }
 
 /**
- * pcim_doe_create_mb() - Create a DOE mailbox object
+ * pci_doe_create_mb() - Create a DOE mailbox object
 *
 * @pdev: PCI device to create the DOE mailbox for
 * @cap_offset: Offset of the DOE mailbox
@@ -453,24 +437,20 @@ static void pci_doe_cancel_tasks(void *mb)
 * RETURNS: created mailbox object on success
 *	    ERR_PTR(-errno) on failure
 */
-struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
+					    u16 cap_offset)
 {
 	struct pci_doe_mb *doe_mb;
-	struct device *dev = &pdev->dev;
 	int rc;
 
-	doe_mb = devm_kzalloc(dev, sizeof(*doe_mb), GFP_KERNEL);
+	doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL);
 	if (!doe_mb)
 		return ERR_PTR(-ENOMEM);
 
 	doe_mb->pdev = pdev;
 	doe_mb->cap_offset = cap_offset;
 	init_waitqueue_head(&doe_mb->wq);
-
 	xa_init(&doe_mb->prots);
-	rc = devm_add_action(dev, pci_doe_xa_destroy, doe_mb);
-	if (rc)
-		return ERR_PTR(rc);
 
 	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
 						     dev_driver_string(&pdev->dev),
@@ -479,36 +459,85 @@ struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
 	if (!doe_mb->work_queue) {
 		pci_err(pdev, "[%x] failed to allocate work queue\n",
 			doe_mb->cap_offset);
-		return ERR_PTR(-ENOMEM);
+		rc = -ENOMEM;
+		goto err_free;
 	}
-	rc = devm_add_action_or_reset(dev, pci_doe_destroy_workqueue, doe_mb);
-	if (rc)
-		return ERR_PTR(rc);
 
 	/* Reset the mailbox by issuing an abort */
 	rc = pci_doe_abort(doe_mb);
 	if (rc) {
 		pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n",
 			doe_mb->cap_offset, rc);
-		return ERR_PTR(rc);
+		goto err_destroy_wq;
 	}
 
 	/*
	 * The state machine and the mailbox should be in sync now;
-	 * Set up cancel tasks prior to using the mailbox to query protocols.
+	 * Use the mailbox to query protocols.
	 */
-	rc = devm_add_action_or_reset(dev, pci_doe_cancel_tasks, doe_mb);
-	if (rc)
-		return ERR_PTR(rc);
-
 	rc = pci_doe_cache_protocols(doe_mb);
 	if (rc) {
 		pci_err(pdev, "[%x] failed to cache protocols : %d\n",
 			doe_mb->cap_offset, rc);
-		return ERR_PTR(rc);
+		goto err_cancel;
 	}
 
 	return doe_mb;
+
+err_cancel:
+	pci_doe_cancel_tasks(doe_mb);
+	xa_destroy(&doe_mb->prots);
+err_destroy_wq:
+	destroy_workqueue(doe_mb->work_queue);
+err_free:
+	kfree(doe_mb);
+	return ERR_PTR(rc);
+}
+
+/**
+ * pci_doe_destroy_mb() - Destroy a DOE mailbox object
+ *
+ * @ptr: Pointer to DOE mailbox
+ *
+ * Destroy all internal data structures created for the DOE mailbox.
+ */
+static void pci_doe_destroy_mb(void *ptr)
+{
+	struct pci_doe_mb *doe_mb = ptr;
+
+	pci_doe_cancel_tasks(doe_mb);
+	xa_destroy(&doe_mb->prots);
+	destroy_workqueue(doe_mb->work_queue);
+	kfree(doe_mb);
+}
+
+/**
+ * pcim_doe_create_mb() - Create a DOE mailbox object
+ *
+ * @pdev: PCI device to create the DOE mailbox for
+ * @cap_offset: Offset of the DOE mailbox
+ *
+ * Create a single mailbox object to manage the mailbox protocol at the
+ * cap_offset specified. The mailbox will automatically be destroyed on
+ * driver unbinding from @pdev.
+ *
+ * RETURNS: created mailbox object on success
+ *	    ERR_PTR(-errno) on failure
+ */
+struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
+{
+	struct pci_doe_mb *doe_mb;
+	int rc;
+
+	doe_mb = pci_doe_create_mb(pdev, cap_offset);
+	if (IS_ERR(doe_mb))
+		return doe_mb;
+
+	rc = devm_add_action_or_reset(&pdev->dev, pci_doe_destroy_mb, doe_mb);
+	if (rc)
+		return ERR_PTR(rc);
+
+	return doe_mb;
 }
 EXPORT_SYMBOL_GPL(pcim_doe_create_mb);

From ac04840350e2c21a17d867b262a1586603b87a92 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:12 +0100
Subject: [PATCH 11/38] PCI/DOE: Create mailboxes on device enumeration

Currently a DOE instance cannot be shared by multiple drivers because
each driver creates its own pci_doe_mb struct for a given DOE instance.
For the same reason a DOE instance cannot be shared between the PCI core
and a driver.

Moreover, finding out which protocols a DOE instance supports requires
creating a pci_doe_mb for it. If a device has multiple DOE instances, a
driver looking for a specific protocol may need to create a pci_doe_mb
for each of the device's DOE instances and then destroy those which do
not support the desired protocol. That's obviously an inefficient way to
do things.

Overcome these issues by creating mailboxes in the PCI core on device
enumeration.

Provide a pci_find_doe_mailbox() API call to allow drivers to get a
pci_doe_mb for a given (pci_dev, vendor, protocol) triple. This API is
modeled after pci_find_capability() and can later be amended with a
pci_find_next_doe_mailbox() call to iterate over all mailboxes of a
given pci_dev which support a specific protocol.

On removal, destroy the mailboxes in pci_destroy_dev(), after the driver
is unbound. This allows drivers to use DOE in their ->remove() hook.

On surprise removal, cancel ongoing DOE exchanges and prevent new ones
from being scheduled. Thereby ensure that a hot-removed device doesn't
needlessly wait for a running exchange to time out.
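For instance, a driver that needs a specific protocol would henceforth
do roughly the following in its ->probe() path (a sketch; the CXL
vendor/type pair is just one example):

    struct pci_doe_mb *doe_mb;

    /* look up a mailbox created and owned by the PCI core */
    doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
    				  CXL_DOE_PROTOCOL_TABLE_ACCESS);
    if (!doe_mb)
    	return -ENODEV;	/* no DOE instance supports the protocol */

    /*
     * The mailbox stays valid until pci_destroy_dev(), so it may also
     * be used from the driver's ->remove() hook without bookkeeping.
     */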
Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Ira Weiny
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/40a6f973f72ef283d79dd55e7e6fddc7481199af.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/pci/doe.c       | 73 +++++++++++++++++++++++++++++++++++++++++
 drivers/pci/pci.h       | 11 +++++++
 drivers/pci/probe.c     |  1 +
 drivers/pci/remove.c    |  1 +
 include/linux/pci-doe.h |  2 ++
 include/linux/pci.h     |  3 ++
 6 files changed, 91 insertions(+)

diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 7539e72db5cb..9c577f5b3878 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -20,6 +20,8 @@
 #include
 #include
 
+#include "pci.h"
+
 #define PCI_DOE_PROTOCOL_DISCOVERY 0
 
 /* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */
@@ -658,3 +660,74 @@ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
 	return task.rv;
 }
 EXPORT_SYMBOL_GPL(pci_doe);
+
+/**
+ * pci_find_doe_mailbox() - Find Data Object Exchange mailbox
+ *
+ * @pdev: PCI device
+ * @vendor: Vendor ID
+ * @type: Data Object Type
+ *
+ * Find first DOE mailbox of a PCI device which supports the given protocol.
+ *
+ * RETURNS: Pointer to the DOE mailbox or NULL if none was found.
+ */
+struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
+					u8 type)
+{
+	struct pci_doe_mb *doe_mb;
+	unsigned long index;
+
+	xa_for_each(&pdev->doe_mbs, index, doe_mb)
+		if (pci_doe_supports_prot(doe_mb, vendor, type))
+			return doe_mb;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_find_doe_mailbox);
+
+void pci_doe_init(struct pci_dev *pdev)
+{
+	struct pci_doe_mb *doe_mb;
+	u16 offset = 0;
+	int rc;
+
+	xa_init(&pdev->doe_mbs);
+
+	while ((offset = pci_find_next_ext_capability(pdev, offset,
+						      PCI_EXT_CAP_ID_DOE))) {
+		doe_mb = pci_doe_create_mb(pdev, offset);
+		if (IS_ERR(doe_mb)) {
+			pci_err(pdev, "[%x] failed to create mailbox: %ld\n",
+				offset, PTR_ERR(doe_mb));
+			continue;
+		}
+
+		rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL);
+		if (rc) {
+			pci_err(pdev, "[%x] failed to insert mailbox: %d\n",
+				offset, rc);
+			pci_doe_destroy_mb(doe_mb);
+		}
+	}
+}
+
+void pci_doe_destroy(struct pci_dev *pdev)
+{
+	struct pci_doe_mb *doe_mb;
+	unsigned long index;
+
+	xa_for_each(&pdev->doe_mbs, index, doe_mb)
+		pci_doe_destroy_mb(doe_mb);
+
+	xa_destroy(&pdev->doe_mbs);
+}
+
+void pci_doe_disconnected(struct pci_dev *pdev)
+{
+	struct pci_doe_mb *doe_mb;
+	unsigned long index;
+
+	xa_for_each(&pdev->doe_mbs, index, doe_mb)
+		pci_doe_cancel_tasks(doe_mb);
+}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d2c08670a20e..815a4d2a41da 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -318,6 +318,16 @@ struct pci_sriov {
 	bool		drivers_autoprobe; /* Auto probing of VFs by driver */
 };
 
+#ifdef CONFIG_PCI_DOE
+void pci_doe_init(struct pci_dev *pdev);
+void pci_doe_destroy(struct pci_dev *pdev);
+void pci_doe_disconnected(struct pci_dev *pdev);
+#else
+static inline void pci_doe_init(struct pci_dev *pdev) { }
+static inline void pci_doe_destroy(struct pci_dev *pdev) { }
+static inline void pci_doe_disconnected(struct pci_dev *pdev) { }
+#endif
+
 /**
  * pci_dev_set_io_state - Set the new error state if possible.
 *
@@ -354,6 +364,7 @@ static inline bool pci_dev_set_io_state(struct pci_dev *dev,
 static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
 {
 	pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+	pci_doe_disconnected(dev);
 
 	return 0;
 }
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index a3f68b6ba6ac..02d2bf80eedb 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2479,6 +2479,7 @@ static void pci_init_capabilities(struct pci_dev *dev)
 	pci_aer_init(dev);		/* Advanced Error Reporting */
 	pci_dpc_init(dev);		/* Downstream Port Containment */
 	pci_rcec_init(dev);		/* Root Complex Event Collector */
+	pci_doe_init(dev);		/* Data Object Exchange */
 
 	pcie_report_downtraining(dev);
 	pci_init_reset_methods(dev);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 0145aef1b930..f25acf50879f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -39,6 +39,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
 	list_del(&dev->bus_list);
 	up_write(&pci_bus_sem);
 
+	pci_doe_destroy(dev);
 	pcie_aspm_exit_link_state(dev);
 	pci_bridge_d3_update(dev);
 	pci_free_resources(dev);
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index 7f16749c6aa3..d6192ee0ac07 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -29,6 +29,8 @@ struct pci_doe_mb;
 
 struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
 bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
+struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
+					u8 type);
 
 int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,
 	    const void *request, size_t request_sz,
 	    void *response, size_t response_sz);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b50e5c79f7e3..29d99530ec17 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -511,6 +511,9 @@ struct pci_dev {
 #endif
 #ifdef CONFIG_PCI_P2PDMA
 	struct pci_p2pdma __rcu *p2pdma;
+#endif
+#ifdef CONFIG_PCI_DOE
+	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
 #endif
 	u16		acs_cap;	/* ACS Capability offset */
 	phys_addr_t	rom;		/* Physical address if not from BAR */

From af0a6c3587dc39df348d23c46996b9dad46d07db Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:13 +0100
Subject: [PATCH 12/38] cxl/pci: Use CDAT DOE mailbox created by PCI core

The PCI core has just been amended to create a pci_doe_mb struct for
every DOE instance on device enumeration. Drop creation of a (duplicate)
CDAT DOE mailbox on cxl probing in favor of the one already created by
the PCI core.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ira Weiny
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/becaf70e8faf9681d474200117d62d7eaac46cca.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 drivers/cxl/core/pci.c | 27 +++++------------------
 drivers/cxl/cxlmem.h   |  3 ---
 drivers/cxl/pci.c      | 49 ------------------------------------------
 3 files changed, 5 insertions(+), 74 deletions(-)

diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 8575eaadd522..c868f4a2f1de 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -441,27 +441,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
 #define CXL_DOE_TABLE_ACCESS_LAST_ENTRY		0xffff
 #define CXL_DOE_PROTOCOL_TABLE_ACCESS 2
 
-static struct pci_doe_mb *find_cdat_doe(struct device *uport)
-{
-	struct cxl_memdev *cxlmd;
-	struct cxl_dev_state *cxlds;
-	unsigned long index;
-	void *entry;
-
-	cxlmd = to_cxl_memdev(uport);
-	cxlds = cxlmd->cxlds;
-
-	xa_for_each(&cxlds->doe_mbs, index, entry) {
-		struct pci_doe_mb *cur = entry;
-
-		if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL,
-					  CXL_DOE_PROTOCOL_TABLE_ACCESS))
-			return cur;
-	}
-
-	return NULL;
-}
-
 #define CDAT_DOE_REQ(entry_handle) cpu_to_le32				\
 	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE,			\
 		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) |		\
@@ -559,10 +538,14 @@ void read_cdat_data(struct cxl_port *port)
 	struct pci_doe_mb *cdat_doe;
 	struct device *dev = &port->dev;
 	struct device *uport = port->uport;
+	struct cxl_memdev *cxlmd = to_cxl_memdev(uport);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
 	size_t cdat_length;
 	int rc;
 
-	cdat_doe = find_cdat_doe(uport);
+	cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+					CXL_DOE_PROTOCOL_TABLE_ACCESS);
 	if (!cdat_doe) {
 		dev_dbg(dev, "No CDAT mailbox\n");
 		return;
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 090acebba4fa..001dabf0231b 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -249,7 +249,6 @@ struct cxl_event_state {
 * @component_reg_phys: register base of component registers
 * @info: Cached DVSEC information about the device.
 * @serial: PCIe Device Serial Number
- * @doe_mbs: PCI DOE mailbox array
 * @event: event log driver state
 * @mbox_send: @dev specific transport for transmitting mailbox commands
 *
@@ -287,8 +286,6 @@ struct cxl_dev_state {
 	resource_size_t component_reg_phys;
 	u64 serial;
 
-	struct xarray doe_mbs;
-
 	struct cxl_event_state event;
 
 	int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 60b23624d167..ea38bd49b0cf 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -8,7 +8,6 @@
 #include
 #include
 #include
-#include <linux/pci-doe.h>
 #include
 #include
 #include "cxlmem.h"
@@ -357,52 +356,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
 	return rc;
 }
 
-static void cxl_pci_destroy_doe(void *mbs)
-{
-	xa_destroy(mbs);
-}
-
-static void devm_cxl_pci_create_doe(struct cxl_dev_state *cxlds)
-{
-	struct device *dev = cxlds->dev;
-	struct pci_dev *pdev = to_pci_dev(dev);
-	u16 off = 0;
-
-	xa_init(&cxlds->doe_mbs);
-	if (devm_add_action(&pdev->dev, cxl_pci_destroy_doe, &cxlds->doe_mbs)) {
-		dev_err(dev, "Failed to create XArray for DOE's\n");
-		return;
-	}
-
-	/*
-	 * Mailbox creation is best effort. Higher layers must determine if
-	 * the lack of a mailbox for their protocol is a device failure or not.
-	 */
-	pci_doe_for_each_off(pdev, off) {
-		struct pci_doe_mb *doe_mb;
-
-		doe_mb = pcim_doe_create_mb(pdev, off);
-		if (IS_ERR(doe_mb)) {
-			dev_err(dev, "Failed to create MB object for MB @ %x\n",
-				off);
-			continue;
-		}
-
-		if (!pci_request_config_region_exclusive(pdev, off,
-							 PCI_DOE_CAP_SIZEOF,
-							 dev_name(dev)))
-			pci_err(pdev, "Failed to exclude DOE registers\n");
-
-		if (xa_insert(&cxlds->doe_mbs, off, doe_mb, GFP_KERNEL)) {
-			dev_err(dev, "xa_insert failed to insert MB @ %x\n",
-				off);
-			continue;
-		}
-
-		dev_dbg(dev, "Created DOE mailbox @%x\n", off);
-	}
-}
-
 /*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD
@@ -750,8 +703,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	cxlds->component_reg_phys = map.resource;
 
-	devm_cxl_pci_create_doe(cxlds);
-
 	rc = cxl_map_component_regs(&pdev->dev, &cxlds->regs.component,
 				    &map, BIT(CXL_CM_CAP_CAP_ID_RAS));
 	if (rc)

From 74e491e5d1bcc35a699291df720191760ff4130e Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:14 +0100
Subject: [PATCH 13/38] PCI/DOE: Make mailbox creation API private

The PCI core has just been amended to create a pci_doe_mb struct for
every DOE instance on device enumeration. CXL (the only in-tree DOE user
so far) has been migrated to use those mailboxes instead of creating its
own.

That leaves pcim_doe_create_mb() and pci_doe_for_each_off() without any
callers, so drop them.

pci_doe_supports_prot() is now only used internally, so declare it
static.

pci_doe_destroy_mb() is no longer used as callback for
devm_add_action(), so refactor it to accept a struct pci_doe_mb pointer
instead of a generic void pointer.

Because pci_doe_create_mb() is only called on device enumeration, i.e.
before driver binding, the workqueue name never contains a driver name.
So replace dev_driver_string() with dev_bus_name() when generating the
workqueue name.

Tested-by: Ira Weiny
Signed-off-by: Lukas Wunner
Reviewed-by: Ming Li
Reviewed-by: Ira Weiny
Reviewed-by: Jonathan Cameron
Acked-by: Bjorn Helgaas
Link: https://lore.kernel.org/r/64f614b6584982986c55d2c6229b4ee2b276dd59.1678543498.git.lukas@wunner.de
Signed-off-by: Dan Williams
---
 .clang-format           |  1 -
 drivers/pci/doe.c       | 41 ++++-------------------------------------
 include/linux/pci-doe.h | 14 --------------
 3 files changed, 4 insertions(+), 52 deletions(-)

diff --git a/.clang-format b/.clang-format
index d988e9fa9b26..1c85f6ddc71a 100644
--- a/.clang-format
+++ b/.clang-format
@@ -520,7 +520,6 @@ ForEachMacros:
   - 'of_property_for_each_string'
   - 'of_property_for_each_u32'
   - 'pci_bus_for_each_resource'
-  - 'pci_doe_for_each_off'
   - 'pcl_for_each_chunk'
   - 'pcl_for_each_segment'
   - 'pcm_for_each_format'
diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c
index 9c577f5b3878..db9a6b0c33c7 100644
--- a/drivers/pci/doe.c
+++ b/drivers/pci/doe.c
@@ -455,7 +455,7 @@ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev,
 	xa_init(&doe_mb->prots);
 
 	doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0,
-						     dev_driver_string(&pdev->dev),
+						     dev_bus_name(&pdev->dev),
 						     pci_name(pdev),
 						     doe_mb->cap_offset);
 	if (!doe_mb->work_queue) {
@@ -499,50 +499,18 @@ err_free:
 /**
  * pci_doe_destroy_mb() - Destroy a DOE mailbox object
 *
- * @ptr: Pointer to DOE mailbox
+ * @doe_mb: DOE mailbox
 *
 * Destroy all internal data structures created for the DOE mailbox.
 */
-static void pci_doe_destroy_mb(void *ptr)
+static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb)
 {
-	struct pci_doe_mb *doe_mb = ptr;
-
 	pci_doe_cancel_tasks(doe_mb);
 	xa_destroy(&doe_mb->prots);
 	destroy_workqueue(doe_mb->work_queue);
 	kfree(doe_mb);
 }
 
-/**
- * pcim_doe_create_mb() - Create a DOE mailbox object
- *
- * @pdev: PCI device to create the DOE mailbox for
- * @cap_offset: Offset of the DOE mailbox
- *
- * Create a single mailbox object to manage the mailbox protocol at the
- * cap_offset specified. The mailbox will automatically be destroyed on
- * driver unbinding from @pdev.
- *
- * RETURNS: created mailbox object on success
- *	    ERR_PTR(-errno) on failure
- */
-struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset)
-{
-	struct pci_doe_mb *doe_mb;
-	int rc;
-
-	doe_mb = pci_doe_create_mb(pdev, cap_offset);
-	if (IS_ERR(doe_mb))
-		return doe_mb;
-
-	rc = devm_add_action_or_reset(&pdev->dev, pci_doe_destroy_mb, doe_mb);
-	if (rc)
-		return ERR_PTR(rc);
-
-	return doe_mb;
-}
-EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
-
 /**
  * pci_doe_supports_prot() - Return if the DOE instance supports the given
 *			     protocol
@@ -552,7 +520,7 @@ EXPORT_SYMBOL_GPL(pcim_doe_create_mb);
 * @vid: Protocol Vendor ID
 * @type: Protocol type
 *
 * RETURNS: True if the DOE mailbox supports the protocol specified
 */
-bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
+static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
 {
 	unsigned long index;
 	void *entry;
@@ -567,7 +535,6 @@ bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type)
 
 	return false;
 }
-EXPORT_SYMBOL_GPL(pci_doe_supports_prot);
 
 /**
  * pci_doe_submit_task() - Submit a task to be processed by the state machine
diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h
index d6192ee0ac07..1f14aed4354b 100644
--- a/include/linux/pci-doe.h
+++ b/include/linux/pci-doe.h
@@ -15,20 +15,6 @@
 
 struct pci_doe_mb;
 
-/**
- * pci_doe_for_each_off - Iterate each DOE capability
- * @pdev: struct pci_dev to iterate
- * @off: u16 of config space offset of each mailbox capability found
- */
-#define pci_doe_for_each_off(pdev, off) \
-	for (off = pci_find_next_ext_capability(pdev, off, \
-					PCI_EXT_CAP_ID_DOE); \
-		off > 0; \
-		off = pci_find_next_ext_capability(pdev, off, \
-					PCI_EXT_CAP_ID_DOE))
-
-struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset);
-bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type);
 struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor,
 					u8 type);
 
 int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type,

From cedf8d8a5013ce515df2edbc52575614f3409593 Mon Sep 17 00:00:00 2001
From: Lukas Wunner
Date: Sat, 11 Mar 2023 15:40:15 +0100
Subject: [PATCH 14/38] PCI/DOE: Relax restrictions on request and response
 size

An upcoming user of DOE is CMA (Component Measurement and
Authentication, PCIe r6.0 sec 6.31). It builds on SPDM (Security
Protocol and Data Model): https://www.dmtf.org/dsp/DSP0274

SPDM message sizes are not always a multiple of dwords. To transport
them over DOE without using bounce buffers, allow sending requests and
receiving responses whose final dword is only partially populated.

To be clear, PCIe r6.0 sec 6.30.1 specifies the Data Object Header 2
"Length" in dwords, and pci_doe_send_req() and pci_doe_recv_resp()
read/write dwords. So from a spec point of view, DOE is still specified
in dwords, and allowing non-dword request/response buffers is merely for
the convenience of callers.
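As an illustration, callers may then issue byte-granular exchanges along
these lines (a sketch with arbitrary sizes; the CMA/SPDM data object
type value of 1 is an assumption based on PCIe r6.0, not something this
patch defines):

    u8 spdm_req[13];	/* no longer padded to a dword multiple */
    u8 spdm_rsp[42];	/* and no bounce buffer required */
    int rc;

    rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, 1 /* CMA/SPDM */,
    		 spdm_req, sizeof(spdm_req),
    		 spdm_rsp, sizeof(spdm_rsp));
    /* rc: bytes received; the partial last dword is zero-padded on send */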
Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Ming Li Reviewed-by: Jonathan Cameron Acked-by: Bjorn Helgaas Link: https://lore.kernel.org/r/151b1a6a1794afb65d941287ecbc032c5b8004b9.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/pci/doe.c | 74 +++++++++++++++++++++++++++++++---------------- 1 file changed, 49 insertions(+), 25 deletions(-) diff --git a/drivers/pci/doe.c b/drivers/pci/doe.c index db9a6b0c33c7..1b97a5ab71a9 100644 --- a/drivers/pci/doe.c +++ b/drivers/pci/doe.c @@ -76,13 +76,6 @@ struct pci_doe_protocol { * @private: Private data for the consumer * @work: Used internally by the mailbox * @doe_mb: Used internally by the mailbox - * - * The payload sizes and rv are specified in bytes with the following - * restrictions concerning the protocol. - * - * 1) The request_pl_sz must be a multiple of double words (4 bytes) - * 2) The response_pl_sz must be >= a single double word (4 bytes) - * 3) rv is returned as bytes but it will be a multiple of double words */ struct pci_doe_task { struct pci_doe_protocol prot; @@ -153,7 +146,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; - size_t length; + size_t length, remainder; u32 val; int i; @@ -171,7 +164,7 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, return -EIO; /* Length is 2 DW of header + length of payload in DW */ - length = 2 + task->request_pl_sz / sizeof(__le32); + length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32)); if (length > PCI_DOE_MAX_LENGTH) return -EIO; if (length == PCI_DOE_MAX_LENGTH) @@ -184,10 +177,21 @@ static int pci_doe_send_req(struct pci_doe_mb *doe_mb, pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, length)); + + /* Write payload */ for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++) pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, le32_to_cpu(task->request_pl[i])); + /* Write last payload dword */ + remainder = task->request_pl_sz % sizeof(__le32); + if (remainder) { + val = 0; + memcpy(&val, &task->request_pl[i], remainder); + le32_to_cpus(&val); + pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val); + } + pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO); return 0; @@ -207,11 +211,11 @@ static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb) static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) { + size_t length, payload_length, remainder, received; struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; - size_t length, payload_length; + int i = 0; u32 val; - int i; /* Read the first dword to get the protocol */ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); @@ -238,15 +242,38 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas /* First 2 dwords have already been read */ length -= 2; - payload_length = min(length, task->response_pl_sz / sizeof(__le32)); - /* Read the rest of the response payload */ - for (i = 0; i < payload_length; i++) { + received = task->response_pl_sz; + payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32)); + remainder = task->response_pl_sz % sizeof(__le32); + + /* remainder signifies number of data bytes in last payload dword */ + if (!remainder) + remainder = sizeof(__le32); + + if (length < payload_length) { + received = length * sizeof(__le32); + payload_length = length; + remainder = sizeof(__le32); + } + + if (payload_length) { + /* Read all payload dwords except the 
last */ + for (; i < payload_length - 1; i++) { + pci_read_config_dword(pdev, offset + PCI_DOE_READ, + &val); + task->response_pl[i] = cpu_to_le32(val); + pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + } + + /* Read last payload dword */ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); - task->response_pl[i] = cpu_to_le32(val); + cpu_to_le32s(&val); + memcpy(&task->response_pl[i], &val, remainder); /* Prior to the last ack, ensure Data Object Ready */ - if (i == (payload_length - 1) && !pci_doe_data_obj_ready(doe_mb)) + if (!pci_doe_data_obj_ready(doe_mb)) return -EIO; pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); + i++; } /* Flush excess length */ @@ -260,7 +287,7 @@ static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *tas if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) return -EIO; - return min(length, task->response_pl_sz / sizeof(__le32)) * sizeof(__le32); + return received; } static void signal_task_complete(struct pci_doe_task *task, int rv) @@ -561,14 +588,6 @@ static int pci_doe_submit_task(struct pci_doe_mb *doe_mb, if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type)) return -EINVAL; - /* - * DOE requests must be a whole number of DW and the response needs to - * be big enough for at least 1 DW - */ - if (task->request_pl_sz % sizeof(__le32) || - task->response_pl_sz < sizeof(__le32)) - return -EINVAL; - if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) return -EIO; @@ -596,6 +615,11 @@ static int pci_doe_submit_task(struct pci_doe_mb *doe_mb, * without byte-swapping. If payloads contain little-endian register values, * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu(). * + * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0 + * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords. The last + * (partial) dword is copied with byte granularity and padded with zeroes if + * necessary. Callers are thus relieved of using dword-sized bounce buffers. + * * RETURNS: Length of received response or negative errno. * Received data in excess of @response_sz is discarded. * The length may be smaller than @response_sz and the caller From 7a877c923995b8257069209b1d757735af4f4ce0 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Sat, 11 Mar 2023 15:40:16 +0100 Subject: [PATCH 15/38] cxl/pci: Simplify CDAT retrieval error path The cdat.table and cdat.length fields in struct cxl_port are set before CDAT retrieval and must therefore be unset on failure. Simplify by setting only on success. 
Suggested-by: Jonathan Cameron Link: https://lore.kernel.org/linux-cxl/20230209113422.00007017@Huawei.com/ Signed-off-by: Dave Jiang [lukas: rebase and rephrase commit message] Signed-off-by: Lukas Wunner Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Acked-by: Bjorn Helgaas Link: https://lore.kernel.org/r/7a5c7104fb6a3016dbaec1c5d0ed34619ea11a0c.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c868f4a2f1de..0609bd629073 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -475,10 +475,10 @@ static int cxl_cdat_get_length(struct device *dev, static int cxl_cdat_read_table(struct device *dev, struct pci_doe_mb *cdat_doe, - struct cxl_cdat *cdat) + void *cdat_table, size_t *cdat_length) { - size_t length = cdat->length; - __le32 *data = cdat->table; + size_t length = *cdat_length; + __le32 *data = cdat_table; int entry_handle = 0; do { @@ -522,7 +522,7 @@ static int cxl_cdat_read_table(struct device *dev, } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); /* Length in CDAT header may exceed concatenation of CDAT entries */ - cdat->length -= length; + *cdat_length -= length; return 0; } @@ -542,6 +542,7 @@ void read_cdat_data(struct cxl_port *port) struct cxl_dev_state *cxlds = cxlmd->cxlds; struct pci_dev *pdev = to_pci_dev(cxlds->dev); size_t cdat_length; + void *cdat_table; int rc; cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, @@ -558,19 +559,19 @@ void read_cdat_data(struct cxl_port *port) return; } - port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); - if (!port->cdat.table) + cdat_table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); + if (!cdat_table) return; - port->cdat.length = cdat_length; - rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat); + rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length); if (rc) { /* Don't leave table data allocated on error */ - devm_kfree(dev, port->cdat.table); - port->cdat.table = NULL; - port->cdat.length = 0; + devm_kfree(dev, cdat_table); dev_err(dev, "CDAT data read error\n"); + return; } + + port->cdat.table = cdat_table; + port->cdat.length = cdat_length; } EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); From f960e57dca9fa3653d9e9c0a9e1386d2241e0aad Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 11 Mar 2023 15:40:17 +0100 Subject: [PATCH 16/38] cxl/pci: Rightsize CDAT response allocation Jonathan notes that cxl_cdat_get_length() and cxl_cdat_read_table() allocate 32 dwords for the DOE response even though it may be smaller. In the case of cxl_cdat_get_length(), only the second dword of the response is of interest (it contains the length). So reduce the allocation to 2 dwords and let DOE discard the remainder. In the case of cxl_cdat_read_table(), a correctly sized allocation for the full CDAT already exists. Let DOE write each table entry directly into that allocation. There's a snag in that the table entry is preceded by a Table Access Response Header (1 dword, CXL 3.0 table 8-14). Save the last dword of the previous table entry, let DOE overwrite it with the header of the next entry and restore it afterwards. The resulting CDAT is preceded by 4 unavoidable useless bytes. Increase the allocation size accordingly.
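In sketch form (a simplified view of the loop in the diff below, with the entry validation elided):

	__le32 saved_dw = 0;

	do {
		/* DOE lands the 1-dword response header on the
		 * last dword of the previous entry */
		rc = pci_doe(cdat_doe, ..., &request, sizeof(request),
			     data, length);
		...
		*data = saved_dw;	/* restore the clobbered dword */
		length -= entry_dw * sizeof(__le32);
		data += entry_dw;
		saved_dw = *data;	/* clobbered on the next pass */
	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);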
The buffer overflow check in cxl_cdat_read_table() becomes unnecessary because the remaining bytes in the allocation are tracked in "length", which is passed to DOE and limits how many bytes it writes to the allocation. Additionally, cxl_cdat_read_table() bails out if the DOE response is truncated due to insufficient space. Tested-by: Ira Weiny Signed-off-by: Lukas Wunner Reviewed-by: Jonathan Cameron Cc: Dave Jiang Link: https://lore.kernel.org/r/7a4e1f86958a79a70f29b96a92199522f08f8322.1678543498.git.lukas@wunner.de Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 0609bd629073..25b7e8125d5d 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -453,7 +453,7 @@ static int cxl_cdat_get_length(struct device *dev, size_t *length) { __le32 request = CDAT_DOE_REQ(0); - __le32 response[32]; + __le32 response[2]; int rc; rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, @@ -464,7 +464,7 @@ static int cxl_cdat_get_length(struct device *dev, dev_err(dev, "DOE failed: %d", rc); return rc; } - if (rc < 2 * sizeof(__le32)) + if (rc < sizeof(response)) return -EIO; *length = le32_to_cpu(response[1]); @@ -477,28 +477,28 @@ static int cxl_cdat_read_table(struct device *dev, struct pci_doe_mb *cdat_doe, void *cdat_table, size_t *cdat_length) { - size_t length = *cdat_length; + size_t length = *cdat_length + sizeof(__le32); __le32 *data = cdat_table; int entry_handle = 0; + __le32 saved_dw = 0; do { __le32 request = CDAT_DOE_REQ(entry_handle); struct cdat_entry_header *entry; - __le32 response[32]; size_t entry_dw; int rc; rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), - &response, sizeof(response)); + data, length); if (rc < 0) { dev_err(dev, "DOE failed: %d", rc); return rc; } /* 1 DW Table Access Response Header + CDAT entry */ - entry = (struct cdat_entry_header *)(response + 1); + entry = (struct cdat_entry_header *)(data + 1); if ((entry_handle == 0 && rc != sizeof(__le32) + sizeof(struct cdat_header)) || (entry_handle > 0 && @@ -508,21 +508,22 @@ static int cxl_cdat_read_table(struct device *dev, /* Get the CXL table access header entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, - le32_to_cpu(response[0])); + le32_to_cpu(data[0])); entry_dw = rc / sizeof(__le32); /* Skip Header */ entry_dw -= 1; - entry_dw = min(length / sizeof(__le32), entry_dw); - /* Prevent length < 1 DW from causing a buffer overflow */ - if (entry_dw) { - memcpy(data, entry, entry_dw * sizeof(__le32)); - length -= entry_dw * sizeof(__le32); - data += entry_dw; - } + /* + * Table Access Response Header overwrote the last DW of + * previous entry, so restore that DW + */ + *data = saved_dw; + length -= entry_dw * sizeof(__le32); + data += entry_dw; + saved_dw = *data; } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); /* Length in CDAT header may exceed concatenation of CDAT entries */ - *cdat_length -= length; + *cdat_length -= length - sizeof(__le32); return 0; } @@ -559,7 +560,8 @@ void read_cdat_data(struct cxl_port *port) return; } - cdat_table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); + cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32), + GFP_KERNEL); if (!cdat_table) return; @@ -570,7 +572,7 @@ void read_cdat_data(struct cxl_port *port) dev_err(dev, "CDAT data read error\n"); } - port->cdat.table = cdat_table; + port->cdat.table = cdat_table 
+ sizeof(__le32); port->cdat.length = cdat_length; } EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); From 267214a2319b5692bbc9b128a6514960291dcca8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 21 Apr 2023 19:51:47 -0700 Subject: [PATCH 17/38] cxl/port: Fix port to pci device assumptions in read_cdat_data() Not all endpoint CXL ports are associated with PCI devices. The cxl_test infrastructure models 'struct cxl_port' instances hosted by platform devices. Teach read_cdat_data() to be careful about non-pci hosted cxl_memdev instances. Otherwise, cxl_test crashes with this signature: RIP: 0010:xas_start+0x6d/0x290 [..] Call Trace: xas_load+0xa/0x50 xas_find+0x25b/0x2f0 xa_find+0x118/0x1d0 pci_find_doe_mailbox+0x51/0xc0 read_cdat_data+0x45/0x190 [cxl_core] cxl_port_probe+0x10a/0x1e0 [cxl_port] cxl_bus_probe+0x17/0x50 [cxl_core] Some other cleanups are included like removing the single-use @uport variable, and removing the indirection through 'struct cxl_dev_state' to lookup the device that registered the memdev and may be a pci device. Fixes: af0a6c3587dc ("cxl/pci: Use CDAT DOE mailbox created by PCI core") Reviewed-by: Lukas Wunner Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/168213190748.708404.16215095414060364800.stgit@dwillia2-xfh.jf.intel.com Signed-off-by: Dan Williams --- drivers/cxl/core/pci.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 25b7e8125d5d..bdbd907884ce 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -536,17 +536,18 @@ static int cxl_cdat_read_table(struct device *dev, */ void read_cdat_data(struct cxl_port *port) { - struct pci_doe_mb *cdat_doe; + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); + struct device *host = cxlmd->dev.parent; struct device *dev = &port->dev; - struct device *uport = port->uport; - struct cxl_memdev *cxlmd = to_cxl_memdev(uport); - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct pci_doe_mb *cdat_doe; size_t cdat_length; void *cdat_table; int rc; - cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + if (!dev_is_pci(host)) + return; + cdat_doe = pci_find_doe_mailbox(to_pci_dev(host), + PCI_DVSEC_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!cdat_doe) { dev_dbg(dev, "No CDAT mailbox\n"); From 3db166d6cf0ea73dd2c887036aad2e95e0884d9b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 18 Apr 2023 10:39:01 -0700 Subject: [PATCH 18/38] cxl/mbox: Deprecate poison commands The CXL subsystem is adding formal mechanisms for managing device poison. Minimize the maintenance burden going forward, and maximize the investment in common tooling by deprecating direct user access to poison commands outside of CXL_MEM_RAW_COMMANDS debug scenarios. A new cxl_deprecated_commands[] list is created for querying which command ids defined in previous kernels are now deprecated. CXL Media and Poison Management commands, opcodes 0x43XX, defined in CXL 3.0 Spec, Table 8-93 are deprecated with one exception: Get Scan Media Capabilities. Keep Get Scan Media Capabilities as it simply provides information and has no impact on the device state. Effectively all of the commands defined in: commit 87815ee9d006 ("cxl/pci: Add media provisioning required commands") ...were defined prematurely and should have waited until the kernel implementation was decided. 
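Tooling built against this header can detect the change via the new table, e.g. (illustrative fragment only, not part of this patch):

	if (cxl_deprecated_commands[info->id])
		fprintf(stderr, "deprecated: %s\n",
			cxl_command_names[info->id].name);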
To my knowledge there are no shipping devices with poison support and no known tools that would regress with this change. Co-developed-by: Alison Schofield Signed-off-by: Alison Schofield Link: https://lore.kernel.org/r/652197e9bc8885e6448d989405b9e50ee9d6b0a6.1681838291.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 5 ----- include/uapi/linux/cxl_mem.h | 35 ++++++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index f2addb457172..938cff2c948e 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -61,12 +61,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0), - CXL_CMD(INJECT_POISON, 0x8, 0, 0), - CXL_CMD(CLEAR_POISON, 0x48, 0, 0), CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), - CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), }; /* diff --git a/include/uapi/linux/cxl_mem.h b/include/uapi/linux/cxl_mem.h index 86bbacf2a315..14bc6e742148 100644 --- a/include/uapi/linux/cxl_mem.h +++ b/include/uapi/linux/cxl_mem.h @@ -40,19 +40,22 @@ ___C(SET_ALERT_CONFIG, "Set Alert Configuration"), \ ___C(GET_SHUTDOWN_STATE, "Get Shutdown State"), \ ___C(SET_SHUTDOWN_STATE, "Set Shutdown State"), \ - ___C(GET_POISON, "Get Poison List"), \ - ___C(INJECT_POISON, "Inject Poison"), \ - ___C(CLEAR_POISON, "Clear Poison"), \ + ___DEPRECATED(GET_POISON, "Get Poison List"), \ + ___DEPRECATED(INJECT_POISON, "Inject Poison"), \ + ___DEPRECATED(CLEAR_POISON, "Clear Poison"), \ ___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \ - ___C(SCAN_MEDIA, "Scan Media"), \ - ___C(GET_SCAN_MEDIA, "Get Scan Media Results"), \ + ___DEPRECATED(SCAN_MEDIA, "Scan Media"), \ + ___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \ ___C(MAX, "invalid / last command") #define ___C(a, b) CXL_MEM_COMMAND_ID_##a +#define ___DEPRECATED(a, b) CXL_MEM_DEPRECATED_ID_##a enum { CXL_CMDS }; #undef ___C +#undef ___DEPRECATED #define ___C(a, b) { b } +#define ___DEPRECATED(a, b) { "Deprecated " b } static const struct { const char *name; } cxl_command_names[] __attribute__((__unused__)) = { CXL_CMDS }; @@ -68,6 +71,28 @@ static const struct { */ #undef ___C +#undef ___DEPRECATED +#define ___C(a, b) (0) +#define ___DEPRECATED(a, b) (1) + +static const __u8 cxl_deprecated_commands[] + __attribute__((__unused__)) = { CXL_CMDS }; + +/* + * Here's how this actually breaks out: + * cxl_deprecated_commands[] = { + * [CXL_MEM_COMMAND_ID_INVALID] = 0, + * [CXL_MEM_COMMAND_ID_IDENTIFY] = 0, + * ... + * [CXL_MEM_DEPRECATED_ID_GET_POISON] = 1, + * [CXL_MEM_DEPRECATED_ID_INJECT_POISON] = 1, + * [CXL_MEM_DEPRECATED_ID_CLEAR_POISON] = 1, + * ... + * }; + */ + +#undef ___C +#undef ___DEPRECATED /** * struct cxl_command_info - Command information returned from a query. From dec441d32a9a1e4a891ccda3356cac61cc1ffe79 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:02 -0700 Subject: [PATCH 19/38] cxl/mbox: Restrict poison cmds to debugfs cxl_raw_allow_all The Get, Inject, and Clear poison commands are not available for direct user access because they require kernel driver controls to perform safely. 
Further restrict access to these commands by requiring the selection of the debugfs attribute 'cxl_raw_allow_all' to enable in raw mode. Signed-off-by: Alison Schofield Link: https://lore.kernel.org/r/0e5cb41ffae2bab800957d3b9003eedfd0a2dfd5.1681838291.git.alison.schofield@intel.com Reviewed-by: Jonathan Cameron Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 938cff2c948e..fd1026970d3a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -82,6 +82,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { * * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that * is kept up to date with patrol notifications and error management. + * + * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel + * driver orchestration for safety. */ static u16 cxl_disabled_raw_commands[] = { CXL_MBOX_OP_ACTIVATE_FW, @@ -90,6 +93,9 @@ static u16 cxl_disabled_raw_commands[] = { CXL_MBOX_OP_SET_SHUTDOWN_STATE, CXL_MBOX_OP_SCAN_MEDIA, CXL_MBOX_OP_GET_SCAN_MEDIA, + CXL_MBOX_OP_GET_POISON, + CXL_MBOX_OP_INJECT_POISON, + CXL_MBOX_OP_CLEAR_POISON, }; /* From d0abf5787adc0341a04667d3b4a23b4d0999af30 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:03 -0700 Subject: [PATCH 20/38] cxl/mbox: Initialize the poison state Driver reads of the poison list are synchronized to ensure that a reader does not get an incomplete list because their request overlapped (was interrupted or preceded by) another read request of the same DPA range. (CXL Spec 3.0 Section 8.2.9.8.4.1). The driver maintains state information to achieve this goal. To initialize the state, first recognize the poison commands in the CEL (Command Effects Log). If the device supports Get Poison List, allocate a single buffer for the poison list and protect it with a lock. 
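In sketch form, the CEL walk feeds the new state (helper names as in the diff below):

	/* CXL 3.0 Media and Poison Management opcodes are 0x43XX */
	if (cxl_is_poison_command(opcode))
		cxl_set_poison_cmd_enabled(&cxlds->poison, opcode);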
Signed-off-by: Alison Schofield Link: https://lore.kernel.org/r/9078d180769be28a5087288b38cdfc827cae58bf.1681838291.git.alison.schofield@intel.com Reviewed-by: Jonathan Cameron Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 81 ++++++++++++++++++++++++++++++++++++++++- drivers/cxl/cxlmem.h | 34 +++++++++++++++++ drivers/cxl/pci.c | 4 ++ 3 files changed, 117 insertions(+), 2 deletions(-) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index fd1026970d3a..17737386283a 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -120,6 +121,43 @@ static bool cxl_is_security_command(u16 opcode) return false; } +static bool cxl_is_poison_command(u16 opcode) +{ +#define CXL_MBOX_OP_POISON_CMDS 0x43 + + if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS) + return true; + + return false; +} + +static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison, + u16 opcode) +{ + switch (opcode) { + case CXL_MBOX_OP_GET_POISON: + set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds); + break; + case CXL_MBOX_OP_INJECT_POISON: + set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds); + break; + case CXL_MBOX_OP_CLEAR_POISON: + set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds); + break; + case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS: + set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds); + break; + case CXL_MBOX_OP_SCAN_MEDIA: + set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds); + break; + case CXL_MBOX_OP_GET_SCAN_MEDIA: + set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds); + break; + default: + break; + } +} + static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) { struct cxl_mem_command *c; @@ -635,13 +673,18 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel) u16 opcode = le16_to_cpu(cel_entry[i].opcode); struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - if (!cmd) { + if (!cmd && !cxl_is_poison_command(opcode)) { dev_dbg(cxlds->dev, "Opcode 0x%04x unsupported by driver\n", opcode); continue; } - set_bit(cmd->info.id, cxlds->enabled_cmds); + if (cmd) + set_bit(cmd->info.id, cxlds->enabled_cmds); + + if (cxl_is_poison_command(opcode)) + cxl_set_poison_cmd_enabled(&cxlds->poison, opcode); + dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode); } } @@ -1108,6 +1151,40 @@ int cxl_set_timestamp(struct cxl_dev_state *cxlds) } EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL); +static void free_poison_buf(void *buf) +{ + kvfree(buf); +} + +/* Get Poison List output buffer is protected by cxlds->poison.lock */ +static int cxl_poison_alloc_buf(struct cxl_dev_state *cxlds) +{ + cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL); + if (!cxlds->poison.list_out) + return -ENOMEM; + + return devm_add_action_or_reset(cxlds->dev, free_poison_buf, + cxlds->poison.list_out); +} + +int cxl_poison_state_init(struct cxl_dev_state *cxlds) +{ + int rc; + + if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) + return 0; + + rc = cxl_poison_alloc_buf(cxlds); + if (rc) { + clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds); + return rc; + } + + mutex_init(&cxlds->poison.lock); + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); + struct cxl_dev_state *cxl_dev_state_create(struct device *dev) { struct cxl_dev_state *cxlds; diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 090acebba4fa..40de21fac128 100644 --- a/drivers/cxl/cxlmem.h +++ 
b/drivers/cxl/cxlmem.h @@ -215,6 +215,37 @@ struct cxl_event_state { struct mutex log_lock; }; +/* Device enabled poison commands */ +enum poison_cmd_enabled_bits { + CXL_POISON_ENABLED_LIST, + CXL_POISON_ENABLED_INJECT, + CXL_POISON_ENABLED_CLEAR, + CXL_POISON_ENABLED_SCAN_CAPS, + CXL_POISON_ENABLED_SCAN_MEDIA, + CXL_POISON_ENABLED_SCAN_RESULTS, + CXL_POISON_ENABLED_MAX +}; + +/** + * struct cxl_poison_state - Driver poison state info + * + * @max_errors: Maximum media error records held in device cache + * @enabled_cmds: All poison commands enabled in the CEL + * @list_out: The poison list payload returned by device + * @lock: Protect reads of the poison list + * + * Reads of the poison list are synchronized to ensure that a reader + * does not get an incomplete list because their request overlapped + * (was interrupted or preceded by) another read request of the same + * DPA range. CXL Spec 3.0 Section 8.2.9.8.4.1 + */ +struct cxl_poison_state { + u32 max_errors; + DECLARE_BITMAP(enabled_cmds, CXL_POISON_ENABLED_MAX); + struct cxl_mbox_poison_out *list_out; + struct mutex lock; /* Protect reads of poison list */ +}; + /** * struct cxl_dev_state - The driver device state * @@ -251,6 +282,7 @@ struct cxl_event_state { * @serial: PCIe Device Serial Number * @doe_mbs: PCI DOE mailbox array * @event: event log driver state + * @poison: poison driver state info * @mbox_send: @dev specific transport for transmitting mailbox commands * * See section 8.2.9.5.2 Capacity Configuration and Label Storage for @@ -290,6 +322,7 @@ struct cxl_dev_state { struct xarray doe_mbs; struct cxl_event_state event; + struct cxl_poison_state poison; int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); }; @@ -608,6 +641,7 @@ void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds); void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status); int cxl_set_timestamp(struct cxl_dev_state *cxlds); +int cxl_poison_state_init(struct cxl_dev_state *cxlds); #ifdef CONFIG_CXL_SUSPEND void cxl_mem_active_inc(void); diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 60b23624d167..827ea0895778 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -769,6 +769,10 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; + rc = cxl_poison_state_init(cxlds); + if (rc) + return rc; + rc = cxl_dev_state_identify(cxlds); if (rc) return rc; From ed83f7ca398b3798b82c1d5d1113011c0e5a2198 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:04 -0700 Subject: [PATCH 21/38] cxl/mbox: Add GET_POISON_LIST mailbox command CXL devices maintain a list of locations that are poisoned or result in poison if the addresses are accessed by the host. Per the spec, (CXL 3.0 8.2.9.8.4.1), the device returns this Poison list as a set of Media Error Records that include the source of the error, the starting device physical address, and length. The length is the number of adjacent DPAs in the record and is in units of 64 bytes. Retrieve the poison list. 
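For example, using the CXL_POISON_LEN_MULT constant added below, a record with a length field of 4 describes 4 * 64 = 256 bytes of adjacent poisoned DPA:

	len_bytes = le32_to_cpu(record->length) * CXL_POISON_LEN_MULT;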
Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/a1f332e817834ef8e89c0ff32e760308fb903346.1681838291.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/mbox.c | 55 +++++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 46 ++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 17737386283a..05ff50ee8489 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -1038,6 +1039,7 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify id; struct cxl_mbox_cmd mbox_cmd; + u32 val; int rc; mbox_cmd = (struct cxl_mbox_cmd) { @@ -1061,6 +1063,11 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) cxlds->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision)); + if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) { + val = get_unaligned_le24(id.poison_list_max_mer); + cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX); + } + return 0; } EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); @@ -1151,6 +1158,54 @@ int cxl_set_timestamp(struct cxl_dev_state *cxlds) } EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL); +int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, + struct cxl_region *cxlr) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_poison_out *po; + struct cxl_mbox_poison_in pi; + struct cxl_mbox_cmd mbox_cmd; + int nr_records = 0; + int rc; + + rc = mutex_lock_interruptible(&cxlds->poison.lock); + if (rc) + return rc; + + po = cxlds->poison.list_out; + pi.offset = cpu_to_le64(offset); + pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_POISON, + .size_in = sizeof(pi), + .payload_in = &pi, + .size_out = cxlds->payload_size, + .payload_out = po, + .min_out = struct_size(po, record, 0), + }; + + do { + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + if (rc) + break; + + /* TODO TRACE the media error records */ + + /* Protect against an uncleared _FLAG_MORE */ + nr_records = nr_records + le16_to_cpu(po->count); + if (nr_records >= cxlds->poison.max_errors) { + dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n", + nr_records); + break; + } + } while (po->flags & CXL_POISON_FLAG_MORE); + + mutex_unlock(&cxlds->poison.lock); + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL); + static void free_poison_buf(void *buf) { kvfree(buf); diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 40de21fac128..f2f0cdda5825 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -571,6 +571,50 @@ struct cxl_mbox_set_timestamp_in { } __packed; +/* Get Poison List CXL 3.0 Spec 8.2.9.8.4.1 */ +struct cxl_mbox_poison_in { + __le64 offset; + __le64 length; +} __packed; + +struct cxl_mbox_poison_out { + u8 flags; + u8 rsvd1; + __le64 overflow_ts; + __le16 count; + u8 rsvd2[20]; + struct cxl_poison_record { + __le64 address; + __le32 length; + __le32 rsvd; + } __packed record[]; +} __packed; + +/* + * Get Poison List address field encodes the starting + * address of poison, and the source of the poison. 
+ */ +#define CXL_POISON_START_MASK GENMASK_ULL(63, 6) +#define CXL_POISON_SOURCE_MASK GENMASK(2, 0) + +/* Get Poison List record length is in units of 64 bytes */ +#define CXL_POISON_LEN_MULT 64 + +/* Kernel defined maximum for a list of poison errors */ +#define CXL_POISON_LIST_MAX 1024 + +/* Get Poison List: Payload out flags */ +#define CXL_POISON_FLAG_MORE BIT(0) +#define CXL_POISON_FLAG_OVERFLOW BIT(1) +#define CXL_POISON_FLAG_SCANNING BIT(2) + +/* Get Poison List: Poison Source */ +#define CXL_POISON_SOURCE_UNKNOWN 0 +#define CXL_POISON_SOURCE_EXTERNAL 1 +#define CXL_POISON_SOURCE_INTERNAL 2 +#define CXL_POISON_SOURCE_INJECTED 3 +#define CXL_POISON_SOURCE_VENDOR 7 + /** * struct cxl_mem_command - Driver representation of a memory device command * @info: Command information as it exists for the UAPI @@ -642,6 +686,8 @@ void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cm void cxl_mem_get_event_records(struct cxl_dev_state *cxlds, u32 status); int cxl_set_timestamp(struct cxl_dev_state *cxlds); int cxl_poison_state_init(struct cxl_dev_state *cxlds); +int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, + struct cxl_region *cxlr); #ifdef CONFIG_CXL_SUSPEND void cxl_mem_active_inc(void); From ddf49d57b841e55e1b0aee1224a9f526e50e1bcc Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:05 -0700 Subject: [PATCH 22/38] cxl/trace: Add TRACE support for CXL media-error records CXL devices may support the retrieval of a device poison list. Add a new trace event that the CXL subsystem may use to log the media-error records returned in the poison list. Log each media-error record as a cxl_poison trace event of type 'List'. Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/de6196f5269483d886ab1834744f82d27189a666.1681838291.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/core.h | 4 ++ drivers/cxl/core/mbox.c | 5 ++- drivers/cxl/core/trace.h | 94 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index cde475e13216..e888e293943e 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -64,4 +64,8 @@ int cxl_memdev_init(void); void cxl_memdev_exit(void); void cxl_mbox_init(void); +enum cxl_poison_trace_type { + CXL_POISON_TRACE_LIST, +}; + #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 05ff50ee8489..2daeeedb16e1 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -1190,7 +1190,10 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, if (rc) break; - /* TODO TRACE the media error records */ + for (int i = 0; i < le16_to_cpu(po->count); i++) + trace_cxl_poison(cxlmd, cxlr, &po->record[i], + po->flags, po->overflow_ts, + CXL_POISON_TRACE_LIST); /* Protect against an uncleared _FLAG_MORE */ nr_records = nr_records + le16_to_cpu(po->count); diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 9b8d3d997834..17c24baaf740 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -7,10 +7,12 @@ #define _CXL_EVENTS_H #include +#include #include #include #include +#include "core.h" #define CXL_RAS_UC_CACHE_DATA_PARITY BIT(0) #define CXL_RAS_UC_CACHE_ADDR_PARITY BIT(1) @@ -600,6 +602,98 @@ TRACE_EVENT(cxl_memory_module, ) ); +#define show_poison_trace_type(type) \ + 
__print_symbolic(type, \ + { CXL_POISON_TRACE_LIST, "List" }) + +#define __show_poison_source(source) \ + __print_symbolic(source, \ + { CXL_POISON_SOURCE_UNKNOWN, "Unknown" }, \ + { CXL_POISON_SOURCE_EXTERNAL, "External" }, \ + { CXL_POISON_SOURCE_INTERNAL, "Internal" }, \ + { CXL_POISON_SOURCE_INJECTED, "Injected" }, \ + { CXL_POISON_SOURCE_VENDOR, "Vendor" }) + +#define show_poison_source(source) \ + (((source > CXL_POISON_SOURCE_INJECTED) && \ + (source != CXL_POISON_SOURCE_VENDOR)) ? "Reserved" \ + : __show_poison_source(source)) + +#define show_poison_flags(flags) \ + __print_flags(flags, "|", \ + { CXL_POISON_FLAG_MORE, "More" }, \ + { CXL_POISON_FLAG_OVERFLOW, "Overflow" }, \ + { CXL_POISON_FLAG_SCANNING, "Scanning" }) + +#define __cxl_poison_addr(record) \ + (le64_to_cpu(record->address)) +#define cxl_poison_record_dpa(record) \ + (__cxl_poison_addr(record) & CXL_POISON_START_MASK) +#define cxl_poison_record_source(record) \ + (__cxl_poison_addr(record) & CXL_POISON_SOURCE_MASK) +#define cxl_poison_record_dpa_length(record) \ + (le32_to_cpu(record->length) * CXL_POISON_LEN_MULT) +#define cxl_poison_overflow(flags, time) \ + (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0) + +TRACE_EVENT(cxl_poison, + + TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region, + const struct cxl_poison_record *record, u8 flags, + __le64 overflow_ts, enum cxl_poison_trace_type trace_type), + + TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type), + + TP_STRUCT__entry( + __string(memdev, dev_name(&cxlmd->dev)) + __string(host, dev_name(cxlmd->dev.parent)) + __field(u64, serial) + __field(u8, trace_type) + __string(region, region) + __field(u64, overflow_ts) + __field(u64, dpa) + __field(u32, dpa_length) + __array(char, uuid, 16) + __field(u8, source) + __field(u8, flags) + ), + + TP_fast_assign( + __assign_str(memdev, dev_name(&cxlmd->dev)); + __assign_str(host, dev_name(cxlmd->dev.parent)); + __entry->serial = cxlmd->cxlds->serial; + __entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts); + __entry->dpa = cxl_poison_record_dpa(record); + __entry->dpa_length = cxl_poison_record_dpa_length(record); + __entry->source = cxl_poison_record_source(record); + __entry->trace_type = trace_type; + __entry->flags = flags; + if (region) { + __assign_str(region, dev_name(®ion->dev)); + memcpy(__entry->uuid, ®ion->params.uuid, 16); + } else { + __assign_str(region, ""); + memset(__entry->uuid, 0, 16); + } + ), + + TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \ + "region_uuid=%pU dpa=0x%llx dpa_length=0x%x source=%s " \ + "flags=%s overflow_time=%llu", + __get_str(memdev), + __get_str(host), + __entry->serial, + show_poison_trace_type(__entry->trace_type), + __get_str(region), + __entry->uuid, + __entry->dpa, + __entry->dpa_length, + show_poison_source(__entry->source), + show_poison_flags(__entry->flags), + __entry->overflow_ts + ) +); + #endif /* _CXL_EVENTS_H */ #define TRACE_INCLUDE_FILE trace From 7ff6ad1075885fdc71f6fea94b95109a582dec29 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:06 -0700 Subject: [PATCH 23/38] cxl/memdev: Add trigger_poison_list sysfs attribute When a boolean 'true' is written to this attribute the memdev driver retrieves the poison list from the device. The list consists of addresses that are poisoned, or would result in poison if accessed, and the source of the poison. This attribute is only visible for devices supporting the capability. 
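For example, with a hypothetical endpoint mem0:

	# echo 1 > /sys/bus/cxl/devices/mem0/trigger_poison_list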
The retrieved errors are logged as kernel events when cxl_poison event tracing is enabled. Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/1081cfdc8a349dc754779642d584707e56db26ba.1681838291.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- Documentation/ABI/testing/sysfs-bus-cxl | 14 ++++++++ drivers/cxl/core/memdev.c | 43 +++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 3 +- drivers/cxl/mem.c | 43 +++++++++++++++++++++++++ 4 files changed, 102 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-bus-cxl b/Documentation/ABI/testing/sysfs-bus-cxl index 3acf2f17a73f..48ac0d911801 100644 --- a/Documentation/ABI/testing/sysfs-bus-cxl +++ b/Documentation/ABI/testing/sysfs-bus-cxl @@ -415,3 +415,17 @@ Description: 1), and checks that the hardware accepts the commit request. Reading this value indicates whether the region is committed or not. + + +What: /sys/bus/cxl/devices/memX/trigger_poison_list +Date: April, 2023 +KernelVersion: v6.4 +Contact: linux-cxl@vger.kernel.org +Description: + (WO) When a boolean 'true' is written to this attribute the + memdev driver retrieves the poison list from the device. The + list consists of addresses that are poisoned, or would result + in poison if accessed, and the source of the poison. This + attribute is only visible for devices supporting the + capability. The retrieved errors are logged as kernel + events when cxl_poison event tracing is enabled. diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 28a05f2fe32d..8be01479d40b 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -106,6 +106,49 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(numa_node); +static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + u64 offset, length; + int rc = 0; + + /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */ + if (resource_size(&cxlds->pmem_res)) { + offset = cxlds->pmem_res.start; + length = resource_size(&cxlds->pmem_res); + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + if (rc) + return rc; + } + if (resource_size(&cxlds->ram_res)) { + offset = cxlds->ram_res.start; + length = resource_size(&cxlds->ram_res); + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + /* + * Invalid Physical Address is not an error for + * volatile addresses. Device support is optional. 
+ */ + if (rc == -EFAULT) + rc = 0; + } + return rc; +} + +int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) +{ + int rc; + + rc = down_read_interruptible(&cxl_dpa_rwsem); + if (rc) + return rc; + + rc = cxl_get_poison_by_memdev(cxlmd); + up_read(&cxl_dpa_rwsem); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); + static struct attribute *cxl_memdev_attributes[] = { &dev_attr_serial.attr, &dev_attr_firmware_version.attr, diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index f2f0cdda5825..bfb75bf0182e 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -145,7 +145,7 @@ struct cxl_mbox_cmd { C(FWROLLBACK, -ENXIO, "rolled back to the previous active FW"), \ C(FWRESET, -ENXIO, "FW failed to activate, needs cold reset"), \ C(HANDLE, -ENXIO, "one or more Event Record Handles were invalid"), \ - C(PADDR, -ENXIO, "physical address specified is invalid"), \ + C(PADDR, -EFAULT, "physical address specified is invalid"), \ C(POISONLMT, -ENXIO, "poison injection limit has been reached"), \ C(MEDIAFAILURE, -ENXIO, "permanent issue with the media"), \ C(ABORT, -ENXIO, "background cmd was aborted by device"), \ @@ -688,6 +688,7 @@ int cxl_set_timestamp(struct cxl_dev_state *cxlds); int cxl_poison_state_init(struct cxl_dev_state *cxlds); int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, struct cxl_region *cxlr); +int cxl_trigger_poison_list(struct cxl_memdev *cxlmd); #ifdef CONFIG_CXL_SUSPEND void cxl_mem_active_inc(void); diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 39c4b54f0715..b6a413facbd7 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -176,10 +176,53 @@ unlock: return devm_add_action_or_reset(dev, enable_suspend, NULL); } +static ssize_t trigger_poison_list_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + bool trigger; + int rc; + + if (kstrtobool(buf, &trigger) || !trigger) + return -EINVAL; + + rc = cxl_trigger_poison_list(to_cxl_memdev(dev)); + + return rc ? rc : len; +} +static DEVICE_ATTR_WO(trigger_poison_list); + +static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n) +{ + if (a == &dev_attr_trigger_poison_list.attr) { + struct device *dev = kobj_to_dev(kobj); + + if (!test_bit(CXL_POISON_ENABLED_LIST, + to_cxl_memdev(dev)->cxlds->poison.enabled_cmds)) + return 0; + } + return a->mode; +} + +static struct attribute *cxl_mem_attrs[] = { + &dev_attr_trigger_poison_list.attr, + NULL +}; + +static struct attribute_group cxl_mem_group = { + .attrs = cxl_mem_attrs, + .is_visible = cxl_mem_visible, +}; + +__ATTRIBUTE_GROUPS(cxl_mem); + static struct cxl_driver cxl_mem_driver = { .name = "cxl_mem", .probe = cxl_mem_probe, .id = CXL_DEVICE_MEMORY_EXPANDER, + .drv = { + .dev_groups = cxl_mem_groups, + }, }; module_cxl_driver(cxl_mem_driver); From f0832a58639691af575fa28ffaeb657c51f3ca06 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:07 -0700 Subject: [PATCH 24/38] cxl/region: Provide region info to the cxl_poison trace event User space may need to know which region, if any, maps the poison address(es) logged in a cxl_poison trace event. Since the mapping of DPAs (device physical addresses) to a region can change, the kernel must provide this information at the time the poison list is read. The event informs user space that at event <timestamp> this <region> mapped to this <memdev>, which is poisoned. The cxl_poison trace event is already wired up to log the region name and uuid if it receives param 'struct cxl_region'.
In order to provide that cxl_region, add another method for gathering poison - by committed endpoint decoder mappings. This method is only available with CONFIG_CXL_REGION and is only used if a region actually maps the memdev where poison is being read. After the region driver reads the poison list for all the mapped resources, poison is read for any remaining unmapped resources. The default method remains: read the poison by memdev resource. Signed-off-by: Alison Schofield Tested-by: Jonathan Cameron Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/438b01ccaa70592539e8eda4eb2b1d617ba03160.1681838292.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/core.h | 5 ++ drivers/cxl/core/memdev.c | 13 +++- drivers/cxl/core/region.c | 124 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 141 insertions(+), 1 deletion(-) diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index e888e293943e..deb5f87d6d0a 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -25,7 +25,12 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); #define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type) int cxl_region_init(void); void cxl_region_exit(void); +int cxl_get_poison_by_endpoint(struct cxl_port *port); #else +static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) +{ + return 0; +} static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) { } diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 8be01479d40b..185b6d27b698 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -136,13 +136,24 @@ static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd) int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) { + struct cxl_port *port; int rc; + port = dev_get_drvdata(&cxlmd->dev); + if (!port || !is_cxl_endpoint(port)) + return -EINVAL; + rc = down_read_interruptible(&cxl_dpa_rwsem); if (rc) return rc; - rc = cxl_get_poison_by_memdev(cxlmd); + if (port->commit_end == -1) { + /* No regions mapped to this memdev */ + rc = cxl_get_poison_by_memdev(cxlmd); + } else { + /* Regions mapped, collect poison by endpoint */ + rc = cxl_get_poison_by_endpoint(port); + } up_read(&cxl_dpa_rwsem); return rc; diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index b2fd67fcebfb..f822de44bee0 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2238,6 +2238,130 @@ struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) } EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL); +struct cxl_poison_context { + struct cxl_port *port; + enum cxl_decoder_mode mode; + u64 offset; +}; + +static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd, + struct cxl_poison_context *ctx) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + u64 offset, length; + int rc = 0; + + /* + * Collect poison for the remaining unmapped resources + * after poison is collected by committed endpoints. 
+ * + * Knowing that PMEM must always follow RAM, get poison + * for unmapped resources based on the last decoder's mode: + * ram: scan remains of ram range, then any pmem range + * pmem: scan remains of pmem range + */ + + if (ctx->mode == CXL_DECODER_RAM) { + offset = ctx->offset; + length = resource_size(&cxlds->ram_res) - offset; + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + if (rc == -EFAULT) + rc = 0; + if (rc) + return rc; + } + if (ctx->mode == CXL_DECODER_PMEM) { + offset = ctx->offset; + length = resource_size(&cxlds->dpa_res) - offset; + if (!length) + return 0; + } else if (resource_size(&cxlds->pmem_res)) { + offset = cxlds->pmem_res.start; + length = resource_size(&cxlds->pmem_res); + } else { + return 0; + } + + return cxl_mem_get_poison(cxlmd, offset, length, NULL); +} + +static int poison_by_decoder(struct device *dev, void *arg) +{ + struct cxl_poison_context *ctx = arg; + struct cxl_endpoint_decoder *cxled; + struct cxl_memdev *cxlmd; + u64 offset, length; + int rc = 0; + + if (!is_endpoint_decoder(dev)) + return rc; + + cxled = to_cxl_endpoint_decoder(dev); + if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) + return rc; + + /* + * Regions are only created with single mode decoders: pmem or ram. + * Linux does not support mixed mode decoders. This means that + * reading poison per endpoint decoder adheres to the requirement + * that poison reads of pmem and ram must be separated. + * CXL 3.0 Spec 8.2.9.8.4.1 + */ + if (cxled->mode == CXL_DECODER_MIXED) { + dev_dbg(dev, "poison list read unsupported in mixed mode\n"); + return rc; + } + + cxlmd = cxled_to_memdev(cxled); + if (cxled->skip) { + offset = cxled->dpa_res->start - cxled->skip; + length = cxled->skip; + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) + rc = 0; + if (rc) + return rc; + } + + offset = cxled->dpa_res->start; + length = cxled->dpa_res->end - offset + 1; + rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); + if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) + rc = 0; + if (rc) + return rc; + + /* Iterate until commit_end is reached */ + if (cxled->cxld.id == ctx->port->commit_end) { + ctx->offset = cxled->dpa_res->end + 1; + ctx->mode = cxled->mode; + return 1; + } + + return 0; +} + +int cxl_get_poison_by_endpoint(struct cxl_port *port) +{ + struct cxl_poison_context ctx; + int rc = 0; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + + ctx = (struct cxl_poison_context) { + .port = port + }; + + rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); + if (rc == 1) + rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport), &ctx); + + up_read(&cxl_region_rwsem); + return rc; +} + static struct lock_class_key cxl_pmem_region_key; static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) From 28a3ae4ff66c622448f5dfb7416bbe753e182eb4 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:08 -0700 Subject: [PATCH 25/38] cxl/trace: Add an HPA to cxl_poison trace events When a cxl_poison trace event is reported for a region, the poisoned Device Physical Address (DPA) can be translated to a Host Physical Address (HPA) for consumption by user space. Translate and add the resulting HPA to the cxl_poison trace event. Follow the device decode logic as defined in the CXL Spec 3.0 Section 8.2.4.19.13. If no region currently maps the poison, assign ULLONG_MAX to the cxl_poison event hpa field. 
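As a worked example with illustrative numbers: for a 2-way interleave (eiw=1), 256-byte granularity (eig=0) and device position 1, a dpa_offset of 0x100 is the device's second 256-byte chunk. Shifting the upper bits left by eiw and placing 'pos' at bit (eig + 8) gives:

	hpa_offset = (0x100 << 1) | (1 << 8);	/* 0x300 */

...the fourth 256-byte chunk of the region, as expected for position 1 of a 2-way interleave set.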
Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Dave Jiang Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/6d3cd726f9042a59902785b0a2cb3ddfb70e0219.1681838292.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/trace.c | 94 ++++++++++++++++++++++++++++++++++++++++ drivers/cxl/core/trace.h | 11 ++++- 2 files changed, 103 insertions(+), 2 deletions(-) diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c index 29ae7ce81dc5..d0403dc3c8ab 100644 --- a/drivers/cxl/core/trace.c +++ b/drivers/cxl/core/trace.c @@ -1,5 +1,99 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ +#include +#include "core.h" + #define CREATE_TRACE_POINTS #include "trace.h" + +static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + int gran = p->interleave_granularity; + int ways = p->interleave_ways; + u64 offset; + + /* Is the hpa within this region at all */ + if (hpa < p->res->start || hpa > p->res->end) { + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in region\n", hpa); + return false; + } + + /* Is the hpa in an expected chunk for its pos(-ition) */ + offset = hpa - p->res->start; + offset = do_div(offset, gran * ways); + if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) + return true; + + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); + + return false; +} + +static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; + struct cxl_region_params *p = &cxlr->params; + int pos = cxled->pos; + u16 eig = 0; + u8 eiw = 0; + + ways_to_eiw(p->interleave_ways, &eiw); + granularity_to_eig(p->interleave_granularity, &eig); + + /* + * The device position in the region interleave set was removed + * from the offset at HPA->DPA translation. To reconstruct the + * HPA, place the 'pos' in the offset. 
+ * + * The placement of 'pos' in the HPA is determined by interleave + * ways and granularity and is defined in the CXL Spec 3.0 Section + * 8.2.4.19.13 Implementation Note: Device Decode Logic + */ + + /* Remove the dpa base */ + dpa_offset = dpa - cxl_dpa_resource_start(cxled); + + mask_upper = GENMASK_ULL(51, eig + 8); + + if (eiw < 8) { + hpa_offset = (dpa_offset & mask_upper) << eiw; + hpa_offset |= pos << (eig + 8); + } else { + bits_upper = (dpa_offset & mask_upper) >> (eig + 8); + bits_upper = bits_upper * 3; + hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); + } + + /* The lower bits remain unchanged */ + hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); + + /* Apply the hpa_offset to the region base address */ + hpa = hpa_offset + p->res->start; + + if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) + return ULLONG_MAX; + + return hpa; +} + +u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd, + u64 dpa) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled = NULL; + + for (int i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + if (cxlmd == cxled_to_memdev(cxled)) + break; + } + if (!cxled || cxlmd != cxled_to_memdev(cxled)) + return ULLONG_MAX; + + return cxl_dpa_to_hpa(dpa, cxlr, cxled); +} diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index 17c24baaf740..220cc7e721b8 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -636,6 +636,8 @@ TRACE_EVENT(cxl_memory_module, #define cxl_poison_overflow(flags, time) \ (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0) +u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); + TRACE_EVENT(cxl_poison, TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region, @@ -651,6 +653,7 @@ TRACE_EVENT(cxl_poison, __field(u8, trace_type) __string(region, region) __field(u64, overflow_ts) + __field(u64, hpa) __field(u64, dpa) __field(u32, dpa_length) __array(char, uuid, 16) @@ -671,21 +674,25 @@ TRACE_EVENT(cxl_poison, if (region) { __assign_str(region, dev_name(®ion->dev)); memcpy(__entry->uuid, ®ion->params.uuid, 16); + __entry->hpa = cxl_trace_hpa(region, cxlmd, + __entry->dpa); } else { __assign_str(region, ""); memset(__entry->uuid, 0, 16); + __entry->hpa = ULLONG_MAX; } ), TP_printk("memdev=%s host=%s serial=%lld trace_type=%s region=%s " \ - "region_uuid=%pU dpa=0x%llx dpa_length=0x%x source=%s " \ - "flags=%s overflow_time=%llu", + "region_uuid=%pU hpa=0x%llx dpa=0x%llx dpa_length=0x%x " \ + "source=%s flags=%s overflow_time=%llu", __get_str(memdev), __get_str(host), __entry->serial, show_poison_trace_type(__entry->trace_type), __get_str(region), __entry->uuid, + __entry->hpa, __entry->dpa, __entry->dpa_length, show_poison_source(__entry->source), From f8d22bf50ca56a334ef58bf59ee299ed62940e42 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 10:39:09 -0700 Subject: [PATCH 26/38] tools/testing/cxl: Mock support for Get Poison List Make mock memdevs support the Get Poison List mailbox command. Return a fake poison error record when the get poison list command is issued. This supports testing the kernel tracing and cxl list capabilities for media errors. 
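Note how the mock packs the source into the address field: poison DPAs are 64-byte aligned, so adding CXL_POISON_SOURCE_INJECTED (3) to the requested offset leaves the DPA intact and makes the record decode as an 'Injected' error, e.g.:

	dpa    = le64_to_cpu(record->address) & CXL_POISON_START_MASK;
	source = le64_to_cpu(record->address) & CXL_POISON_SOURCE_MASK;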
Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Ira Weiny Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/14d661ce3e3a32b7d8e76b8ecc5eb88343b3d09c.1681838292.git.alison.schofield@intel.com Signed-off-by: Dan Williams --- tools/testing/cxl/test/mem.c | 42 ++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c index 9263b04d35f7..cf7975db05ed 100644 --- a/tools/testing/cxl/test/mem.c +++ b/tools/testing/cxl/test/mem.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include "trace.h" @@ -40,6 +41,10 @@ static struct cxl_cel_entry mock_cel[] = { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO), .effect = cpu_to_le16(0), }, + { + .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON), + .effect = cpu_to_le16(0), + }, }; /* See CXL 2.0 Table 181 Get Health Info Output Payload */ @@ -471,6 +476,8 @@ static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), }; + put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer); + if (cmd->size_out < sizeof(id)) return -EINVAL; @@ -888,6 +895,34 @@ static int mock_health_info(struct cxl_dev_state *cxlds, return 0; } +static int mock_get_poison(struct cxl_dev_state *cxlds, + struct cxl_mbox_cmd *cmd) +{ + struct cxl_mbox_poison_in *pi = cmd->payload_in; + + /* Mock one poison record at pi.offset for 64 bytes */ + struct { + struct cxl_mbox_poison_out po; + struct cxl_poison_record record; + } __packed mock_plist = { + .po = { + .count = cpu_to_le16(1), + }, + .record = { + .length = cpu_to_le32(1), + .address = cpu_to_le64(le64_to_cpu(pi->offset) + + CXL_POISON_SOURCE_INJECTED), + }, + }; + + if (cmd->size_out < sizeof(mock_plist)) + return -EINVAL; + + memcpy(cmd->payload_out, &mock_plist, sizeof(mock_plist)); + cmd->size_out = sizeof(mock_plist); + return 0; +} + static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct device *dev = cxlds->dev; @@ -942,6 +977,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd * case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: rc = mock_passphrase_secure_erase(cxlds, cmd); break; + case CXL_MBOX_OP_GET_POISON: + rc = mock_get_poison(cxlds, cmd); + break; default: break; } @@ -1010,6 +1048,10 @@ static int cxl_mock_mem_probe(struct platform_device *pdev) if (rc) return rc; + rc = cxl_poison_state_init(cxlds); + if (rc) + return rc; + rc = cxl_dev_state_identify(cxlds); if (rc) return rc; From d2fbc48658022f48625064ae192baff52057987d Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 20:26:25 -0700 Subject: [PATCH 27/38] cxl/memdev: Add support for the Inject Poison mailbox command CXL devices optionally support the INJECT POISON mailbox command. Add memdev driver support for the mailbox command. Per the CXL Specification (3.0 8.2.9.8.4.2), after receiving a valid inject poison request, the device will return poison when the address is accessed through the CXL.mem driver. Injecting poison adds the address to the device's Poison List and the error source is set to Injected. In addition, the device adds a poison creation event to its internal Informational Event log, updates the Event Status register, and if configured, interrupts the host. Also, per the CXL Specification, it is not an error to inject poison into an address that already has poison present and no error is returned from the device. 
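The request payload is just the DPA (unlike Clear Poison in a later patch, which also carries 64 bytes of write-data); in sketch form:

	struct cxl_mbox_inject_poison inject = {
		.address = cpu_to_le64(dpa),	/* 64-byte aligned */
	};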
If the address is not contained in the device's dpa resource, or is not 64 byte aligned, return -EINVAL without issuing the mbox command. Poison injection is intended for debug only and will be exposed to userspace through debugfs. Restrict compilation to CONFIG_DEBUG_FS. Signed-off-by: Alison Schofield Reviewed-by: Jonathan Cameron Reviewed-by: Dave Jiang Link: https://lore.kernel.org/r/241c64115e6bd2effed9c7a20b08b3908dd7be8f.1681874357.git.alison.schofield@intel.com Tested-by: Jonathan Cameron Signed-off-by: Dan Williams --- drivers/cxl/core/memdev.c | 56 +++++++++++++++++++++++++++++++++++++++ drivers/cxl/cxlmem.h | 6 +++++ 2 files changed, 62 insertions(+) diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 185b6d27b698..8a3b7d7505fe 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -160,6 +160,62 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) } EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); +static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + + if (!resource_size(&cxlds->dpa_res)) { + dev_dbg(cxlds->dev, "device has no dpa resource\n"); + return -EINVAL; + } + if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) { + dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n", + dpa, &cxlds->dpa_res); + return -EINVAL; + } + if (!IS_ALIGNED(dpa, 64)) { + dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa); + return -EINVAL; + } + + return 0; +} + +int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_inject_poison inject; + struct cxl_mbox_cmd mbox_cmd; + int rc; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + + rc = down_read_interruptible(&cxl_dpa_rwsem); + if (rc) + return rc; + + rc = cxl_validate_poison_dpa(cxlmd, dpa); + if (rc) + goto out; + + inject.address = cpu_to_le64(dpa); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_INJECT_POISON, + .size_in = sizeof(inject), + .payload_in = &inject, + }; + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); +out: + up_read(&cxl_dpa_rwsem); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL); + static struct attribute *cxl_memdev_attributes[] = { &dev_attr_serial.attr, &dev_attr_firmware_version.attr, diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index bfb75bf0182e..2a0625e1d3aa 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -615,6 +615,11 @@ struct cxl_mbox_poison_out { #define CXL_POISON_SOURCE_INJECTED 3 #define CXL_POISON_SOURCE_VENDOR 7 +/* Inject & Clear Poison CXL 3.0 Spec 8.2.9.8.4.2/3 */ +struct cxl_mbox_inject_poison { + __le64 address; +}; + /** * struct cxl_mem_command - Driver representation of a memory device command * @info: Command information as it exists for the UAPI @@ -689,6 +694,7 @@ int cxl_poison_state_init(struct cxl_dev_state *cxlds); int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, struct cxl_region *cxlr); int cxl_trigger_poison_list(struct cxl_memdev *cxlmd); +int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa); #ifdef CONFIG_CXL_SUSPEND void cxl_mem_active_inc(void); From 9690b07748d18ac667036a68442081c4aea33ba7 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 18 Apr 2023 20:26:26 -0700 Subject: [PATCH 28/38] cxl/memdev: Add support for the Clear Poison mailbox command CXL devices optionally support the CLEAR POISON mailbox command. 
Add memdev driver support for clearing poison.

Per the CXL Specification (3.0 8.2.9.8.4.3), after receiving a valid
clear poison request, the device removes the address from the device's
Poison List and writes 0 (zero) for 64 bytes starting at address. If
the device cannot clear poison from the address, it returns a permanent
media error and -ENXIO is returned to the user.

Additionally, per the spec, it is not an error to clear poison from an
address that is not poisoned.

If the address is not contained in the device's dpa resource, or is
not 64-byte aligned, the driver returns -EINVAL without sending the
command to the device.

Poison clearing is intended for debug only and will be exposed to
userspace through debugfs. Restrict compilation to CONFIG_DEBUG_FS.

Implementation note: Although the CXL specification defines the clear
command to accept 64 bytes of 'write-data', this implementation always
uses zeroes as write-data.

Signed-off-by: Alison Schofield
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/8682c30ec24bd9c45af5feccb04b02be51e58c0a.1681874357.git.alison.schofield@intel.com
Tested-by: Jonathan Cameron
Signed-off-by: Dan Williams
---
 drivers/cxl/core/memdev.c | 43 +++++++++++++++++++++++++++++++++
 drivers/cxl/cxlmem.h      |  7 +++++++
 2 files changed, 50 insertions(+)

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 8a3b7d7505fe..813fd1eeba3d 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -216,6 +216,49 @@ out:
 }
 EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL);

+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+   struct cxl_dev_state *cxlds = cxlmd->cxlds;
+   struct cxl_mbox_clear_poison clear;
+   struct cxl_mbox_cmd mbox_cmd;
+   int rc;
+
+   if (!IS_ENABLED(CONFIG_DEBUG_FS))
+       return 0;
+
+   rc = down_read_interruptible(&cxl_dpa_rwsem);
+   if (rc)
+       return rc;
+
+   rc = cxl_validate_poison_dpa(cxlmd, dpa);
+   if (rc)
+       goto out;
+
+   /*
+    * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
+    * is defined to accept 64 bytes of write-data, along with the
+    * address to clear. This driver uses zeroes as write-data.
+    */
+   clear = (struct cxl_mbox_clear_poison) {
+       .address = cpu_to_le64(dpa)
+   };
+
+   mbox_cmd = (struct cxl_mbox_cmd) {
+       .opcode = CXL_MBOX_OP_CLEAR_POISON,
+       .size_in = sizeof(clear),
+       .payload_in = &clear,
+   };
+
+   rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+   if (rc)
+       goto out;
+out:
+   up_read(&cxl_dpa_rwsem);
+
+   return rc;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL);
+
 static struct attribute *cxl_memdev_attributes[] = {
    &dev_attr_serial.attr,
    &dev_attr_firmware_version.attr,
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 2a0625e1d3aa..80276f37b78f 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -620,6 +620,12 @@ struct cxl_mbox_inject_poison {
    __le64 address;
 };

+/* Clear Poison CXL 3.0 Spec 8.2.9.8.4.3 */
+struct cxl_mbox_clear_poison {
+   __le64 address;
+   u8 write_data[CXL_POISON_LEN_MULT];
+} __packed;
+
 /**
  * struct cxl_mem_command - Driver representation of a memory device command
  * @info: Command information as it exists for the UAPI
@@ -695,6 +701,7 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
               struct cxl_region *cxlr);
 int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
 int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);

 #ifdef CONFIG_CXL_SUSPEND
 void cxl_mem_active_inc(void);

From 0a105ab28a4de44eb738ce64e9ac74946aa5133b Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:27 -0700
Subject: [PATCH 29/38] cxl/memdev: Warn of poison inject or clear to a mapped
 region

Inject and clear poison capabilities are intended for debug usage only.
In order to be useful in debug environments, the driver needs to allow
inject and clear operations on DPAs mapped in regions. Emit a
dev_warn_once() when either operation occurs, as sketched below.
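A condensed sketch of the lookup this adds, assuming the helpers
introduced in the diff below; device_for_each_child() stops iterating
as soon as the callback returns nonzero:

    struct cxl_dpa_to_region_context ctx = { .dpa = dpa };

    /* __cxl_dpa_to_region() returns 1 and records ctx.cxlr on a match */
    device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
    if (ctx.cxlr)
        dev_warn_once(cxlds->dev, "poison inject dpa:%#llx region: %s\n",
                  dpa, dev_name(&ctx.cxlr->dev));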
Signed-off-by: Alison Schofield
Link: https://lore.kernel.org/r/f911ca5277c9d0f9757b72d7e6842871bfff4fa2.1681874357.git.alison.schofield@intel.com
Tested-by: Jonathan Cameron
Reviewed-by: Jonathan Cameron
Signed-off-by: Dan Williams
---
 drivers/cxl/core/memdev.c | 59 +++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)

diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 813fd1eeba3d..40ce74f5500a 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -160,6 +160,50 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL);

+struct cxl_dpa_to_region_context {
+   struct cxl_region *cxlr;
+   u64 dpa;
+};
+
+static int __cxl_dpa_to_region(struct device *dev, void *arg)
+{
+   struct cxl_dpa_to_region_context *ctx = arg;
+   struct cxl_endpoint_decoder *cxled;
+   u64 dpa = ctx->dpa;
+
+   if (!is_endpoint_decoder(dev))
+       return 0;
+
+   cxled = to_cxl_endpoint_decoder(dev);
+   if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
+       return 0;
+
+   if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start)
+       return 0;
+
+   dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa,
+       dev_name(&cxled->cxld.region->dev));
+
+   ctx->cxlr = cxled->cxld.region;
+
+   return 1;
+}
+
+static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
+{
+   struct cxl_dpa_to_region_context ctx;
+   struct cxl_port *port;
+
+   ctx = (struct cxl_dpa_to_region_context) {
+       .dpa = dpa,
+   };
+   port = dev_get_drvdata(&cxlmd->dev);
+   if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+       device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
+
+   return ctx.cxlr;
+}
+
 static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
 {
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -189,6 +233,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_mbox_inject_poison inject;
    struct cxl_mbox_cmd mbox_cmd;
+   struct cxl_region *cxlr;
    int rc;

    if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -209,6 +254,14 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
        .payload_in = &inject,
    };
    rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
+   if (rc)
+       goto out;
+
+   cxlr = cxl_dpa_to_region(cxlmd, dpa);
+   if (cxlr)
+       dev_warn_once(cxlds->dev,
+                 "poison inject dpa:%#llx region: %s\n", dpa,
+                 dev_name(&cxlr->dev));
 out:
    up_read(&cxl_dpa_rwsem);

@@ -221,6 +274,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_mbox_clear_poison clear;
    struct cxl_mbox_cmd mbox_cmd;
+   struct cxl_region *cxlr;
    int rc;

    if (!IS_ENABLED(CONFIG_DEBUG_FS))
@@ -252,6 +306,11 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
    rc = cxl_internal_send_cmd(cxlds, &mbox_cmd);
    if (rc)
        goto out;
+
+   cxlr = cxl_dpa_to_region(cxlmd, dpa);
+   if (cxlr)
+       dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
+                 dpa, dev_name(&cxlr->dev));
 out:
    up_read(&cxl_dpa_rwsem);

From 98b6926562d9ccdbca69de9a0e0bf4f90d7f1326 Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:28 -0700
Subject: [PATCH 30/38] cxl/memdev: Trace inject and clear poison as
 cxl_poison events

The cxl_poison trace event allows users to view the history of poison
list reads. With the addition of inject and clear poison capabilities,
users will expect similar tracing.

Add trace types 'Inject' and 'Clear' to the cxl_poison trace_event and
trace successful operations only.
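Since inject and clear each affect a single 64-byte cacheline, the new
trace points synthesize a one-record poison entry; a sketch of the
pattern, condensed from the diff below (the record length is in 64-byte
units, so 1 == 64 bytes):

    struct cxl_poison_record record = {
        .address = cpu_to_le64(dpa),
        .length = cpu_to_le32(1),
    };
    trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);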
If the driver finds that the DPA being injected or cleared of poison is
mapped in a region, that region info is included in the cxl_poison
trace event. Region reconfigurations can make this extra info useless
if the debug operations are not carefully managed.

Signed-off-by: Alison Schofield
Reviewed-by: Jonathan Cameron
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/e20eb7c3029137b480ece671998c183da0477e2e.1681874357.git.alison.schofield@intel.com
Tested-by: Jonathan Cameron
Signed-off-by: Dan Williams
---
 drivers/cxl/core/core.h   |  2 ++
 drivers/cxl/core/memdev.c | 15 +++++++++++++++
 drivers/cxl/core/trace.h  |  8 +++++---
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index deb5f87d6d0a..27f0968449de 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -71,6 +71,8 @@ void cxl_mbox_init(void);

 enum cxl_poison_trace_type {
    CXL_POISON_TRACE_LIST,
+   CXL_POISON_TRACE_INJECT,
+   CXL_POISON_TRACE_CLEAR,
 };

 #endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 40ce74f5500a..057a43267290 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include "trace.h"
 #include "core.h"

 static DECLARE_RWSEM(cxl_memdev_rwsem);
@@ -232,6 +233,7 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_mbox_inject_poison inject;
+   struct cxl_poison_record record;
    struct cxl_mbox_cmd mbox_cmd;
    struct cxl_region *cxlr;
    int rc;
@@ -262,6 +264,12 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
        dev_warn_once(cxlds->dev,
                  "poison inject dpa:%#llx region: %s\n", dpa,
                  dev_name(&cxlr->dev));
+
+   record = (struct cxl_poison_record) {
+       .address = cpu_to_le64(dpa),
+       .length = cpu_to_le32(1),
+   };
+   trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);
 out:
    up_read(&cxl_dpa_rwsem);

@@ -273,6 +281,7 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
 {
    struct cxl_dev_state *cxlds = cxlmd->cxlds;
    struct cxl_mbox_clear_poison clear;
+   struct cxl_poison_record record;
    struct cxl_mbox_cmd mbox_cmd;
    struct cxl_region *cxlr;
    int rc;
@@ -311,6 +320,12 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
    if (cxlr)
        dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n",
                  dpa, dev_name(&cxlr->dev));
+
+   record = (struct cxl_poison_record) {
+       .address = cpu_to_le64(dpa),
+       .length = cpu_to_le32(1),
+   };
+   trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);
 out:
    up_read(&cxl_dpa_rwsem);

diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 220cc7e721b8..a0b5819bc70b 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -602,9 +602,11 @@ TRACE_EVENT(cxl_memory_module,
    )
 );

-#define show_poison_trace_type(type)           \
-   __print_symbolic(type,              \
-   { CXL_POISON_TRACE_LIST, "List" })
+#define show_poison_trace_type(type)           \
+   __print_symbolic(type,              \
+   { CXL_POISON_TRACE_LIST, "List" },      \
+   { CXL_POISON_TRACE_INJECT, "Inject" },      \
+   { CXL_POISON_TRACE_CLEAR, "Clear" })

 #define __show_poison_source(source)              \
    __print_symbolic(source,              \

From 50d527f52cbf0680c87d11a254383ca730c5c19f Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:29 -0700
Subject: [PATCH 31/38] cxl/mem: Add debugfs attributes for poison inject and
 clear

Inject and Clear Poison commands are optionally supported by CXL memdev
devices and are intended for use in debug environments only.
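As a usage example (the attributes are detailed in the ABI document
added below), writing a DPA such as 0x40000000 to
/sys/kernel/debug/cxl/mem0/inject_poison injects poison at that
address, and writing the same value to the clear_poison attribute
clears it again; "mem0" stands in for whichever memdev is under debug.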
Add debugfs attributes for user access.
Documentation/ABI/testing/debugfs-cxl describes the usage.

Signed-off-by: Alison Schofield
Reviewed-by: Jonathan Cameron
Link: https://lore.kernel.org/r/0c9ea8e671b8e58465d18722788b60d325c675c7.1681874357.git.alison.schofield@intel.com
Tested-by: Jonathan Cameron
Signed-off-by: Dan Williams
---
 Documentation/ABI/testing/debugfs-cxl | 35 +++++++++++++++++++++++++++
 drivers/cxl/mem.c                     | 28 +++++++++++++++++++++
 2 files changed, 63 insertions(+)
 create mode 100644 Documentation/ABI/testing/debugfs-cxl

diff --git a/Documentation/ABI/testing/debugfs-cxl b/Documentation/ABI/testing/debugfs-cxl
new file mode 100644
index 000000000000..fe61d372e3fa
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-cxl
@@ -0,0 +1,35 @@
+What:          /sys/kernel/debug/cxl/memX/inject_poison
+Date:          April, 2023
+KernelVersion: v6.4
+Contact:       linux-cxl@vger.kernel.org
+Description:
+              (WO) When a Device Physical Address (DPA) is written to this
+              attribute, the memdev driver sends an inject poison command to
+              the device for the specified address. The DPA must be 64-byte
+              aligned and the length of the injected poison is 64 bytes. If
+              successful, the device returns poison when the address is
+              accessed through the CXL.mem bus. Injecting poison adds the
+              address to the device's Poison List and the error source is set
+              to Injected. In addition, the device adds a poison creation
+              event to its internal Informational Event log, updates the
+              Event Status register, and if configured, interrupts the host.
+              It is not an error to inject poison into an address that
+              already has poison present and no error is returned. The
+              inject_poison attribute is only visible for devices supporting
+              the capability.
+
+
+What:          /sys/kernel/debug/cxl/memX/clear_poison
+Date:          April, 2023
+KernelVersion: v6.4
+Contact:       linux-cxl@vger.kernel.org
+Description:
+              (WO) When a Device Physical Address (DPA) is written to this
+              attribute, the memdev driver sends a clear poison command to
+              the device for the specified address. Clearing poison removes
+              the address from the device's Poison List and writes 0 (zero)
+              for 64 bytes starting at address. It is not an error to clear
+              poison from an address that does not have poison set. If the
+              device cannot clear poison from the address, -ENXIO is returned.
+              The clear_poison attribute is only visible for devices
+              supporting the capability.
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index b6a413facbd7..10caf180b3fa 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -94,6 +94,26 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
    return 0;
 }

+static int cxl_debugfs_poison_inject(void *data, u64 dpa)
+{
+   struct cxl_memdev *cxlmd = data;
+
+   return cxl_inject_poison(cxlmd, dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+            cxl_debugfs_poison_inject, "%llx\n");
+
+static int cxl_debugfs_poison_clear(void *data, u64 dpa)
+{
+   struct cxl_memdev *cxlmd = data;
+
+   return cxl_clear_poison(cxlmd, dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+            cxl_debugfs_poison_clear, "%llx\n");
+
 static int cxl_mem_probe(struct device *dev)
 {
    struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
@@ -117,6 +137,14 @@ static int cxl_mem_probe(struct device *dev)

    dentry = cxl_debugfs_create_dir(dev_name(dev));
    debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
+
+   if (test_bit(CXL_POISON_ENABLED_INJECT, cxlds->poison.enabled_cmds))
+       debugfs_create_file("inject_poison", 0200, dentry, cxlmd,
+                   &cxl_poison_inject_fops);
+   if (test_bit(CXL_POISON_ENABLED_CLEAR, cxlds->poison.enabled_cmds))
+       debugfs_create_file("clear_poison", 0200, dentry, cxlmd,
+                   &cxl_poison_clear_fops);
+
    rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
    if (rc)
        return rc;

From 371c16101ee8a076cbe93ab95bbefdb43927003e Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:30 -0700
Subject: [PATCH 32/38] tools/testing/cxl: Mock the Inject Poison mailbox
 command

Mock the injection of poison by storing the device:address entries in
mock_poison_list[]. Enforce a limit of 8 poison injections per memdev
device and 128 total entries for the cxl_test mock driver.

Introducing mock_poison_list[] here makes it available for use in the
mocks of Clear Poison and Get Poison List.
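The backing store is a fixed global table where a NULL cxlds marks a
free slot; condensed from the diff below:

    #define MOCK_INJECT_DEV_MAX 8    /* per-memdev injection limit */
    #define MOCK_INJECT_TEST_MAX 128 /* limit across all mock memdevs */

    static struct mock_poison {
        struct cxl_dev_state *cxlds; /* NULL == unused entry */
        u64 dpa;
    } mock_poison_list[MOCK_INJECT_TEST_MAX];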
Signed-off-by: Alison Schofield
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/f6b7f03541eaa8c2260d3eafadd04afe3f0d7962.1681874357.git.alison.schofield@intel.com
Signed-off-by: Dan Williams
---
 tools/testing/cxl/test/mem.c | 77 ++++++++++++++++++++++++++++++++++++
 1 file changed, 77 insertions(+)

diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index cf7975db05ed..2731ebbd175b 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -16,6 +16,9 @@
 #define DEV_SIZE SZ_2G
 #define EFFECT(x) (1U << x)

+#define MOCK_INJECT_DEV_MAX 8
+#define MOCK_INJECT_TEST_MAX 128
+
 static struct cxl_cel_entry mock_cel[] = {
    {
        .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
@@ -45,6 +48,10 @@ static struct cxl_cel_entry mock_cel[] = {
        .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
        .effect = cpu_to_le16(0),
    },
+   {
+       .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
+       .effect = cpu_to_le16(0),
+   },
 };

 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
@@ -474,6 +481,7 @@ static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
            cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
        .total_capacity =
            cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
+       .inject_poison_limit = cpu_to_le16(MOCK_INJECT_DEV_MAX),
    };

    put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
@@ -895,6 +903,11 @@ static int mock_health_info(struct cxl_dev_state *cxlds,
    return 0;
 }

+static struct mock_poison {
+   struct cxl_dev_state *cxlds;
+   u64 dpa;
+} mock_poison_list[MOCK_INJECT_TEST_MAX];
+
 static int mock_get_poison(struct cxl_dev_state *cxlds,
               struct cxl_mbox_cmd *cmd)
 {
@@ -923,6 +936,67 @@ static int mock_get_poison(struct cxl_dev_state *cxlds,
    return 0;
 }

+static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
+{
+   int count = 0;
+
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (mock_poison_list[i].cxlds == cxlds)
+           count++;
+   }
+   return (count >= MOCK_INJECT_DEV_MAX);
+}
+
+static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
+{
+   if (mock_poison_dev_max_injected(cxlds)) {
+       dev_dbg(cxlds->dev,
+           "Device poison injection limit has been reached: %d\n",
+           MOCK_INJECT_DEV_MAX);
+       return false;
+   }
+
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (!mock_poison_list[i].cxlds) {
+           mock_poison_list[i].cxlds = cxlds;
+           mock_poison_list[i].dpa = dpa;
+           return true;
+       }
+   }
+   dev_dbg(cxlds->dev,
+       "Mock test poison injection limit has been reached: %d\n",
+       MOCK_INJECT_TEST_MAX);
+
+   return false;
+}
+
+static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
+{
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (mock_poison_list[i].cxlds == cxlds &&
+           mock_poison_list[i].dpa == dpa)
+           return true;
+   }
+   return false;
+}
+
+static int mock_inject_poison(struct cxl_dev_state *cxlds,
+             struct cxl_mbox_cmd *cmd)
+{
+   struct cxl_mbox_inject_poison *pi = cmd->payload_in;
+   u64 dpa = le64_to_cpu(pi->address);
+
+   if (mock_poison_found(cxlds, dpa)) {
+       /* Not an error to inject poison if already poisoned */
+       dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
+       return 0;
+   }
+   if (!mock_poison_add(cxlds, dpa))
+       return -ENXIO;
+
+   return 0;
+}
+
 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
    struct device *dev = cxlds->dev;
@@ -980,6 +1054,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
    case CXL_MBOX_OP_GET_POISON:
        rc = mock_get_poison(cxlds, cmd);
        break;
+   case CXL_MBOX_OP_INJECT_POISON:
+       rc = mock_inject_poison(cxlds, cmd);
+       break;
    default:
        break;
    }

From 6ec4b6d23e3a5b653cc8b6a7b09c20c30190cfce Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:31 -0700
Subject: [PATCH 33/38] tools/testing/cxl: Mock the Clear Poison mailbox
 command

Mock the clear of poison by deleting the device:address entry from the
mock_poison_list[]. Behave like a real CXL device and do not fail if
the address is not in the poison list, but offer a dev_dbg() message.

Signed-off-by: Alison Schofield
Reviewed-by: Jonathan Cameron
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/ecf19743c6572e60971bbd078f67d520cf5bca5d.1681874357.git.alison.schofield@intel.com
Signed-off-by: Dan Williams
---
 tools/testing/cxl/test/mem.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 2731ebbd175b..3c3909d30d03 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -52,6 +52,10 @@ static struct cxl_cel_entry mock_cel[] = {
        .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
        .effect = cpu_to_le16(0),
    },
+   {
+       .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
+       .effect = cpu_to_le16(0),
+   },
 };

 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
@@ -997,6 +1001,35 @@ static int mock_inject_poison(struct cxl_dev_state *cxlds,
    return 0;
 }

+static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
+{
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (mock_poison_list[i].cxlds == cxlds &&
+           mock_poison_list[i].dpa == dpa) {
+           mock_poison_list[i].cxlds = NULL;
+           return true;
+       }
+   }
+   return false;
+}
+
+static int mock_clear_poison(struct cxl_dev_state *cxlds,
+            struct cxl_mbox_cmd *cmd)
+{
+   struct cxl_mbox_clear_poison *pi = cmd->payload_in;
+   u64 dpa = le64_to_cpu(pi->address);
+
+   /*
+    * A real CXL device will write pi->write_data to the address
+    * being cleared. In this mock, just delete this address from
+    * the mock poison list.
+    */
+   if (!mock_poison_del(cxlds, dpa))
+       dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
+
+   return 0;
+}
+
 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
    struct device *dev = cxlds->dev;
@@ -1057,6 +1090,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
    case CXL_MBOX_OP_INJECT_POISON:
        rc = mock_inject_poison(cxlds, cmd);
        break;
+   case CXL_MBOX_OP_CLEAR_POISON:
+       rc = mock_clear_poison(cxlds, cmd);
+       break;
    default:
        break;
    }

From 8eac7ea72593010726713c5359a4b6aedf29b6fe Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:32 -0700
Subject: [PATCH 34/38] tools/testing/cxl: Use injected poison for get poison
 list

Prior to poison inject support, the mock of 'Get Poison List' returned
a poison list containing a single mocked error record. Following the
addition of poison inject and clear support to the mock driver, use
mock_poison_list[] rather than faking an error record. The
mock_poison_list[] tracks the actual poison inject and clear requests
issued by userspace.
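For reference, struct_size() from <linux/overflow.h> computes
sizeof(*po) plus n trailing records, so the allocation and the returned
payload length are derived the same way; condensed from the diff below:

    /* allocate room for the per-device maximum number of records */
    po = kzalloc(struct_size(po, record, MOCK_INJECT_DEV_MAX), GFP_KERNEL);

    /* report only the records found in [offset, offset + length) */
    cmd->size_out = struct_size(po, record, nr_records);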
Signed-off-by: Alison Schofield
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/0f4242c81821f4982b02cb1009c22783ef66b2f1.1681874357.git.alison.schofield@intel.com
Signed-off-by: Dan Williams
---
 tools/testing/cxl/test/mem.c | 62 +++++++++++++++++++++++++-----------
 1 file changed, 43 insertions(+), 19 deletions(-)

diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 3c3909d30d03..8b053de23504 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -912,31 +912,55 @@ static struct mock_poison {
    u64 dpa;
 } mock_poison_list[MOCK_INJECT_TEST_MAX];

+static struct cxl_mbox_poison_out *
+cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
+{
+   struct cxl_mbox_poison_out *po;
+   int nr_records = 0;
+   u64 dpa;
+
+   po = kzalloc(struct_size(po, record, MOCK_INJECT_DEV_MAX), GFP_KERNEL);
+   if (!po)
+       return NULL;
+
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (mock_poison_list[i].cxlds != cxlds)
+           continue;
+       if (mock_poison_list[i].dpa < offset ||
+           mock_poison_list[i].dpa > offset + length - 1)
+           continue;
+
+       dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
+       po->record[nr_records].address = cpu_to_le64(dpa);
+       po->record[nr_records].length = cpu_to_le32(1);
+       nr_records++;
+       if (nr_records == MOCK_INJECT_DEV_MAX)
+           break;
+   }
+
+   /* Always return count, even when zero */
+   po->count = cpu_to_le16(nr_records);
+
+   return po;
+}
+
 static int mock_get_poison(struct cxl_dev_state *cxlds,
               struct cxl_mbox_cmd *cmd)
 {
    struct cxl_mbox_poison_in *pi = cmd->payload_in;
+   struct cxl_mbox_poison_out *po;
+   u64 offset = le64_to_cpu(pi->offset);
+   u64 length = le64_to_cpu(pi->length);
+   int nr_records;

-   /* Mock one poison record at pi.offset for 64 bytes */
-   struct {
-       struct cxl_mbox_poison_out po;
-       struct cxl_poison_record record;
-   } __packed mock_plist = {
-       .po = {
-           .count = cpu_to_le16(1),
-       },
-       .record = {
-           .length = cpu_to_le32(1),
-           .address = cpu_to_le64(le64_to_cpu(pi->offset) +
-                          CXL_POISON_SOURCE_INJECTED),
-       },
-   };
+   po = cxl_get_injected_po(cxlds, offset, length);
+   if (!po)
+       return -ENOMEM;
+   nr_records = le16_to_cpu(po->count);
+   memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
+   cmd->size_out = struct_size(po, record, nr_records);
+   kfree(po);

-   if (cmd->size_out < sizeof(mock_plist))
-       return -EINVAL;
-
-   memcpy(cmd->payload_out, &mock_plist, sizeof(mock_plist));
-   cmd->size_out = sizeof(mock_plist);
    return 0;
 }

From 98980d76c3fc3d56b34e425eb102b10355ccc743 Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:33 -0700
Subject: [PATCH 35/38] tools/testing/cxl: Add a sysfs attr to test poison
 inject limits

CXL devices may report a maximum number of addresses that a device
allows to be poisoned using poison injection. When cxl_test creates
mock CXL memory devices, it defaults to MOCK_INJECT_DEV_MAX == 8 for
all mocked memdevs.

Add a sysfs attribute, poison_inject_max, to the cxl_mock_mem module so
that users can set a custom device injection limit. Fail, and return
-EBUSY, if the mock poison list is not empty.

/sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max

A simple usage model is to set the attribute before running a test in
order to emulate a device's poison handling.
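For example, writing 2 to
/sys/bus/platform/drivers/cxl_mock_mem/poison_inject_max before a test
run emulates a device that accepts at most two injections; the write
fails with -EBUSY until any previously injected poison is cleared.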
Signed-off-by: Alison Schofield
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/0f25b2862b90013545450222d2199e435c6cc11a.1681874357.git.alison.schofield@intel.com
Signed-off-by: Dan Williams
---
 tools/testing/cxl/test/mem.c | 52 +++++++++++++++++++++++++++++++++---
 1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 8b053de23504..0fd7e7b8b44a 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -19,6 +19,8 @@
 #define MOCK_INJECT_DEV_MAX 8
 #define MOCK_INJECT_TEST_MAX 128

+static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
+
 static struct cxl_cel_entry mock_cel[] = {
    {
        .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
@@ -485,7 +487,7 @@ static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
            cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
        .total_capacity =
            cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
-       .inject_poison_limit = cpu_to_le16(MOCK_INJECT_DEV_MAX),
+       .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
    };

    put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
@@ -919,7 +921,7 @@ cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
    int nr_records = 0;
    u64 dpa;

-   po = kzalloc(struct_size(po, record, MOCK_INJECT_DEV_MAX), GFP_KERNEL);
+   po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
    if (!po)
        return NULL;

@@ -934,7 +936,7 @@ cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
        po->record[nr_records].address = cpu_to_le64(dpa);
        po->record[nr_records].length = cpu_to_le32(1);
        nr_records++;
-       if (nr_records == MOCK_INJECT_DEV_MAX)
+       if (nr_records == poison_inject_dev_max)
            break;
    }

@@ -972,7 +974,7 @@ static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
        if (mock_poison_list[i].cxlds == cxlds)
            count++;
    }
-   return (count >= MOCK_INJECT_DEV_MAX);
+   return (count >= poison_inject_dev_max);
 }

 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
@@ -1054,6 +1056,47 @@ static int mock_clear_poison(struct cxl_dev_state *cxlds,
    return 0;
 }

+static bool mock_poison_list_empty(void)
+{
+   for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
+       if (mock_poison_list[i].cxlds)
+           return false;
+   }
+   return true;
+}
+
+static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
+{
+   return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
+}
+
+static ssize_t poison_inject_max_store(struct device_driver *drv,
+                      const char *buf, size_t len)
+{
+   int val;
+
+   if (kstrtoint(buf, 0, &val) < 0)
+       return -EINVAL;
+
+   if (!mock_poison_list_empty())
+       return -EBUSY;
+
+   if (val <= MOCK_INJECT_TEST_MAX)
+       poison_inject_dev_max = val;
+   else
+       return -EINVAL;
+
+   return len;
+}
+
+static DRIVER_ATTR_RW(poison_inject_max);
+
+static struct attribute *cxl_mock_mem_core_attrs[] = {
+   &driver_attr_poison_inject_max.attr,
+   NULL
+};
+ATTRIBUTE_GROUPS(cxl_mock_mem_core);
+
 static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
 {
    struct device *dev = cxlds->dev;
@@ -1262,6 +1305,7 @@ static struct platform_driver cxl_mock_mem_driver = {
    .driver = {
        .name = KBUILD_MODNAME,
        .dev_groups = cxl_mock_mem_groups,
+       .groups = cxl_mock_mem_core_groups,
    },
 };

From 30a8a105f0ce9f2c83063b289f76833386d4a120 Mon Sep 17 00:00:00 2001
From: Alison Schofield
Date: Tue, 18 Apr 2023 20:26:34 -0700
Subject: [PATCH 36/38] tools/testing/cxl: Require CONFIG_DEBUG_FS

The cxl_mem driver uses debugfs to support poison inject and clear.
Add debugfs to the list of required symbols so that cxl_test can
emulate those poison operations.

Signed-off-by: Alison Schofield
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/4f3aab57fbf1cc3ccde2eb887c5d90566c8d0e90.1681874357.git.alison.schofield@intel.com
Signed-off-by: Dan Williams
---
 tools/testing/cxl/config_check.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/testing/cxl/config_check.c b/tools/testing/cxl/config_check.c
index 99b56b5f6edf..0902c5d6e410 100644
--- a/tools/testing/cxl/config_check.c
+++ b/tools/testing/cxl/config_check.c
@@ -13,4 +13,5 @@ void check(void)
    BUILD_BUG_ON(!IS_MODULE(CONFIG_CXL_PMEM));
    BUILD_BUG_ON(!IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST));
    BUILD_BUG_ON(!IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST));
+   BUILD_BUG_ON(!IS_ENABLED(CONFIG_DEBUG_FS));
 }

From bfe58458fd2557c9a81b89bc0ff10eb03d6c0745 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Mon, 6 Mar 2023 20:26:55 -0800
Subject: [PATCH 37/38] cxl/mbox: Update CMD_RC_TABLE

CXL 3.0 added some return codes; update the driver accordingly.

Signed-off-by: Davidlohr Bueso
Reviewed-by: Dave Jiang
Link: https://lore.kernel.org/r/20230307042655.6714-1-dave@stgolabs.net
Signed-off-by: Dan Williams
---
 drivers/cxl/cxlmem.h | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6090611669a6..db12b6313afb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -127,7 +127,7 @@ struct cxl_mbox_cmd {
 };

 /*
- * Per CXL 2.0 Section 8.2.8.4.5.1
+ * Per CXL 3.0 Section 8.2.8.4.5.1
  */
 #define CMD_CMD_RC_TABLE                                       \
    C(SUCCESS, 0, NULL),                                        \
@@ -152,7 +152,15 @@ struct cxl_mbox_cmd {
    C(SECURITY, -ENXIO, "not valid in the current security state"),        \
    C(PASSPHRASE, -ENXIO, "phrase doesn't match current set passphrase"),  \
    C(MBUNSUPPORTED, -ENXIO, "unsupported on the mailbox it was issued on"),\
-   C(PAYLOADLEN, -ENXIO, "invalid payload length")
+   C(PAYLOADLEN, -ENXIO, "invalid payload length"),                \
+   C(LOG, -ENXIO, "invalid or unsupported log page"),              \
+   C(INTERRUPTED, -ENXIO, "asynchronous event occurred"),          \
+   C(FEATUREVERSION, -ENXIO, "unsupported feature version"),       \
+   C(FEATURESELVALUE, -ENXIO, "unsupported feature selection value"),      \
+   C(FEATURETRANSFERIP, -ENXIO, "feature transfer in progress"),   \
+   C(FEATURETRANSFEROOO, -ENXIO, "feature transfer out of order"), \
+   C(RESOURCEEXHAUSTED, -ENXIO, "resources are exhausted"),        \
+   C(EXTLIST, -ENXIO, "invalid Extent List")

 #undef C
 #define C(a, b, c) CXL_MBOX_CMD_RC_##a

From fd35fdcbf75b5f31dba6c284886b676bb2145fe6 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Sun, 23 Apr 2023 15:12:31 -0700
Subject: [PATCH 38/38] cxl/test: Add mock test for set_timestamp

Support testing the Set Timestamp command in a unit-test fashion.
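For reference, the input payload is a single little-endian timestamp
with no output payload, which is what the mock below validates; the
structure, as defined in cxlmem.h:

    struct cxl_mbox_set_timestamp_in {
        __le64 timestamp;
    } __packed;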
Reviewed-by: Ira Weiny
Signed-off-by: Davidlohr Bueso
Link: https://lore.kernel.org/r/20230423221231.6357-1-dave@stgolabs.net
Signed-off-by: Dan Williams
---
 tools/testing/cxl/test/mem.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 0fd7e7b8b44a..ba572d03c687 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -116,6 +116,7 @@ struct cxl_mockmem_data {
    int master_limit;
    struct mock_event_store mes;
    u8 event_buf[SZ_4K];
+   u64 timestamp;
 };

 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -379,6 +380,22 @@ struct cxl_event_mem_module mem_module = {
    }
 };

+static int mock_set_timestamp(struct cxl_dev_state *cxlds,
+             struct cxl_mbox_cmd *cmd)
+{
+   struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+   struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
+
+   if (cmd->size_in != sizeof(*ts))
+       return -EINVAL;
+
+   if (cmd->size_out != 0)
+       return -EINVAL;
+
+   mdata->timestamp = le64_to_cpu(ts->timestamp);
+   return 0;
+}
+
 static void cxl_mock_add_event_logs(struct mock_event_store *mes)
 {
    put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
@@ -1103,6 +1120,9 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
    int rc = -EIO;

    switch (cmd->opcode) {
+   case CXL_MBOX_OP_SET_TIMESTAMP:
+       rc = mock_set_timestamp(cxlds, cmd);
+       break;
    case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
        rc = mock_gsl(cmd);
        break;
@@ -1232,6 +1252,10 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
    if (rc)
        return rc;

+   rc = cxl_set_timestamp(cxlds);
+   if (rc)
+       return rc;
+
    rc = cxl_dev_state_identify(cxlds);
    if (rc)
        return rc;