/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pnv-pci.h>
#include <asm/io.h>
#include <asm/reg.h>

#include "cxl.h"
#include <misc/cxl.h>

#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80
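
/*
 * The VSEC length is the 12-bit field in bits 31:20 of the dword at
 * vsec + 0x4. Reading the 16-bit word at vsec + 0x6 picks up bits
 * 31:16 (revision + length), so the 4-bit shift below drops the
 * revision nibble and leaves just the length.
 */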
#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
{								\
	pci_read_config_word(dev, vsec + 0x6, dest);		\
	*dest >>= 4;						\
}
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
		pci_write_config_byte(dev, vsec + 0xa, val)
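
/*
 * The mode control byte at vsec + 0xa holds the CXL_VSEC_PROTOCOL_*
 * bits defined below: the protocol-area select in the top three bits
 * and the CAPI enable bit at the bottom.
 */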
#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
		pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)

#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8/9 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
		pci_read_config_word(dev, vsec + 0xc, dest)

#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
		pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
		pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
		pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
		pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
		pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
		pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
		pci_read_config_dword(dev, vsec + 0x2c, dest)

/* This works a little differently from the p1/p2 register accesses, to make
 * it easier to pull out individual fields */
#define AFUD_READ(afu, off)		in_be64(afu->native->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->native->afu_desc_mmio + off)

#define EXTRACT_PPC_BIT(val, bit)	(!!(val & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
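
/*
 * Note: the AFU descriptor uses IBM (big-endian) bit numbering, where
 * bit 0 is the most significant bit of the doubleword. PPC_BIT() and
 * PPC_BITMASK() follow that convention, so for example
 * EXTRACT_PPC_BITS(val, 0, 15) pulls out the top 16 bits of val.
 */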

#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val, 0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)

static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);

/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}
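
/*
 * Walk the PCIe extended capability list looking for a vendor-specific
 * capability whose VSEC ID word (at offset 0x4) matches CXL_PCI_VSEC_ID.
 * Returns the config space offset of the VSEC, or 0 if none was found.
 */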
static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}

static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	/* The original read offset 0x58 twice; assuming the Flash Data Port
	 * is the next dword in the VSEC, read it at 0x5c instead. */
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}

static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);
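
	/*
	 * AFU configuration records mirror PCI config space headers, so
	 * they are little-endian; hence AFUD_READ_LE for the records,
	 * while the big-endian descriptor header fields above use
	 * AFUD_READ.
	 */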
	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}

#define P8_CAPP_UNIT0_ID 0xBA
#define P8_CAPP_UNIT1_ID 0xBE
#define P9_CAPP_UNIT0_ID 0xC0
#define P9_CAPP_UNIT1_ID 0xE0

static int get_phb_index(struct device_node *np, u32 *phb_index)
{
	if (of_property_read_u32(np, "ibm,phb-index", phb_index))
		return -ENODEV;
	return 0;
}

static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
{
	/*
	 * POWER 8:
	 *  - For chips other than POWER8NVL, we only have CAPP 0,
	 *    irrespective of which PHB is used.
	 *  - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
	 *    CAPP 1 is attached to PHB1.
	 */
	if (cxl_is_power8()) {
		if (!pvr_version_is(PVR_POWER8NVL))
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 0)
			return P8_CAPP_UNIT0_ID;

		if (phb_index == 1)
			return P8_CAPP_UNIT1_ID;
	}

	/*
	 * POWER 9:
	 *   PEC0 (PHB0). Capp ID = CAPP0 (0b1100_0000)
	 *   PEC1 (PHB1 - PHB2). No capi mode
	 *   PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000)
	 */
	if (cxl_is_power9()) {
		if (phb_index == 0)
			return P9_CAPP_UNIT0_ID;

		if (phb_index == 3)
			return P9_CAPP_UNIT1_ID;
	}

	return 0;
}

int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
			  u32 *phb_index, u64 *capp_unit_id)
{
	int rc;
	struct device_node *np;
	const __be32 *prop;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;

	*chipid = be32_to_cpup(prop);

	rc = get_phb_index(np, phb_index);
	if (rc) {
		pr_err("cxl: invalid phb index\n");
		return rc;
	}

	*capp_unit_id = get_capp_unit_id(np, *phb_index);
	of_node_put(np);
	if (!*capp_unit_id) {
		pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
		       *phb_index);
		return -ENODEV;
	}

	return 0;
}
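
/*
 * The PHB indication bits come from the "ibm,phb-indications" device
 * tree property when OPAL provides it; otherwise we fall back to the
 * legacy hard-coded values. The result is looked up once and cached
 * in statics, with a mutex serialising the first lookup.
 */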
static DEFINE_MUTEX(indications_mutex);

static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
			       u64 *nbwind)
{
	static u64 nbw, asn, capi = 0;
	struct device_node *np;
	const __be32 *prop;

	mutex_lock(&indications_mutex);
	if (!capi) {
		if (!(np = pnv_pci_get_phb_node(dev))) {
			mutex_unlock(&indications_mutex);
			return -ENODEV;
		}

		prop = of_get_property(np, "ibm,phb-indications", NULL);
		if (!prop) {
			nbw = 0x0300UL; /* legacy values */
			asn = 0x0400UL;
			capi = 0x0200UL;
		} else {
			nbw = (u64)be32_to_cpu(prop[2]);
			asn = (u64)be32_to_cpu(prop[1]);
			capi = (u64)be32_to_cpu(prop[0]);
		}
		of_node_put(np);
	}
	*capiind = capi;
	*asnind = asn;
	*nbwind = nbw;
	mutex_unlock(&indications_mutex);
	return 0;
}

int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
{
	u64 xsl_dsnctl;
	u64 capiind, asnind, nbwind;

	/*
	 * CAPI Identifier bits [0:7]
	 * bit 61:60 MSI bits --> 0
	 * bit 59 TVT selector --> 0
	 */
	if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
		return -ENODEV;

	/*
	 * Tell XSL where to route data to.
	 * The field chipid should match the PHB CAPI_CMPM register
	 */
	xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */
	xsl_dsnctl |= (capp_unit_id << (63-15));

	/* nMMU_ID Defaults to: b'000001001' */
	xsl_dsnctl |= ((u64)0x09 << (63-28));

	if (!(cxl_is_power9_dd1())) {
		/*
		 * Used to identify CAPI packets which should be sorted into
		 * the Non-Blocking queues by the PHB. This field should match
		 * the PHB PBL_NBW_CMPM register
		 * nbwind=0x03, bits [57:58], must include capi indicator.
		 * Not supported on P9 DD1.
		 */
		xsl_dsnctl |= (nbwind << (63-55));

		/*
		 * Upper 16b address bits of ASB_Notify messages sent to the
		 * system. Need to match the PHB's ASN Compare/Mask Register.
		 * Not supported on P9 DD1.
		 */
		xsl_dsnctl |= asnind;
	}

	*reg = xsl_dsnctl;
	return 0;
}
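
/*
 * Worked example with the legacy indication values: capiind = 0x0200
 * has bit 9 set, so capiind << (63-15) is a shift by 48 and lands on
 * bit 57 of xsl_dsnctl (LSB = bit 0), matching the "Bit 57" note above.
 */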
static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
						 struct pci_dev *dev)
{
	u64 xsl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	u64 psl_debug;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
	if (rc)
		return rc;

	cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);

	/* Set fir_cntl to recommended value for production env */
	psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
	psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
	psl_fircntl |= 0x1ULL; /* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);

	/*
	 * Setup the PSL to transmit packets on the PCIe before the
	 * CAPP is enabled
	 */
	cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);

	/*
	 * A response to an ASB_Notify request is returned by the
	 * system as an MMIO write to the address defined in
	 * the PSL_TNR_ADDR register.
	 * Keep the Reset Value: 0x00020000E0000000
	 */

	/* Enable XSL rty limit */
	cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);

	/* Change XSL_INV dummy read threshold */
	cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);

	if (phb_index == 3) {
		/* disable machines 31-47 and 20-27 for DMA */
		cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
	}

	/* Snoop machines */
	cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);

	if (cxl_is_power9_dd1()) {
		/* Disabling deadlock counter CAR */
		cxl_p1_write(adapter, CXL_PSL9_GP_CT, 0x0020000000000001ULL);
		/* Enable NORST */
		cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0x8000000000000000ULL);
	} else {
		/* Enable NORST and DD2 features */
		cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
	}

	/*
	 * Check if the PSL has a data-cache; if it does, we need to flush
	 * the adapter data-cache when the adapter is about to be removed.
	 */
	psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
	if (psl_debug & CXL_PSL_DEBUG_CDC) {
		dev_dbg(&dev->dev, "No data-cache present\n");
		adapter->native->no_data_cache = true;
	}

	return 0;
}

static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_dsnctl, psl_fircntl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
	/* Tell PSL where to route data to */
	psl_dsnctl |= (chipid << (63-5));
	psl_dsnctl |= (capp_unit_id << (63-13));

	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_cntl to recommended value for production env */
	psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
	psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
	psl_fircntl |= 0x1ULL; /* ce_thresh */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}

static int init_implementation_adapter_regs_xsl(struct cxl *adapter, struct pci_dev *dev)
{
	u64 xsl_dsnctl;
	u64 chipid;
	u32 phb_index;
	u64 capp_unit_id;
	int rc;

	rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
	if (rc)
		return rc;

	/* Tell XSL where to route data to */
	xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
	xsl_dsnctl |= (capp_unit_id << (63-13));
	cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);

	return 0;
}

/* PSL & XSL */
#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
/* For the PSL this is a multiple for 0 < n <= 7: */
#define PSL_2048_250MHZ_CYCLES 1

static void write_timebase_ctrl_psl8(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
}

/* XSL */
#define TBSYNC_ENA (1ULL << 63)
/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
#define XSL_2000_CLOCKS 1
#define XSL_4000_CLOCKS 2
#define XSL_8000_CLOCKS 3

static void write_timebase_ctrl_xsl(struct cxl *adapter)
{
	cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
		     TBSYNC_ENA |
		     TBSYNC_CAL(3) |
		     TBSYNC_CNT(XSL_4000_CLOCKS));
}

static u64 timebase_read_psl9(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL9_Timebase);
}

static u64 timebase_read_psl8(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_PSL_Timebase);
}

static u64 timebase_read_xsl(struct cxl *adapter)
{
	return cxl_p1_read(adapter, CXL_XSL_Timebase);
}

static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;

	adapter->psl_timebase_synced = false;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
		return;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	if (adapter->native->sl_ops->write_timebase_ctrl)
		adapter->native->sl_ops->write_timebase_ctrl(adapter);

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
}

static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
{
	return 0;
}

static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
		      unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}

int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}

int cxl_pci_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
			     struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}

static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8/9 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
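
/*
 * BAR4/5 form a single 64-bit BAR: BAR5 carries the upper 32 address
 * bits, so the 0x00020000 written above sets bit 17 of the upper word,
 * i.e. bit 49 of the 64-bit address (LSB = bit 0), with bit 48 left
 * clear - the "10" pattern the comment above refers to.
 */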
#ifdef CONFIG_CXL_BIMODAL

struct cxl_switch_work {
	struct pci_dev *dev;
	struct work_struct work;
	int vsec;
	int mode;
};
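
/*
 * State for the deferred mode switch: the switch removes and reprobes
 * the PCI devices under the slot, so cxl_check_and_switch_mode() hands
 * the work off to a kernel worker rather than doing it in the caller's
 * context.
 */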
static void switch_card_to_cxl(struct work_struct *work)
{
	struct cxl_switch_work *switch_work =
		container_of(work, struct cxl_switch_work, work);
	struct pci_dev *dev = switch_work->dev;
	struct pci_bus *bus = dev->bus;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *bridge;
	struct pnv_php_slot *php_slot;
	unsigned int devfn;
	u8 val;
	int rc;

	dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
	bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
					  bus_list);
	if (!bridge) {
		dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
		goto err_dev_put;
	}

	php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
	if (!php_slot) {
		dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
			"information. You may need to upgrade "
			"skiboot. Aborting.\n");
		goto err_dev_put;
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
	if (rc) {
		dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
		goto err_dev_put;
	}
	devfn = dev->devfn;

	/* Release the reference obtained in cxl_check_and_switch_mode() */
	pci_dev_put(dev);

	dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
	pci_lock_rescan_remove();
	pci_hp_remove_devices(bridge->subordinate);
	pci_unlock_rescan_remove();

	/* Switch the CXL protocol on the card */
	if (switch_work->mode == CXL_BIMODE_CXL) {
		dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
		val &= ~CXL_VSEC_PROTOCOL_MASK;
		val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
		rc = pnv_cxl_enable_phb_kernel_api(hose, true);
		if (rc) {
			dev_err(&bus->dev, "cxl: Failed to enable kernel API"
				" on real PHB, aborting\n");
			goto err_free_work;
		}
	} else {
		dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
		goto err_free_work;
	}

	rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
	if (rc) {
		dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
		goto err_free_work;
	}

	/*
	 * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching PCIe config
	 * space.
	 */
	msleep(100);

	/*
	 * Hot reset to cause the card to come back in cxl mode. An
	 * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
	 * in skiboot, so we use a hot reset instead.
	 *
	 * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
	 * guaranteed to sit directly under the root port, and setting the reset
	 * state on a device directly under the root port is equivalent to doing
	 * it on the root port itself.
	 */
	dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
	pci_set_pcie_reset_state(bridge, pcie_hot_reset);
	pci_set_pcie_reset_state(bridge, pcie_deassert_reset);

	dev_dbg(&bus->dev, "cxl: Offlining slot\n");
	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
	if (rc) {
		dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
		goto err_free_work;
	}

	dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
	if (rc) {
		dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
		goto err_free_work;
	}

	pci_lock_rescan_remove();
	pci_hp_add_devices(bridge->subordinate);
	pci_unlock_rescan_remove();

	dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
	kfree(switch_work);
	return;

err_dev_put:
	/* Release the reference obtained in cxl_check_and_switch_mode() */
	pci_dev_put(dev);
err_free_work:
	kfree(switch_work);
}

int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
{
	struct cxl_switch_work *work;
	u8 val;
	int rc;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -ENODEV;

	if (!vsec) {
		vsec = find_cxl_vsec(dev);
		if (!vsec) {
			dev_info(&dev->dev, "CXL VSEC not found\n");
			return -ENODEV;
		}
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
	if (rc) {
		dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
		return rc;
	}

	if (mode == CXL_BIMODE_PCI) {
		if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
			dev_info(&dev->dev, "Card is already in PCI mode\n");
			return 0;
		}
		/*
		 * TODO: Before it's safe to switch the card back to PCI mode
		 * we need to disable the CAPP and make sure any cachelines the
		 * card holds have been flushed out. Needs skiboot support.
		 */
		dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
		return -EIO;
	}

	if (val & CXL_VSEC_PROTOCOL_ENABLE) {
		dev_info(&dev->dev, "Card is already in CXL mode\n");
		return 0;
	}

	dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
		 "to switch to CXL mode\n");

	work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	pci_dev_get(dev);
	work->dev = dev;
	work->vsec = vsec;
	work->mode = mode;
	INIT_WORK(&work->work, switch_card_to_cxl);

	schedule_work(&work->work);

	/*
	 * We return a failure now to abort the driver init. Once the
	 * link has been cycled and the card is in cxl mode we will
	 * come back (possibly using the generic cxl driver), but
	 * return success as the card should then be in cxl mode.
	 *
	 * TODO: What if the card comes back in PCI mode even after
	 *       the switch? Don't want to spin endlessly.
	 */
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);

#endif /* CONFIG_CXL_BIMODAL */
|
|
|
|
|
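/*
 * Illustrative sketch (not part of the driver): the mode-switch path above
 * defers the slow hot-reset work to a workqueue item so probe context is not
 * blocked. The general shape of that pattern, with hypothetical names (the
 * put/free placement in the worker is an assumption, not the real code):
 */
struct example_switch_work {
	struct work_struct work;	/* embedded work item */
	struct pci_dev *dev;		/* device to operate on, ref held */
};

static void example_switch_fn(struct work_struct *work)
{
	/* recover the containing structure from the work pointer */
	struct example_switch_work *w =
		container_of(work, struct example_switch_work, work);

	/* ... perform the slow operation on w->dev here ... */
	pci_dev_put(w->dev);	/* drop the reference taken at schedule time */
	kfree(w);
}

static int example_schedule_switch(struct pci_dev *dev)
{
	struct example_switch_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;
	pci_dev_get(dev);	/* keep the device alive until the work runs */
	w->dev = dev;
	INIT_WORK(&w->work, example_switch_fn);
	schedule_work(&w->work);
	return -EBUSY;		/* caller aborts; the work finishes the job */
}
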
static int setup_cxl_protocol_area(struct pci_dev *dev)
{
	u8 val;
	int rc;
	int vsec = find_cxl_vsec(dev);

	if (!vsec) {
		dev_info(&dev->dev, "CXL VSEC not found\n");
		return -ENODEV;
	}

	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
	if (rc) {
		dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
		return rc;
	}

	if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
		dev_err(&dev->dev, "Card not in CAPI mode!\n");
		return -EIO;
	}

	if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
		val &= ~CXL_VSEC_PROTOCOL_MASK;
		val |= CXL_VSEC_PROTOCOL_256TB;
		rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
		if (rc) {
			dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
			return rc;
		}
	}

	return 0;
}

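/*
 * Illustrative sketch (not part of the driver): setup_cxl_protocol_area()
 * above is a read-modify-write of one byte in the VSEC. The same pattern,
 * reduced to its essentials with hypothetical mask/value parameters:
 */
static int example_update_config_field(struct pci_dev *dev, int where,
				       u8 mask, u8 new_bits)
{
	u8 val;
	int rc;

	rc = pci_read_config_byte(dev, where, &val);	/* read current byte */
	if (rc)
		return rc;
	if ((val & mask) == new_bits)			/* already correct */
		return 0;
	val &= ~mask;					/* clear the field */
	val |= new_bits;				/* set desired value */
	return pci_write_config_byte(dev, where, val);	/* write it back */
}
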
static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);

	if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->native->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}

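/*
 * Worked example (a sketch, not driver code): with the constants above, the
 * per-slice register windows are carved out of the adapter BARs by fixed
 * strides. For slice 2 with p1_base = 0x1000000:
 *
 *   p1n_base = 0x1000000 + 0x10000 + 2 * 0x100 = 0x1010200
 *
 * i.e. each slice gets a 0x100-byte P1 window and a 0x1000-byte P2 window.
 */
static u64 example_p1n_base(u64 p1_base, int slice)
{
	const u64 p1n_size = 0x100;	/* same stride as pci_map_slice_regs */

	return p1_base + 0x10000 + (slice * p1n_size);
}
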
static void pci_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->native->p1n_mmio) {
		iounmap(afu->native->p1n_mmio);
		afu->native->p1n_mmio = NULL;
	}
	if (afu->native->afu_desc_mmio) {
		iounmap(afu->native->afu_desc_mmio);
		afu->native->afu_desc_mmio = NULL;
	}
}

void cxl_pci_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("%s\n", __func__);

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu->native);
	kfree(afu);
}

/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiple of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no afu buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}

static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i, rc;
	u32 val;

	if (afu->psa && afu->adapter->ps_size <
			(afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);

	for (i = 0; i < afu->crs_num; i++) {
		rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
		if (rc || val == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
		/*
		 * We could also check this for the dedicated process model
		 * since the architecture indicates it should be set to 1, but
		 * in that case we ignore the value and I'd rather not risk
		 * breaking any existing dedicated process AFUs that left it as
		 * 0 (not that I'm aware of any). It is clearly an error for an
		 * AFU directed AFU to set this to 0, and would have previously
		 * triggered a bug resulting in the maximum not being enforced
		 * at all since idr_alloc treats 0 as no maximum.
		 */
		dev_err(&afu->dev, "AFU does not support any processes\n");
		return -EINVAL;
	}

	return 0;
}

static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgment to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL9_DSISR_An_TF)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			if (reg & ~0x000000007fffffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (cxl_ops->afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	if (afu->adapter->native->sl_ops->register_serr_irq) {
		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		if (reg) {
			if (reg & ~0xffff)
				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
		}
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 bytes aligned access. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer which can be max PAGE_SIZE.
 */
ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}

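/*
 * Worked example (a sketch, not driver code) of the alignment window
 * computed above: a read of count = 10 bytes at off = 5 becomes the
 * 8-byte-aligned window [0, 16), and the caller's bytes start at
 * offset (off & 0x7) = 5 inside the bounce buffer:
 */
static size_t example_aligned_window(loff_t off, size_t count)
{
	loff_t aligned_start = round_down(off, 8);	/* 5  -> 0  */
	loff_t aligned_end = round_up(off + count, 8);	/* 15 -> 16 */

	/* the bounce buffer holds [aligned_start, aligned_end); the
	 * caller's data begins (off & 0x7) bytes into it */
	return aligned_end - aligned_start;		/* 16 */
}
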
static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = pci_map_slice_regs(afu, adapter, dev)))
		return rc;

	if (adapter->native->sl_ops->sanitise_afu_regs) {
		rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
		if (rc)
			goto err1;
	}

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if (adapter->native->sl_ops->afu_regs_init)
		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
			goto err1;

	if (adapter->native->sl_ops->register_serr_irq)
		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
			goto err1;

	if ((rc = cxl_native_register_psl_irq(afu)))
		goto err2;

	atomic_set(&afu->configured_state, 0);
	return 0;

err2:
	if (adapter->native->sl_ops->release_serr_irq)
		adapter->native->sl_ops->release_serr_irq(afu);
err1:
	pci_unmap_slice_regs(afu);
	return rc;
}

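/*
 * Illustrative sketch (not part of the driver): pci_configure_afu() above
 * uses the usual kernel goto-unwind idiom, where each failure jumps to a
 * label that releases exactly the resources acquired so far. In miniature,
 * with hypothetical acquire/release steps:
 */
static int example_goto_unwind(int fail_at)
{
	int rc = 0;

	/* step A: acquire first resource (hypothetical) */
	if (fail_at == 1) {
		rc = -EIO;
		goto out;	/* nothing acquired yet: plain return path */
	}

	/* step B: acquire second resource (hypothetical) */
	if (fail_at == 2) {
		rc = -EIO;
		goto err_a;	/* undo step A only */
	}

	return 0;		/* success: keep everything */

err_a:
	/* release step A's resource here */
out:
	return rc;
}
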
static void pci_deconfigure_afu(struct cxl_afu *afu)
{
	/*
	 * It's okay to deconfigure when AFU is already locked, otherwise wait
	 * until there are no readers
	 */
	if (atomic_read(&afu->configured_state) != -1) {
		while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
			schedule();
	}
	cxl_native_release_psl_irq(afu);
	if (afu->adapter->native->sl_ops->release_serr_irq)
		afu->adapter->native->sl_ops->release_serr_irq(afu);
	pci_unmap_slice_regs(afu);
}

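/*
 * Sketch of the reader side of the configured_state gate used above. This
 * is an assumption inferred from the cmpxchg loop, not a copy of the real
 * helpers: readers bump the count only while the state is non-negative,
 * and pci_deconfigure_afu() parks the state at -1 once all readers drain.
 */
static inline int example_configured_get(atomic_t *configured_state)
{
	/* fails (returns 0) once the deconfigure path has stored -1 */
	return atomic_inc_unless_negative(configured_state);
}

static inline void example_configured_put(atomic_t *configured_state)
{
	/* drop the reader count; never goes below zero */
	atomic_dec_if_positive(configured_state);
}
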
static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc = -ENOMEM;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
	if (!afu->native)
		goto err_free_afu;

	mutex_init(&afu->native->spa_mutex);

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free_native;

	rc = pci_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free_native;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	pci_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free_native:
	kfree(afu->native);
err_free_afu:
	kfree(afu);
	return rc;
}

static void cxl_pci_remove_afu(struct cxl_afu *afu)
{
	pr_devel("%s\n", __func__);

	if (!afu)
		return;

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);

	pci_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}

int cxl_pci_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/*
	 * The adapter is about to be reset, so ignore errors.
	 */
	cxl_data_cache_flush(adapter);

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert. PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}

static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 2 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 1 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->native->p1_mmio);
	adapter->native->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->native->p1_mmio) {
		iounmap(adapter->native->p1_mmio);
		adapter->native->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->native->p2_mmio) {
		iounmap(adapter->native->p2_mmio);
		adapter->native->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}

static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->native->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}

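/*
 * Worked example (a sketch, not driver code) of the unit conversions and
 * interrupt budget above: the VSEC encodes offsets/sizes in 64KiB units,
 * and the user IRQ pool excludes one PSL error interrupt plus one slice
 * error and one DSI interrupt per AFU. For ps_size = 4 units and a 2-slice
 * card with 16 hardware IRQs:
 *
 *   ps_size_bytes = 4 * 64 * 1024  = 262144 (256KiB)
 *   user_irqs     = 16 - 1 - 2 * 2 = 11
 */
static int example_user_irq_budget(int hw_irqs, u8 slices)
{
	return hw_irqs - 1 - 2 * slices;	/* mirrors the math above */
}
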
/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if (data & PCI_ERR_UNC_MALF_TLP)
		if (data & PCI_ERR_UNC_INTN)
			return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}

static bool cxl_compatible_caia_version(struct cxl *adapter)
{
	if (cxl_is_power8() && (adapter->caia_major == 1))
		return true;

	if (cxl_is_power9() && (adapter->caia_major == 2))
		return true;

	return false;
}

static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!cxl_compatible_caia_version(adapter)) {
		dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
			 adapter->caia_major);
		return -ENODEV;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
			"available in BAR2: 0x%llx > 0x%llx\n",
			adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
		return -EINVAL;
	}

	return 0;
}

ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
}

static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter->native);
	kfree(adapter);
}

#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	int rc = 0;

	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);

	if (adapter->native->sl_ops->invalidate_all) {
		/* do not invalidate ERAT entries when not reloading on PERST */
		if (cxl_is_power9() && (adapter->perst_loads_image))
			return 0;
		rc = adapter->native->sl_ops->invalidate_all(adapter);
	}

	return rc;
}

/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = setup_cxl_protocol_area(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
		goto err;

	/* Required for devices using CAPP DMA mode, harmless for others */
	pci_set_master(dev);

	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	/* Ignore error, adapter init is not dependent on timebase sync */
	cxl_setup_psl_timebase(adapter, dev);

	if ((rc = cxl_native_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}

static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_native_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}

static void cxl_stop_trace_psl9(struct cxl *adapter)
{
	int traceid;
	u64 trace_state, trace_mask;
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	/* read each trace array state and issue an mmio to stop it if needed */
	for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
		trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
		trace_mask = (0x3ULL << (62 - traceid * 2));
		trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
		dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
			traceid, trace_state);

		/* issue mmio if the trace array isn't in FIN state */
		if (trace_state != CXL_PSL9_TRACESTATE_FIN)
			cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
				     0x8400000000000000ULL | traceid);
	}
}

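/*
 * Worked example (a sketch, not driver code) of the 2-bit field extraction
 * in the loop above: trace array N occupies bits [63 - 2N, 62 - 2N] of the
 * status register, so traceid 0 lives in the top two bits. For a register
 * value of 0xC000000000000000 and traceid = 0:
 *
 *   mask  = 0x3ULL << 62       = 0xC000000000000000
 *   state = (val & mask) >> 62 = 0x3
 */
static u64 example_trace_state(u64 reg, int traceid)
{
	u64 mask = 0x3ULL << (62 - traceid * 2);	/* 2-bit lane */

	return (reg & mask) >> (62 - traceid * 2);
}
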
static void cxl_stop_trace_psl8(struct cxl *adapter)
{
	int slice;

	/* Stop the trace */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);

	/* Stop the slice traces */
	spin_lock(&adapter->afu_list_lock);
	for (slice = 0; slice < adapter->slices; slice++) {
		if (adapter->afu[slice])
			cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
				      0x8000000000000000LL);
	}
	spin_unlock(&adapter->afu_list_lock);
}

static const struct cxl_service_layer_ops psl9_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl9,
	.invalidate_all = cxl_invalidate_all_psl9,
	.afu_regs_init = init_implementation_afu_regs_psl9,
	.sanitise_afu_regs = sanitise_afu_regs_psl9,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl9,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl9,
	.attach_afu_directed = cxl_attach_afu_directed_psl9,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl9,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
	.debugfs_stop_trace = cxl_stop_trace_psl9,
	.timebase_read = timebase_read_psl9,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};

static const struct cxl_service_layer_ops psl8_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_psl8,
	.invalidate_all = cxl_invalidate_all_psl8,
	.afu_regs_init = init_implementation_afu_regs_psl8,
	.sanitise_afu_regs = sanitise_afu_regs_psl8,
	.register_serr_irq = cxl_native_register_serr_irq,
	.release_serr_irq = cxl_native_release_serr_irq,
	.handle_interrupt = cxl_irq_psl8,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
	.attach_afu_directed = cxl_attach_afu_directed_psl8,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
	.debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
	.psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
	.err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
	.debugfs_stop_trace = cxl_stop_trace_psl8,
	.write_timebase_ctrl = write_timebase_ctrl_psl8,
	.timebase_read = timebase_read_psl8,
	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
	.needs_reset_before_disable = true,
};

static const struct cxl_service_layer_ops xsl_ops = {
	.adapter_regs_init = init_implementation_adapter_regs_xsl,
	.invalidate_all = cxl_invalidate_all_psl8,
	.sanitise_afu_regs = sanitise_afu_regs_psl8,
	.handle_interrupt = cxl_irq_psl8,
	.fail_irq = cxl_fail_irq_psl,
	.activate_dedicated_process = cxl_activate_dedicated_process_psl8,
	.attach_afu_directed = cxl_attach_afu_directed_psl8,
	.attach_dedicated_process = cxl_attach_dedicated_process_psl8,
	.update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
	.debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_xsl,
	.write_timebase_ctrl = write_timebase_ctrl_xsl,
	.timebase_read = timebase_read_xsl,
	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
};

static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
{
	if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
		/* Mellanox CX-4 */
		dev_info(&dev->dev, "Device uses an XSL\n");
		adapter->native->sl_ops = &xsl_ops;
		adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */
	} else {
		if (cxl_is_power8()) {
			dev_info(&dev->dev, "Device uses a PSL8\n");
			adapter->native->sl_ops = &psl8_ops;
		} else {
			dev_info(&dev->dev, "Device uses a PSL9\n");
			adapter->native->sl_ops = &psl9_ops;
		}
	}
}

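/*
 * Illustrative sketch (not part of the driver): the service-layer ops
 * tables above let common code stay hardware-agnostic; optional hooks are
 * NULL-checked before each call, as pci_configure_afu() does. The dispatch
 * pattern, reduced to a hypothetical one-hook example:
 */
struct example_sl_ops {
	int (*optional_hook)(void *ctx);	/* may be NULL */
};

static int example_dispatch(const struct example_sl_ops *ops, void *ctx)
{
	/* only service-layer variants that need the hook provide it */
	if (ops->optional_hook)
		return ops->optional_hook(ctx);
	return 0;	/* an absent hook is a no-op, not an error */
}
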
static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
	if (!adapter->native) {
		rc = -ENOMEM;
		goto err_release;
	}

	set_sl_ops(adapter, dev);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		goto err_release;
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* Release the context lock as adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);

err_release:
	cxl_release_adapter(&adapter->dev);
	return ERR_PTR(rc);
}

static void cxl_pci_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	/*
	 * Flush adapter datacache as it's about to be removed.
	 */
	cxl_data_cache_flush(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}

#define CXL_MAX_PCIEX_PARENT 2

int cxl_slot_is_switched(struct pci_dev *dev)
{
	struct device_node *np;
	int depth = 0;
	const __be32 *prop;

	if (!(np = pci_device_to_OF_node(dev))) {
		pr_err("cxl: np = NULL\n");
		return -ENODEV;
	}
	of_node_get(np);
	while (np) {
		np = of_get_next_parent(np);
		prop = of_get_property(np, "device_type", NULL);
		if (!prop || strcmp((char *)prop, "pciex"))
			break;
		depth++;
	}
	of_node_put(np);
	return (depth > CXL_MAX_PCIEX_PARENT);
}

bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return false;

	if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
		/*
		 * CAPP DMA mode is technically supported on regular P8, but
		 * will EEH if the card attempts to access memory < 4GB, which
		 * we cannot realistically avoid. We might be able to work
		 * around the issue, but until then return unsupported:
		 */
		return false;
	}

	if (cxl_slot_is_switched(dev))
		return false;

	/*
	 * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
	 * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
	 * served basis, which is racy to check from here. If we need to
	 * support this in future we might need to consider having this
	 * function effectively reserve it ahead of time.
	 *
	 * Currently, the only user of this API is the Mellanox CX4, which is
	 * only supported on P8NVL due to the above mentioned limitation of
	 * CAPP DMA mode and therefore does not need to worry about this. If the
	 * issue with CAPP DMA mode is later worked around on P8 we might need
	 * to revisit this.
	 */

	return true;
}
EXPORT_SYMBOL_GPL(cxl_slot_is_supported);

static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_pci_is_vphb_device(dev)) {
		dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
		return -ENODEV;
	}

	if (cxl_slot_is_switched(dev)) {
		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
		return -ENODEV;
	}

	if (cxl_is_power9() && !radix_enabled()) {
		dev_info(&dev->dev, "Only Radix mode supported\n");
		return -ENODEV;
	}

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_pci_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = pci_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
		pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);

	return 0;
}

static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_remove_afu(afu);
	}
	cxl_pci_remove_adapter(adapter);
}

static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	if (afu->phb == NULL)
		return result;

	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}

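/*
 * Illustrative sketch (not part of the driver): the merge rule used above,
 * and again in cxl_pci_error_detected(), forms a small lattice over the
 * per-device answers: DISCONNECT beats everything, and NONE beats the
 * NEED_RESET default. As a standalone helper with a hypothetical name:
 */
static pci_ers_result_t example_merge_result(pci_ers_result_t cur,
					     pci_ers_result_t new)
{
	if (new == PCI_ERS_RESULT_DISCONNECT)
		return PCI_ERS_RESULT_DISCONNECT;	/* strongest verdict */
	if (new == PCI_ERS_RESULT_NONE && cur == PCI_ERS_RESULT_NEED_RESET)
		return PCI_ERS_RESULT_NONE;		/* opt out of reset */
	return cur;					/* otherwise keep */
}
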
static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
|
|
|
|
|
pci_channel_state_t state)
|
|
|
|
|
{
|
|
|
|
|
struct cxl *adapter = pci_get_drvdata(pdev);
|
|
|
|
|
struct cxl_afu *afu;
|
2017-04-27 05:28:22 +00:00
|
|
|
|
pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
|
2015-08-14 07:41:26 +00:00
|
|
|
|
int i;
|
|
|
|
|
|
|
|
|
|
/* At this point, we could still have an interrupt pending.
|
|
|
|
|
* Let's try to get them out of the way before they do
|
|
|
|
|
* anything we don't like.
|
|
|
|
|
*/
|
|
|
|
|
schedule();
|
|
|
|
|
|
|
|
|
|
/* If we're permanently dead, give up. */
|
|
|
|
|
if (state == pci_channel_io_perm_failure) {
|
|
|
|
|
for (i = 0; i < adapter->slices; i++) {
|
|
|
|
|
afu = adapter->afu[i];
|
2017-02-23 03:27:26 +00:00
|
|
|
|
/*
|
|
|
|
|
* Tell the AFU drivers; but we don't care what they
|
|
|
|
|
* say, we're going away.
|
|
|
|
|
*/
|
2017-11-23 03:38:57 +00:00
|
|
|
|
cxl_vphb_error_detected(afu, state);
|
2015-08-14 07:41:26 +00:00
|
|
|
|
}
|
|
|
|
|
return PCI_ERS_RESULT_DISCONNECT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Are we reflashing?
|
|
|
|
|
*
|
|
|
|
|
* If we reflash, we could come back as something entirely
|
|
|
|
|
* different, including a non-CAPI card. As such, by default
|
|
|
|
|
* we don't participate in the process. We'll be unbound and
|
|
|
|
|
* the slot re-probed. (TODO: check EEH doesn't blindly rebind
|
|
|
|
|
* us!)
|
|
|
|
|
*
|
|
|
|
|
* However, this isn't the entire story: for reliablity
|
|
|
|
|
* reasons, we usually want to reflash the FPGA on PERST in
|
|
|
|
|
* order to get back to a more reliable known-good state.
|
|
|
|
|
*
|
|
|
|
|
* This causes us a bit of a problem: if we reflash we can't
|
|
|
|
|
* trust that we'll come back the same - we could have a new
|
|
|
|
|
* image and been PERSTed in order to load that
|
|
|
|
|
* image. However, most of the time we actually *will* come
|
|
|
|
|
* back the same - for example a regular EEH event.
|
|
|
|
|
*
|
|
|
|
|
* Therefore, we allow the user to assert that the image is
|
|
|
|
|
* indeed the same and that we should continue on into EEH
|
|
|
|
|
* anyway.
|
|
|
|
|
*/
|
|
|
|
|
if (adapter->perst_loads_image && !adapter->perst_same_image) {
|
|
|
|
|
/* TODO take the PHB out of CXL mode */
|
|
|
|
|
dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
|
|
|
|
|
return PCI_ERS_RESULT_NONE;
|
|
|
|
|
}

	/*
	 * At this point, we want to try to recover. We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *     * Set a flag in error_detected as above.
	 *     * Let CXL detach your contexts.
	 *     * In slot_reset, free the old resources and allocate new ones.
	 *     * In resume, clear the flag to allow things to start.
	 *
	 * A sketch of pattern (2) follows this comment.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		afu_result = cxl_vphb_error_detected(afu, state);

		cxl_context_detach_all(afu);
		cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
		pci_deconfigure_afu(afu);

		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}

	/* Take the context lock to stop new contexts being attached
	 * while we recover; warn if we can't get it because contexts
	 * are still active.
	 */
	if (cxl_adapter_context_lock(adapter) != 0)
		dev_warn(&adapter->dev,
			 "Couldn't take context lock with %d active-contexts\n",
			 atomic_read(&adapter->contexts_num));

	cxl_deconfigure_adapter(adapter);

	return result;
}

static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	/*
	 * Unlock context activation for the adapter. Ideally this should
	 * be done in cxl_pci_resume, but the cxlflash module tries to
	 * activate the master context as part of its slot_reset callback.
	 */
	cxl_adapter_context_unlock(adapter);

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (pci_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		if (afu->phb == NULL)
			continue;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (IS_ERR(ctx))
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_ops->afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}

static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (afu->phb == NULL)
			continue;

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}

static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};